Mirror of https://github.com/mimblewimble/grin.git (synced 2025-02-01)

Commit 91c91469a3: merge from master
61 changed files with 1136 additions and 1298 deletions
.github/pull_request_template.md (vendored, new file, +18)
@@ -0,0 +1,18 @@
+---
+name: Pull Request
+about: Pull Request checklist
+title: ''
+labels: ''
+assignees: ''
+---
+
+If your PR is a work in progress, please feel free to create it and include a [WIP] tag in the PR name. We encourage everyone to PR early and often so that other developers know what you're working on.
+
+Before submitting your PR for final review, please ensure that it:
+
+* Includes a proper description of the problem the PR addresses, as well as a detailed explanation of what it changes
+* Explains whether/how the change is consensus-breaking or breaks existing client functionality
+* Contains unit tests exercising new/changed functionality
+* Fully considers the potential impact of the change on other parts of the system
+* Describes how you've tested the change (e.g. against Floonet)
+* Updates any documentation that's affected by the PR
@@ -6,7 +6,23 @@ The [list of issues](https://github.com/mimblewimble/grin/issues) is a good place
 
 Additional tests are rewarded with an immense amount of positive karma.
 
-More documentation or updates/fixes to existing documentation are also very welcome. However, if submitting a PR (Pull Request) consisting of documentation changes only, please try to ensure that the change is significantly more substantial than one or two lines. For example, working through an install document and making changes and updates throughout as you find issues is worth a PR. For typos and other small changes, either contact one of the developers, or if you think it's a significant enough error to cause problems for other users, please feel free to open an issue.
+More documentation or updates/fixes to existing documentation are also very welcome.
+
+# PR Guidelines
+
+We generally prefer you to PR your work earlier rather than later. This ensures everyone else has a better idea of what's being worked on, and can help reduce wasted effort. If work on your PR has just begun, please feel free to create the PR with [WIP] (work in progress) in the PR title, and let us know when it's ready for review in the comments.
+
+Since mainnet has been released, the bar for having PRs accepted has been raised. Before submitting your PR for approval, please ensure it:
+* Includes a proper description of the problem the PR addresses, as well as a detailed explanation of what it changes
+* Explains whether/how the change is consensus-breaking or breaks existing client functionality
+* Contains unit tests exercising new/changed functionality
+* Fully considers the potential impact of the change on other parts of the system
+* Describes how you've tested the change (e.g. against Floonet)
+* Updates any documentation that's affected by the PR
+
+If submitting a PR consisting of documentation changes only, please try to ensure that the change is significantly more substantial than one or two lines. For example, working through an install document and making changes and updates throughout as you find issues is worth a PR. For typos and other small changes, either contact one of the developers, or if you think it's a significant enough error to cause problems for other users, please feel free to open an issue.
+
+The development team will be happy to help and guide you with any of these points and work with you getting your PR submitted for approval. Create a PR with [WIP] in the title and ask for specific assistance within the issue, or contact the dev team on any of the channels below.
+
+# Find Us
Cargo.lock (generated): 692 changes
File diff suppressed because it is too large.
@@ -30,9 +30,9 @@ futures = "0.1.21"
 rustls = "0.13"
 url = "1.7.0"
 
-grin_core = { path = "../core", version = "1.0.1" }
-grin_chain = { path = "../chain", version = "1.0.1" }
-grin_p2p = { path = "../p2p", version = "1.0.1" }
-grin_pool = { path = "../pool", version = "1.0.1" }
-grin_store = { path = "../store", version = "1.0.1" }
-grin_util = { path = "../util", version = "1.0.1" }
+grin_core = { path = "../core", version = "1.1.0" }
+grin_chain = { path = "../chain", version = "1.1.0" }
+grin_p2p = { path = "../p2p", version = "1.1.0" }
+grin_pool = { path = "../pool", version = "1.1.0" }
+grin_store = { path = "../store", version = "1.1.0" }
+grin_util = { path = "../util", version = "1.1.0" }
@@ -52,6 +52,15 @@ where
 	}
 }
 
+/// Helper function to easily issue a HTTP GET request
+/// on a given URL that returns nothing. Handles request
+/// building and response code checking.
+pub fn get_no_ret(url: &str, api_secret: Option<String>) -> Result<(), Error> {
+	let req = build_request(url, "GET", api_secret, None)?;
+	send_request(req)?;
+	Ok(())
+}
+
 /// Helper function to easily issue a HTTP POST request with the provided JSON
 /// object as body on a given URL that returns a JSON object. Handles request
 /// building, JSON serialization and deserialization, and response code
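A minimal sketch of how a caller might use the new helper. The endpoint and secret value below are illustrative assumptions, not part of this diff:

```rust
// Hedged usage sketch for get_no_ret; URL and secret are made up for illustration.
fn trigger_compaction() -> Result<(), Error> {
	let api_secret = Some("node_api_secret".to_string());
	// Fire-and-forget GET: we only care that the node answers with a success status.
	get_no_ret("http://127.0.0.1:3413/v1/chain/compact", api_secret)?;
	Ok(())
}
```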
@@ -11,7 +11,6 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 use super::utils::{get_output, w};
 use crate::chain;
 use crate::core::core::hash::Hash;

@@ -68,10 +67,7 @@ impl HeaderHandler {
 impl Handler for HeaderHandler {
 	fn get(&self, req: Request<Body>) -> ResponseFuture {
-		let el = match req.uri().path().trim_right_matches('/').rsplit('/').next() {
-			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-			Some(el) => el,
-		};
+		let el = right_path_element!(req);
 		result_to_response(self.get_header(el.to_string()))
 	}
 }

@@ -130,11 +126,7 @@ fn check_block_param(input: &String) -> Result<(), Error> {
 impl Handler for BlockHandler {
 	fn get(&self, req: Request<Body>) -> ResponseFuture {
-		let el = match req.uri().path().trim_right_matches('/').rsplit('/').next() {
-			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-			Some(el) => el,
-		};
-
+		let el = right_path_element!(req);
 		let h = match self.parse_input(el.to_string()) {
 			Err(e) => {
 				return response(
@@ -22,9 +22,7 @@ use crate::util;
 use crate::util::secp::pedersen::Commitment;
 use crate::web::*;
 use hyper::{Body, Request, StatusCode};
-use std::collections::HashMap;
 use std::sync::Weak;
-use url::form_urlencoded;
 
 /// Chain handler. Get the head details.
 /// GET /v1/chain

@@ -101,21 +99,9 @@ impl OutputHandler {
 	fn outputs_by_ids(&self, req: &Request<Body>) -> Result<Vec<Output>, Error> {
 		let mut commitments: Vec<String> = vec![];
 
-		let query = match req.uri().query() {
-			Some(q) => q,
-			None => return Err(ErrorKind::RequestError("no query string".to_owned()))?,
-		};
-		let params = form_urlencoded::parse(query.as_bytes())
-			.into_owned()
-			.collect::<Vec<(String, String)>>();
-
-		for (k, id) in params {
-			if k == "id" {
-				for id in id.split(',') {
-					commitments.push(id.to_owned());
-				}
-			}
-		}
+		let query = must_get_query!(req);
+		let params = QueryParams::from(query);
+		params.process_multival_param("id", |id| commitments.push(id.to_owned()));
 
 		let mut outputs: Vec<Output> = vec![];
 		for x in commitments {
@@ -159,49 +145,17 @@ impl OutputHandler {
 	// returns outputs for a specified range of blocks
 	fn outputs_block_batch(&self, req: &Request<Body>) -> Result<Vec<BlockOutputs>, Error> {
 		let mut commitments: Vec<Commitment> = vec![];
-		let mut start_height = 1;
-		let mut end_height = 1;
-		let mut include_rp = false;
-
-		let query = match req.uri().query() {
-			Some(q) => q,
-			None => return Err(ErrorKind::RequestError("no query string".to_owned()))?,
-		};
-
-		let params = form_urlencoded::parse(query.as_bytes()).into_owned().fold(
-			HashMap::new(),
-			|mut hm, (k, v)| {
-				hm.entry(k).or_insert(vec![]).push(v);
-				hm
-			},
-		);
-
-		if let Some(ids) = params.get("id") {
-			for id in ids {
-				for id in id.split(',') {
-					if let Ok(x) = util::from_hex(String::from(id)) {
-						commitments.push(Commitment::from_vec(x));
-					}
-				}
-			}
-		}
-		if let Some(heights) = params.get("start_height") {
-			for height in heights {
-				start_height = height
-					.parse()
-					.map_err(|_| ErrorKind::RequestError("invalid start_height".to_owned()))?;
-			}
-		}
-		if let Some(heights) = params.get("end_height") {
-			for height in heights {
-				end_height = height
-					.parse()
-					.map_err(|_| ErrorKind::RequestError("invalid end_height".to_owned()))?;
-			}
-		}
-		if let Some(_) = params.get("include_rp") {
-			include_rp = true;
-		}
+		let query = must_get_query!(req);
+		let params = QueryParams::from(query);
+		params.process_multival_param("id", |id| {
+			if let Ok(x) = util::from_hex(String::from(id)) {
+				commitments.push(Commitment::from_vec(x));
+			}
+		});
+		let start_height = parse_param!(params, "start_height", 1);
+		let end_height = parse_param!(params, "end_height", 1);
+		let include_rp = params.get("include_rp").is_some();
 
 		debug!(
 			"outputs_block_batch: {}-{}, {:?}, {:?}",
@@ -223,11 +177,7 @@ impl OutputHandler {
 impl Handler for OutputHandler {
 	fn get(&self, req: Request<Body>) -> ResponseFuture {
-		let command = match req.uri().path().trim_right_matches('/').rsplit('/').next() {
-			Some(c) => c,
-			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-		};
-		match command {
+		match right_path_element!(req) {
 			"byids" => result_to_response(self.outputs_by_ids(&req)),
 			"byheight" => result_to_response(self.outputs_block_batch(&req)),
 			_ => response(StatusCode::BAD_REQUEST, ""),
@@ -14,7 +14,7 @@
 
 use super::utils::w;
 use crate::p2p;
-use crate::p2p::types::{PeerInfoDisplay, ReasonForBan};
+use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
 use crate::router::{Handler, ResponseFuture};
 use crate::web::*;
 use hyper::{Body, Request, StatusCode};

@@ -37,11 +37,11 @@ pub struct PeersConnectedHandler {
 impl Handler for PeersConnectedHandler {
 	fn get(&self, _req: Request<Body>) -> ResponseFuture {
-		let mut peers: Vec<PeerInfoDisplay> = vec![];
-		for p in &w(&self.peers).connected_peers() {
-			let peer_info = p.info.clone();
-			peers.push(peer_info.into());
-		}
+		let peers: Vec<PeerInfoDisplay> = w(&self.peers)
+			.connected_peers()
+			.iter()
+			.map(|p| p.info.clone().into())
+			.collect();
 		json_response(&peers)
 	}
 }
@@ -56,20 +56,26 @@ pub struct PeerHandler {
 impl Handler for PeerHandler {
 	fn get(&self, req: Request<Body>) -> ResponseFuture {
-		let command = match req.uri().path().trim_right_matches('/').rsplit('/').next() {
-			Some(c) => c,
-			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-		};
-		if let Ok(addr) = command.parse() {
-			match w(&self.peers).get_peer(addr) {
-				Ok(peer) => json_response(&peer),
-				Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
-			}
+		let command = right_path_element!(req);
+
+		// We support both "ip" and "ip:port" here for peer_addr.
+		// "ip:port" is only really useful for local usernet testing on loopback address.
+		// Normally we map peers to ip and only allow a single peer per ip address.
+		let peer_addr;
+		if let Ok(ip_addr) = command.parse() {
+			peer_addr = PeerAddr::from_ip(ip_addr);
+		} else if let Ok(addr) = command.parse() {
+			peer_addr = PeerAddr(addr);
 		} else {
-			response(
+			return response(
 				StatusCode::BAD_REQUEST,
 				format!("peer address unrecognized: {}", req.uri().path()),
-			)
+			);
+		}
+
+		match w(&self.peers).get_peer(peer_addr) {
+			Ok(peer) => json_response(&peer),
+			Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
 		}
 	}

 	fn post(&self, req: Request<Body>) -> ResponseFuture {

@@ -80,20 +86,23 @@ impl Handler for PeerHandler {
 		};
 		let addr = match path_elems.next() {
 			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-			Some(a) => match a.parse() {
-				Err(e) => {
+			Some(a) => {
+				if let Ok(ip_addr) = a.parse() {
+					PeerAddr::from_ip(ip_addr)
+				} else if let Ok(addr) = a.parse() {
+					PeerAddr(addr)
+				} else {
 					return response(
 						StatusCode::BAD_REQUEST,
-						format!("invalid peer address: {}", e),
+						format!("invalid peer address: {}", req.uri().path()),
 					);
 				}
-				Ok(addr) => addr,
-			},
+			}
 		};
 
 		match command {
-			"ban" => w(&self.peers).ban_peer(&addr, ReasonForBan::ManualBan),
-			"unban" => w(&self.peers).unban_peer(&addr),
+			"ban" => w(&self.peers).ban_peer(addr, ReasonForBan::ManualBan),
+			"unban" => w(&self.peers).unban_peer(addr),
 			_ => return response(StatusCode::BAD_REQUEST, "invalid command"),
 		};
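The two-stage parse above (try a bare IP first, then fall back to ip:port) can be shown standalone with std types only. This sketch is illustrative and not part of the diff:

```rust
use std::net::{IpAddr, SocketAddr};

// Mirrors the handler's fallback order: a bare IP is the normal case; an
// explicit ip:port is accepted mainly for loopback/usernet testing.
fn classify(command: &str) -> Result<String, String> {
	if let Ok(ip) = command.parse::<IpAddr>() {
		Ok(format!("peer keyed by ip: {}", ip))
	} else if let Ok(sa) = command.parse::<SocketAddr>() {
		Ok(format!("peer keyed by ip:port: {}", sa))
	} else {
		Err(format!("peer address unrecognized: {}", command))
	}
}

fn main() {
	assert!(classify("10.0.0.1").is_ok());
	assert!(classify("127.0.0.1:13414").is_ok());
	assert!(classify("not-an-address").is_err());
}
```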
@@ -23,12 +23,11 @@ use crate::types::*;
 use crate::util;
 use crate::util::RwLock;
 use crate::web::*;
+use failure::ResultExt;
 use futures::future::ok;
 use futures::Future;
 use hyper::{Body, Request, StatusCode};
-use std::collections::HashMap;
 use std::sync::Weak;
-use url::form_urlencoded;
 
 /// Get basic information about the transaction pool.
 /// GET /v1/pool

@@ -61,15 +60,7 @@ pub struct PoolPushHandler {
 impl PoolPushHandler {
 	fn update_pool(&self, req: Request<Body>) -> Box<dyn Future<Item = (), Error = Error> + Send> {
-		let params = match req.uri().query() {
-			Some(query_string) => form_urlencoded::parse(query_string.as_bytes())
-				.into_owned()
-				.fold(HashMap::new(), |mut hm, (k, v)| {
-					hm.entry(k).or_insert(vec![]).push(v);
-					hm
-				}),
-			None => HashMap::new(),
-		};
+		let params = QueryParams::from(req.uri().query());
 
 		let fluff = params.get("fluff").is_some();
 		let pool_arc = w(&self.tx_pool).clone();

@@ -99,13 +90,14 @@ impl PoolPushHandler {
 				// Push to tx pool.
 				let mut tx_pool = pool_arc.write();
-				let header = tx_pool.blockchain.chain_head().unwrap();
-				tx_pool
+				let header = tx_pool
+					.blockchain
+					.chain_head()
+					.context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
+				let res = tx_pool
 					.add_to_pool(source, tx, !fluff, &header)
-					.map_err(|e| {
-						error!("update_pool: failed with error: {:?}", e);
-						ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
-					})
+					.context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
+				Ok(res)
 			}),
 		)
 	}
@@ -22,9 +22,7 @@ use crate::util::secp::pedersen::Commitment;
 use crate::web::*;
 use failure::ResultExt;
 use hyper::{Body, Request, StatusCode};
-use std::collections::HashMap;
 use std::sync::Weak;
-use url::form_urlencoded;
 
 // Sum tree handler. Retrieve the roots:
 // GET /v1/txhashset/roots

@@ -114,59 +112,14 @@ impl TxHashSetHandler {
 impl Handler for TxHashSetHandler {
 	fn get(&self, req: Request<Body>) -> ResponseFuture {
-		let mut start_index = 1;
-		let mut max = 100;
-		let mut id = "".to_owned();
-
-		// TODO: probably need to set a reasonable max limit here
-		let mut last_n = 10;
-		if let Some(query_string) = req.uri().query() {
-			let params = form_urlencoded::parse(query_string.as_bytes())
-				.into_owned()
-				.fold(HashMap::new(), |mut hm, (k, v)| {
-					hm.entry(k).or_insert(vec![]).push(v);
-					hm
-				});
-			if let Some(nums) = params.get("n") {
-				for num in nums {
-					if let Ok(n) = str::parse(num) {
-						last_n = n;
-					}
-				}
-			}
-			if let Some(start_indexes) = params.get("start_index") {
-				for si in start_indexes {
-					if let Ok(s) = str::parse(si) {
-						start_index = s;
-					}
-				}
-			}
-			if let Some(maxes) = params.get("max") {
-				for ma in maxes {
-					if let Ok(m) = str::parse(ma) {
-						max = m;
-					}
-				}
-			}
-			if let Some(ids) = params.get("id") {
-				if !ids.is_empty() {
-					id = ids.last().unwrap().to_owned();
-				}
-			}
-		}
-		let command = match req
-			.uri()
-			.path()
-			.trim_right()
-			.trim_right_matches("/")
-			.rsplit("/")
-			.next()
-		{
-			Some(c) => c,
-			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
-		};
+		let params = QueryParams::from(req.uri().query());
+		let last_n = parse_param_no_err!(params, "n", 10);
+		let start_index = parse_param_no_err!(params, "start_index", 1);
+		let max = parse_param_no_err!(params, "max", 100);
+		let id = parse_param_no_err!(params, "id", "".to_owned());
 
-		match command {
+		match right_path_element!(req) {
 			"roots" => json_response_pretty(&self.get_roots()),
 			"lastoutputs" => json_response_pretty(&self.get_last_n_output(last_n)),
 			"lastrangeproofs" => json_response_pretty(&self.get_last_n_rangeproof(last_n)),
@@ -48,12 +48,15 @@ pub fn get_output(
 		OutputIdentifier::new(OutputFeatures::Coinbase, &commit),
 	];
 
-	for x in outputs.iter() {
-		if let Ok(_) = w(chain).is_unspent(&x) {
-			let block_height = w(chain).get_header_for_output(&x).unwrap().height;
-			let output_pos = w(chain).get_output_pos(&x.commit).unwrap_or(0);
-			return Ok((Output::new(&commit, block_height, output_pos), x.clone()));
-		}
+	for x in outputs.iter().filter(|x| w(chain).is_unspent(x).is_ok()) {
+		let block_height = w(chain)
+			.get_header_for_output(&x)
+			.context(ErrorKind::Internal(
+				"Can't get header for output".to_owned(),
+			))?
+			.height;
+		let output_pos = w(chain).get_output_pos(&x.commit).unwrap_or(0);
+		return Ok((Output::new(&commit, block_height, output_pos), x.clone()));
 	}
 	Err(ErrorKind::NotFound)?
 }
@@ -30,13 +30,14 @@ extern crate serde_derive;
 #[macro_use]
 extern crate log;
 
+#[macro_use]
+mod web;
 pub mod auth;
 pub mod client;
 mod handlers;
 mod rest;
 mod router;
 mod types;
-mod web;
 
 pub use crate::auth::BasicAuthMiddleware;
 pub use crate::handlers::start_rest_apis;
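One detail worth calling out: `#[macro_use]` exposes a module's `macro_rules!` macros to the rest of the crate only from that point onward in declaration order, which is why `mod web` moves from the bottom of the module list to the top.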
@@ -5,7 +5,9 @@ use futures::{Future, Stream};
 use hyper::{Body, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
 use serde_json;
+use std::collections::HashMap;
 use std::fmt::Debug;
+use url::form_urlencoded;
 
 /// Parse request body
 pub fn parse_body<T>(req: Request<Body>) -> Box<dyn Future<Item = T, Error = Error> + Send>

@@ -81,3 +83,100 @@ pub fn just_response<T: Into<Body> + Debug>(status: StatusCode, text: T) -> Resp
 pub fn response<T: Into<Body> + Debug>(status: StatusCode, text: T) -> ResponseFuture {
 	Box::new(ok(just_response(status, text)))
 }
+
+pub struct QueryParams {
+	params: HashMap<String, Vec<String>>,
+}
+
+impl QueryParams {
+	pub fn process_multival_param<F>(&self, name: &str, mut f: F)
+	where
+		F: FnMut(&str),
+	{
+		if let Some(ids) = self.params.get(name) {
+			for id in ids {
+				for id in id.split(',') {
+					f(id);
+				}
+			}
+		}
+	}
+
+	pub fn get(&self, name: &str) -> Option<&String> {
+		match self.params.get(name) {
+			None => None,
+			Some(v) => v.first(),
+		}
+	}
+}
+
+impl From<&str> for QueryParams {
+	fn from(query_string: &str) -> Self {
+		let params = form_urlencoded::parse(query_string.as_bytes())
+			.into_owned()
+			.fold(HashMap::new(), |mut hm, (k, v)| {
+				hm.entry(k).or_insert(vec![]).push(v);
+				hm
+			});
+		QueryParams { params }
+	}
+}
+
+impl From<Option<&str>> for QueryParams {
+	fn from(query_string: Option<&str>) -> Self {
+		match query_string {
+			Some(query_string) => Self::from(query_string),
+			None => QueryParams {
+				params: HashMap::new(),
+			},
+		}
+	}
+}
+
+impl From<Request<Body>> for QueryParams {
+	fn from(req: Request<Body>) -> Self {
+		Self::from(req.uri().query())
+	}
+}
+
+#[macro_export]
+macro_rules! right_path_element(
+	($req: expr) =>(
+		match $req.uri().path().trim_end_matches('/').rsplit('/').next() {
+			None => return response(StatusCode::BAD_REQUEST, "invalid url"),
+			Some(el) => el,
+		};
+	));
+
+#[macro_export]
+macro_rules! must_get_query(
+	($req: expr) =>(
+		match $req.uri().query() {
+			Some(q) => q,
+			None => return Err(ErrorKind::RequestError("no query string".to_owned()))?,
+		}
+	));
+
+#[macro_export]
+macro_rules! parse_param(
+	($param: expr, $name: expr, $default: expr) =>(
+		match $param.get($name) {
+			None => $default,
+			Some(val) => match val.parse() {
+				Ok(val) => val,
+				Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)))?,
+			}
+		}
+	));
+
+#[macro_export]
+macro_rules! parse_param_no_err(
+	($param: expr, $name: expr, $default: expr) =>(
+		match $param.get($name) {
+			None => $default,
+			Some(val) => match val.parse() {
+				Ok(val) => val,
+				Err(_) => $default,
+			}
+		}
+	));
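Taken together these helpers collapse the per-handler boilerplate seen in the earlier hunks. A hedged sketch of a hypothetical handler wired up with them (`FooHandler`, `lookup`, and the `count` parameter are invented for illustration):

```rust
// right_path_element! early-returns an HTTP 400 response, so it fits handler
// methods returning ResponseFuture; must_get_query!/parse_param! early-return
// Err(...), so they fit helpers returning Result.
impl Handler for FooHandler {
	fn get(&self, req: Request<Body>) -> ResponseFuture {
		let el = right_path_element!(req); // last non-empty path segment
		let params = QueryParams::from(req.uri().query());
		let count = parse_param_no_err!(params, "count", 10); // bad/missing input -> 10
		result_to_response(self.lookup(el, count))
	}
}
```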
@@ -21,6 +21,8 @@ serde = "1"
 serde_derive = "1"
 chrono = "0.4.4"
 lru-cache = "0.1"
+lazy_static = "1"
+regex = "1"
 
 grin_core = { path = "../core", version = "1.1.0" }
 grin_keychain = { path = "../keychain", version = "1.1.0" }
@@ -660,7 +660,7 @@ impl Chain {
 	/// Return a merkle proof valid for the current output pmmr state at the
 	/// given pos
-	pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
+	pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, Error> {
 		let mut txhashset = self.txhashset.write();
 		txhashset.merkle_proof(commit)
 	}
@@ -131,6 +131,9 @@ pub enum ErrorKind {
 	/// We cannot process data once the Grin server has been stopped.
 	#[fail(display = "Stopped (Grin Shutting Down)")]
 	Stopped,
+	/// Internal Roaring Bitmap error
+	#[fail(display = "Roaring Bitmap error")]
+	Bitmap,
 }
 
 impl Display for Error {
@@ -68,7 +68,8 @@ impl<'a> RewindableKernelView<'a> {
 	/// fast sync where a reorg past the horizon could allow a whole rewrite of
 	/// the kernel set.
 	pub fn validate_root(&self) -> Result<(), Error> {
-		if self.pmmr.root() != self.header.kernel_root {
+		let root = self.pmmr.root().map_err(|_| ErrorKind::InvalidRoot)?;
+		if root != self.header.kernel_root {
 			return Err(ErrorKind::InvalidTxHashSet(format!(
 				"Kernel root at {} does not match",
 				self.header.height
@@ -272,9 +272,11 @@ impl TxHashSet {
 	}
 
 	/// build a new merkle proof for the given position.
-	pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, String> {
-		let pos = self.commit_index.get_output_pos(&commit).unwrap();
-		PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos).merkle_proof(pos)
+	pub fn merkle_proof(&mut self, commit: Commitment) -> Result<MerkleProof, Error> {
+		let pos = self.commit_index.get_output_pos(&commit)?;
+		PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos)
+			.merkle_proof(pos)
+			.map_err(|_| ErrorKind::MerkleProof.into())
 	}
 
 	/// Compact the MMR data files and flush the rm logs
@@ -1204,11 +1206,19 @@ impl<'a> Extension<'a> {
 	/// from the respective MMRs.
 	/// For a significantly faster way of validating full kernel sums see BlockSums.
 	pub fn validate_kernel_sums(&self) -> Result<((Commitment, Commitment)), Error> {
 		let now = Instant::now();
 
 		let genesis = self.get_header_by_height(0)?;
 		let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
 			self.header.total_overage(genesis.kernel_mmr_size > 0),
 			self.header.total_kernel_offset(),
 		)?;
 
 		debug!(
 			"txhashset: validated total kernel sums, took {}s",
 			now.elapsed().as_secs(),
 		);
 
 		Ok((utxo_sum, kernel_sum))
 	}
@@ -1244,18 +1254,29 @@ impl<'a> Extension<'a> {
 		Ok((output_sum, kernel_sum))
 	}
 
-	/// Rebuild the index of MMR positions to the corresponding Output and
-	/// kernel by iterating over the whole MMR data. This is a costly operation
-	/// performed only when we receive a full new chain state.
+	/// Rebuild the index of MMR positions to the corresponding UTXOs.
+	/// This is a costly operation performed only when we receive a full new chain state.
 	pub fn rebuild_index(&self) -> Result<(), Error> {
+		let now = Instant::now();
+
+		let mut count = 0;
+
 		for n in 1..self.output_pmmr.unpruned_size() + 1 {
 			// non-pruned leaves only
 			if pmmr::bintree_postorder_height(n) == 0 {
 				if let Some(out) = self.output_pmmr.get_data(n) {
 					self.batch.save_output_pos(&out.commit, n)?;
+					count += 1;
 				}
 			}
 		}
+
+		debug!(
+			"txhashset: rebuild_index ({} UTXOs), took {}s",
+			count,
+			now.elapsed().as_secs(),
+		);
+
 		Ok(())
 	}
@@ -1453,11 +1474,28 @@ pub fn zip_write(
 ) -> Result<(), Error> {
 	let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);
 	fs::create_dir_all(txhashset_path.clone())?;
-	zip::decompress(txhashset_data, &txhashset_path)
+	zip::decompress(txhashset_data, &txhashset_path, expected_file)
 		.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
 	check_and_remove_files(&txhashset_path, header)
 }
 
+fn expected_file(path: &Path) -> bool {
+	use lazy_static::lazy_static;
+	use regex::Regex;
+	let s_path = path.to_str().unwrap_or_else(|| "");
+	lazy_static! {
+		static ref RE: Regex = Regex::new(
+			format!(
+				r#"^({}|{}|{})(/pmmr_(hash|data|leaf|prun)\.bin(\.\w*)?)?$"#,
+				OUTPUT_SUBDIR, KERNEL_SUBDIR, RANGE_PROOF_SUBDIR
+			)
+			.as_str()
+		)
+		.unwrap();
+	}
+	RE.is_match(&s_path)
+}
+
 /// Check a txhashset directory and remove any unexpected
 fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Result<(), Error> {
 	// First compare the subdirectories
@@ -1523,19 +1561,27 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 		.difference(&pmmr_files_expected)
 		.cloned()
 		.collect();
+	let mut removed = 0;
 	if !difference.is_empty() {
-		debug!(
-			"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
-			&subdirectory_path
-		);
-		for diff in difference {
+		for diff in &difference {
 			let diff_path = subdirectory_path.join(diff);
-			file::delete(diff_path.clone())?;
-			debug!(
-				"check_and_remove_files: unexpected file '{:?}' removed",
-				diff_path
-			);
+			match file::delete(diff_path.clone()) {
+				Err(e) => error!(
+					"check_and_remove_files: fail to remove file '{:?}', Err: {:?}",
+					diff_path, e,
+				),
+				Ok(_) => {
+					removed += 1;
+					trace!("check_and_remove_files: file '{:?}' removed", diff_path);
+				}
+			}
+		}
+		debug!(
+			"{} tmp file(s) found in txhashset subfolder {:?}, {} removed.",
+			difference.len(),
+			&subdirectory_path,
+			removed,
+		);
 		}
 	}
 	Ok(())
@@ -1590,6 +1636,25 @@ pub fn input_pos_to_rewind(
 		current = batch.get_previous_header(&current)?;
 	}
 
-	let bitmap = bitmap_fast_or(None, &mut block_input_bitmaps).unwrap();
-	Ok(bitmap)
+	bitmap_fast_or(None, &mut block_input_bitmaps).ok_or_else(|| ErrorKind::Bitmap.into())
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn test_expected_files() {
+		assert!(!expected_file(Path::new("kernels")));
+		assert!(!expected_file(Path::new("xkernel")));
+		assert!(expected_file(Path::new("kernel")));
+		assert!(expected_file(Path::new("kernel/pmmr_data.bin")));
+		assert!(expected_file(Path::new("kernel/pmmr_hash.bin")));
+		assert!(expected_file(Path::new("kernel/pmmr_leaf.bin")));
+		assert!(expected_file(Path::new("kernel/pmmr_prun.bin")));
+		assert!(expected_file(Path::new("kernel/pmmr_leaf.bin.deadbeef")));
+		assert!(!expected_file(Path::new("xkernel/pmmr_data.bin")));
+		assert!(!expected_file(Path::new("kernel/pmmrx_data.bin")));
+		assert!(!expected_file(Path::new("kernel/pmmr_data.binx")));
+	}
+}
@@ -14,7 +14,7 @@
 
 //! Lightweight readonly view into output MMR for convenience.
 
-use crate::core::core::hash::Hash;
+use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::pmmr::{self, ReadonlyPMMR};
 use crate::core::core::{Block, BlockHeader, Input, Output, Transaction};
 use crate::core::global;
@@ -73,10 +73,11 @@ impl Tip {
 			total_difficulty: header.total_difficulty(),
 		}
 	}
+}
 
-	/// *Really* easy to accidentally call hash() on a tip (thinking its a header).
-	/// So lets make hash() do the right thing here.
-	pub fn hash(&self) -> Hash {
+impl Hashed for Tip {
+	/// The hash of the underlying block.
+	fn hash(&self) -> Hash {
 		self.last_block_h
 	}
 }
@@ -144,9 +144,5 @@ fn txhashset_contains_expected_files(dirname: String, path_buf: PathBuf) -> bool
 	let intersection: HashSet<_> = zip_files_hashset
 		.difference(&expected_files_hashset)
 		.collect();
-	if intersection.is_empty() {
-		true
-	} else {
-		false
-	}
+	intersection.is_empty()
 }
@@ -25,7 +25,7 @@ use std::sync::Arc;
 use crate::consensus::{reward, REWARD};
 use crate::core::committed::{self, Committed};
 use crate::core::compact_block::{CompactBlock, CompactBlockBody};
-use crate::core::hash::{Hash, Hashed, ZERO_HASH};
+use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH};
 use crate::core::verifier_cache::VerifierCache;
 use crate::core::{
 	transaction, Commitment, Input, Output, Transaction, TransactionBody, TxKernel, Weighting,
@@ -160,9 +160,9 @@ impl FixedLength for HeaderEntry {
 	const LEN: usize = Hash::LEN + 8 + Difficulty::LEN + 4 + 1;
 }
 
-impl HeaderEntry {
+impl Hashed for HeaderEntry {
 	/// The hash of the underlying block.
-	pub fn hash(&self) -> Hash {
+	fn hash(&self) -> Hash {
 		self.hash
 	}
 }
@@ -197,6 +197,7 @@ pub struct BlockHeader {
 	/// Proof of work and related
 	pub pow: ProofOfWork,
 }
+impl DefaultHashable for BlockHeader {}
 
 impl Default for BlockHeader {
 	fn default() -> BlockHeader {
@@ -353,6 +354,13 @@ pub struct Block {
 	body: TransactionBody,
 }
 
+impl Hashed for Block {
+	/// The hash of the underlying block.
+	fn hash(&self) -> Hash {
+		self.header.hash()
+	}
+}
+
 /// Implementation of Writeable for a block, defines how to write the block to a
 /// binary writer. Differentiates between writing the block for the purpose of
 /// full serialization and the one of just extracting a hash.
@@ -570,11 +578,6 @@ impl Block {
 		&mut self.body.kernels
 	}
 
-	/// Blockhash, computed using only the POW
-	pub fn hash(&self) -> Hash {
-		self.header.hash()
-	}
-
 	/// Sum of all fees (inputs less outputs) in the block
 	pub fn total_fees(&self) -> u64 {
 		self.body
@@ -20,17 +20,22 @@ use crate::keychain::BlindingFactor;
 use crate::util::secp::key::SecretKey;
 use crate::util::secp::pedersen::Commitment;
 use crate::util::{secp, secp_static, static_secp_instance};
+use failure::Fail;
 
 /// Errors from summing and verifying kernel excesses via committed trait.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Fail)]
 pub enum Error {
 	/// Keychain related error.
+	#[fail(display = "Keychain error {}", _0)]
 	Keychain(keychain::Error),
 	/// Secp related error.
+	#[fail(display = "Secp error {}", _0)]
 	Secp(secp::Error),
 	/// Kernel sums do not equal output sums.
+	#[fail(display = "Kernel sum mismatch")]
 	KernelSumMismatch,
 	/// Committed overage (fee or reward) is invalid
+	#[fail(display = "Invalid value")]
 	InvalidValue,
 }
@@ -17,7 +17,7 @@
 use rand::{thread_rng, Rng};
 
 use crate::core::block::{Block, BlockHeader, Error};
-use crate::core::hash::Hashed;
+use crate::core::hash::{DefaultHashable, Hashed};
 use crate::core::id::ShortIdentifiable;
 use crate::core::{Output, ShortId, TxKernel};
 use crate::ser::{self, read_multi, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
@@ -62,9 +62,9 @@ impl CompactBlockBody {
 	/// Sort everything.
 	fn sort(&mut self) {
-		self.out_full.sort();
-		self.kern_full.sort();
-		self.kern_ids.sort();
+		self.out_full.sort_unstable();
+		self.kern_full.sort_unstable();
+		self.kern_ids.sort_unstable();
 	}
 
 	/// "Lightweight" validation.
@@ -137,6 +137,8 @@ pub struct CompactBlock {
 	body: CompactBlockBody,
 }
 
+impl DefaultHashable for CompactBlock {}
+
 impl CompactBlock {
 	/// "Lightweight" validation.
 	fn validate_read(&self) -> Result<(), Error> {
@@ -36,6 +36,19 @@ pub const ZERO_HASH: Hash = Hash([0; 32]);
 #[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
 pub struct Hash([u8; 32]);
 
+impl DefaultHashable for Hash {}
+
+impl Hash {
+	fn hash_with<T: Writeable>(&self, other: T) -> Hash {
+		let mut hasher = HashWriter::default();
+		ser::Writeable::write(self, &mut hasher).unwrap();
+		ser::Writeable::write(&other, &mut hasher).unwrap();
+		let mut ret = [0; 32];
+		hasher.finalize(&mut ret);
+		Hash(ret)
+	}
+}
+
 impl fmt::Debug for Hash {
 	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 		let hash_hex = self.to_hex();
@@ -212,25 +225,26 @@ impl ser::Writer for HashWriter {
 pub trait Hashed {
 	/// Obtain the hash of the object
 	fn hash(&self) -> Hash;
-	/// Hash the object together with another writeable object
-	fn hash_with<T: Writeable>(&self, other: T) -> Hash;
 }
 
-impl<W: ser::Writeable> Hashed for W {
+/// Implementing this trait enables the default
+/// hash implementation
+pub trait DefaultHashable: Writeable {}
+impl<D: DefaultHashable> Hashed for D {
 	fn hash(&self) -> Hash {
 		let mut hasher = HashWriter::default();
-		ser::Writeable::write(self, &mut hasher).unwrap();
+		Writeable::write(self, &mut hasher).unwrap();
 		let mut ret = [0; 32];
 		hasher.finalize(&mut ret);
 		Hash(ret)
 	}
-
-	fn hash_with<T: Writeable>(&self, other: T) -> Hash {
-		let mut hasher = HashWriter::default();
-		ser::Writeable::write(self, &mut hasher).unwrap();
-		ser::Writeable::write(&other, &mut hasher).unwrap();
-		let mut ret = [0; 32];
-		hasher.finalize(&mut ret);
-		Hash(ret)
-	}
 }
+
+impl<D: DefaultHashable> DefaultHashable for &D {}
+impl<D: DefaultHashable, E: DefaultHashable> DefaultHashable for (D, E) {}
+impl<D: DefaultHashable, E: DefaultHashable, F: DefaultHashable> DefaultHashable for (D, E, F) {}
+
+/// Implement Hashed trait for external types here
+impl DefaultHashable for crate::util::secp::pedersen::RangeProof {}
+impl DefaultHashable for Vec<u8> {}
+impl DefaultHashable for u64 {}
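The marker-trait split means a type now opts into the blanket `Hashed` implementation explicitly instead of getting it for every `Writeable`. A hedged sketch for a hypothetical type (`MyElem` is invented; `write_u64` is assumed from grin's `ser::Writer` trait):

```rust
// Hypothetical element type; one marker impl buys the default hash() above.
struct MyElem(u64);

impl Writeable for MyElem {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
		// Serialize exactly the bytes that should feed the hash.
		writer.write_u64(self.0)
	}
}

impl DefaultHashable for MyElem {}

// let h: Hash = MyElem(42).hash(); // via impl<D: DefaultHashable> Hashed for D
```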
@@ -20,7 +20,7 @@ use std::cmp::Ordering;
 use byteorder::{ByteOrder, LittleEndian};
 use siphasher::sip::SipHasher24;
 
-use crate::core::hash::{Hash, Hashed};
+use crate::core::hash::{DefaultHashable, Hash, Hashed};
 use crate::ser::{self, Readable, Reader, Writeable, Writer};
 use crate::util;
@@ -73,6 +73,7 @@ impl<H: Hashed> ShortIdentifiable for H {
 #[derive(Clone, Serialize, Deserialize, Hash)]
 pub struct ShortId([u8; 6]);
 
+impl DefaultHashable for ShortId {}
 /// We want to sort short_ids in a canonical and consistent manner so we can
 /// verify sort order in the same way we do for full inputs|outputs|kernels
 /// themselves.
@@ -168,6 +169,8 @@ mod test {
 		}
 	}
 
+	impl DefaultHashable for Foo {}
+
 	let foo = Foo(0);
 
 	let expected_hash =
@@ -95,9 +95,9 @@ where
 	/// Computes the root of the MMR. Find all the peaks in the current
 	/// tree and "bags" them to get a single peak.
-	pub fn root(&self) -> Hash {
+	pub fn root(&self) -> Result<Hash, String> {
 		if self.is_empty() {
-			return ZERO_HASH;
+			return Ok(ZERO_HASH);
 		}
 		let mut res = None;
 		for peak in self.peaks().iter().rev() {

@@ -106,7 +106,7 @@ where
 				Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
 			}
 		}
-		res.expect("no root, invalid tree")
+		res.ok_or_else(|| "no root, invalid tree".to_owned())
 	}
 
 	/// Returns a vec of the peaks of this MMR.
@@ -14,7 +14,7 @@
 
 //! Transactions
 
-use crate::core::hash::Hashed;
+use crate::core::hash::{DefaultHashable, Hashed};
 use crate::core::verifier_cache::VerifierCache;
 use crate::core::{committed, Committed};
 use crate::keychain::{self, BlindingFactor};
@@ -32,7 +32,6 @@ use crate::{consensus, global};
 use enum_primitive::FromPrimitive;
 use std::cmp::Ordering;
 use std::cmp::{max, min};
-use std::collections::HashSet;
 use std::sync::Arc;
 use std::{error, fmt};
@@ -51,6 +50,8 @@ enum_from_primitive! {
 	}
 }
 
+impl DefaultHashable for KernelFeatures {}
+
 impl Writeable for KernelFeatures {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		writer.write_u8(*self as u8)?;
@@ -176,6 +177,7 @@ pub struct TxKernel {
 	pub excess_sig: secp::Signature,
 }
 
+impl DefaultHashable for TxKernel {}
 hashable_ord!(TxKernel);
 
 impl ::std::hash::Hash for TxKernel {
@@ -493,9 +495,9 @@ impl TransactionBody {
 	/// Sort the inputs|outputs|kernels.
 	pub fn sort(&mut self) {
-		self.inputs.sort();
-		self.outputs.sort();
-		self.kernels.sort();
+		self.inputs.sort_unstable();
+		self.outputs.sort_unstable();
+		self.kernels.sort_unstable();
 	}
 
 	/// Creates a new transaction body initialized with
@@ -507,7 +509,7 @@ impl TransactionBody {
 		kernels: Vec<TxKernel>,
 		verify_sorted: bool,
 	) -> Result<TransactionBody, Error> {
-		let body = TransactionBody {
+		let mut body = TransactionBody {
 			inputs,
 			outputs,
 			kernels,
@@ -517,52 +519,44 @@ impl TransactionBody {
 			// If we are verifying sort order then verify and
 			// return an error if not sorted lexicographically.
 			body.verify_sorted()?;
-			Ok(body)
 		} else {
 			// If we are not verifying sort order then sort in place and return.
-			let mut body = body;
 			body.sort();
-			Ok(body)
 		}
+		Ok(body)
 	}
 
 	/// Builds a new body with the provided inputs added. Existing
 	/// inputs, if any, are kept intact.
 	/// Sort order is maintained.
-	pub fn with_input(self, input: Input) -> TransactionBody {
-		let mut new_ins = self.inputs;
-		new_ins.push(input);
-		new_ins.sort();
-		TransactionBody {
-			inputs: new_ins,
-			..self
-		}
+	pub fn with_input(mut self, input: Input) -> TransactionBody {
+		self.inputs
+			.binary_search(&input)
+			.err()
+			.map(|e| self.inputs.insert(e, input));
+		self
 	}
 
 	/// Builds a new TransactionBody with the provided output added. Existing
 	/// outputs, if any, are kept intact.
 	/// Sort order is maintained.
-	pub fn with_output(self, output: Output) -> TransactionBody {
-		let mut new_outs = self.outputs;
-		new_outs.push(output);
-		new_outs.sort();
-		TransactionBody {
-			outputs: new_outs,
-			..self
-		}
+	pub fn with_output(mut self, output: Output) -> TransactionBody {
+		self.outputs
+			.binary_search(&output)
+			.err()
+			.map(|e| self.outputs.insert(e, output));
+		self
	}
 
 	/// Builds a new TransactionBody with the provided kernel added. Existing
 	/// kernels, if any, are kept intact.
 	/// Sort order is maintained.
-	pub fn with_kernel(self, kernel: TxKernel) -> TransactionBody {
-		let mut new_kerns = self.kernels;
-		new_kerns.push(kernel);
-		new_kerns.sort();
-		TransactionBody {
-			kernels: new_kerns,
-			..self
-		}
+	pub fn with_kernel(mut self, kernel: TxKernel) -> TransactionBody {
+		self.kernels
+			.binary_search(&kernel)
+			.err()
+			.map(|e| self.kernels.insert(e, kernel));
+		self
 	}
 
 	/// Total fee for a TransactionBody is the sum of fees of all kernels.
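The `binary_search` idiom above keeps each vec sorted without a full re-sort per insertion: `Err(pos)` carries the insertion point when the element is absent, and a duplicate (`Ok(_)`) is silently skipped. A standalone illustration on plain integers:

```rust
fn main() {
	let mut v = vec![1, 3, 5];
	// Absent element: Err(pos) is the index that keeps v sorted.
	if let Err(pos) = v.binary_search(&4) {
		v.insert(pos, 4);
	}
	assert_eq!(v, [1, 3, 4, 5]);
	// Present element: Ok(_) means nothing is inserted, so no duplicates.
	assert!(v.binary_search(&3).is_ok());
}
```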
@@ -658,14 +652,21 @@ impl TransactionBody {
 	}
 
 	// Verify that no input is spending an output from the same block.
+	// Assumes inputs and outputs are sorted
 	fn verify_cut_through(&self) -> Result<(), Error> {
-		let mut out_set = HashSet::new();
-		for out in &self.outputs {
-			out_set.insert(out.commitment());
-		}
-		for inp in &self.inputs {
-			if out_set.contains(&inp.commitment()) {
-				return Err(Error::CutThrough);
+		let mut inputs = self.inputs.iter().map(|x| x.hash()).peekable();
+		let mut outputs = self.outputs.iter().map(|x| x.hash()).peekable();
+		while let (Some(ih), Some(oh)) = (inputs.peek(), outputs.peek()) {
+			match ih.cmp(oh) {
+				Ordering::Less => {
+					inputs.next();
+				}
+				Ordering::Greater => {
+					outputs.next();
+				}
+				Ordering::Equal => {
+					return Err(Error::CutThrough);
+				}
 			}
 		}
 		Ok(())
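The rewrite replaces a `HashSet` membership test with a single merge-style walk over two pre-sorted hash streams; any equal pair means an input spends an output of the same body. The core check, illustrated standalone with integers standing in for hashes:

```rust
// Both slices must be sorted; returns true if they share any element.
fn overlaps(a: &[u64], b: &[u64]) -> bool {
	let (mut i, mut j) = (0, 0);
	while i < a.len() && j < b.len() {
		match a[i].cmp(&b[j]) {
			std::cmp::Ordering::Less => i += 1,
			std::cmp::Ordering::Greater => j += 1,
			std::cmp::Ordering::Equal => return true, // shared element: the cut-through case
		}
	}
	false
}

fn main() {
	assert!(overlaps(&[1, 3, 5], &[2, 5, 8])); // 5 appears on both sides
	assert!(!overlaps(&[1, 3], &[2, 4]));
}
```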
@@ -771,6 +772,8 @@ pub struct Transaction {
 	body: TransactionBody,
 }
 
+impl DefaultHashable for Transaction {}
+
 /// PartialEq
 impl PartialEq for Transaction {
 	fn eq(&self, tx: &Transaction) -> bool {
@@ -981,24 +984,34 @@ impl Transaction {
 /// and outputs.
 pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result<(), Error> {
 	// assemble output commitments set, checking they're all unique
-	let mut out_set = HashSet::new();
-	let all_uniq = { outputs.iter().all(|o| out_set.insert(o.commitment())) };
-	if !all_uniq {
+	outputs.sort_unstable();
+	if outputs.windows(2).any(|pair| pair[0] == pair[1]) {
 		return Err(Error::AggregationError);
 	}
-
-	let in_set = inputs
-		.iter()
-		.map(|inp| inp.commitment())
-		.collect::<HashSet<_>>();
-
-	let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
-
-	// filter and sort
-	inputs.retain(|inp| !to_cut_through.contains(&inp.commitment()));
-	outputs.retain(|out| !to_cut_through.contains(&out.commitment()));
-	inputs.sort();
-	outputs.sort();
+	inputs.sort_unstable();
+	let mut inputs_idx = 0;
+	let mut outputs_idx = 0;
+	let mut ncut = 0;
+	while inputs_idx < inputs.len() && outputs_idx < outputs.len() {
+		match inputs[inputs_idx].hash().cmp(&outputs[outputs_idx].hash()) {
+			Ordering::Less => {
+				inputs[inputs_idx - ncut] = inputs[inputs_idx];
+				inputs_idx += 1;
+			}
+			Ordering::Greater => {
+				outputs[outputs_idx - ncut] = outputs[outputs_idx];
+				outputs_idx += 1;
+			}
+			Ordering::Equal => {
+				inputs_idx += 1;
+				outputs_idx += 1;
+				ncut += 1;
+			}
+		}
+	}
+	// Cut elements that have already been copied
+	outputs.drain(outputs_idx - ncut..outputs_idx);
+	inputs.drain(inputs_idx - ncut..inputs_idx);
 	Ok(())
 }
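For intuition, with hypothetical sorted hash values: inputs [1, 4, 7] against outputs [2, 4, 9]. The walk advances past 1 and 2, bumps ncut to 1 at the shared 4, then shifts 7 down over the stale slot; the final drain calls remove one leftover slot from each vec, leaving inputs [1, 7] and outputs [2, 9], all in place and without allocating a set.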
@@ -1010,15 +1023,22 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
 	} else if txs.len() == 1 {
 		return Ok(txs.pop().unwrap());
 	}
+	let mut n_inputs = 0;
+	let mut n_outputs = 0;
+	let mut n_kernels = 0;
+	for tx in txs.iter() {
+		n_inputs += tx.body.inputs.len();
+		n_outputs += tx.body.outputs.len();
+		n_kernels += tx.body.kernels.len();
+	}
 
-	let mut inputs: Vec<Input> = vec![];
-	let mut outputs: Vec<Output> = vec![];
-	let mut kernels: Vec<TxKernel> = vec![];
+	let mut inputs: Vec<Input> = Vec::with_capacity(n_inputs);
+	let mut outputs: Vec<Output> = Vec::with_capacity(n_outputs);
+	let mut kernels: Vec<TxKernel> = Vec::with_capacity(n_kernels);
 
 	// we will sum these together at the end to give us the overall offset for the
 	// transaction
-	let mut kernel_offsets: Vec<BlindingFactor> = vec![];
-
+	let mut kernel_offsets: Vec<BlindingFactor> = Vec::with_capacity(txs.len());
 	for mut tx in txs {
 		// we will sum these later to give a single aggregate offset
 		kernel_offsets.push(tx.offset);
@@ -1032,7 +1052,7 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
 	cut_through(&mut inputs, &mut outputs)?;
 
 	// Now sort kernels.
-	kernels.sort();
+	kernels.sort_unstable();
 
 	// now sum the kernel_offsets up to give us an aggregate offset for the
 	// transaction
@@ -1103,9 +1123,9 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 	};
 
 	// Sorting them lexicographically
-	inputs.sort();
-	outputs.sort();
-	kernels.sort();
+	inputs.sort_unstable();
+	outputs.sort_unstable();
+	kernels.sort_unstable();
 
 	// Build a new tx from the above data.
 	let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
@@ -1115,7 +1135,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 /// A transaction input.
 ///
 /// Primarily a reference to an output being spent by the transaction.
-#[derive(Serialize, Deserialize, Debug, Clone)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
 pub struct Input {
 	/// The features of the output being spent.
 	/// We will check maturity for coinbase output.
@@ -1128,6 +1148,7 @@ pub struct Input {
 	pub commit: Commitment,
 }
 
+impl DefaultHashable for Input {}
 hashable_ord!(Input);
 
 impl ::std::hash::Hash for Input {
@@ -1241,6 +1262,7 @@ pub struct Output {
 	pub proof: RangeProof,
 }
 
+impl DefaultHashable for Output {}
 hashable_ord!(Output);
 
 impl ::std::hash::Hash for Output {
@@ -1353,6 +1375,8 @@ pub struct OutputIdentifier {
 	pub commit: Commitment,
 }
 
+impl DefaultHashable for OutputIdentifier {}
+
 impl OutputIdentifier {
 	/// Build a new output_identifier.
 	pub fn new(features: OutputFeatures, commit: &Commitment) -> OutputIdentifier {
@@ -102,7 +102,7 @@ pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> {
 	])
 }
 
-/// Macros to clean up integer unwrapping
+/// Macro to clean up u64 unwrapping
 #[macro_export]
 macro_rules! to_u64 {
 	($n:expr) => {

@@ -110,6 +110,7 @@ macro_rules! to_u64 {
 	};
 }
 
+/// Macro to clean up u64 unwrapping as u32
 #[macro_export]
 macro_rules! to_u32 {
 	($n:expr) => {

@@ -117,6 +118,7 @@ macro_rules! to_u32 {
 	};
 }
 
+/// Macro to clean up u64 unwrapping as usize
 #[macro_export]
 macro_rules! to_usize {
 	($n:expr) => {

@@ -124,6 +126,8 @@ macro_rules! to_usize {
 	};
 }
 
+/// Macro to clean up casting to edge type
+/// TODO: this macro uses unhygenic data T
 #[macro_export]
 macro_rules! to_edge {
 	($n:expr) => {
@@ -265,7 +265,7 @@ where
 		self.graph.solutions.pop();
 		for s in &mut self.graph.solutions {
 			s.nonces = map_vec!(s.nonces, |n| val[*n as usize]);
-			s.nonces.sort();
+			s.nonces.sort_unstable();
 		}
 		for s in &self.graph.solutions {
 			self.verify_impl(&s)?;
@@ -353,6 +353,16 @@ mod test {
 		0x1b2c20ad, 0x1bd7a83c, 0x1c05d5b0, 0x1c0b9caa,
 	];
 
+	// Cuckatoo 31 Solution for Header [0u8;80] - nonce 99
+	static V1_31: [u64; 42] = [
+		0x1128e07, 0xc181131, 0x110fad36, 0x1135ddee, 0x1669c7d3, 0x1931e6ea, 0x1c0005f3, 0x1dd6ecca,
+		0x1e29ce7e, 0x209736fc, 0x2692bf1a, 0x27b85aa9, 0x29bb7693, 0x2dc2a047, 0x2e28650a, 0x2f381195,
+		0x350eb3f9, 0x3beed728, 0x3e861cbc, 0x41448cc1, 0x41f08f6d, 0x42fbc48a, 0x4383ab31, 0x4389c61f,
+		0x4540a5ce, 0x49a17405, 0x50372ded, 0x512f0db0, 0x588b6288, 0x5a36aa46, 0x5c29e1fe, 0x6118ab16,
+		0x634705b5, 0x6633d190, 0x6683782f, 0x6728b6e1, 0x67adfb45, 0x68ae2306, 0x6d60f5e1, 0x78af3c4f,
+		0x7dde51ab, 0x7faced21
+	];
+
 	#[test]
 	fn cuckatoo() {
 		let ret = basic_solve::<u32>();

@@ -371,6 +381,14 @@ mod test {
 		if let Err(r) = ret {
 			panic!("validate_29_vectors u64: Error: {}", r);
 		}
+		let ret = validate31_vectors::<u32>();
+		if let Err(r) = ret {
+			panic!("validate_31_vectors u32: Error: {}", r);
+		}
+		let ret = validate31_vectors::<u64>();
+		if let Err(r) = ret {
+			panic!("validate_31_vectors u64: Error: {}", r);
+		}
 		let ret = validate_fail::<u32>();
 		if let Err(r) = ret {
 			panic!("validate_fail u32: Error: {}", r);

@@ -391,6 +409,16 @@ mod test {
 		Ok(())
 	}
 
+	fn validate31_vectors<T>() -> Result<(), Error>
+	where
+		T: EdgeType,
+	{
+		let mut ctx = CuckatooContext::<u32>::new_impl(31, 42, 10).unwrap();
+		ctx.set_header_nonce([0u8; 80].to_vec(), Some(99), false)?;
+		assert!(ctx.verify(&Proof::new(V1_31.to_vec().clone())).is_ok());
+		Ok(())
+	}
+
 	fn validate_fail<T>() -> Result<(), Error>
 	where
 		T: EdgeType,
@@ -22,7 +22,7 @@ use rand::{thread_rng, Rng};
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 
 use crate::consensus::{graph_weight, MIN_DIFFICULTY, SECOND_POW_EDGE_BITS};
-use crate::core::hash::Hashed;
+use crate::core::hash::{DefaultHashable, Hashed};
 use crate::global;
 use crate::ser::{self, FixedLength, Readable, Reader, Writeable, Writer};

@@ -324,6 +324,8 @@ pub struct Proof {
 	pub nonces: Vec<u64>,
 }
 
+impl DefaultHashable for Proof {}
+
 impl fmt::Debug for Proof {
 	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 		write!(f, "Cuckoo{}(", self.edge_bits)?;

@@ -342,7 +344,7 @@ impl Eq for Proof {}
 impl Proof {
 	/// Builds a proof with provided nonces at default edge_bits
 	pub fn new(mut in_nonces: Vec<u64>) -> Proof {
-		in_nonces.sort();
+		in_nonces.sort_unstable();
 		Proof {
 			edge_bits: global::min_edge_bits(),
 			nonces: in_nonces,

@@ -369,7 +371,7 @@ impl Proof {
 			.map(|()| (rng.gen::<u32>() & nonce_mask) as u64)
 			.take(proof_size)
 			.collect();
-		v.sort();
+		v.sort_unstable();
 		Proof {
 			edge_bits: global::min_edge_bits(),
 			nonces: v,
@@ -19,7 +19,7 @@
 //! To use it simply implement `Writeable` or `Readable` and then use the
 //! `serialize` or `deserialize` functions on them as appropriate.
 
-use crate::core::hash::{Hash, Hashed};
+use crate::core::hash::{DefaultHashable, Hash, Hashed};
 use crate::keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
 use crate::util::read_write::read_exact;
 use crate::util::secp::constants::{

@@ -615,7 +615,7 @@ where
 		match elem {
 			Ok(e) => buf.push(e),
 			Err(Error::IOErr(ref _d, ref kind)) if *kind == io::ErrorKind::UnexpectedEof => {
-				break
+				break;
 			}
 			Err(e) => return Err(e),
 		}

@@ -706,7 +706,7 @@ pub trait FixedLength {
 }
 
 /// Trait for types that can be added to a PMMR.
-pub trait PMMRable: Writeable + Clone + Debug {
+pub trait PMMRable: Writeable + Clone + Debug + DefaultHashable {
 	/// The type of element actually stored in the MMR data file.
 	/// This allows us to store Hash elements in the header MMR for variable size BlockHeaders.
 	type E: FixedLength + Readable + Writeable;

@@ -721,7 +721,7 @@ pub trait PMMRIndexHashable {
 	fn hash_with_index(&self, index: u64) -> Hash;
 }
 
-impl<T: Writeable> PMMRIndexHashable for T {
+impl<T: DefaultHashable> PMMRIndexHashable for T {
 	fn hash_with_index(&self, index: u64) -> Hash {
 		(index, self).hash()
 	}
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use self::core::core::hash::Hash;
+use self::core::core::hash::{DefaultHashable, Hash};
 use self::core::core::pmmr::{self, Backend};
 use self::core::core::BlockHeader;
 use self::core::ser;

@@ -25,6 +25,8 @@ use std::path::Path;
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub struct TestElem(pub [u32; 4]);
 
+impl DefaultHashable for TestElem {}
+
 impl FixedLength for TestElem {
 	const LEN: usize = 16;
 }
@@ -26,5 +26,4 @@ ripemd160 = "0.7"
 sha2 = "0.7"
 pbkdf2 = "0.2"
-
 
 grin_util = { path = "../util", version = "1.1.0" }
@@ -5,9 +5,9 @@ extern crate grin_core;
 extern crate grin_p2p;
 
 use grin_core::ser;
-use grin_p2p::msg::SockAddr;
+use grin_p2p::types::PeerAddr;
 
 fuzz_target!(|data: &[u8]| {
 	let mut d = data.clone();
-	let _t: Result<SockAddr, ser::Error> = ser::deserialize(&mut d);
+	let _t: Result<PeerAddr, ser::Error> = ser::deserialize(&mut d);
 });
@@ -22,11 +22,9 @@ use rand::{thread_rng, Rng};
 
 use crate::core::core::hash::Hash;
 use crate::core::pow::Difficulty;
-use crate::msg::{
-	read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT,
-};
+use crate::msg::{read_message, write_message, Hand, Shake, Type, PROTOCOL_VERSION, USER_AGENT};
 use crate::peer::Peer;
-use crate::types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
+use crate::types::{Capabilities, Direction, Error, P2PConfig, PeerAddr, PeerInfo, PeerLiveInfo};
 
 /// Local generated nonce for peer connecting.
 /// Used for self-connecting detection (on receiver side),

@@ -44,7 +42,7 @@ pub struct Handshake {
 	/// a node id.
 	nonces: Arc<RwLock<VecDeque<u64>>>,
 	/// Ring buffer of self addr(s) collected from PeerWithSelf detection (by nonce).
-	pub addrs: Arc<RwLock<VecDeque<SocketAddr>>>,
+	pub addrs: Arc<RwLock<VecDeque<PeerAddr>>>,
 	/// The genesis block header of the chain seen by this node.
 	/// We only want to connect to other nodes seeing the same chain (forks are
 	/// ok).

@@ -67,13 +65,13 @@ impl Handshake {
 		&self,
 		capab: Capabilities,
 		total_difficulty: Difficulty,
-		self_addr: SocketAddr,
+		self_addr: PeerAddr,
 		conn: &mut TcpStream,
 	) -> Result<PeerInfo, Error> {
 		// prepare the first part of the handshake
 		let nonce = self.next_nonce();
 		let peer_addr = match conn.peer_addr() {
-			Ok(pa) => pa,
+			Ok(pa) => PeerAddr(pa),
 			Err(e) => return Err(Error::Connection(e)),
 		};
 

@@ -83,8 +81,8 @@ impl Handshake {
 			nonce: nonce,
 			genesis: self.genesis,
 			total_difficulty: total_difficulty,
-			sender_addr: SockAddr(self_addr),
-			receiver_addr: SockAddr(peer_addr),
+			sender_addr: self_addr,
+			receiver_addr: peer_addr,
 			user_agent: USER_AGENT.to_string(),
 		};
 

@@ -118,7 +116,7 @@ impl Handshake {
 
 		// If denied then we want to close the connection
 		// (without providing our peer with any details why).
-		if Peer::is_denied(&self.config, &peer_info.addr) {
+		if Peer::is_denied(&self.config, peer_info.addr) {
 			return Err(Error::ConnectionClose);
 		}
 

@@ -155,7 +153,7 @@ impl Handshake {
 		} else {
 			// check the nonce to see if we are trying to connect to ourselves
 			let nonces = self.nonces.read();
-			let addr = extract_ip(&hand.sender_addr.0, &conn);
+			let addr = resolve_peer_addr(hand.sender_addr, &conn);
 			if nonces.contains(&hand.nonce) {
 				// save ip addresses of ourselves
 				let mut addrs = self.addrs.write();

@@ -171,7 +169,7 @@ impl Handshake {
 			let peer_info = PeerInfo {
 				capabilities: hand.capabilities,
 				user_agent: hand.user_agent,
-				addr: extract_ip(&hand.sender_addr.0, &conn),
+				addr: resolve_peer_addr(hand.sender_addr, &conn),
 				version: hand.version,
 				live_info: Arc::new(RwLock::new(PeerLiveInfo {
 					total_difficulty: hand.total_difficulty,

@@ -186,7 +184,7 @@ impl Handshake {
 			// so check if we are configured to explicitly allow or deny it.
 			// If denied then we want to close the connection
 			// (without providing our peer with any details why).
-			if Peer::is_denied(&self.config, &peer_info.addr) {
+			if Peer::is_denied(&self.config, peer_info.addr) {
 				return Err(Error::ConnectionClose);
 			}
 

@@ -219,28 +217,12 @@ impl Handshake {
 	}
 }
 
-// Attempts to make a best guess at the correct remote IP by checking if the
-// advertised address is the loopback and our TCP connection. Note that the
-// port reported by the connection is always incorrect for receiving
-// connections as it's dynamically allocated by the server.
-fn extract_ip(advertised: &SocketAddr, conn: &TcpStream) -> SocketAddr {
-	match advertised {
-		&SocketAddr::V4(v4sock) => {
-			let ip = v4sock.ip();
-			if ip.is_loopback() || ip.is_unspecified() {
-				if let Ok(addr) = conn.peer_addr() {
-					return SocketAddr::new(addr.ip(), advertised.port());
-				}
-			}
-		}
-		&SocketAddr::V6(v6sock) => {
-			let ip = v6sock.ip();
-			if ip.is_loopback() || ip.is_unspecified() {
-				if let Ok(addr) = conn.peer_addr() {
-					return SocketAddr::new(addr.ip(), advertised.port());
-				}
-			}
-		}
-	}
-	advertised.clone()
+/// Resolve the correct peer_addr based on the connection and the advertised port.
+fn resolve_peer_addr(advertised: PeerAddr, conn: &TcpStream) -> PeerAddr {
+	let port = advertised.0.port();
+	if let Ok(addr) = conn.peer_addr() {
+		PeerAddr(SocketAddr::new(addr.ip(), port))
+	} else {
+		advertised
+	}
 }
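resolve_peer_addr above keeps the IP observed on the live TCP connection but the port the peer advertised, because an inbound connection arrives from an ephemeral source port that is useless for dialing the peer back. A self-contained sketch of that rule (the TcpStream lookup is replaced by an Option so the example runs standalone):

use std::net::SocketAddr;

fn resolve(advertised: SocketAddr, conn_peer: Option<SocketAddr>) -> SocketAddr {
	match conn_peer {
		Some(seen) => SocketAddr::new(seen.ip(), advertised.port()),
		None => advertised,
	}
}

fn main() {
	let advertised: SocketAddr = "10.0.0.5:3414".parse().unwrap();
	let seen: SocketAddr = "203.0.113.7:52811".parse().unwrap(); // ephemeral source port
	let expected: SocketAddr = "203.0.113.7:3414".parse().unwrap();
	// IP comes from the live connection, port from the advertisement.
	assert_eq!(resolve(advertised, Some(seen)), expected);
}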
@@ -52,6 +52,6 @@ pub use crate::peers::Peers;
 pub use crate::serv::{DummyAdapter, Server};
 pub use crate::store::{PeerData, State};
 pub use crate::types::{
-	Capabilities, ChainAdapter, Direction, Error, P2PConfig, PeerInfo, ReasonForBan, Seeding,
-	TxHashSetRead, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
+	Capabilities, ChainAdapter, Direction, Error, P2PConfig, PeerAddr, PeerInfo, ReasonForBan,
+	Seeding, TxHashSetRead, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
 };
@@ -16,7 +16,6 @@
 
 use num::FromPrimitive;
 use std::io::{Read, Write};
-use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
 use std::time;
 
 use crate::core::core::hash::Hash;

@@ -25,7 +24,7 @@ use crate::core::pow::Difficulty;
 use crate::core::ser::{self, FixedLength, Readable, Reader, StreamingReader, Writeable, Writer};
 use crate::core::{consensus, global};
 use crate::types::{
-	Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
+	Capabilities, Error, PeerAddr, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS,
 };
 use crate::util::read_write::read_exact;
 

@@ -254,9 +253,9 @@ pub struct Hand {
 	/// may be needed
 	pub total_difficulty: Difficulty,
 	/// network address of the sender
-	pub sender_addr: SockAddr,
+	pub sender_addr: PeerAddr,
 	/// network address of the receiver
-	pub receiver_addr: SockAddr,
+	pub receiver_addr: PeerAddr,
 	/// name of version of the software
 	pub user_agent: String,
 }

@@ -283,8 +282,8 @@ impl Readable for Hand {
 		let (version, capab, nonce) = ser_multiread!(reader, read_u32, read_u32, read_u64);
 		let capabilities = Capabilities::from_bits_truncate(capab);
 		let total_diff = Difficulty::read(reader)?;
-		let sender_addr = SockAddr::read(reader)?;
-		let receiver_addr = SockAddr::read(reader)?;
+		let sender_addr = PeerAddr::read(reader)?;
+		let receiver_addr = PeerAddr::read(reader)?;
 		let ua = reader.read_bytes_len_prefix()?;
 		let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
 		let genesis = Hash::read(reader)?;

@@ -373,7 +372,7 @@ impl Readable for GetPeerAddrs {
 /// GetPeerAddrs.
 #[derive(Debug)]
 pub struct PeerAddrs {
-	pub peers: Vec<SockAddr>,
+	pub peers: Vec<PeerAddr>,
 }
 
 impl Writeable for PeerAddrs {

@@ -394,10 +393,9 @@ impl Readable for PeerAddrs {
 		} else if peer_count == 0 {
 			return Ok(PeerAddrs { peers: vec![] });
 		}
-		// let peers = try_map_vec!([0..peer_count], |_| SockAddr::read(reader));
 		let mut peers = Vec::with_capacity(peer_count as usize);
 		for _ in 0..peer_count {
-			peers.push(SockAddr::read(reader)?);
+			peers.push(PeerAddr::read(reader)?);
 		}
 		Ok(PeerAddrs { peers: peers })
 	}

@@ -431,58 +429,6 @@ impl Readable for PeerError {
 	}
 }
 
-/// Only necessary so we can implement Readable and Writeable. Rust disallows
-/// implementing traits when both types are outside of this crate (which is the
-/// case for SocketAddr and Readable/Writeable).
-#[derive(Debug)]
-pub struct SockAddr(pub SocketAddr);
-
-impl Writeable for SockAddr {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		match self.0 {
-			SocketAddr::V4(sav4) => {
-				ser_multiwrite!(
-					writer,
-					[write_u8, 0],
-					[write_fixed_bytes, &sav4.ip().octets().to_vec()],
-					[write_u16, sav4.port()]
-				);
-			}
-			SocketAddr::V6(sav6) => {
-				writer.write_u8(1)?;
-				for seg in &sav6.ip().segments() {
-					writer.write_u16(*seg)?;
-				}
-				writer.write_u16(sav6.port())?;
-			}
-		}
-		Ok(())
-	}
-}
-
-impl Readable for SockAddr {
-	fn read(reader: &mut dyn Reader) -> Result<SockAddr, ser::Error> {
-		let v4_or_v6 = reader.read_u8()?;
-		if v4_or_v6 == 0 {
-			let ip = reader.read_fixed_bytes(4)?;
-			let port = reader.read_u16()?;
-			Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(
-				Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]),
-				port,
-			))))
-		} else {
-			let ip = try_iter_map_vec!(0..8, |_| reader.read_u16());
-			let port = reader.read_u16()?;
-			Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(
-				Ipv6Addr::new(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]),
-				port,
-				0,
-				0,
-			))))
-		}
-	}
-}
-
 /// Serializable wrapper for the block locator.
 #[derive(Debug)]
 pub struct Locator {
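The deleted SockAddr wrapper existed only because of Rust's orphan rule, as its doc comment says: a crate cannot implement a foreign trait (Readable/Writeable) for a foreign type (SocketAddr), so a local newtype is the standard workaround. PeerAddr in p2p/src/types.rs (further below) now plays that role and carries the peer-identity semantics as well. A minimal illustration of the newtype workaround, with a stand-in trait:

use std::net::SocketAddr;

// Stand-in for the crate-local ser::Writeable trait.
trait Writeable {
	fn write_bytes(&self) -> Vec<u8>;
}

// A local newtype over the foreign std type makes the impl below legal.
struct PeerAddr(pub SocketAddr);

impl Writeable for PeerAddr {
	fn write_bytes(&self) -> Vec<u8> {
		// The real encoding tags v4/v6 with a leading byte; simplified here.
		self.0.to_string().into_bytes()
	}
}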
@@ -14,7 +14,7 @@
 
 use crate::util::{Mutex, RwLock};
 use std::fs::File;
-use std::net::{Shutdown, SocketAddr, TcpStream};
+use std::net::{Shutdown, TcpStream};
 use std::sync::Arc;
 
 use crate::conn;

@@ -25,7 +25,8 @@ use crate::handshake::Handshake;
 use crate::msg::{self, BanReason, GetPeerAddrs, Locator, Ping, TxHashSetRequest};
 use crate::protocol::Protocol;
 use crate::types::{
-	Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
+	Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan,
+	TxHashSetRead,
 };
 use chrono::prelude::{DateTime, Utc};
 

@@ -93,7 +94,7 @@ impl Peer {
 		conn: &mut TcpStream,
 		capab: Capabilities,
 		total_difficulty: Difficulty,
-		self_addr: SocketAddr,
+		self_addr: PeerAddr,
 		hs: &Handshake,
 		na: Arc<dyn NetAdapter>,
 	) -> Result<Peer, Error> {

@@ -124,10 +125,9 @@ impl Peer {
 		self.connection = Some(Mutex::new(conn::listen(conn, handler)));
 	}
 
-	pub fn is_denied(config: &P2PConfig, peer_addr: &SocketAddr) -> bool {
-		let peer = format!("{}:{}", peer_addr.ip(), peer_addr.port());
+	pub fn is_denied(config: &P2PConfig, peer_addr: PeerAddr) -> bool {
 		if let Some(ref denied) = config.peers_deny {
-			if denied.contains(&peer) {
+			if denied.contains(&peer_addr) {
 				debug!(
 					"checking peer allowed/denied: {:?} explicitly denied",
 					peer_addr

@@ -136,7 +136,7 @@ impl Peer {
 			}
 		}
 		if let Some(ref allowed) = config.peers_allow {
-			if allowed.contains(&peer) {
+			if allowed.contains(&peer_addr) {
 				debug!(
 					"checking peer allowed/denied: {:?} explicitly allowed",
 					peer_addr

@@ -566,7 +566,7 @@ impl ChainAdapter for TrackingAdapter {
 		self.adapter.get_transaction(kernel_hash)
 	}
 
-	fn tx_kernel_received(&self, kernel_hash: Hash, addr: SocketAddr) {
+	fn tx_kernel_received(&self, kernel_hash: Hash, addr: PeerAddr) {
 		self.push_recv(kernel_hash);
 		self.adapter.tx_kernel_received(kernel_hash, addr)
 	}

@@ -582,23 +582,23 @@ impl ChainAdapter for TrackingAdapter {
 		self.adapter.transaction_received(tx, stem)
 	}
 
-	fn block_received(&self, b: core::Block, addr: SocketAddr, _was_requested: bool) -> bool {
+	fn block_received(&self, b: core::Block, addr: PeerAddr, _was_requested: bool) -> bool {
 		let bh = b.hash();
 		self.push_recv(bh);
 		self.adapter.block_received(b, addr, self.has_req(bh))
 	}
 
-	fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
+	fn compact_block_received(&self, cb: core::CompactBlock, addr: PeerAddr) -> bool {
 		self.push_recv(cb.hash());
 		self.adapter.compact_block_received(cb, addr)
 	}
 
-	fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
+	fn header_received(&self, bh: core::BlockHeader, addr: PeerAddr) -> bool {
 		self.push_recv(bh.hash());
 		self.adapter.header_received(bh, addr)
 	}
 
-	fn headers_received(&self, bh: &[core::BlockHeader], addr: SocketAddr) -> bool {
+	fn headers_received(&self, bh: &[core::BlockHeader], addr: PeerAddr) -> bool {
 		self.adapter.headers_received(bh, addr)
 	}
 

@@ -618,7 +618,7 @@ impl ChainAdapter for TrackingAdapter {
 		self.adapter.txhashset_receive_ready()
 	}
 
-	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
+	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: PeerAddr) -> bool {
 		self.adapter.txhashset_write(h, txhashset_data, peer_addr)
 	}
 

@@ -634,19 +634,19 @@ impl ChainAdapter for TrackingAdapter {
 }
 
 impl NetAdapter for TrackingAdapter {
-	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
+	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr> {
 		self.adapter.find_peer_addrs(capab)
 	}
 
-	fn peer_addrs_received(&self, addrs: Vec<SocketAddr>) {
+	fn peer_addrs_received(&self, addrs: Vec<PeerAddr>) {
 		self.adapter.peer_addrs_received(addrs)
 	}
 
-	fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
+	fn peer_difficulty(&self, addr: PeerAddr, diff: Difficulty, height: u64) {
 		self.adapter.peer_difficulty(addr, diff, height)
 	}
 
-	fn is_banned(&self, addr: SocketAddr) -> bool {
+	fn is_banned(&self, addr: PeerAddr) -> bool {
 		self.adapter.is_banned(addr)
 	}
 }
p2p/src/peers.rs (175 lines changed)
@@ -15,7 +15,6 @@
 use crate::util::RwLock;
 use std::collections::HashMap;
 use std::fs::File;
-use std::net::SocketAddr;
 use std::sync::Arc;
 
 use rand::{thread_rng, Rng};

@@ -30,15 +29,15 @@ use chrono::Duration;
 use crate::peer::Peer;
 use crate::store::{PeerData, PeerStore, State};
 use crate::types::{
-	Capabilities, ChainAdapter, Direction, Error, NetAdapter, P2PConfig, ReasonForBan,
+	Capabilities, ChainAdapter, Direction, Error, NetAdapter, P2PConfig, PeerAddr, ReasonForBan,
 	TxHashSetRead, MAX_PEER_ADDRS,
 };
 
 pub struct Peers {
 	pub adapter: Arc<dyn ChainAdapter>,
 	store: PeerStore,
-	peers: RwLock<HashMap<SocketAddr, Arc<Peer>>>,
-	dandelion_relay: RwLock<HashMap<i64, Arc<Peer>>>,
+	peers: RwLock<HashMap<PeerAddr, Arc<Peer>>>,
+	dandelion_relay: RwLock<Option<(i64, Arc<Peer>)>>,
 	config: P2PConfig,
 }
 

@@ -49,40 +48,32 @@ impl Peers {
 			store,
 			config,
 			peers: RwLock::new(HashMap::new()),
-			dandelion_relay: RwLock::new(HashMap::new()),
+			dandelion_relay: RwLock::new(None),
 		}
 	}
 
 	/// Adds the peer to our internal peer mapping. Note that the peer is still
 	/// returned so the server can run it.
 	pub fn add_connected(&self, peer: Arc<Peer>) -> Result<(), Error> {
-		let peer_data: PeerData;
-		let addr: SocketAddr;
-		{
-			peer_data = PeerData {
-				addr: peer.info.addr,
-				capabilities: peer.info.capabilities,
-				user_agent: peer.info.user_agent.clone(),
-				flags: State::Healthy,
-				last_banned: 0,
-				ban_reason: ReasonForBan::None,
-				last_connected: Utc::now().timestamp(),
-			};
-			addr = peer.info.addr.clone();
-		}
-		debug!("Saving newly connected peer {}.", addr);
+		let peer_data = PeerData {
+			addr: peer.info.addr,
+			capabilities: peer.info.capabilities,
+			user_agent: peer.info.user_agent.clone(),
+			flags: State::Healthy,
+			last_banned: 0,
+			ban_reason: ReasonForBan::None,
+			last_connected: Utc::now().timestamp(),
+		};
+		debug!("Saving newly connected peer {}.", peer_data.addr);
 		self.save_peer(&peer_data)?;
+		self.peers.write().insert(peer_data.addr, peer.clone());
 
-		{
-			let mut peers = self.peers.write();
-			peers.insert(addr, peer.clone());
-		}
 		Ok(())
 	}
 
 	/// Add a peer as banned to block future connections, usually due to failed
 	/// handshake
-	pub fn add_banned(&self, addr: SocketAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
+	pub fn add_banned(&self, addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
 		let peer_data = PeerData {
 			addr,
 			capabilities: Capabilities::UNKNOWN,

@@ -115,10 +106,9 @@ impl Peers {
 	fn set_dandelion_relay(&self, peer: &Arc<Peer>) {
 		// Clear the map and add new relay
 		let dandelion_relay = &self.dandelion_relay;
-		dandelion_relay.write().clear();
 		dandelion_relay
 			.write()
-			.insert(Utc::now().timestamp(), peer.clone());
+			.replace((Utc::now().timestamp(), peer.clone()));
 		debug!(
 			"Successfully updated Dandelion relay to: {}",
 			peer.info.addr

@@ -126,22 +116,12 @@ impl Peers {
 	}
 
 	// Get the dandelion relay
-	pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
+	pub fn get_dandelion_relay(&self) -> Option<(i64, Arc<Peer>)> {
 		self.dandelion_relay.read().clone()
 	}
 
-	pub fn is_known(&self, addr: &SocketAddr) -> bool {
-		self.peers.read().contains_key(addr)
-	}
-
-	/// Check whether an ip address is in the active peers list, ignore the port
-	pub fn is_known_ip(&self, addr: &SocketAddr) -> bool {
-		for socket in self.peers.read().keys() {
-			if addr.ip() == socket.ip() {
-				return true;
-			}
-		}
-		return false;
+	pub fn is_known(&self, addr: PeerAddr) -> bool {
+		self.peers.read().contains_key(&addr)
 	}
 
 	/// Get vec of peers we are currently connected to.

@@ -167,8 +147,8 @@ impl Peers {
 	}
 
 	/// Get a peer we're connected to by address.
-	pub fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<Peer>> {
-		self.peers.read().get(addr).map(|p| p.clone())
+	pub fn get_connected_peer(&self, addr: PeerAddr) -> Option<Arc<Peer>> {
+		self.peers.read().get(&addr).map(|p| p.clone())
 	}
 
 	/// Number of peers currently connected to.

@@ -258,31 +238,18 @@ impl Peers {
 		self.most_work_peers().pop()
 	}
 
-	pub fn is_banned(&self, peer_addr: SocketAddr) -> bool {
-		if global::is_production_mode() {
-			// Ban only cares about ip address, no mather what port.
-			// so, we query all saved peers with one same ip address, and ignore port
-			let peers_data = self.store.find_peers_by_ip(peer_addr);
-			for peer_data in peers_data {
-				if peer_data.flags == State::Banned {
-					return true;
-				}
-			}
-		} else {
-			// For travis-ci test, we need run multiple nodes in one server, with same ip address.
-			// so, just query the ip address and the port
-			if let Ok(peer_data) = self.store.get_peer(peer_addr) {
-				if peer_data.flags == State::Banned {
-					return true;
-				}
-			}
+	pub fn is_banned(&self, peer_addr: PeerAddr) -> bool {
+		if let Ok(peer) = self.store.get_peer(peer_addr) {
+			if peer.flags == State::Banned {
+				return true;
+			}
 		}
 		false
 	}
 
 	/// Ban a peer, disconnecting it if we're currently connected
-	pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
-		if let Err(e) = self.update_state(*peer_addr, State::Banned) {
+	pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) {
+		if let Err(e) = self.update_state(peer_addr, State::Banned) {
 			error!("Couldn't ban {}: {:?}", peer_addr, e);
 		}
 

@@ -296,12 +263,12 @@ impl Peers {
 	}
 
 	/// Unban a peer, checks if it exists and banned then unban
-	pub fn unban_peer(&self, peer_addr: &SocketAddr) {
+	pub fn unban_peer(&self, peer_addr: PeerAddr) {
 		debug!("unban_peer: peer {}", peer_addr);
-		match self.get_peer(*peer_addr) {
+		match self.get_peer(peer_addr) {
 			Ok(_) => {
-				if self.is_banned(*peer_addr) {
-					if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
+				if self.is_banned(peer_addr) {
+					if let Err(e) = self.update_state(peer_addr, State::Healthy) {
 						error!("Couldn't unban {}: {:?}", peer_addr, e);
 					}
 				} else {

@@ -370,24 +337,22 @@ impl Peers {
 
 	/// Relays the provided stem transaction to our single stem peer.
 	pub fn relay_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
-		let dandelion_relay = self.get_dandelion_relay();
-		if dandelion_relay.is_empty() {
-			debug!("No dandelion relay, updating.");
-			self.update_dandelion_relay();
-		}
-		// If still return an error, let the caller handle this as they see fit.
-		// The caller will "fluff" at this point as the stem phase is finished.
-		if dandelion_relay.is_empty() {
-			return Err(Error::NoDandelionRelay);
-		}
-		for relay in dandelion_relay.values() {
-			if relay.is_connected() {
-				if let Err(e) = relay.send_stem_transaction(tx) {
-					debug!("Error sending stem transaction to peer relay: {:?}", e);
-				}
-			}
-		}
-		Ok(())
+		self.get_dandelion_relay()
+			.or_else(|| {
+				debug!("No dandelion relay, updating.");
+				self.update_dandelion_relay();
+				self.get_dandelion_relay()
+			})
+			// If still return an error, let the caller handle this as they see fit.
+			// The caller will "fluff" at this point as the stem phase is finished.
+			.ok_or(Error::NoDandelionRelay)
+			.map(|(_, relay)| {
+				if relay.is_connected() {
+					if let Err(e) = relay.send_stem_transaction(tx) {
+						debug!("Error sending stem transaction to peer relay: {:?}", e);
+					}
+				}
+			})
 	}
 
 	/// Broadcasts the provided transaction to PEER_PREFERRED_COUNT of our

@@ -427,12 +392,12 @@ impl Peers {
 	}
 
 	/// Get peer in store by address
-	pub fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {
+	pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
 		self.store.get_peer(peer_addr).map_err(From::from)
 	}
 
 	/// Whether we've already seen a peer with the provided address
-	pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
+	pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
 		self.store.exists_peer(peer_addr).map_err(From::from)
 	}
 

@@ -442,7 +407,7 @@ impl Peers {
 	}
 
 	/// Updates the state of a peer in store
-	pub fn update_state(&self, peer_addr: SocketAddr, new_state: State) -> Result<(), Error> {
+	pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
 		self.store
 			.update_state(peer_addr, new_state)
 			.map_err(From::from)

@@ -498,9 +463,9 @@ impl Peers {
 		// now clean up peer map based on the list to remove
 		{
 			let mut peers = self.peers.write();
-			for p in rm {
-				let _ = peers.get(&p).map(|p| p.stop());
-				peers.remove(&p);
+			for addr in rm {
+				let _ = peers.get(&addr).map(|peer| peer.stop());
+				peers.remove(&addr);
 			}
 		}
 	}

@@ -561,7 +526,7 @@ impl ChainAdapter for Peers {
 		self.adapter.get_transaction(kernel_hash)
 	}
 
-	fn tx_kernel_received(&self, kernel_hash: Hash, addr: SocketAddr) {
+	fn tx_kernel_received(&self, kernel_hash: Hash, addr: PeerAddr) {
 		self.adapter.tx_kernel_received(kernel_hash, addr)
 	}
 

@@ -569,7 +534,7 @@ impl ChainAdapter for Peers {
 		self.adapter.transaction_received(tx, stem)
 	}
 
-	fn block_received(&self, b: core::Block, peer_addr: SocketAddr, was_requested: bool) -> bool {
+	fn block_received(&self, b: core::Block, peer_addr: PeerAddr, was_requested: bool) -> bool {
 		let hash = b.hash();
 		if !self.adapter.block_received(b, peer_addr, was_requested) {
 			// if the peer sent us a block that's intrinsically bad

@@ -578,45 +543,45 @@ impl ChainAdapter for Peers {
 				"Received a bad block {} from {}, the peer will be banned",
 				hash, peer_addr
 			);
-			self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
+			self.ban_peer(peer_addr, ReasonForBan::BadBlock);
 			false
 		} else {
 			true
 		}
 	}
 
-	fn compact_block_received(&self, cb: core::CompactBlock, peer_addr: SocketAddr) -> bool {
+	fn compact_block_received(&self, cb: core::CompactBlock, peer_addr: PeerAddr) -> bool {
 		let hash = cb.hash();
 		if !self.adapter.compact_block_received(cb, peer_addr) {
 			// if the peer sent us a block that's intrinsically bad
 			// they are either mistaken or malevolent, both of which require a ban
 			debug!(
 				"Received a bad compact block {} from {}, the peer will be banned",
-				hash, &peer_addr
+				hash, peer_addr
 			);
-			self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
+			self.ban_peer(peer_addr, ReasonForBan::BadCompactBlock);
 			false
 		} else {
 			true
 		}
 	}
 
-	fn header_received(&self, bh: core::BlockHeader, peer_addr: SocketAddr) -> bool {
+	fn header_received(&self, bh: core::BlockHeader, peer_addr: PeerAddr) -> bool {
 		if !self.adapter.header_received(bh, peer_addr) {
 			// if the peer sent us a block header that's intrinsically bad
 			// they are either mistaken or malevolent, both of which require a ban
-			self.ban_peer(&peer_addr, ReasonForBan::BadBlockHeader);
+			self.ban_peer(peer_addr, ReasonForBan::BadBlockHeader);
 			false
 		} else {
 			true
 		}
 	}
 
-	fn headers_received(&self, headers: &[core::BlockHeader], peer_addr: SocketAddr) -> bool {
+	fn headers_received(&self, headers: &[core::BlockHeader], peer_addr: PeerAddr) -> bool {
 		if !self.adapter.headers_received(headers, peer_addr) {
 			// if the peer sent us a block header that's intrinsically bad
 			// they are either mistaken or malevolent, both of which require a ban
-			self.ban_peer(&peer_addr, ReasonForBan::BadBlockHeader);
+			self.ban_peer(peer_addr, ReasonForBan::BadBlockHeader);
 			false
 		} else {
 			true

@@ -639,13 +604,13 @@ impl ChainAdapter for Peers {
 		self.adapter.txhashset_receive_ready()
 	}
 
-	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
+	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: PeerAddr) -> bool {
 		if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
 			debug!(
 				"Received a bad txhashset data from {}, the peer will be banned",
 				&peer_addr
 			);
-			self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
+			self.ban_peer(peer_addr, ReasonForBan::BadTxHashSet);
 			false
 		} else {
 			true

@@ -666,14 +631,14 @@ impl ChainAdapter for Peers {
 impl NetAdapter for Peers {
 	/// Find good peers we know with the provided capability and return their
 	/// addresses.
-	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
+	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr> {
 		let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
 		trace!("find_peer_addrs: {} healthy peers picked", peers.len());
 		map_vec!(peers, |p| p.addr)
 	}
 
 	/// A list of peers has been received from one of our peers.
-	fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
+	fn peer_addrs_received(&self, peer_addrs: Vec<PeerAddr>) {
 		trace!("Received {} peer addrs, saving.", peer_addrs.len());
 		for pa in peer_addrs {
 			if let Ok(e) = self.exists_peer(pa) {

@@ -696,13 +661,13 @@ impl NetAdapter for Peers {
 		}
 	}
 
-	fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
-		if let Some(peer) = self.get_connected_peer(&addr) {
+	fn peer_difficulty(&self, addr: PeerAddr, diff: Difficulty, height: u64) {
+		if let Some(peer) = self.get_connected_peer(addr) {
 			peer.info.update(height, diff);
 		}
 	}
 
-	fn is_banned(&self, addr: SocketAddr) -> bool {
+	fn is_banned(&self, addr: PeerAddr) -> bool {
 		if let Ok(peer) = self.get_peer(addr) {
 			peer.flags == State::Banned
 		} else {
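Two peers.rs changes above are worth a note. dandelion_relay was a HashMap<i64, Arc<Peer>> that only ever held one entry; modelling it as Option<(i64, Arc<Peer>)> makes the zero-or-one invariant explicit and lets relay_stem_transaction collapse into the or_else/ok_or/map chain in the diff. A runnable sketch of that chain, with String standing in for Arc<Peer>:

fn pick_relay(
	current: Option<(i64, String)>,
	refreshed: Option<(i64, String)>,
) -> Result<String, &'static str> {
	current
		// No relay yet: try one refresh (stands in for update_dandelion_relay()).
		.or_else(|| refreshed)
		// Still nothing: report it; the caller "fluffs" the transaction instead.
		.ok_or("NoDandelionRelay")
		.map(|(_, relay)| relay)
}

fn main() {
	assert_eq!(pick_relay(None, Some((1, "peer".into()))), Ok("peer".to_string()));
	assert_eq!(pick_relay(None, None), Err("NoDandelionRelay"));
}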
@@ -16,7 +16,6 @@ use std::cmp;
 use std::env;
 use std::fs::File;
 use std::io::{BufWriter, Write};
-use std::net::SocketAddr;
 use std::sync::Arc;
 
 use crate::conn::{Message, MessageHandler, Response};

@@ -25,18 +24,18 @@ use crate::util::{RateCounter, RwLock};
 use chrono::prelude::Utc;
 
 use crate::msg::{
-	BanReason, GetPeerAddrs, Headers, Locator, PeerAddrs, Ping, Pong, SockAddr, TxHashSetArchive,
+	BanReason, GetPeerAddrs, Headers, Locator, PeerAddrs, Ping, Pong, TxHashSetArchive,
 	TxHashSetRequest, Type,
 };
-use crate::types::{Error, NetAdapter};
+use crate::types::{Error, NetAdapter, PeerAddr};
 
 pub struct Protocol {
 	adapter: Arc<dyn NetAdapter>,
-	addr: SocketAddr,
+	addr: PeerAddr,
 }
 
 impl Protocol {
-	pub fn new(adapter: Arc<dyn NetAdapter>, addr: SocketAddr) -> Protocol {
+	pub fn new(adapter: Arc<dyn NetAdapter>, addr: PeerAddr) -> Protocol {
 		Protocol { adapter, addr }
 	}
 }

@@ -231,19 +230,17 @@ impl MessageHandler for Protocol {
 
 			Type::GetPeerAddrs => {
 				let get_peers: GetPeerAddrs = msg.body()?;
-				let peer_addrs = adapter.find_peer_addrs(get_peers.capabilities);
+				let peers = adapter.find_peer_addrs(get_peers.capabilities);
 				Ok(Some(Response::new(
 					Type::PeerAddrs,
-					PeerAddrs {
-						peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
-					},
+					PeerAddrs { peers },
 					writer,
 				)))
 			}
 
 			Type::PeerAddrs => {
 				let peer_addrs: PeerAddrs = msg.body()?;
-				adapter.peer_addrs_received(peer_addrs.peers.iter().map(|pa| pa.0).collect());
+				adapter.peer_addrs_received(peer_addrs.peers);
 				Ok(None)
 			}
 
@@ -29,7 +29,7 @@ use crate::peer::Peer;
 use crate::peers::Peers;
 use crate::store::PeerStore;
 use crate::types::{
-	Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, ReasonForBan, Seeding, TxHashSetRead,
+	Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, ReasonForBan, TxHashSetRead,
 };
 use crate::util::{Mutex, StopState};
 use chrono::prelude::{DateTime, Utc};

@@ -82,6 +82,8 @@ impl Server {
 
 			match listener.accept() {
 				Ok((stream, peer_addr)) => {
+					let peer_addr = PeerAddr(peer_addr);
+
 					if self.check_undesirable(&stream) {
 						continue;
 					}

@@ -107,8 +109,8 @@ impl Server {
 
 	/// Asks the server to connect to a new peer. Directly returns the peer if
 	/// we're already connected to the provided address.
-	pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> {
-		if Peer::is_denied(&self.config, &addr) {
+	pub fn connect(&self, addr: PeerAddr) -> Result<Arc<Peer>, Error> {
+		if Peer::is_denied(&self.config, addr) {
 			debug!("connect_peer: peer {} denied, not connecting.", addr);
 			return Err(Error::ConnectionClose);
 		}

@@ -134,7 +136,7 @@ impl Server {
 			self.config.port,
 			addr
 		);
-		match TcpStream::connect_timeout(addr, Duration::from_secs(10)) {
+		match TcpStream::connect_timeout(&addr.0, Duration::from_secs(10)) {
 			Ok(mut stream) => {
 				let addr = SocketAddr::new(self.config.host, self.config.port);
 				let total_diff = self.peers.total_difficulty();

@@ -143,7 +145,7 @@ impl Server {
 					&mut stream,
 					self.capabilities,
 					total_diff,
-					addr,
+					PeerAddr(addr),
 					&self.handshake,
 					self.peers.clone(),
 				)?;

@@ -191,13 +193,17 @@ impl Server {
 	/// different sets of peers themselves. In addition, it prevent potential
 	/// duplicate connections, malicious or not.
 	fn check_undesirable(&self, stream: &TcpStream) -> bool {
 		// peer has been banned, go away!
 		if let Ok(peer_addr) = stream.peer_addr() {
-			let banned = self.peers.is_banned(peer_addr);
-			let known_ip =
-				self.peers.is_known_ip(&peer_addr) && self.config.seeding_type == Seeding::DNSSeed;
-			if banned || known_ip {
-				debug!("Peer {} banned or known, refusing connection.", peer_addr);
+			let peer_addr = PeerAddr(peer_addr);
+			if self.peers.is_banned(peer_addr) {
+				debug!("Peer {} banned, refusing connection.", peer_addr);
 				if let Err(e) = stream.shutdown(Shutdown::Both) {
 					debug!("Error shutting down conn: {:?}", e);
 				}
 				return true;
 			}
+			if self.peers.is_known(peer_addr) {
+				debug!("Peer {} already known, refusing connection.", peer_addr);
+				if let Err(e) = stream.shutdown(Shutdown::Both) {
+					debug!("Error shutting down conn: {:?}", e);
+				}

@@ -234,18 +240,18 @@ impl ChainAdapter for DummyAdapter {
 	fn get_transaction(&self, _h: Hash) -> Option<core::Transaction> {
 		None
 	}
-	fn tx_kernel_received(&self, _h: Hash, _addr: SocketAddr) {}
+	fn tx_kernel_received(&self, _h: Hash, _addr: PeerAddr) {}
 	fn transaction_received(&self, _: core::Transaction, _stem: bool) {}
-	fn compact_block_received(&self, _cb: core::CompactBlock, _addr: SocketAddr) -> bool {
+	fn compact_block_received(&self, _cb: core::CompactBlock, _addr: PeerAddr) -> bool {
 		true
 	}
-	fn header_received(&self, _bh: core::BlockHeader, _addr: SocketAddr) -> bool {
+	fn header_received(&self, _bh: core::BlockHeader, _addr: PeerAddr) -> bool {
 		true
 	}
-	fn block_received(&self, _: core::Block, _: SocketAddr, _: bool) -> bool {
+	fn block_received(&self, _: core::Block, _: PeerAddr, _: bool) -> bool {
 		true
 	}
-	fn headers_received(&self, _: &[core::BlockHeader], _: SocketAddr) -> bool {
+	fn headers_received(&self, _: &[core::BlockHeader], _: PeerAddr) -> bool {
 		true
 	}
 	fn locate_headers(&self, _: &[Hash]) -> Vec<core::BlockHeader> {

@@ -262,7 +268,7 @@ impl ChainAdapter for DummyAdapter {
 		false
 	}
 
-	fn txhashset_write(&self, _h: Hash, _txhashset_data: File, _peer_addr: SocketAddr) -> bool {
+	fn txhashset_write(&self, _h: Hash, _txhashset_data: File, _peer_addr: PeerAddr) -> bool {
 		false
 	}
 

@@ -277,12 +283,12 @@ impl ChainAdapter for DummyAdapter {
 }
 
 impl NetAdapter for DummyAdapter {
-	fn find_peer_addrs(&self, _: Capabilities) -> Vec<SocketAddr> {
+	fn find_peer_addrs(&self, _: Capabilities) -> Vec<PeerAddr> {
 		vec![]
 	}
-	fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
-	fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _: u64) {}
-	fn is_banned(&self, _: SocketAddr) -> bool {
+	fn peer_addrs_received(&self, _: Vec<PeerAddr>) {}
+	fn peer_difficulty(&self, _: PeerAddr, _: Difficulty, _: u64) {}
+	fn is_banned(&self, _: PeerAddr) -> bool {
 		false
 	}
 }
@@ -17,19 +17,17 @@
 use chrono::Utc;
 use num::FromPrimitive;
 use rand::{thread_rng, Rng};
-use std::net::SocketAddr;
 use std::sync::Arc;
 
 use crate::lmdb;
 
 use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
-use crate::msg::SockAddr;
-use crate::types::{Capabilities, ReasonForBan};
+use crate::types::{Capabilities, PeerAddr, ReasonForBan};
 use grin_store::{self, option_to_not_found, to_key, Error};
 
 const STORE_SUBPATH: &'static str = "peers";
 
-const PEER_PREFIX: u8 = 'p' as u8;
+const PEER_PREFIX: u8 = 'P' as u8;
 
 /// Types of messages
 enum_from_primitive! {

@@ -45,7 +43,7 @@ enum_from_primitive! {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct PeerData {
 	/// Network address of the peer.
-	pub addr: SocketAddr,
+	pub addr: PeerAddr,
 	/// What capabilities the peer advertises. Unknown until a successful
 	/// connection.
 	pub capabilities: Capabilities,

@@ -63,7 +61,7 @@ pub struct PeerData {
 
 impl Writeable for PeerData {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		SockAddr(self.addr).write(writer)?;
+		self.addr.write(writer)?;
 		ser_multiwrite!(
 			writer,
 			[write_u32, self.capabilities.bits()],

@@ -79,7 +77,7 @@ impl Writeable for PeerData {
 
 impl Readable for PeerData {
 	fn read(reader: &mut dyn Reader) -> Result<PeerData, ser::Error> {
-		let addr = SockAddr::read(reader)?;
+		let addr = PeerAddr::read(reader)?;
 		let capab = reader.read_u32()?;
 		let ua = reader.read_bytes_len_prefix()?;
 		let (fl, lb, br) = ser_multiread!(reader, read_u8, read_i64, read_i32);

@@ -99,7 +97,7 @@ impl Readable for PeerData {
 
 		match State::from_u8(fl) {
 			Some(flags) => Ok(PeerData {
-				addr: addr.0,
+				addr,
 				capabilities,
 				user_agent,
 				flags: flags,

@@ -132,20 +130,20 @@ impl PeerStore {
 		batch.commit()
 	}
 
-	pub fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {
+	pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
 		option_to_not_found(
 			self.db.get_ser(&peer_key(peer_addr)[..]),
 			&format!("Peer at address: {}", peer_addr),
 		)
 	}
 
-	pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
+	pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
 		self.db.exists(&peer_key(peer_addr)[..])
 	}
 
 	/// TODO - allow below added to avoid github issue reports
 	#[allow(dead_code)]
-	pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
+	pub fn delete_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
 		let batch = self.db.batch()?;
 		batch.delete(&peer_key(peer_addr)[..])?;
 		batch.commit()

@@ -162,17 +160,6 @@ impl PeerStore {
 		peers.iter().take(count).cloned().collect()
 	}
 
-	/// Query all peers with same IP address, and ignore the port
-	pub fn find_peers_by_ip(&self, peer_addr: SocketAddr) -> Vec<PeerData> {
-		self.db
-			.iter::<PeerData>(&to_key(
-				PEER_PREFIX,
-				&mut format!("{}", peer_addr.ip()).into_bytes(),
-			))
-			.unwrap()
-			.collect::<Vec<_>>()
-	}
-
 	/// List all known peers
 	/// Used for /v1/peers/all api endpoint
 	pub fn all_peers(&self) -> Vec<PeerData> {

@@ -182,7 +169,7 @@ impl PeerStore {
 
 	/// Convenience method to load a peer data, update its status and save it
 	/// back. If new state is Banned its last banned time will be updated too.
-	pub fn update_state(&self, peer_addr: SocketAddr, new_state: State) -> Result<(), Error> {
+	pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
 		let batch = self.db.batch()?;
 
 		let mut peer = option_to_not_found(

@@ -194,7 +181,7 @@ impl PeerStore {
 			peer.last_banned = Utc::now().timestamp();
 		}
 
-		batch.put_ser(&peer_key(peer.addr)[..], &peer)?;
+		batch.put_ser(&peer_key(peer_addr)[..], &peer)?;
 		batch.commit()
 	}
 

@@ -226,9 +213,7 @@ impl PeerStore {
 	}
 }
 
-fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {
-	to_key(
-		PEER_PREFIX,
-		&mut format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes(),
-	)
+// Ignore the port unless ip is loopback address.
+fn peer_key(peer_addr: PeerAddr) -> Vec<u8> {
+	to_key(PEER_PREFIX, &mut peer_addr.as_key().into_bytes())
 }
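peer_key now defers to PeerAddr::as_key (defined in p2p/src/types.rs below): for any non-loopback address the port is dropped, so the store keeps at most one entry per IP, while loopback addresses keep their port so several local test nodes can coexist. This also removes the need for find_peers_by_ip, deleted above. A quick sketch of the key shapes this produces:

use std::net::SocketAddr;

fn as_key(addr: SocketAddr) -> String {
	if addr.ip().is_loopback() {
		format!("{}:{}", addr.ip(), addr.port())
	} else {
		format!("{}", addr.ip())
	}
}

fn main() {
	let remote: SocketAddr = "203.0.113.7:3414".parse().unwrap();
	let local: SocketAddr = "127.0.0.1:13414".parse().unwrap();
	assert_eq!(as_key(remote), "203.0.113.7"); // port ignored
	assert_eq!(as_key(local), "127.0.0.1:13414"); // port kept on loopback
}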
p2p/src/types.rs (143 lines changed)
@@ -16,15 +16,18 @@ use crate::util::RwLock;
 use std::convert::From;
 use std::fs::File;
 use std::io;
-use std::net::{IpAddr, SocketAddr};
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
 use std::sync::mpsc;
 use std::sync::Arc;
 
 use chrono::prelude::*;
 
+use crate::core::core;
 use crate::core::core::hash::Hash;
+use crate::core::global;
 use crate::core::pow::Difficulty;
-use crate::core::{core, ser};
+use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
 use grin_store;
 
 /// Maximum number of block headers a peer should ever send

@@ -95,6 +98,106 @@ impl<T> From<mpsc::TrySendError<T>> for Error {
 	}
 }
 
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub struct PeerAddr(pub SocketAddr);
+
+impl Writeable for PeerAddr {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		match self.0 {
+			SocketAddr::V4(sav4) => {
+				ser_multiwrite!(
+					writer,
+					[write_u8, 0],
+					[write_fixed_bytes, &sav4.ip().octets().to_vec()],
+					[write_u16, sav4.port()]
+				);
+			}
+			SocketAddr::V6(sav6) => {
+				writer.write_u8(1)?;
+				for seg in &sav6.ip().segments() {
+					writer.write_u16(*seg)?;
+				}
+				writer.write_u16(sav6.port())?;
+			}
+		}
+		Ok(())
+	}
+}
+
+impl Readable for PeerAddr {
+	fn read(reader: &mut dyn Reader) -> Result<PeerAddr, ser::Error> {
+		let v4_or_v6 = reader.read_u8()?;
+		if v4_or_v6 == 0 {
+			let ip = reader.read_fixed_bytes(4)?;
+			let port = reader.read_u16()?;
+			Ok(PeerAddr(SocketAddr::V4(SocketAddrV4::new(
+				Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]),
+				port,
+			))))
+		} else {
+			let ip = try_iter_map_vec!(0..8, |_| reader.read_u16());
+			let port = reader.read_u16()?;
+			Ok(PeerAddr(SocketAddr::V6(SocketAddrV6::new(
+				Ipv6Addr::new(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]),
+				port,
+				0,
+				0,
+			))))
+		}
+	}
+}
+
+impl std::hash::Hash for PeerAddr {
+	/// If loopback address then we care about ip and port.
+	/// If regular address then we only care about the ip and ignore the port.
+	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+		if self.0.ip().is_loopback() {
+			self.0.hash(state);
+		} else {
+			self.0.ip().hash(state);
+		}
+	}
+}
+
+impl PartialEq for PeerAddr {
+	/// If loopback address then we care about ip and port.
+	/// If regular address then we only care about the ip and ignore the port.
+	fn eq(&self, other: &PeerAddr) -> bool {
+		if self.0.ip().is_loopback() {
+			self.0 == other.0
+		} else {
+			self.0.ip() == other.0.ip()
+		}
+	}
+}
+
+impl Eq for PeerAddr {}
+
+impl std::fmt::Display for PeerAddr {
+	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+		write!(f, "{}", self.0)
+	}
+}
+
+impl PeerAddr {
+	/// Convenient way of constructing a new peer_addr from an ip_addr
+	/// defaults to port 3414 on mainnet and 13414 on floonet.
+	pub fn from_ip(addr: IpAddr) -> PeerAddr {
+		let port = if global::is_floonet() { 13414 } else { 3414 };
+		PeerAddr(SocketAddr::new(addr, port))
+	}
+
+	/// If the ip is loopback then our key is "ip:port" (mainly for local usernet testing).
+	/// Otherwise we only care about the ip (we disallow multiple peers on the same ip address).
+	pub fn as_key(&self) -> String {
+		if self.0.ip().is_loopback() {
+			format!("{}:{}", self.0.ip(), self.0.port())
+		} else {
+			format!("{}", self.0.ip())
+		}
+	}
+}
+
 /// Configuration for the peer-to-peer server.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub struct P2PConfig {
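The Hash and PartialEq impls above deliberately make the same distinction (the port only counts on loopback). Keeping them consistent is what makes HashMap<PeerAddr, Arc<Peer>> in peers.rs sound, since a HashMap requires equal keys to hash equally. A standalone sketch of the equality rule:

use std::net::SocketAddr;

fn same_peer(a: SocketAddr, b: SocketAddr) -> bool {
	if a.ip().is_loopback() {
		a == b // local test nodes are distinguished by port
	} else {
		a.ip() == b.ip() // otherwise one IP means one peer
	}
}

fn main() {
	let a: SocketAddr = "203.0.113.7:3414".parse().unwrap();
	let b: SocketAddr = "203.0.113.7:9999".parse().unwrap();
	assert!(same_peer(a, b)); // same IP, differing ports ignored
}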
@@ -106,18 +209,18 @@ pub struct P2PConfig {
 	pub seeding_type: Seeding,
 
 	/// The list of seed nodes, if using Seeding as a seed type
-	pub seeds: Option<Vec<String>>,
+	pub seeds: Option<Vec<PeerAddr>>,
 
 	/// Capabilities expose by this node, also conditions which other peers this
 	/// node will have an affinity toward when connection.
 	pub capabilities: Capabilities,
 
-	pub peers_allow: Option<Vec<String>>,
+	pub peers_allow: Option<Vec<PeerAddr>>,
 
-	pub peers_deny: Option<Vec<String>>,
+	pub peers_deny: Option<Vec<PeerAddr>>,
 
 	/// The list of preferred peers that we will try to connect to
-	pub peers_preferred: Option<Vec<String>>,
+	pub peers_preferred: Option<Vec<PeerAddr>>,
 
 	pub ban_window: Option<i64>,
 

@@ -125,7 +228,7 @@ pub struct P2PConfig {
 
 	pub peer_min_preferred_count: Option<u32>,
 
-	pub dandelion_peer: Option<SocketAddr>,
+	pub dandelion_peer: Option<PeerAddr>,
 }
 
 /// Default address for peer-to-peer connections.

@@ -178,7 +281,7 @@ impl P2PConfig {
 }
 
 /// Type of seeding the server will use to find other peers on the network.
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
 pub enum Seeding {
 	/// No seeding, mostly for tests that programmatically connect
 	None,

@@ -262,7 +365,7 @@ pub struct PeerInfo {
 	pub capabilities: Capabilities,
 	pub user_agent: String,
 	pub version: u32,
-	pub addr: SocketAddr,
+	pub addr: PeerAddr,
 	pub direction: Direction,
 	pub live_info: Arc<RwLock<PeerLiveInfo>>,
 }

@@ -307,7 +410,7 @@ pub struct PeerInfoDisplay {
 	pub capabilities: Capabilities,
 	pub user_agent: String,
 	pub version: u32,
-	pub addr: SocketAddr,
+	pub addr: PeerAddr,
 	pub direction: Direction,
 	pub total_difficulty: Difficulty,
 	pub height: u64,

@@ -353,22 +456,22 @@ pub trait ChainAdapter: Sync + Send {
 
 	fn get_transaction(&self, kernel_hash: Hash) -> Option<core::Transaction>;
 
-	fn tx_kernel_received(&self, kernel_hash: Hash, addr: SocketAddr);
+	fn tx_kernel_received(&self, kernel_hash: Hash, addr: PeerAddr);
 
 	/// A block has been received from one of our peers. Returns true if the
 	/// block could be handled properly and is not deemed defective by the
 	/// chain. Returning false means the block will never be valid and
 	/// may result in the peer being banned.
-	fn block_received(&self, b: core::Block, addr: SocketAddr, was_requested: bool) -> bool;
+	fn block_received(&self, b: core::Block, addr: PeerAddr, was_requested: bool) -> bool;
 
-	fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool;
+	fn compact_block_received(&self, cb: core::CompactBlock, addr: PeerAddr) -> bool;
 
-	fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool;
+	fn header_received(&self, bh: core::BlockHeader, addr: PeerAddr) -> bool;
 
 	/// A set of block header has been received, typically in response to a
 	/// block
 	/// header request.
-	fn headers_received(&self, bh: &[core::BlockHeader], addr: SocketAddr) -> bool;
+	fn headers_received(&self, bh: &[core::BlockHeader], addr: PeerAddr) -> bool;
 
 	/// Finds a list of block headers based on the provided locator. Tries to
 	/// identify the common chain and gets the headers that follow it

@@ -401,7 +504,7 @@ pub trait ChainAdapter: Sync + Send {
 	/// If we're willing to accept that new state, the data stream will be
 	/// read as a zip file, unzipped and the resulting state files should be
 	/// rewound to the provided indexes.
-	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool;
+	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: PeerAddr) -> bool;
 }
 
 /// Additional methods required by the protocol that don't need to be

@@ -409,14 +512,14 @@ pub trait ChainAdapter: Sync + Send {
 pub trait NetAdapter: ChainAdapter {
 	/// Find good peers we know with the provided capability and return their
 	/// addresses.
-	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr>;
+	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr>;
 
 	/// A list of peers has been received from one of our peers.
-	fn peer_addrs_received(&self, _: Vec<SocketAddr>);
+	fn peer_addrs_received(&self, _: Vec<PeerAddr>);
 
 	/// Heard total_difficulty from a connected peer (via ping/pong).
-	fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _: u64);
+	fn peer_difficulty(&self, _: PeerAddr, _: Difficulty, _: u64);
 
 	/// Is this peer currently banned?
-	fn is_banned(&self, addr: SocketAddr) -> bool;
+	fn is_banned(&self, addr: PeerAddr) -> bool;
 }
@@ -25,6 +25,7 @@ use std::{thread, time};
 
 use crate::core::core::hash::Hash;
 use crate::core::pow::Difficulty;
+use crate::p2p::types::PeerAddr;
 use crate::p2p::Peer;
 
 fn open_port() -> u16 {

@@ -70,7 +71,7 @@ fn peer_handshake() {
 	let addr = SocketAddr::new(p2p_config.host, p2p_config.port);
 	let mut socket = TcpStream::connect_timeout(&addr, time::Duration::from_secs(10)).unwrap();
 
-	let my_addr = "127.0.0.1:5000".parse().unwrap();
+	let my_addr = PeerAddr("127.0.0.1:5000".parse().unwrap());
 	let mut peer = Peer::connect(
 		&mut socket,
 		p2p::Capabilities::UNKNOWN,

@@ -89,7 +90,7 @@ fn peer_handshake() {
 	peer.send_ping(Difficulty::min(), 0).unwrap();
 	thread::sleep(time::Duration::from_secs(1));
 
-	let server_peer = server.peers.get_connected_peer(&my_addr).unwrap();
+	let server_peer = server.peers.get_connected_peer(my_addr).unwrap();
 	assert_eq!(server_peer.info.total_difficulty(), Difficulty::min());
 	assert!(server.peers.peer_count() > 0);
 }
@ -16,6 +16,8 @@ serde = "1"
|
|||
serde_derive = "1"
|
||||
log = "0.4"
|
||||
chrono = "0.4.4"
|
||||
failure = "0.1"
|
||||
failure_derive = "0.1"
|
||||
|
||||
grin_core = { path = "../core", version = "1.1.0" }
|
||||
grin_keychain = { path = "../keychain", version = "1.1.0" }
|
||||
|
|
|
@@ -23,6 +23,7 @@ use self::core::core::hash::Hash;
 use self::core::core::transaction::{self, Transaction};
 use self::core::core::{BlockHeader, BlockSums};
 use self::core::{consensus, global};
+use failure::Fail;
 use grin_core as core;
 use grin_keychain as keychain;
 

@@ -179,32 +180,44 @@ pub struct TxSource {
 }
 
 /// Possible errors when interacting with the transaction pool.
-#[derive(Debug)]
+#[derive(Debug, Fail)]
 pub enum PoolError {
 	/// An invalid pool entry caused by underlying tx validation error
+	#[fail(display = "Invalid Tx {}", _0)]
 	InvalidTx(transaction::Error),
 	/// An invalid pool entry caused by underlying block validation error
+	#[fail(display = "Invalid Block {}", _0)]
 	InvalidBlock(block::Error),
 	/// Underlying keychain error.
+	#[fail(display = "Keychain error {}", _0)]
 	Keychain(keychain::Error),
 	/// Underlying "committed" error.
+	#[fail(display = "Committed error {}", _0)]
 	Committed(committed::Error),
 	/// Attempt to add a transaction to the pool with lock_height
 	/// greater than height of current block
+	#[fail(display = "Immature transaction")]
 	ImmatureTransaction,
 	/// Attempt to spend a coinbase output before it has sufficiently matured.
+	#[fail(display = "Immature coinbase")]
 	ImmatureCoinbase,
 	/// Problem propagating a stem tx to the next Dandelion relay node.
+	#[fail(display = "Dandelion error")]
 	DandelionError,
 	/// Transaction pool is over capacity, can't accept more transactions
+	#[fail(display = "Over capacity")]
 	OverCapacity,
 	/// Transaction fee is too low given its weight
+	#[fail(display = "Low fee transaction {}", _0)]
 	LowFeeTransaction(u64),
 	/// Attempt to add a duplicate output to the pool.
+	#[fail(display = "Duplicate commitment")]
 	DuplicateCommitment,
 	/// Attempt to add a duplicate tx to the pool.
+	#[fail(display = "Duplicate tx")]
 	DuplicateTx,
 	/// Other kinds of error (not yet pulled out into meaningful errors).
+	#[fail(display = "General pool error {}", _0)]
 	Other(String),
 }
 
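Deriving Fail above (via the failure crates newly added to pool/Cargo.toml) generates a Display impl from the #[fail(display = ...)] attributes, along with the error-trait plumbing. Roughly the hand-written equivalent for two of the variants, as a sketch rather than the generated code:

use std::fmt;

enum PoolError {
	DuplicateTx,
	LowFeeTransaction(u64),
}

impl fmt::Display for PoolError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			PoolError::DuplicateTx => write!(f, "Duplicate tx"),
			PoolError::LowFeeTransaction(fee) => write!(f, "Low fee transaction {}", fee),
		}
	}
}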
@ -11,6 +11,7 @@ edition = "2018"
|
|||
|
||||
[dependencies]
|
||||
hyper = "0.12"
|
||||
fs2 = "0.4"
|
||||
futures = "0.1"
|
||||
http = "0.1"
|
||||
itertools = "0.7"
|
||||
|
|
|
@@ -17,7 +17,6 @@

 use crate::util::RwLock;
 use std::fs::File;
-use std::net::SocketAddr;
 use std::sync::{Arc, Weak};
 use std::thread;
 use std::time::Instant;

@@ -31,6 +30,7 @@ use crate::core::core::{BlockHeader, BlockSums, CompactBlock};
 use crate::core::pow::Difficulty;
 use crate::core::{core, global};
 use crate::p2p;
+use crate::p2p::types::PeerAddr;
 use crate::pool;
 use crate::util::OneTime;
 use chrono::prelude::*;

@@ -62,7 +62,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		self.tx_pool.read().retrieve_tx_by_kernel_hash(kernel_hash)
 	}

-	fn tx_kernel_received(&self, kernel_hash: Hash, addr: SocketAddr) {
+	fn tx_kernel_received(&self, kernel_hash: Hash, addr: PeerAddr) {
 		// nothing much we can do with a new transaction while syncing
 		if self.sync_state.is_syncing() {
 			return;

@@ -71,7 +71,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		let tx = self.tx_pool.read().retrieve_tx_by_kernel_hash(kernel_hash);

 		if tx.is_none() {
-			self.request_transaction(kernel_hash, &addr);
+			self.request_transaction(kernel_hash, addr);
 		}
 	}

@@ -107,7 +107,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		}
 	}

-	fn block_received(&self, b: core::Block, addr: SocketAddr, was_requested: bool) -> bool {
+	fn block_received(&self, b: core::Block, addr: PeerAddr, was_requested: bool) -> bool {
 		debug!(
 			"Received block {} at {} from {} [in/out/kern: {}/{}/{}] going to process.",
 			b.hash(),

@@ -120,7 +120,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		self.process_block(b, addr, was_requested)
 	}

-	fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
+	fn compact_block_received(&self, cb: core::CompactBlock, addr: PeerAddr) -> bool {
 		let bhash = cb.hash();
 		debug!(
 			"Received compact_block {} at {} from {} [out/kern/kern_ids: {}/{}/{}] going to process.",

@@ -187,7 +187,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		} else {
 			if self.sync_state.status() == SyncStatus::NoSync {
 				debug!("adapter: block invalid after hydration, requesting full block");
-				self.request_block(&cb.header, &addr);
+				self.request_block(&cb.header, addr);
 				true
 			} else {
 				debug!("block invalid after hydration, ignoring it, cause still syncing");

@@ -201,7 +201,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		}
 	}

-	fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
+	fn header_received(&self, bh: core::BlockHeader, addr: PeerAddr) -> bool {
 		let bhash = bh.hash();
 		debug!(
 			"Received block header {} at {} from {}, going to process.",

@@ -227,13 +227,13 @@ impl p2p::ChainAdapter for NetToChainAdapter {

 		// we have successfully processed a block header
 		// so we can go request the block itself
-		self.request_compact_block(&bh, &addr);
+		self.request_compact_block(&bh, addr);

 		// done receiving the header
 		true
 	}

-	fn headers_received(&self, bhs: &[core::BlockHeader], addr: SocketAddr) -> bool {
+	fn headers_received(&self, bhs: &[core::BlockHeader], addr: PeerAddr) -> bool {
 		info!("Received {} block headers from {}", bhs.len(), addr,);

 		if bhs.len() == 0 {

@@ -342,7 +342,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 	/// If we're willing to accept that new state, the data stream will be
 	/// read as a zip file, unzipped and the resulting state files should be
 	/// rewound to the provided indexes.
-	fn txhashset_write(&self, h: Hash, txhashset_data: File, _peer_addr: SocketAddr) -> bool {
+	fn txhashset_write(&self, h: Hash, txhashset_data: File, _peer_addr: PeerAddr) -> bool {
 		// check status again after download, in case 2 txhashsets made it somehow
 		if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
 		} else {

@@ -421,7 +421,7 @@ impl NetToChainAdapter {

 	// pushing the new block through the chain pipeline
 	// remembering to reset the head if we have a bad block
-	fn process_block(&self, b: core::Block, addr: SocketAddr, was_requested: bool) -> bool {
+	fn process_block(&self, b: core::Block, addr: PeerAddr, was_requested: bool) -> bool {
 		// We cannot process blocks earlier than the horizon so check for this here.
 		{
 			let head = self.chain().head().unwrap();

@@ -458,7 +458,7 @@ impl NetToChainAdapter {
 			&& !self.sync_state.is_syncing()
 		{
 			debug!("process_block: received an orphan block, checking the parent: {:}", previous.hash());
-			self.request_block_by_hash(previous.hash(), &addr)
+			self.request_block_by_hash(previous.hash(), addr)
 		}
 	}
 	true

@@ -525,7 +525,7 @@ impl NetToChainAdapter {
 		}
 	}

-	fn request_transaction(&self, h: Hash, addr: &SocketAddr) {
+	fn request_transaction(&self, h: Hash, addr: PeerAddr) {
 		self.send_tx_request_to_peer(h, addr, |peer, h| peer.send_tx_request(h))
 	}

@@ -533,24 +533,24 @@ impl NetToChainAdapter {
 	// it into a full block then fallback to requesting the full block
 	// from the same peer that gave us the compact block
 	// consider additional peers for redundancy?
-	fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
+	fn request_block(&self, bh: &BlockHeader, addr: PeerAddr) {
 		self.request_block_by_hash(bh.hash(), addr)
 	}

-	fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) {
+	fn request_block_by_hash(&self, h: Hash, addr: PeerAddr) {
 		self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
 	}

 	// After we have received a block header in "header first" propagation
 	// we need to go request the block (compact representation) from the
 	// same peer that gave us the header (unless we have already accepted the block)
-	fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
+	fn request_compact_block(&self, bh: &BlockHeader, addr: PeerAddr) {
 		self.send_block_request_to_peer(bh.hash(), addr, |peer, h| {
 			peer.send_compact_block_request(h)
 		})
 	}

-	fn send_tx_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
+	fn send_tx_request_to_peer<F>(&self, h: Hash, addr: PeerAddr, f: F)
 	where
 		F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
 	{

@@ -567,7 +567,7 @@ impl NetToChainAdapter {
 		}
 	}

-	fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
+	fn send_block_request_to_peer<F>(&self, h: Hash, addr: PeerAddr, f: F)
 	where
 		F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
 	{

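Throughout these hunks, &SocketAddr parameters become by-value PeerAddr. A minimal sketch of why that is cheap, assuming PeerAddr is a Copy newtype over SocketAddr (the diff only shows the PeerAddr(addr) constructor; the derive list below is an assumption):

use std::net::SocketAddr;

// Assumed derives for illustration; SocketAddr itself is Copy.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct PeerAddr(SocketAddr);

fn request_block_by_hash(addr: PeerAddr) {
	println!("requesting from {}", addr.0);
}

fn main() {
	let addr = PeerAddr("127.0.0.1:3414".parse().unwrap());
	request_block_by_hash(addr);
	request_block_by_hash(addr); // still usable afterwards: PeerAddr is Copy
}
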
@@ -47,6 +47,8 @@ pub enum Error {
 	ArgumentError(String),
 	/// Wallet communication error
 	WalletComm(String),
+	/// Error originating from some I/O operation (likely a file on disk).
+	IOError(std::io::Error),
 }

 impl From<core::block::Error> for Error {

@@ -59,7 +61,11 @@ impl From<chain::Error> for Error {
 		Error::Chain(e)
 	}
 }
+impl From<std::io::Error> for Error {
+	fn from(e: std::io::Error) -> Error {
+		Error::IOError(e)
+	}
+}
 impl From<p2p::Error> for Error {
 	fn from(e: p2p::Error) -> Error {
 		Error::P2P(e)

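The new From<std::io::Error> impl is what lets the lock-file code further down use the ? operator on io results. The same pattern in a self-contained sketch (names here are illustrative, not from the codebase):

use std::io;

#[derive(Debug)]
enum Error {
	IOError(io::Error),
}

impl From<io::Error> for Error {
	fn from(e: io::Error) -> Error {
		Error::IOError(e)
	}
}

// `?` converts the io::Error into Error via the From impl above.
fn read_config(path: &str) -> Result<String, Error> {
	Ok(std::fs::read_to_string(path)?)
}

fn main() {
	println!("{:?}", read_config("no_such_file"));
}
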
@@ -21,12 +21,13 @@ use chrono::prelude::{DateTime, Utc};
 use chrono::{Duration, MIN_DATE};
 use rand::{thread_rng, Rng};
 use std::collections::HashMap;
-use std::net::{SocketAddr, ToSocketAddrs};
+use std::net::ToSocketAddrs;
 use std::sync::{mpsc, Arc};
 use std::{cmp, str, thread, time};

 use crate::core::global;
 use crate::p2p;
+use crate::p2p::types::PeerAddr;
 use crate::p2p::ChainAdapter;
 use crate::pool::DandelionConfig;
 use crate::util::{Mutex, StopState};

@@ -52,8 +53,8 @@ pub fn connect_and_monitor(
 	p2p_server: Arc<p2p::Server>,
 	capabilities: p2p::Capabilities,
 	dandelion_config: DandelionConfig,
-	seed_list: Box<dyn Fn() -> Vec<SocketAddr> + Send>,
-	preferred_peers: Option<Vec<SocketAddr>>,
+	seed_list: Box<dyn Fn() -> Vec<PeerAddr> + Send>,
+	preferred_peers: Option<Vec<PeerAddr>>,
 	stop_state: Arc<Mutex<StopState>>,
 ) {
 	let _ = thread::Builder::new()

@@ -78,7 +79,7 @@ pub fn connect_and_monitor(
 	let mut prev_ping = Utc::now();
 	let mut start_attempt = 0;

-	let mut connecting_history: HashMap<SocketAddr, DateTime<Utc>> = HashMap::new();
+	let mut connecting_history: HashMap<PeerAddr, DateTime<Utc>> = HashMap::new();

 	loop {
 		if stop_state.lock().is_stopped() {

@@ -140,8 +141,8 @@ fn monitor_peers(
 fn monitor_peers(
 	peers: Arc<p2p::Peers>,
 	config: p2p::P2PConfig,
-	tx: mpsc::Sender<SocketAddr>,
-	preferred_peers_list: Option<Vec<SocketAddr>>,
+	tx: mpsc::Sender<PeerAddr>,
+	preferred_peers_list: Option<Vec<PeerAddr>>,
 ) {
 	// regularly check if we need to acquire more peers and if so, gets
 	// them from db

@@ -156,7 +157,7 @@ fn monitor_peers(
 			let interval = Utc::now().timestamp() - x.last_banned;
 			// Unban peer
 			if interval >= config.ban_window() {
-				peers.unban_peer(&x.addr);
+				peers.unban_peer(x.addr);
 				debug!(
 					"monitor_peers: unbanned {} after {} seconds",
 					x.addr, interval

@@ -192,7 +193,7 @@ fn monitor_peers(

 	// loop over connected peers
 	// ask them for their list of peers
-	let mut connected_peers: Vec<SocketAddr> = vec![];
+	let mut connected_peers: Vec<PeerAddr> = vec![];
 	for p in peers.connected_peers() {
 		trace!(
 			"monitor_peers: {}:{} ask {} for more peers",

@@ -205,19 +206,16 @@ fn monitor_peers(
 	}

 	// Attempt to connect to preferred peers if there is some
-	match preferred_peers_list {
-		Some(preferred_peers) => {
-			for p in preferred_peers {
-				if !connected_peers.is_empty() {
-					if !connected_peers.contains(&p) {
-						tx.send(p).unwrap();
-					}
-				} else {
+	if let Some(preferred_peers) = preferred_peers_list {
+		for p in preferred_peers {
+			if !connected_peers.is_empty() {
+				if !connected_peers.contains(&p) {
+					tx.send(p).unwrap();
+				}
+			} else {
 				tx.send(p).unwrap();
 			}
 		}
-		None => debug!("monitor_peers: no preferred peers"),
 	}

 	// take a random defunct peer and mark it healthy: over a long period any

@@ -235,7 +233,7 @@ fn monitor_peers(
 		config.peer_max_count() as usize,
 	);

-	for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) {
+	for p in new_peers.iter().filter(|p| !peers.is_known(p.addr)) {
 		trace!(
 			"monitor_peers: on {}:{}, queue to soon try {}",
 			config.host,

@@ -249,17 +247,15 @@ fn monitor_peers(
 fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionConfig) {
 	// Dandelion Relay Updater
 	let dandelion_relay = peers.get_dandelion_relay();
-	if dandelion_relay.is_empty() {
+	if let Some((last_added, _)) = dandelion_relay {
+		let dandelion_interval = Utc::now().timestamp() - last_added;
+		if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
+			debug!("monitor_peers: updating expired dandelion relay");
+			peers.update_dandelion_relay();
+		}
+	} else {
 		debug!("monitor_peers: no dandelion relay updating");
 		peers.update_dandelion_relay();
-	} else {
-		for last_added in dandelion_relay.keys() {
-			let dandelion_interval = Utc::now().timestamp() - last_added;
-			if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
-				debug!("monitor_peers: updating expired dandelion relay");
-				peers.update_dandelion_relay();
-			}
-		}
 	}
 }

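The relay-refresh rule above reduces to: refresh when there is no relay yet, or when the current one is older than relay_secs. A tiny sketch of that decision in isolation (illustrative names only):

// Refresh when no relay exists or the current one has expired.
fn relay_needs_update(last_added: Option<i64>, now: i64, relay_secs: i64) -> bool {
	match last_added {
		None => true,
		Some(added) => now - added >= relay_secs,
	}
}

fn main() {
	assert!(relay_needs_update(None, 1_000, 600));
	assert!(relay_needs_update(Some(100), 1_000, 600));
	assert!(!relay_needs_update(Some(900), 1_000, 600));
}
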
@@ -267,9 +263,9 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionCon
 // otherwise use the seeds provided.
 fn connect_to_seeds_and_preferred_peers(
 	peers: Arc<p2p::Peers>,
-	tx: mpsc::Sender<SocketAddr>,
-	seed_list: Box<dyn Fn() -> Vec<SocketAddr>>,
-	peers_preferred_list: Option<Vec<SocketAddr>>,
+	tx: mpsc::Sender<PeerAddr>,
+	seed_list: Box<dyn Fn() -> Vec<PeerAddr>>,
+	peers_preferred_list: Option<Vec<PeerAddr>>,
 ) {
 	// check if we have some peers in db
 	// look for peers that are able to give us other peers (via PEER_LIST capability)

@@ -305,14 +301,14 @@ fn listen_for_addrs(
 	peers: Arc<p2p::Peers>,
 	p2p: Arc<p2p::Server>,
 	capab: p2p::Capabilities,
-	rx: &mpsc::Receiver<SocketAddr>,
-	connecting_history: &mut HashMap<SocketAddr, DateTime<Utc>>,
+	rx: &mpsc::Receiver<PeerAddr>,
+	connecting_history: &mut HashMap<PeerAddr, DateTime<Utc>>,
 ) {
 	// Pull everything currently on the queue off the queue.
 	// Does not block so addrs may be empty.
 	// We will take(max_peers) from this later but we want to drain the rx queue
 	// here to prevent it backing up.
-	let addrs: Vec<SocketAddr> = rx.try_iter().collect();
+	let addrs: Vec<PeerAddr> = rx.try_iter().collect();

 	// If we have a healthy number of outbound peers then we are done here.
 	if peers.healthy_peers_mix() {

@@ -344,7 +340,7 @@ fn listen_for_addrs(
 		let p2p_c = p2p.clone();
 		let _ = thread::Builder::new()
 			.name("peer_connect".to_string())
-			.spawn(move || match p2p_c.connect(&addr) {
+			.spawn(move || match p2p_c.connect(addr) {
 				Ok(p) => {
 					let _ = p.send_peer_request(capab);
 					let _ = peers_c.update_state(addr, p2p::State::Healthy);

@@ -370,9 +366,9 @@ fn listen_for_addrs(
 	}
 }

-pub fn dns_seeds() -> Box<dyn Fn() -> Vec<SocketAddr> + Send> {
+pub fn dns_seeds() -> Box<dyn Fn() -> Vec<PeerAddr> + Send> {
 	Box::new(|| {
-		let mut addresses: Vec<SocketAddr> = vec![];
+		let mut addresses: Vec<PeerAddr> = vec![];
 		let net_seeds = if global::is_floonet() {
 			FLOONET_DNS_SEEDS
 		} else {

@@ -386,7 +382,7 @@ pub fn dns_seeds() -> Box<dyn Fn() -> Vec<SocketAddr> + Send> {
 					&mut (addrs
 						.map(|mut addr| {
 							addr.set_port(if global::is_floonet() { 13414 } else { 3414 });
-							addr
+							PeerAddr(addr)
 						})
 						.filter(|addr| !temp_addresses.contains(addr))
 						.collect()),

@@ -401,26 +397,6 @@ pub fn dns_seeds() -> Box<dyn Fn() -> Vec<SocketAddr> + Send> {

 /// Convenience function when the seed list is immediately known. Mostly used
 /// for tests.
-pub fn predefined_seeds(addrs_str: Vec<String>) -> Box<dyn Fn() -> Vec<SocketAddr> + Send> {
-	Box::new(move || {
-		addrs_str
-			.iter()
-			.map(|s| s.parse().unwrap())
-			.collect::<Vec<_>>()
-	})
-}
-
-/// Convenience function when the seed list is immediately known. Mostly used
-/// for tests.
-pub fn preferred_peers(addrs_str: Vec<String>) -> Option<Vec<SocketAddr>> {
-	if addrs_str.is_empty() {
-		None
-	} else {
-		Some(
-			addrs_str
-				.iter()
-				.map(|s| s.parse().unwrap())
-				.collect::<Vec<_>>(),
-		)
-	}
-}
+pub fn predefined_seeds(addrs: Vec<PeerAddr>) -> Box<dyn Fn() -> Vec<PeerAddr> + Send> {
+	Box::new(move || addrs.clone())
+}

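The new predefined_seeds parses addresses once, up front, and the boxed closure merely clones the captured list on each call. A self-contained sketch with a stubbed PeerAddr (the derives are assumptions):

use std::net::SocketAddr;

#[derive(Clone, Debug, PartialEq)]
struct PeerAddr(SocketAddr);

fn predefined_seeds(addrs: Vec<PeerAddr>) -> Box<dyn Fn() -> Vec<PeerAddr> + Send> {
	// `move` captures the already-parsed list; each call hands out a copy.
	Box::new(move || addrs.clone())
}

fn main() {
	let seeds = vec![PeerAddr("10.0.0.1:3414".parse().unwrap())];
	let seeder = predefined_seeds(seeds.clone());
	assert_eq!(seeder(), seeds);
}
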
@@ -16,10 +16,15 @@
 //! the peer-to-peer server, the blockchain and the transaction pool) and acts
 //! as a facade.

-use std::net::SocketAddr;
+use std::fs;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::Path;
 use std::sync::Arc;
 use std::{thread, time};

+use fs2::FileExt;
+
 use crate::api;
 use crate::api::TLSConfig;
 use crate::chain;

@@ -35,6 +40,7 @@ use crate::grin::{dandelion_monitor, seed, sync};
 use crate::mining::stratumserver;
 use crate::mining::test_miner::Miner;
 use crate::p2p;
+use crate::p2p::types::PeerAddr;
 use crate::pool;
 use crate::store;
 use crate::util::file::get_first_line;

@@ -59,6 +65,8 @@ pub struct Server {
 	state_info: ServerStateInfo,
 	/// Stop flag
 	pub stop_state: Arc<Mutex<StopState>>,
+	/// Maintain a lock_file so we do not run multiple Grin nodes from same dir.
+	lock_file: Arc<File>,
 }

 impl Server {

@@ -102,8 +110,36 @@ impl Server {
 		}
 	}

+	// Exclusive (advisory) lock_file to ensure we do not run multiple
+	// instance of grin server from the same dir.
+	// This uses fs2 and should be safe cross-platform unless somebody abuses the file itself.
+	fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> {
+		let path = Path::new(&config.db_root);
+		fs::create_dir_all(path.clone())?;
+		let path = path.join("grin.lock");
+		let lock_file = fs::OpenOptions::new()
+			.read(true)
+			.write(true)
+			.create(true)
+			.open(&path)?;
+		lock_file.try_lock_exclusive().map_err(|e| {
+			let mut stderr = std::io::stderr();
+			writeln!(
+				&mut stderr,
+				"Failed to lock {:?} (grin server already running?)",
+				path
+			)
+			.expect("Could not write to stderr");
+			e
+		})?;
+		Ok(Arc::new(lock_file))
+	}
+
 	/// Instantiates a new server associated with the provided future reactor.
-	pub fn new(mut config: ServerConfig) -> Result<Server, Error> {
+	pub fn new(config: ServerConfig) -> Result<Server, Error> {
+		// Obtain our lock_file or fail immediately with an error.
+		let lock_file = Server::one_grin_at_a_time(&config)?;
+
 		// Defaults to None (optional) in config file.
 		// This translates to false here.
 		let archive_mode = match config.archive_mode {

@@ -178,30 +214,25 @@ impl Server {
 		pool_net_adapter.init(p2p_server.peers.clone());
 		net_adapter.init(p2p_server.peers.clone());

-		if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic {
-			let seeder = match config.p2p_config.seeding_type.clone() {
+		if config.p2p_config.seeding_type != p2p::Seeding::Programmatic {
+			let seeder = match config.p2p_config.seeding_type {
 				p2p::Seeding::None => {
 					warn!("No seed configured, will stay solo until connected to");
 					seed::predefined_seeds(vec![])
 				}
 				p2p::Seeding::List => {
-					seed::predefined_seeds(config.p2p_config.seeds.as_mut().unwrap().clone())
+					seed::predefined_seeds(config.p2p_config.seeds.clone().unwrap())
 				}
 				p2p::Seeding::DNSSeed => seed::dns_seeds(),
 				_ => unreachable!(),
 			};

-			let peers_preferred = match config.p2p_config.peers_preferred.clone() {
-				Some(peers_preferred) => seed::preferred_peers(peers_preferred),
-				None => None,
-			};
-
 			seed::connect_and_monitor(
 				p2p_server.clone(),
 				config.p2p_config.capabilities,
 				config.dandelion_config.clone(),
 				seeder,
-				peers_preferred,
+				config.p2p_config.peers_preferred.clone(),
 				stop_state.clone(),
 			);
 		}

@@ -269,12 +300,13 @@ impl Server {
 				..Default::default()
 			},
 			stop_state,
+			lock_file,
 		})
 	}

 	/// Asks the server to connect to a peer at the provided network address.
-	pub fn connect_peer(&self, addr: SocketAddr) -> Result<(), Error> {
-		self.p2p.connect(&addr)?;
+	pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> {
+		self.p2p.connect(addr)?;
 		Ok(())
 	}

@@ -456,6 +488,7 @@ impl Server {
 	pub fn stop(&self) {
 		self.p2p.stop();
 		self.stop_state.lock().stop();
+		let _ = self.lock_file.unlock();
 	}

 	/// Pause the p2p server.

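The single-instance guard relies on fs2's advisory file locks, which the OS releases automatically if the process dies. A stripped-down sketch of the same pattern; the file name here is illustrative:

use fs2::FileExt; // fs2 = "0.4"
use std::fs::OpenOptions;

fn main() -> std::io::Result<()> {
	let lock = OpenOptions::new()
		.read(true)
		.write(true)
		.create(true)
		.open("demo.lock")?;
	// Errors if another process already holds the exclusive lock.
	lock.try_lock_exclusive()?;
	// ... exclusive section ...
	lock.unlock()?;
	Ok(())
}
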
@@ -148,7 +148,7 @@ impl HeaderSync {
 			&& highest_height == peer.info.height()
 		{
 			self.peers
-				.ban_peer(&peer.info.addr, ReasonForBan::FraudHeight);
+				.ban_peer(peer.info.addr, ReasonForBan::FraudHeight);
 			info!(
 				"sync: ban a fraud peer: {}, claimed height: {}, total difficulty: {}",
 				peer.info.addr,

@@ -35,6 +35,7 @@ use std::{cmp, thread};
 use crate::chain;
 use crate::common::stats::{StratumStats, WorkerStats};
 use crate::common::types::{StratumServerConfig, SyncState};
+use crate::core::core::hash::Hashed;
 use crate::core::core::verifier_cache::VerifierCache;
 use crate::core::core::Block;
 use crate::core::{pow, ser};

@@ -24,7 +24,7 @@ use ctrlc;

 use crate::config::GlobalConfig;
 use crate::core::global;
-use crate::p2p::Seeding;
+use crate::p2p::{PeerAddr, Seeding};
 use crate::servers;
 use crate::tui::ui;

@@ -116,45 +116,15 @@ pub fn server_command(
 	}

 	if let Some(seeds) = a.values_of("seed") {
+		let seed_addrs = seeds
+			.filter_map(|x| x.parse().ok())
+			.map(|x| PeerAddr(x))
+			.collect();
 		server_config.p2p_config.seeding_type = Seeding::List;
-		server_config.p2p_config.seeds = Some(seeds.map(|s| s.to_string()).collect());
+		server_config.p2p_config.seeds = Some(seed_addrs);
 	}
 }

-/*if let Some(true) = server_config.run_wallet_listener {
-	let mut wallet_config = global_config.members.as_ref().unwrap().wallet.clone();
-	wallet::init_wallet_seed(wallet_config.clone());
-	let wallet = wallet::instantiate_wallet(wallet_config.clone(), "");
-
-	let _ = thread::Builder::new()
-		.name("wallet_listener".to_string())
-		.spawn(move || {
-			controller::foreign_listener(wallet, &wallet_config.api_listen_addr())
-				.unwrap_or_else(|e| {
-					panic!(
-						"Error creating wallet listener: {:?} Config: {:?}",
-						e, wallet_config
-					)
-				});
-		});
-}
-if let Some(true) = server_config.run_wallet_owner_api {
-	let mut wallet_config = global_config.members.unwrap().wallet;
-	let wallet = wallet::instantiate_wallet(wallet_config.clone(), "");
-	wallet::init_wallet_seed(wallet_config.clone());
-
-	let _ = thread::Builder::new()
-		.name("wallet_owner_listener".to_string())
-		.spawn(move || {
-			controller::owner_listener(wallet, "127.0.0.1:13420").unwrap_or_else(|e| {
-				panic!(
-					"Error creating wallet api listener: {:?} Config: {:?}",
-					e, wallet_config
-				)
-			});
-		});
-}*/
-
 if let Some(a) = server_args {
 	match a.subcommand() {
 		("run", _) => {

@@ -1,5 +1,5 @@
 name: grin
-version: "1.0.1"
+version: "1.0.2"
 about: Lightweight implementation of the MimbleWimble protocol.
 author: The Grin Team

@@ -21,6 +21,7 @@ use std::fs;
 use chrono::prelude::Utc;
 use croaring::Bitmap;

+use crate::core::core::hash::DefaultHashable;
 use crate::core::core::pmmr::{Backend, PMMR};
 use crate::core::ser::{
 	Error, FixedLength, PMMRIndexHashable, PMMRable, Readable, Reader, Writeable, Writer,

@@ -903,6 +904,8 @@ fn load(pos: u64, elems: &[TestElem], backend: &mut store::pmmr::PMMRBackend<Tes
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 struct TestElem(u32);

+impl DefaultHashable for TestElem {}
+
 impl FixedLength for TestElem {
 	const LEN: usize = 4;
 }

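DefaultHashable appears to act as a marker trait: opting a type in presumably gives it the default Hashed behaviour via a blanket impl. A generic sketch of that marker-trait pattern, not grin's actual definitions:

// Marker trait: types opt in to the default hashing behaviour.
trait DefaultHashable: std::fmt::Debug {}

trait Hashed {
	fn hash_hex(&self) -> String;
}

// Blanket impl: every marked type gets Hashed for free.
impl<T: DefaultHashable> Hashed for T {
	fn hash_hex(&self) -> String {
		format!("{:?}", self) // stand-in for real (blake2b) hashing
	}
}

#[derive(Debug)]
struct TestElem(u32);
impl DefaultHashable for TestElem {}

fn main() {
	println!("{}", TestElem(7).hash_hex());
}
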
@@ -28,6 +28,7 @@

 //! Macros to support Rust BIP-32 code (though could conceivably be used for other things)

+/// gives a newtype array wrapper standard array traits
 #[macro_export]
 macro_rules! impl_array_newtype {
 	($thing:ident, $ty:ty, $len:expr) => {

@@ -86,16 +87,6 @@ macro_rules! impl_array_newtype {
 			}
 		}

-		impl ::std::ops::Index<usize> for $thing {
-			type Output = $ty;
-
-			#[inline]
-			fn index(&self, index: usize) -> &$ty {
-				let &$thing(ref dat) = self;
-				&dat[index]
-			}
-		}
-
+		impl_index_newtype!($thing, $ty);

 		impl PartialEq for $thing {

@@ -164,6 +155,7 @@ macro_rules! impl_array_newtype {
 	};
 }

+/// gives a newtype array wrapper serialization and deserialization methods
 #[macro_export]
 macro_rules! impl_array_newtype_encodable {
 	($thing:ident, $ty:ty, $len:expr) => {

@@ -193,7 +185,7 @@ macro_rules! impl_array_newtype_encodable {
 					*item = match seq.next_element()? {
 						Some(c) => c,
 						None => {
-							return Err($crate::serde::de::Error::custom("end of stream"))
+							return Err($crate::serde::de::Error::custom("end of stream"));
 						}
 					};
 				}

@@ -218,6 +210,7 @@ macro_rules! impl_array_newtype_encodable {
 	};
 }

+/// gives a newtype array wrapper the Debug trait
 #[macro_export]
 macro_rules! impl_array_newtype_show {
 	($thing:ident) => {

@@ -229,9 +222,19 @@ macro_rules! impl_array_newtype_show {
 	};
 }

+/// gives a newtype array wrapper Index traits
 #[macro_export]
 macro_rules! impl_index_newtype {
 	($thing:ident, $ty:ty) => {
+		impl ::std::ops::Index<usize> for $thing {
+			type Output = $ty;
+
+			#[inline]
+			fn index(&self, index: usize) -> &$ty {
+				let &$thing(ref dat) = self;
+				&dat[index]
+			}
+		}
 		impl ::std::ops::Index<::std::ops::Range<usize>> for $thing {
 			type Output = [$ty];

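Hand-expanded, here is roughly what impl_index_newtype! generates for a concrete wrapper (an illustrative type, not the macro's literal output):

struct Bytes4([u8; 4]);

impl std::ops::Index<usize> for Bytes4 {
	type Output = u8;
	fn index(&self, index: usize) -> &u8 {
		&self.0[index]
	}
}

impl std::ops::Index<std::ops::Range<usize>> for Bytes4 {
	type Output = [u8];
	fn index(&self, index: std::ops::Range<usize>) -> &[u8] {
		&self.0[index]
	}
}

fn main() {
	let b = Bytes4([1, 2, 3, 4]);
	assert_eq!(b[0], 1);
	assert_eq!(&b[1..3], &[2, 3][..]);
}
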
@@ -62,15 +62,22 @@ pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
 }

 /// Decompress a source file into the provided destination path.
-pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()>
+pub fn decompress<R, F>(src_file: R, dest: &Path, expected: F) -> ZipResult<usize>
 where
 	R: io::Read + io::Seek,
+	F: Fn(&Path) -> bool,
 {
+	let mut decompressed = 0;
 	let mut archive = zip_rs::ZipArchive::new(src_file)?;

 	for i in 0..archive.len() {
 		let mut file = archive.by_index(i)?;
-		let file_path = dest.join(file.name());
+		let san_name = file.sanitized_name();
+		if san_name.to_str().unwrap_or("") != file.name() || !expected(&san_name) {
+			info!("ignoring a suspicious file: {}", file.name());
+			continue;
+		}
+		let file_path = dest.join(san_name);

 		if (&*file.name()).ends_with('/') {
 			fs::create_dir_all(&file_path)?;

@@ -80,7 +87,6 @@ where
 				fs::create_dir_all(&p)?;
 			}
 		}
-		//let mut outfile = fs::File::create(&file_path)?;
 		let res = fs::File::create(&file_path);
 		let mut outfile = match res {
 			Err(e) => {

@@ -90,6 +96,7 @@ where
 			Ok(r) => r,
 		};
 		io::copy(&mut file, &mut outfile)?;
+		decompressed += 1;
 	}

 	// Get and Set permissions

@@ -104,5 +111,5 @@ where
 			}
 		}
 	}
-	Ok(())
+	Ok(decompressed)
 }

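The new expected predicate lets callers whitelist sanitized entry paths, complementing the sanitized_name check against zip-slip style names. A hypothetical predicate (the txhashset prefix is an assumption for illustration):

use std::path::Path;

// Hypothetical whitelist: only accept entries under a known top-level dir.
fn expected(entry: &Path) -> bool {
	entry.starts_with("txhashset")
}

fn main() {
	assert!(expected(Path::new("txhashset/output/pmmr_data.bin")));
	assert!(!expected(Path::new("../../etc/passwd")));
}
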
BIN util/tests/test.zip Normal file
Binary file not shown.

@@ -38,7 +38,7 @@ fn zip_unzip() {

 	fs::create_dir_all(root.join("./dezipped")).unwrap();
 	let zip_file = File::open(zip_name).unwrap();
-	zip::decompress(zip_file, &root.join("./dezipped")).unwrap();
+	zip::decompress(zip_file, &root.join("./dezipped"), |_| true).unwrap();

 	assert!(root.join("to_zip/foo.txt").is_file());
 	assert!(root.join("to_zip/bar.txt").is_file());

@@ -46,6 +46,14 @@ fn zip_unzip() {
 	let lorem = root.join("to_zip/sub/lorem");
 	assert!(lorem.is_file());
 	assert!(lorem.metadata().unwrap().len() == 55);
+
+	let decompressed = zip::decompress(
+		File::open("tests/test.zip").unwrap(),
+		&root.join("./dezipped"),
+		|_| true,
+	)
+	.unwrap();
+	assert_eq!(decompressed, 1);
 }

 fn write_files(dir_name: String, root: &Path) -> io::Result<()> {