// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
2018-12-08 02:59:40 +03:00
|
|
|
use crate::util::RwLock;
|
2017-12-12 19:40:26 +03:00
|
|
|
use std::collections::HashMap;
|
2018-02-10 01:32:16 +03:00
|
|
|
use std::fs::File;
|
2019-05-14 19:17:38 +03:00
|
|
|
use std::io::Read;
|
2019-04-23 02:54:36 +03:00
|
|
|
use std::path::PathBuf;
|
2018-10-20 03:13:07 +03:00
|
|
|
use std::sync::Arc;
|
2017-12-12 19:40:26 +03:00
|
|
|
|
2019-04-30 22:25:19 +03:00
|
|
|
use rand::seq::SliceRandom;
|
|
|
|
use rand::thread_rng;
|
2017-12-12 19:40:26 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
use crate::chain;
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::core::core;
|
|
|
|
use crate::core::core::hash::{Hash, Hashed};
|
|
|
|
use crate::core::global;
|
|
|
|
use crate::core::pow::Difficulty;
|
|
|
|
use crate::peer::Peer;
|
|
|
|
use crate::store::{PeerData, PeerStore, State};
|
|
|
|
use crate::types::{
|
2019-04-18 16:11:06 +03:00
|
|
|
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan,
|
2018-08-01 12:44:07 +03:00
|
|
|
TxHashSetRead, MAX_PEER_ADDRS,
|
|
|
|
};
|
2019-05-15 18:51:35 +03:00
|
|
|
use chrono::prelude::*;
|
|
|
|
use chrono::Duration;
|
|
|
|
|
|
|
|
/// Maximum time to wait when acquiring the shared `peers` map lock
/// (via `try_read_for` / `try_write_for`) before giving up, so callers
/// never block indefinitely on a contended lock.
const LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2);
|
2017-12-12 19:40:26 +03:00
|
|
|
|
|
|
|
/// Set of currently connected peers plus the persistent peer store.
pub struct Peers {
    /// Adapter used to pass peer/chain events up to the server layer.
    pub adapter: Arc<dyn ChainAdapter>,
    // Persistent storage of known peers and their state (Healthy, Banned, ...).
    store: PeerStore,
    // In-memory map of live peer connections keyed by address.
    // Guarded by a timed RwLock; all accesses use LOCK_TIMEOUT.
    peers: RwLock<HashMap<PeerAddr, Arc<Peer>>>,
    // p2p configuration (capabilities, connection limits, ...).
    config: P2PConfig,
}
|
|
|
|
|
|
|
|
impl Peers {
|
2018-12-08 02:59:40 +03:00
|
|
|
pub fn new(store: PeerStore, adapter: Arc<dyn ChainAdapter>, config: P2PConfig) -> Peers {
|
2017-12-12 19:40:26 +03:00
|
|
|
Peers {
|
2018-01-31 00:44:13 +03:00
|
|
|
adapter,
|
2018-02-13 03:38:52 +03:00
|
|
|
store,
|
2018-09-07 23:01:54 +03:00
|
|
|
config,
|
2018-02-13 03:38:52 +03:00
|
|
|
peers: RwLock::new(HashMap::new()),
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Adds the peer to our internal peer mapping. Note that the peer is still
|
|
|
|
/// returned so the server can run it.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn add_connected(&self, peer: Arc<Peer>) -> Result<(), Error> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
|
|
|
|
Some(peers) => peers,
|
|
|
|
None => {
|
|
|
|
error!("add_connected: failed to get peers lock");
|
|
|
|
return Err(Error::Timeout);
|
|
|
|
}
|
|
|
|
};
|
2019-02-18 15:15:32 +03:00
|
|
|
let peer_data = PeerData {
|
|
|
|
addr: peer.info.addr,
|
|
|
|
capabilities: peer.info.capabilities,
|
|
|
|
user_agent: peer.info.user_agent.clone(),
|
|
|
|
flags: State::Healthy,
|
|
|
|
last_banned: 0,
|
|
|
|
ban_reason: ReasonForBan::None,
|
|
|
|
last_connected: Utc::now().timestamp(),
|
|
|
|
};
|
|
|
|
debug!("Saving newly connected peer {}.", peer_data.addr);
|
2018-08-21 01:32:13 +03:00
|
|
|
self.save_peer(&peer_data)?;
|
2019-05-15 18:51:35 +03:00
|
|
|
peers.insert(peer_data.addr, peer.clone());
|
2017-12-12 19:40:26 +03:00
|
|
|
|
2018-10-02 17:17:29 +03:00
|
|
|
Ok(())
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2019-01-10 04:22:48 +03:00
|
|
|
/// Add a peer as banned to block future connections, usually due to failed
|
|
|
|
/// handshake
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn add_banned(&self, addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
|
2019-01-10 04:22:48 +03:00
|
|
|
let peer_data = PeerData {
|
|
|
|
addr,
|
|
|
|
capabilities: Capabilities::UNKNOWN,
|
|
|
|
user_agent: "".to_string(),
|
|
|
|
flags: State::Banned,
|
|
|
|
last_banned: Utc::now().timestamp(),
|
|
|
|
ban_reason,
|
|
|
|
last_connected: Utc::now().timestamp(),
|
|
|
|
};
|
|
|
|
debug!("Banning peer {}.", addr);
|
|
|
|
self.save_peer(&peer_data)
|
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn is_known(&self, addr: PeerAddr) -> bool {
|
2019-05-15 18:51:35 +03:00
|
|
|
let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
|
|
|
|
Some(peers) => peers,
|
|
|
|
None => {
|
|
|
|
error!("is_known: failed to get peers lock");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
peers.contains_key(&addr)
|
2019-01-07 09:41:41 +03:00
|
|
|
}
|
|
|
|
|
2017-12-14 23:33:22 +03:00
|
|
|
/// Get vec of peers we are currently connected to.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn connected_peers(&self) -> Vec<Arc<Peer>> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
|
|
|
|
Some(peers) => peers,
|
|
|
|
None => {
|
|
|
|
error!("connected_peers: failed to get peers lock");
|
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
};
|
|
|
|
let mut res = peers
|
2018-01-18 21:39:56 +03:00
|
|
|
.values()
|
2018-10-09 10:27:34 +03:00
|
|
|
.filter(|p| p.is_connected())
|
2018-01-18 21:39:56 +03:00
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>();
|
2019-04-30 22:25:19 +03:00
|
|
|
res.shuffle(&mut thread_rng());
|
2017-12-14 23:33:22 +03:00
|
|
|
res
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2019-08-21 21:58:43 +03:00
|
|
|
/// Get vec of peers we currently have an outgoing connection with.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn outgoing_connected_peers(&self) -> Vec<Arc<Peer>> {
|
2019-04-18 16:11:06 +03:00
|
|
|
self.connected_peers()
|
2018-08-21 01:32:13 +03:00
|
|
|
.into_iter()
|
2019-04-18 16:11:06 +03:00
|
|
|
.filter(|x| x.info.is_outbound())
|
|
|
|
.collect()
|
2018-03-20 06:18:54 +03:00
|
|
|
}
|
|
|
|
|
2019-08-21 21:58:43 +03:00
|
|
|
/// Get vec of peers we currently have an incoming connection with.
|
|
|
|
pub fn incoming_connected_peers(&self) -> Vec<Arc<Peer>> {
|
|
|
|
self.connected_peers()
|
|
|
|
.into_iter()
|
|
|
|
.filter(|x| x.info.is_inbound())
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
/// Get a peer we're connected to by address.
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn get_connected_peer(&self, addr: PeerAddr) -> Option<Arc<Peer>> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
|
|
|
|
Some(peers) => peers,
|
|
|
|
None => {
|
|
|
|
error!("get_connected_peer: failed to get peers lock");
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
peers.get(&addr).map(|p| p.clone())
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2019-01-19 19:13:09 +03:00
|
|
|
/// Number of peers currently connected to.
|
2017-12-12 19:40:26 +03:00
|
|
|
pub fn peer_count(&self) -> u32 {
|
2019-04-18 16:11:06 +03:00
|
|
|
self.connected_peers().len() as u32
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2019-01-19 19:13:09 +03:00
|
|
|
/// Number of outbound peers currently connected to.
|
|
|
|
pub fn peer_outbound_count(&self) -> u32 {
|
2019-04-18 16:11:06 +03:00
|
|
|
self.outgoing_connected_peers().len() as u32
|
2019-01-19 19:13:09 +03:00
|
|
|
}
|
|
|
|
|
2019-08-21 21:58:43 +03:00
|
|
|
/// Number of inbound peers currently connected to.
|
|
|
|
pub fn peer_inbound_count(&self) -> u32 {
|
|
|
|
self.incoming_connected_peers().len() as u32
|
|
|
|
}
|
|
|
|
|
2018-01-18 21:39:56 +03:00
|
|
|
// Return vec of connected peers that currently advertise more work
|
|
|
|
// (total_difficulty) than we do.
|
2019-04-08 23:13:28 +03:00
|
|
|
pub fn more_work_peers(&self) -> Result<Vec<Arc<Peer>>, chain::Error> {
|
2017-12-29 03:49:27 +03:00
|
|
|
let peers = self.connected_peers();
|
|
|
|
if peers.len() == 0 {
|
2019-04-08 23:13:28 +03:00
|
|
|
return Ok(vec![]);
|
2017-12-29 03:49:27 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
let total_difficulty = self.total_difficulty()?;
|
2017-12-29 03:49:27 +03:00
|
|
|
|
|
|
|
let mut max_peers = peers
|
2018-08-21 01:32:13 +03:00
|
|
|
.into_iter()
|
2018-10-09 10:27:34 +03:00
|
|
|
.filter(|x| x.info.total_difficulty() > total_difficulty)
|
|
|
|
.collect::<Vec<_>>();
|
2017-12-29 03:49:27 +03:00
|
|
|
|
2019-04-30 22:25:19 +03:00
|
|
|
max_peers.shuffle(&mut thread_rng());
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(max_peers)
|
2017-12-29 03:49:27 +03:00
|
|
|
}
|
|
|
|
|
2019-01-12 02:38:27 +03:00
|
|
|
// Return number of connected peers that currently advertise more/same work
|
|
|
|
// (total_difficulty) than/as we do.
|
2019-04-08 23:13:28 +03:00
|
|
|
pub fn more_or_same_work_peers(&self) -> Result<usize, chain::Error> {
|
2019-01-12 02:38:27 +03:00
|
|
|
let peers = self.connected_peers();
|
|
|
|
if peers.len() == 0 {
|
2019-04-08 23:13:28 +03:00
|
|
|
return Ok(0);
|
2019-01-12 02:38:27 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
let total_difficulty = self.total_difficulty()?;
|
2019-01-12 02:38:27 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(peers
|
2019-01-12 02:38:27 +03:00
|
|
|
.iter()
|
|
|
|
.filter(|x| x.info.total_difficulty() >= total_difficulty)
|
2019-04-08 23:13:28 +03:00
|
|
|
.count())
|
2019-01-12 02:38:27 +03:00
|
|
|
}
|
|
|
|
|
2017-12-29 03:49:27 +03:00
|
|
|
/// Returns single random peer with more work than us.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn more_work_peer(&self) -> Option<Arc<Peer>> {
|
2019-04-08 23:13:28 +03:00
|
|
|
match self.more_work_peers() {
|
|
|
|
Ok(mut peers) => peers.pop(),
|
|
|
|
Err(e) => {
|
|
|
|
error!("failed to get more work peers: {:?}", e);
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
2017-12-29 03:49:27 +03:00
|
|
|
}
|
|
|
|
|
2018-05-29 05:45:31 +03:00
|
|
|
/// Return vec of connected peers that currently have the most worked
|
|
|
|
/// branch, showing the highest total difficulty.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn most_work_peers(&self) -> Vec<Arc<Peer>> {
|
2017-12-12 19:40:26 +03:00
|
|
|
let peers = self.connected_peers();
|
|
|
|
if peers.len() == 0 {
|
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
let max_total_difficulty = match peers.iter().map(|x| x.info.total_difficulty()).max() {
|
|
|
|
Some(v) => v,
|
|
|
|
None => return vec![],
|
|
|
|
};
|
2017-12-12 19:40:26 +03:00
|
|
|
|
|
|
|
let mut max_peers = peers
|
2018-08-21 01:32:13 +03:00
|
|
|
.into_iter()
|
2018-10-09 10:27:34 +03:00
|
|
|
.filter(|x| x.info.total_difficulty() == max_total_difficulty)
|
|
|
|
.collect::<Vec<_>>();
|
2017-12-12 19:40:26 +03:00
|
|
|
|
2019-04-30 22:25:19 +03:00
|
|
|
max_peers.shuffle(&mut thread_rng());
|
2017-12-12 19:40:26 +03:00
|
|
|
max_peers
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:45:31 +03:00
|
|
|
/// Returns single random peer with the most worked branch, showing the
|
|
|
|
/// highest total difficulty.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn most_work_peer(&self) -> Option<Arc<Peer>> {
|
2018-08-21 01:32:13 +03:00
|
|
|
self.most_work_peers().pop()
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn is_banned(&self, peer_addr: PeerAddr) -> bool {
|
|
|
|
if let Ok(peer) = self.store.get_peer(peer_addr) {
|
2019-05-15 18:51:35 +03:00
|
|
|
return peer.flags == State::Banned;
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2018-05-29 05:45:31 +03:00
|
|
|
/// Ban a peer, disconnecting it if we're currently connected
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) {
|
|
|
|
if let Err(e) = self.update_state(peer_addr, State::Banned) {
|
2018-10-21 23:30:56 +03:00
|
|
|
error!("Couldn't ban {}: {:?}", peer_addr, e);
|
2019-05-15 18:51:35 +03:00
|
|
|
return;
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2018-01-03 04:03:44 +03:00
|
|
|
if let Some(peer) = self.get_connected_peer(peer_addr) {
|
2018-10-21 23:30:56 +03:00
|
|
|
debug!("Banning peer {}", peer_addr);
|
2017-12-12 19:40:26 +03:00
|
|
|
// setting peer status will get it removed at the next clean_peer
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
match peer.send_ban_reason(ban_reason) {
|
|
|
|
Err(e) => error!("failed to send a ban reason to{}: {:?}", peer_addr, e),
|
|
|
|
Ok(_) => debug!("ban reason {:?} was sent to {}", ban_reason, peer_addr),
|
|
|
|
};
|
2017-12-12 19:40:26 +03:00
|
|
|
peer.set_banned();
|
|
|
|
peer.stop();
|
2019-05-15 18:51:35 +03:00
|
|
|
|
|
|
|
let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
|
|
|
|
Some(peers) => peers,
|
|
|
|
None => {
|
|
|
|
error!("ban_peer: failed to get peers lock");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
peers.remove(&peer.info.addr);
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-13 19:03:34 +03:00
|
|
|
/// Unban a peer, checks if it exists and banned then unban
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn unban_peer(&self, peer_addr: PeerAddr) {
|
2018-11-10 06:27:52 +03:00
|
|
|
debug!("unban_peer: peer {}", peer_addr);
|
2019-02-18 15:15:32 +03:00
|
|
|
match self.get_peer(peer_addr) {
|
2018-01-04 06:25:14 +03:00
|
|
|
Ok(_) => {
|
2019-02-18 15:15:32 +03:00
|
|
|
if self.is_banned(peer_addr) {
|
|
|
|
if let Err(e) = self.update_state(peer_addr, State::Healthy) {
|
2018-10-21 23:30:56 +03:00
|
|
|
error!("Couldn't unban {}: {:?}", peer_addr, e);
|
2018-01-04 06:25:14 +03:00
|
|
|
}
|
|
|
|
} else {
|
2018-10-21 23:30:56 +03:00
|
|
|
error!("Couldn't unban {}: peer is not banned", peer_addr);
|
2018-01-04 06:25:14 +03:00
|
|
|
}
|
2018-01-18 21:39:56 +03:00
|
|
|
}
|
2018-10-21 23:30:56 +03:00
|
|
|
Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
|
2018-01-04 06:25:14 +03:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2019-08-22 18:35:31 +03:00
|
|
|
/// Generic broadcast helper: runs `inner` (a per-peer send closure named
/// by `obj_name` for logging) against every connected peer and returns
/// how many sends reported success (`Ok(true)`).
///
/// A closure returning `Ok(false)` counts as "peer skipped" and is not
/// tallied. On `Err` the offending peer is stopped and removed from the
/// in-memory map; if the peers write lock cannot be acquired for that
/// removal, the whole broadcast loop is abandoned (`break`), returning
/// the count accumulated so far.
fn broadcast<F>(&self, obj_name: &str, inner: F) -> u32
where
    F: Fn(&Peer) -> Result<bool, Error>,
{
    let mut count = 0;

    for p in self.connected_peers().iter() {
        match inner(&p) {
            Ok(true) => count += 1,
            Ok(false) => (),
            Err(e) => {
                debug!(
                    "Error sending {:?} to peer {:?}: {:?}",
                    obj_name, &p.info.addr, e
                );

                // Acquire the write lock only on the error path; timeout
                // aborts the remaining broadcast rather than blocking.
                let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
                    Some(peers) => peers,
                    None => {
                        error!("broadcast: failed to get peers lock");
                        break;
                    }
                };
                p.stop();
                peers.remove(&p.info.addr);
            }
        }
    }
    count
}
|
|
|
|
|
2019-08-22 18:35:31 +03:00
|
|
|
/// Broadcast a compact block to all our connected peers.
|
|
|
|
/// This is only used when initially broadcasting a newly mined block.
|
2018-01-31 23:39:55 +03:00
|
|
|
pub fn broadcast_compact_block(&self, b: &core::CompactBlock) {
|
2019-08-22 18:35:31 +03:00
|
|
|
let count = self.broadcast("compact block", |p| p.send_compact_block(b));
|
2018-01-31 23:39:55 +03:00
|
|
|
debug!(
|
|
|
|
"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
|
|
|
|
b.hash(),
|
2018-09-11 01:36:57 +03:00
|
|
|
b.header.pow.total_difficulty,
|
2018-01-31 23:39:55 +03:00
|
|
|
b.header.height,
|
|
|
|
count,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-08-22 18:35:31 +03:00
|
|
|
/// Broadcast a block header to all our connected peers.
|
2018-01-30 17:42:04 +03:00
|
|
|
/// A peer implementation may drop the broadcast request
|
2018-08-21 01:32:13 +03:00
|
|
|
/// if it knows the remote peer already has the header.
|
2018-01-30 17:42:04 +03:00
|
|
|
pub fn broadcast_header(&self, bh: &core::BlockHeader) {
|
2019-08-22 18:35:31 +03:00
|
|
|
let count = self.broadcast("header", |p| p.send_header(bh));
|
2018-10-06 02:53:55 +03:00
|
|
|
debug!(
|
2018-01-30 17:42:04 +03:00
|
|
|
"broadcast_header: {}, {} at {}, to {} peers, done.",
|
|
|
|
bh.hash(),
|
2018-09-11 01:36:57 +03:00
|
|
|
bh.pow.total_difficulty,
|
2018-01-30 17:42:04 +03:00
|
|
|
bh.height,
|
|
|
|
count,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2019-08-22 18:35:31 +03:00
|
|
|
/// Broadcasts the provided transaction to all our connected peers.
|
2017-12-14 23:33:22 +03:00
|
|
|
/// A peer implementation may drop the broadcast request
|
|
|
|
/// if it knows the remote peer already has the transaction.
|
2017-12-12 19:40:26 +03:00
|
|
|
pub fn broadcast_transaction(&self, tx: &core::Transaction) {
|
2019-08-22 18:35:31 +03:00
|
|
|
let count = self.broadcast("transaction", |p| p.send_transaction(tx));
|
2018-11-07 12:28:17 +03:00
|
|
|
debug!(
|
|
|
|
"broadcast_transaction: {} to {} peers, done.",
|
2018-08-21 01:32:13 +03:00
|
|
|
tx.hash(),
|
|
|
|
count,
|
|
|
|
);
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2018-05-29 05:45:31 +03:00
|
|
|
/// Ping all our connected peers. Always automatically expects a pong back
/// or disconnects. This acts as a liveness test.
///
/// `total_difficulty` and `height` are our own chain state, advertised in
/// each ping. A peer that fails the send is stopped and removed from the
/// in-memory map; if the peers write lock cannot be acquired for that
/// removal, the remaining pings are abandoned (`break`).
pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
    for p in self.connected_peers().iter() {
        if let Err(e) = p.send_ping(total_difficulty, height) {
            debug!("Error pinging peer {:?}: {:?}", &p.info.addr, e);
            // Write lock taken only on the failure path; a timeout here
            // aborts the sweep rather than blocking the caller.
            let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
                Some(peers) => peers,
                None => {
                    error!("check_all: failed to get peers lock");
                    break;
                }
            };
            p.stop();
            peers.remove(&p.info.addr);
        }
    }
}
|
|
|
|
|
|
|
|
/// All peer information we have in storage
|
|
|
|
pub fn all_peers(&self) -> Vec<PeerData> {
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
match self.store.all_peers() {
|
|
|
|
Ok(peers) => peers,
|
|
|
|
Err(e) => {
|
|
|
|
error!("all_peers failed: {:?}", e);
|
|
|
|
vec![]
|
|
|
|
}
|
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Find peers in store (not necessarily connected) and return their data
|
|
|
|
pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
match self.store.find_peers(state, cap, count) {
|
|
|
|
Ok(peers) => peers,
|
|
|
|
Err(e) => {
|
|
|
|
error!("failed to find peers: {:?}", e);
|
|
|
|
vec![]
|
|
|
|
}
|
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
2018-01-03 04:03:44 +03:00
|
|
|
/// Get peer in store by address
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
|
2018-01-03 04:03:44 +03:00
|
|
|
self.store.get_peer(peer_addr).map_err(From::from)
|
|
|
|
}
|
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
/// Whether we've already seen a peer with the provided address
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
|
2017-12-12 19:40:26 +03:00
|
|
|
self.store.exists_peer(peer_addr).map_err(From::from)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Saves updated information about a peer
|
|
|
|
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
|
|
|
|
self.store.save_peer(p).map_err(From::from)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Updates the state of a peer in store
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
|
2018-01-18 21:39:56 +03:00
|
|
|
self.store
|
|
|
|
.update_state(peer_addr, new_state)
|
|
|
|
.map_err(From::from)
|
|
|
|
}
|
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
	/// Iterate over the peer list and prune all peers we have
	/// lost connection to or have been deemed problematic.
	/// Also avoid connected peer count getting too high.
	///
	/// Two-phase design: first collect removal candidates under a read lock,
	/// then apply the removals under a separate write lock, so the (possibly
	/// slow) inspection work never blocks writers.
	pub fn clean_peers(&self, max_inbound_count: usize, max_outbound_count: usize) {
		// Addresses of peers scheduled for removal from the connected map.
		let mut rm = vec![];

		// build a list of peers to be cleaned up
		{
			let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
				Some(peers) => peers,
				None => {
					error!("clean_peers: can't get peers lock");
					return;
				}
			};
			for peer in peers.values() {
				if peer.is_banned() {
					debug!("clean_peers {:?}, peer banned", peer.info.addr);
					rm.push(peer.info.addr.clone());
				} else if !peer.is_connected() {
					debug!("clean_peers {:?}, not connected", peer.info.addr);
					rm.push(peer.info.addr.clone());
				} else if peer.is_abusive() {
					// counts.0 = messages sent, counts.1 = messages received
					// over the last minute (per last_min_message_counts).
					if let Some(counts) = peer.last_min_message_counts() {
						debug!(
							"clean_peers {:?}, abusive ({} sent, {} recv)",
							peer.info.addr, counts.0, counts.1,
						);
					}
					// Abusive peers get banned in the store, not just dropped.
					let _ = self.update_state(peer.info.addr, State::Banned);
					rm.push(peer.info.addr.clone());
				} else {
					// A "stuck" peer that is also behind our own chain is
					// useless to us: mark it defunct and drop it. A stuck
					// peer that is ahead may simply be one we're syncing from.
					let (stuck, diff) = peer.is_stuck();
					match self.adapter.total_difficulty() {
						Ok(total_difficulty) => {
							if stuck && diff < total_difficulty {
								debug!("clean_peers {:?}, stuck peer", peer.info.addr);
								let _ = self.update_state(peer.info.addr, State::Defunct);
								rm.push(peer.info.addr.clone());
							}
						}
						Err(e) => error!("failed to get total difficulty: {:?}", e),
					}
				}
			}
		}

		// check here to make sure we don't have too many outgoing connections
		// Excess outbound peers beyond the cap are simply dropped (the first
		// `excess_outgoing_count` in iteration order).
		let excess_outgoing_count =
			(self.peer_outbound_count() as usize).saturating_sub(max_outbound_count);
		if excess_outgoing_count > 0 {
			let mut addrs = self
				.outgoing_connected_peers()
				.iter()
				.take(excess_outgoing_count)
				.map(|x| x.info.addr.clone())
				.collect::<Vec<_>>();
			rm.append(&mut addrs);
		}

		// check here to make sure we don't have too many incoming connections
		let excess_incoming_count =
			(self.peer_inbound_count() as usize).saturating_sub(max_inbound_count);
		if excess_incoming_count > 0 {
			let mut addrs = self
				.incoming_connected_peers()
				.iter()
				.take(excess_incoming_count)
				.map(|x| x.info.addr.clone())
				.collect::<Vec<_>>();
			rm.append(&mut addrs);
		}

		// now clean up peer map based on the list to remove
		{
			let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
				Some(peers) => peers,
				None => {
					error!("clean_peers: failed to get peers lock");
					return;
				}
			};
			for addr in rm {
				// Stop the peer's worker (if still present) before removing
				// it from the map.
				let _ = peers.get(&addr).map(|peer| peer.stop());
				peers.remove(&addr);
			}
		}
	}
|
|
|
|
|
2018-02-13 03:38:52 +03:00
|
|
|
pub fn stop(&self) {
|
2018-10-20 03:13:07 +03:00
|
|
|
let mut peers = self.peers.write();
|
2019-05-30 03:03:12 +03:00
|
|
|
for peer in peers.values() {
|
|
|
|
peer.stop();
|
|
|
|
}
|
2018-02-13 03:38:52 +03:00
|
|
|
for (_, peer) in peers.drain() {
|
2019-05-30 03:03:12 +03:00
|
|
|
peer.wait();
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
2018-09-07 23:01:54 +03:00
|
|
|
|
2019-08-21 21:58:43 +03:00
|
|
|
/// We have enough outbound connected peers
|
|
|
|
pub fn enough_outbound_peers(&self) -> bool {
|
|
|
|
self.peer_outbound_count() >= self.config.peer_min_preferred_outbound_count()
|
2018-09-07 23:01:54 +03:00
|
|
|
}
|
2018-10-22 23:59:40 +03:00
|
|
|
|
|
|
|
/// Removes those peers that seem to have expired
|
|
|
|
pub fn remove_expired(&self) {
|
|
|
|
let now = Utc::now();
|
|
|
|
|
|
|
|
// Delete defunct peers from storage
|
|
|
|
let _ = self.store.delete_peers(|peer| {
|
|
|
|
let diff = now - Utc.timestamp(peer.last_connected, 0);
|
|
|
|
|
|
|
|
let should_remove = peer.flags == State::Defunct
|
|
|
|
&& diff > Duration::seconds(global::PEER_EXPIRATION_REMOVE_TIME);
|
|
|
|
|
|
|
|
if should_remove {
|
|
|
|
debug!(
|
|
|
|
"removing peer {:?}: last connected {} days {} hours {} minutes ago.",
|
|
|
|
peer.addr,
|
|
|
|
diff.num_days(),
|
|
|
|
diff.num_hours(),
|
|
|
|
diff.num_minutes()
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
should_remove
|
|
|
|
});
|
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
impl ChainAdapter for Peers {
|
2019-04-08 23:13:28 +03:00
|
|
|
	// Straight delegation to the wrapped chain adapter.
	fn total_difficulty(&self) -> Result<Difficulty, chain::Error> {
		self.adapter.total_difficulty()
	}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
	// Straight delegation to the wrapped chain adapter.
	fn total_height(&self) -> Result<u64, chain::Error> {
		self.adapter.total_height()
	}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2018-11-07 12:28:17 +03:00
|
|
|
	// Straight delegation: look up a transaction by its kernel hash.
	fn get_transaction(&self, kernel_hash: Hash) -> Option<core::Transaction> {
		self.adapter.get_transaction(kernel_hash)
	}
|
|
|
|
|
2019-04-18 16:11:06 +03:00
|
|
|
	// Straight delegation: a peer advertised a tx kernel hash to us.
	fn tx_kernel_received(
		&self,
		kernel_hash: Hash,
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		self.adapter.tx_kernel_received(kernel_hash, peer_info)
	}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
	// Straight delegation: a full transaction arrived from the network.
	// `stem` flags a Dandelion stem-phase tx — TODO confirm against adapter.
	fn transaction_received(
		&self,
		tx: core::Transaction,
		stem: bool,
	) -> Result<bool, chain::Error> {
		self.adapter.transaction_received(tx, stem)
	}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn block_received(
|
|
|
|
&self,
|
|
|
|
b: core::Block,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
was_requested: bool,
|
|
|
|
) -> Result<bool, chain::Error> {
|
2018-02-27 23:33:40 +03:00
|
|
|
let hash = b.hash();
|
2019-04-18 16:11:06 +03:00
|
|
|
if !self.adapter.block_received(b, peer_info, was_requested)? {
|
2018-01-30 17:42:04 +03:00
|
|
|
// if the peer sent us a block that's intrinsically bad
|
2018-06-13 19:03:34 +03:00
|
|
|
// they are either mistaken or malevolent, both of which require a ban
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
2018-10-21 23:30:56 +03:00
|
|
|
"Received a bad block {} from {}, the peer will be banned",
|
2019-04-18 16:11:06 +03:00
|
|
|
hash, peer_info.addr,
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2019-04-18 16:11:06 +03:00
|
|
|
self.ban_peer(peer_info.addr, ReasonForBan::BadBlock);
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(false)
|
2018-01-30 17:42:04 +03:00
|
|
|
} else {
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(true)
|
2018-01-30 17:42:04 +03:00
|
|
|
}
|
|
|
|
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn compact_block_received(
|
|
|
|
&self,
|
|
|
|
cb: core::CompactBlock,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2018-02-27 23:33:40 +03:00
|
|
|
let hash = cb.hash();
|
2019-04-18 16:11:06 +03:00
|
|
|
if !self.adapter.compact_block_received(cb, peer_info)? {
|
2018-01-31 23:39:55 +03:00
|
|
|
// if the peer sent us a block that's intrinsically bad
|
2018-06-13 19:03:34 +03:00
|
|
|
// they are either mistaken or malevolent, both of which require a ban
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
|
|
|
"Received a bad compact block {} from {}, the peer will be banned",
|
2019-04-18 16:11:06 +03:00
|
|
|
hash, peer_info.addr
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2019-04-18 16:11:06 +03:00
|
|
|
self.ban_peer(peer_info.addr, ReasonForBan::BadCompactBlock);
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(false)
|
2018-01-31 23:39:55 +03:00
|
|
|
} else {
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(true)
|
2018-01-31 23:39:55 +03:00
|
|
|
}
|
|
|
|
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn header_received(
|
|
|
|
&self,
|
|
|
|
bh: core::BlockHeader,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-04-18 16:11:06 +03:00
|
|
|
if !self.adapter.header_received(bh, peer_info)? {
|
2018-01-30 17:42:04 +03:00
|
|
|
// if the peer sent us a block header that's intrinsically bad
|
2018-06-13 19:03:34 +03:00
|
|
|
// they are either mistaken or malevolent, both of which require a ban
|
2019-04-18 16:11:06 +03:00
|
|
|
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader);
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(false)
|
2017-12-12 19:40:26 +03:00
|
|
|
} else {
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(true)
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn headers_received(
|
|
|
|
&self,
|
|
|
|
headers: &[core::BlockHeader],
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-04-18 16:11:06 +03:00
|
|
|
if !self.adapter.headers_received(headers, peer_info)? {
|
2018-08-17 05:30:05 +03:00
|
|
|
// if the peer sent us a block header that's intrinsically bad
|
|
|
|
// they are either mistaken or malevolent, both of which require a ban
|
2019-04-18 16:11:06 +03:00
|
|
|
self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader);
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(false)
|
2018-08-17 05:30:05 +03:00
|
|
|
} else {
|
2019-04-08 23:13:28 +03:00
|
|
|
Ok(true)
|
2018-08-17 05:30:05 +03:00
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
/// Delegate a header-locator request to the chain adapter: given the list
/// of locator hashes `hs`, returns the headers the adapter resolves them to.
fn locate_headers(&self, hs: &[Hash]) -> Result<Vec<core::BlockHeader>, chain::Error> {
	self.adapter.locate_headers(hs)
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
/// Fetch a full block by hash via the chain adapter; `None` when the
/// adapter cannot provide it.
fn get_block(&self, h: Hash) -> Option<core::Block> {
	self.adapter.get_block(h)
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-05-14 19:17:38 +03:00
|
|
|
/// Obtain a readable `File` of kernel data from the chain adapter
/// (pure delegation; any error comes from the adapter).
fn kernel_data_read(&self) -> Result<File, chain::Error> {
	self.adapter.kernel_data_read()
}
|
|
|
|
|
2019-08-26 23:17:47 +03:00
|
|
|
/// Stream received kernel data into the chain adapter from `reader`;
/// the boolean result is the adapter's, passed through unchanged.
fn kernel_data_write(&self, reader: &mut dyn Read) -> Result<bool, chain::Error> {
	self.adapter.kernel_data_write(reader)
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
/// Ask the chain adapter for a txhashset archive at block hash `h`;
/// `None` when the adapter cannot serve it.
fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
	self.adapter.txhashset_read(h)
}
|
2018-05-02 04:39:22 +03:00
|
|
|
|
2019-07-23 11:46:29 +03:00
|
|
|
/// Fetch the header the txhashset archive is built against, straight
/// from the chain adapter.
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
	self.adapter.txhashset_archive_header()
}
|
|
|
|
|
2018-07-12 19:06:52 +03:00
|
|
|
/// Whether the node is currently prepared to accept an incoming txhashset
/// archive — decided entirely by the chain adapter.
fn txhashset_receive_ready(&self) -> bool {
	self.adapter.txhashset_receive_ready()
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
/// Hand a received txhashset archive (`txhashset_data` for block hash `h`)
/// to the chain adapter for validation and writing.
///
/// NOTE(review): the adapter's boolean here follows the *opposite*
/// convention of the `*_received` handlers above — a `true` result signals
/// bad data (see the debug message below), triggers a ban, and is passed
/// through to the caller as `Ok(true)`. Confirm against the adapter impl.
fn txhashset_write(
	&self,
	h: Hash,
	txhashset_data: File,
	peer_info: &PeerInfo,
) -> Result<bool, chain::Error> {
	if self.adapter.txhashset_write(h, txhashset_data, peer_info)? {
		debug!(
			"Received a bad txhashset data from {}, the peer will be banned",
			peer_info.addr
		);
		// Bad txhashset data warrants an immediate ban of the sender.
		self.ban_peer(peer_info.addr, ReasonForBan::BadTxHashSet);
		// Propagate the adapter's "bad data" result unchanged.
		Ok(true)
	} else {
		Ok(false)
	}
}
|
2018-10-13 01:53:50 +03:00
|
|
|
|
|
|
|
/// Report txhashset download progress (`downloaded_size` of `total_size`
/// bytes since `start_time`) to the chain adapter; the boolean result is
/// the adapter's, passed through unchanged.
fn txhashset_download_update(
	&self,
	start_time: DateTime<Utc>,
	downloaded_size: u64,
	total_size: u64,
) -> bool {
	self.adapter
		.txhashset_download_update(start_time, downloaded_size, total_size)
}
|
2019-04-23 02:54:36 +03:00
|
|
|
|
|
|
|
/// Path of the temporary directory managed by the chain adapter.
fn get_tmp_dir(&self) -> PathBuf {
	self.adapter.get_tmp_dir()
}
|
|
|
|
|
|
|
|
/// Build the full pathname for `tmpfile_name` inside the adapter's
/// temporary directory.
fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf {
	self.adapter.get_tmpfile_pathname(tmpfile_name)
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
impl NetAdapter for Peers {
|
|
|
|
/// Find good peers we know with the provided capability and return their
|
|
|
|
/// addresses.
|
2019-02-18 15:15:32 +03:00
|
|
|
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr> {
|
2017-12-12 19:40:26 +03:00
|
|
|
let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
|
2018-10-21 23:30:56 +03:00
|
|
|
trace!("find_peer_addrs: {} healthy peers picked", peers.len());
|
2017-12-12 19:40:26 +03:00
|
|
|
map_vec!(peers, |p| p.addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A list of peers has been received from one of our peers.
|
2019-02-18 15:15:32 +03:00
|
|
|
fn peer_addrs_received(&self, peer_addrs: Vec<PeerAddr>) {
|
2018-10-21 23:30:56 +03:00
|
|
|
trace!("Received {} peer addrs, saving.", peer_addrs.len());
|
2017-12-12 19:40:26 +03:00
|
|
|
for pa in peer_addrs {
|
|
|
|
if let Ok(e) = self.exists_peer(pa) {
|
|
|
|
if e {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let peer = PeerData {
|
|
|
|
addr: pa,
|
2018-02-01 21:14:32 +03:00
|
|
|
capabilities: Capabilities::UNKNOWN,
|
2017-12-12 19:40:26 +03:00
|
|
|
user_agent: "".to_string(),
|
|
|
|
flags: State::Healthy,
|
2018-01-18 21:39:56 +03:00
|
|
|
last_banned: 0,
|
2018-05-29 05:45:31 +03:00
|
|
|
ban_reason: ReasonForBan::None,
|
2018-10-22 23:59:40 +03:00
|
|
|
last_connected: Utc::now().timestamp(),
|
2017-12-12 19:40:26 +03:00
|
|
|
};
|
|
|
|
if let Err(e) = self.save_peer(&peer) {
|
2018-10-21 23:30:56 +03:00
|
|
|
error!("Could not save received peer address: {:?}", e);
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
fn peer_difficulty(&self, addr: PeerAddr, diff: Difficulty, height: u64) {
|
|
|
|
if let Some(peer) = self.get_connected_peer(addr) {
|
2018-10-09 10:27:34 +03:00
|
|
|
peer.info.update(height, diff);
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
|
|
|
}
|
2018-03-27 19:09:41 +03:00
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
fn is_banned(&self, addr: PeerAddr) -> bool {
|
2018-12-29 00:54:37 +03:00
|
|
|
if let Ok(peer) = self.get_peer(addr) {
|
|
|
|
peer.flags == State::Banned
|
2018-03-27 19:09:41 +03:00
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|