Mirror of https://github.com/mimblewimble/grin.git, synced 2025-04-22 18:31:15 +03:00
Reduce number of unwraps in servers crate (#2707)
It doesn't include the stratum server, which has changed significantly on the 1.1 branch, or the adapters, which are big enough for a separate PR.
This commit is contained in:
parent 340070f0f7
commit 325e32821d

11 changed files with 218 additions and 112 deletions
@@ -59,6 +59,24 @@ pub struct DandelionConfig {
     pub stem_probability: Option<usize>,
 }
 
+impl DandelionConfig {
+    pub fn relay_secs(&self) -> u64 {
+        self.relay_secs.unwrap_or(DANDELION_RELAY_SECS)
+    }
+
+    pub fn embargo_secs(&self) -> u64 {
+        self.embargo_secs.unwrap_or(DANDELION_EMBARGO_SECS)
+    }
+
+    pub fn patience_secs(&self) -> u64 {
+        self.patience_secs.unwrap_or(DANDELION_PATIENCE_SECS)
+    }
+
+    pub fn stem_probability(&self) -> usize {
+        self.stem_probability.unwrap_or(DANDELION_STEM_PROBABILITY)
+    }
+}
+
 impl Default for DandelionConfig {
     fn default() -> DandelionConfig {
         DandelionConfig {
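The accessor pattern above replaces scattered unwrap() calls on the optional Dandelion settings with a single place that applies the documented default. A minimal standalone sketch of the same idea, with an illustrative constant rather than the real Grin default:

const DANDELION_RELAY_SECS: u64 = 600; // illustrative default, not the real Grin value

#[derive(Debug, Default)]
struct DandelionConfig {
    relay_secs: Option<u64>,
}

impl DandelionConfig {
    // Callers always get a value; a missing config entry falls back to the default.
    fn relay_secs(&self) -> u64 {
        self.relay_secs.unwrap_or(DANDELION_RELAY_SECS)
    }
}

fn main() {
    let cfg = DandelionConfig::default();
    assert_eq!(cfg.relay_secs(), DANDELION_RELAY_SECS);

    let cfg = DandelionConfig { relay_secs: Some(60) };
    assert_eq!(cfg.relay_secs(), 60);
}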
@@ -20,7 +20,8 @@ use std::sync::Arc;
 use crate::api;
 use crate::chain;
 use crate::core::global::ChainTypes;
-use crate::core::{core, pow};
+use crate::core::{core, libtx, pow};
+use crate::keychain;
 use crate::p2p;
 use crate::pool;
 use crate::store;

@@ -32,6 +33,8 @@ use chrono::prelude::{DateTime, Utc};
 pub enum Error {
     /// Error originating from the core implementation.
     Core(core::block::Error),
+    /// Error originating from the libtx implementation.
+    LibTx(libtx::Error),
     /// Error originating from the db storage.
     Store(store::Error),
     /// Error originating from the blockchain implementation.

@@ -46,10 +49,16 @@ pub enum Error {
     Cuckoo(pow::Error),
     /// Error originating from the transaction pool.
     Pool(pool::PoolError),
+    /// Error originating from the keychain.
+    Keychain(keychain::Error),
     /// Invalid Arguments.
     ArgumentError(String),
     /// Error originating from some I/O operation (likely a file on disk).
     IOError(std::io::Error),
+    /// Configuration error
+    Configuration(String),
+    /// General error
+    General(String),
 }

 impl From<core::block::Error> for Error {

@@ -103,6 +112,18 @@ impl From<pool::PoolError> for Error {
     }
 }

+impl From<keychain::Error> for Error {
+    fn from(e: keychain::Error) -> Error {
+        Error::Keychain(e)
+    }
+}
+
+impl From<libtx::Error> for Error {
+    fn from(e: libtx::Error) -> Error {
+        Error::LibTx(e)
+    }
+}
+
 /// Type of seeding the server will use to find other peers on the network.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
 pub enum ChainValidationMode {
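The new From impls are what let the rest of this change use the ? operator on keychain and libtx results inside functions that return the servers Error type. A small self-contained sketch of that mechanism, with a toy error standing in for keychain::Error:

#[derive(Debug)]
struct KeychainError(String); // stand-in for keychain::Error

#[derive(Debug)]
enum Error {
    Keychain(KeychainError),
}

impl From<KeychainError> for Error {
    fn from(e: KeychainError) -> Error {
        Error::Keychain(e)
    }
}

fn derive_key() -> Result<u64, KeychainError> {
    Err(KeychainError("bad seed".to_owned()))
}

// Thanks to the From impl, ? converts KeychainError into Error automatically.
fn build_reward() -> Result<u64, Error> {
    let key = derive_key()?;
    Ok(key + 1)
}

fn main() {
    println!("{:?}", build_reward());
}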
@@ -49,7 +49,7 @@ pub fn monitor_transactions(
         }

         // This is the patience timer, we loop every n secs.
-        let patience_secs = dandelion_config.patience_secs.unwrap();
+        let patience_secs = dandelion_config.patience_secs();
         thread::sleep(Duration::from_secs(patience_secs));

         // Step 1: find all "ToStem" entries in stempool from last run.

@@ -199,7 +199,7 @@ fn process_fresh_entries(

     for x in &mut fresh_entries.iter_mut() {
         let random = rng.gen_range(0, 101);
-        if random <= dandelion_config.stem_probability.unwrap() {
+        if random <= dandelion_config.stem_probability() {
             x.state = PoolEntryState::ToStem;
         } else {
             x.state = PoolEntryState::ToFluff;

@@ -214,7 +214,7 @@ fn process_expired_entries(
     tx_pool: Arc<RwLock<TransactionPool>>,
 ) -> Result<(), PoolError> {
     let now = Utc::now().timestamp();
-    let embargo_sec = dandelion_config.embargo_secs.unwrap() + thread_rng().gen_range(0, 31);
+    let embargo_sec = dandelion_config.embargo_secs() + thread_rng().gen_range(0, 31);
     let cutoff = now - embargo_sec as i64;

     let mut expired_entries = vec![];

@@ -249,7 +249,7 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionConfig
     let dandelion_relay = peers.get_dandelion_relay();
     if let Some((last_added, _)) = dandelion_relay {
         let dandelion_interval = Utc::now().timestamp() - last_added;
-        if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
+        if dandelion_interval >= dandelion_config.relay_secs() as i64 {
             debug!("monitor_peers: updating expired dandelion relay");
             peers.update_dandelion_relay();
         }
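With the accessors in place, the stem/fluff decision and the relay and embargo timers read config values directly instead of unwrapping Options. A dependency-free sketch of just the stem/fluff decision, where the random roll is passed in by the caller (the real code draws it with rng.gen_range(0, 101)):

#[derive(Debug, PartialEq)]
enum PoolEntryState {
    ToStem,
    ToFluff,
}

// `roll` is assumed to be uniform in 0..=100, matching the gen_range call above.
fn choose_state(roll: usize, stem_probability: usize) -> PoolEntryState {
    if roll <= stem_probability {
        PoolEntryState::ToStem
    } else {
        PoolEntryState::ToFluff
    }
}

fn main() {
    assert_eq!(choose_state(10, 90), PoolEntryState::ToStem);
    assert_eq!(choose_state(95, 90), PoolEntryState::ToFluff);
}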
@@ -331,7 +331,9 @@ fn listen_for_addrs(
                 );
                 continue;
             } else {
-                *connecting_history.get_mut(&addr).unwrap() = now;
+                if let Some(history) = connecting_history.get_mut(&addr) {
+                    *history = now;
+                }
             }
         }
         connecting_history.insert(addr, now);
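The listen_for_addrs change swaps an unwrap on HashMap::get_mut for an if let, so a missing key becomes a no-op instead of a panic. A standalone sketch of the two timestamp paths (refresh vs. insert), with a plain u64 standing in for the real timestamp type:

use std::collections::HashMap;

fn touch(connecting_history: &mut HashMap<String, u64>, addr: &str, now: u64) {
    if connecting_history.contains_key(addr) {
        // Refresh the existing entry; no panic even if the entry vanished in between.
        if let Some(history) = connecting_history.get_mut(addr) {
            *history = now;
        }
    } else {
        connecting_history.insert(addr.to_owned(), now);
    }
}

fn main() {
    let mut history = HashMap::new();
    touch(&mut history, "10.0.0.1:3414", 100);
    touch(&mut history, "10.0.0.1:3414", 200);
    assert_eq!(history["10.0.0.1:3414"], 200);
}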
@@ -220,9 +220,14 @@ impl Server {
                 warn!("No seed configured, will stay solo until connected to");
                 seed::predefined_seeds(vec![])
             }
-            p2p::Seeding::List => {
-                seed::predefined_seeds(config.p2p_config.seeds.clone().unwrap())
-            }
+            p2p::Seeding::List => match &config.p2p_config.seeds {
+                Some(seeds) => seed::predefined_seeds(seeds.clone()),
+                None => {
+                    return Err(Error::Configuration(
+                        "Seeds must be configured for seeding type List".to_owned(),
+                    ));
+                }
+            },
             p2p::Seeding::DNSSeed => seed::dns_seeds(),
             _ => unreachable!(),
         };
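For Seeding::List, the unwrap on the optional seed list becomes a match that turns a missing list into a configuration error the caller can report. A reduced sketch of that shape, with a simplified config struct:

#[derive(Debug)]
enum Error {
    Configuration(String),
}

struct P2pConfig {
    seeds: Option<Vec<String>>,
}

fn list_seeds(config: &P2pConfig) -> Result<Vec<String>, Error> {
    match &config.seeds {
        Some(seeds) => Ok(seeds.clone()),
        None => Err(Error::Configuration(
            "Seeds must be configured for seeding type List".to_owned(),
        )),
    }
}

fn main() {
    let cfg = P2pConfig { seeds: None };
    println!("{:?}", list_seeds(&cfg));
}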
@@ -389,13 +394,13 @@ impl Server {
     }

     /// The chain head
-    pub fn head(&self) -> chain::Tip {
-        self.chain.head().unwrap()
+    pub fn head(&self) -> Result<chain::Tip, Error> {
+        self.chain.head().map_err(|e| e.into())
     }

     /// The head of the block header chain
-    pub fn header_head(&self) -> chain::Tip {
-        self.chain.header_head().unwrap()
+    pub fn header_head(&self) -> Result<chain::Tip, Error> {
+        self.chain.header_head().map_err(|e| e.into())
     }

     /// Returns a set of stats about this server. This and the ServerStats

@@ -417,7 +422,7 @@ impl Server {
             .into_iter()
             .collect();

-        let tip_height = self.chain.head().unwrap().height as i64;
+        let tip_height = self.head()?.height as i64;
         let mut height = tip_height as i64 - last_blocks.len() as i64 + 1;

         let txhashset = self.chain.txhashset();

@@ -475,8 +480,8 @@ impl Server {
             .collect();
         Ok(ServerStats {
             peer_count: self.peer_count(),
-            head: self.head(),
-            header_head: self.header_head(),
+            head: self.head()?,
+            header_head: self.header_head()?,
             sync_status: self.sync_state.status(),
             stratum_stats: stratum_stats,
             peer_stats: peer_stats,
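Server::head() and Server::header_head() now surface the chain error instead of panicking; map_err(|e| e.into()) relies on an existing From<chain::Error> conversion into the servers Error. A compact sketch of how the call sites change, using toy types in place of the real chain API:

#[derive(Debug)]
struct ChainError;

#[derive(Debug)]
enum Error {
    Chain(ChainError),
}

impl From<ChainError> for Error {
    fn from(e: ChainError) -> Error {
        Error::Chain(e)
    }
}

#[derive(Debug, Clone)]
struct Tip {
    height: u64,
}

struct Chain;

impl Chain {
    fn head(&self) -> Result<Tip, ChainError> {
        Ok(Tip { height: 42 })
    }
}

struct Server {
    chain: Chain,
}

impl Server {
    // Previously `-> Tip` with an unwrap; now the error is converted and propagated.
    fn head(&self) -> Result<Tip, Error> {
        self.chain.head().map_err(|e| e.into())
    }

    // Call sites use `?` instead of unwrap, as in the ServerStats construction above.
    fn tip_height(&self) -> Result<i64, Error> {
        Ok(self.head()?.height as i64)
    }
}

fn main() {
    let server = Server { chain: Chain };
    println!("{:?}", server.tip_height());
}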
@@ -51,11 +51,15 @@ impl BodySync {

     /// Check whether a body sync is needed and run it if so.
     /// Return true if txhashset download is needed (when requested block is under the horizon).
-    pub fn check_run(&mut self, head: &chain::Tip, highest_height: u64) -> bool {
+    pub fn check_run(
+        &mut self,
+        head: &chain::Tip,
+        highest_height: u64,
+    ) -> Result<bool, chain::Error> {
         // run the body_sync every 5s
-        if self.body_sync_due() {
-            if self.body_sync() {
-                return true;
+        if self.body_sync_due()? {
+            if self.body_sync()? {
+                return Ok(true);
             }

             self.sync_state.update(SyncStatus::BodySync {

@@ -63,11 +67,11 @@ impl BodySync {
                 highest_height: highest_height,
             });
         }
-        false
+        Ok(false)
     }

     /// Return true if txhashset download is needed (when requested block is under the horizon).
-    fn body_sync(&mut self) -> bool {
+    fn body_sync(&mut self) -> Result<bool, chain::Error> {
         let mut hashes: Option<Vec<Hash>> = Some(vec![]);
         let txhashset_needed = match self
             .chain

@@ -76,17 +80,24 @@ impl BodySync {
             Ok(v) => v,
             Err(e) => {
                 error!("body_sync: failed to call txhashset_needed: {:?}", e);
-                return false;
+                return Ok(false);
             }
         };
         if txhashset_needed {
             debug!(
                 "body_sync: cannot sync full blocks earlier than horizon. will request txhashset",
             );
-            return true;
+            return Ok(true);
         }

-        let mut hashes = hashes.unwrap();
+        let mut hashes = match hashes {
+            Some(v) => v,
+            None => {
+                error!("unexpected: hashes is None");
+                return Ok(false);
+            }
+        };
+
         hashes.reverse();

         let peers = self.peers.more_work_peers();

@@ -110,8 +121,8 @@ impl BodySync {
             .collect::<Vec<_>>();

         if hashes_to_get.len() > 0 {
-            let body_head = self.chain.head().unwrap();
-            let header_head = self.chain.header_head().unwrap();
+            let body_head = self.chain.head()?;
+            let header_head = self.chain.header_head()?;

             debug!(
                 "block_sync: {}/{} requesting blocks {:?} from {} peers",

@@ -136,12 +147,12 @@ impl BodySync {
                 }
             }
         }
-        return false;
+        return Ok(false);
     }

     // Should we run block body sync and ask for more full blocks?
-    fn body_sync_due(&mut self) -> bool {
-        let blocks_received = self.blocks_received();
+    fn body_sync_due(&mut self) -> Result<bool, chain::Error> {
+        let blocks_received = self.blocks_received()?;

         // some blocks have been requested
         if self.blocks_requested > 0 {

@@ -152,7 +163,7 @@ impl BodySync {
                     "body_sync: expecting {} more blocks and none received for a while",
                     self.blocks_requested,
                 );
-                return true;
+                return Ok(true);
             }
         }

@@ -169,16 +180,16 @@ impl BodySync {
         if self.blocks_requested < 2 {
             // no pending block requests, ask more
             debug!("body_sync: no pending block request, asking more");
-            return true;
+            return Ok(true);
         }

-        return false;
+        Ok(false)
     }

     // Total numbers received on this chain, including the head and orphans
-    fn blocks_received(&self) -> u64 {
-        self.chain.head().unwrap().height
+    fn blocks_received(&self) -> Result<u64, chain::Error> {
+        Ok((self.chain.head()?).height
             + self.chain.orphans_len() as u64
-            + self.chain.orphans_evicted_len() as u64
+            + self.chain.orphans_evicted_len() as u64)
     }
 }
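In BodySync, check_run, body_sync, body_sync_due and blocks_received all move from plain bool/u64 returns with internal unwraps to Result, so a failed chain read bubbles up instead of killing the sync thread. A minimal sketch of that calling convention, with a stub error type:

#[derive(Debug)]
struct ChainError;

struct BodySync {
    blocks_requested: u64,
}

impl BodySync {
    // Stand-in for a chain read that can fail.
    fn blocks_received(&self) -> Result<u64, ChainError> {
        Ok(100)
    }

    fn body_sync_due(&mut self) -> Result<bool, ChainError> {
        let blocks_received = self.blocks_received()?;
        Ok(self.blocks_requested > 0 && blocks_received > 0)
    }

    // Ok carries the old bool answer; Err carries a chain failure for the caller to handle.
    fn check_run(&mut self) -> Result<bool, ChainError> {
        if self.body_sync_due()? {
            return Ok(true);
        }
        Ok(false)
    }
}

fn main() {
    let mut sync = BodySync { blocks_requested: 2 };
    println!("{:?}", sync.check_run());
}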
@@ -50,9 +50,13 @@ impl HeaderSync {
         }
     }

-    pub fn check_run(&mut self, header_head: &chain::Tip, highest_height: u64) -> bool {
+    pub fn check_run(
+        &mut self,
+        header_head: &chain::Tip,
+        highest_height: u64,
+    ) -> Result<bool, chain::Error> {
         if !self.header_sync_due(header_head) {
-            return false;
+            return Ok(false);
         }

         let enable_header_sync = match self.sync_state.status() {

@@ -60,7 +64,7 @@ impl HeaderSync {
             | SyncStatus::HeaderSync { .. }
             | SyncStatus::TxHashsetDone => true,
             SyncStatus::NoSync | SyncStatus::Initial | SyncStatus::AwaitingPeers(_) => {
-                let sync_head = self.chain.get_sync_head().unwrap();
+                let sync_head = self.chain.get_sync_head()?;
                 debug!(
                     "sync: initial transition to HeaderSync. sync_head: {} at {}, resetting to: {} at {}",
                     sync_head.hash(),

@@ -77,10 +81,10 @@ impl HeaderSync {
                 // correctly, so reset any previous (and potentially stale) sync_head to match
                 // our last known "good" header_head.
                 //
-                self.chain.reset_sync_head().unwrap();
+                self.chain.reset_sync_head()?;

                 // Rebuild the sync MMR to match our updated sync_head.
-                self.chain.rebuild_sync_mmr(&header_head).unwrap();
+                self.chain.rebuild_sync_mmr(&header_head)?;

                 self.history_locator.retain(|&x| x.0 == 0);
                 true

@@ -95,9 +99,9 @@ impl HeaderSync {
             });

             self.syncing_peer = self.header_sync();
-            return true;
+            return Ok(true);
         }
-        false
+        Ok(false)
     }

     fn header_sync_due(&mut self, header_head: &chain::Tip) -> bool {
@@ -166,9 +166,25 @@ impl StateSync {
             let mut txhashset_head = self
                 .chain
                 .get_block_header(&header_head.prev_block_h)
-                .unwrap();
+                .map_err(|e| {
+                    error!(
+                        "chain error during getting a block header {}: {:?}",
+                        &header_head.prev_block_h, e
+                    );
+                    p2p::Error::Internal
+                })?;
             for _ in 0..threshold {
-                txhashset_head = self.chain.get_previous_header(&txhashset_head).unwrap();
+                txhashset_head = self
+                    .chain
+                    .get_previous_header(&txhashset_head)
+                    .map_err(|e| {
+                        error!(
+                            "chain error during getting a previous block header {}: {:?}",
+                            txhashset_head.hash(),
+                            e
+                        );
+                        p2p::Error::Internal
+                    })?;
             }
             let bhash = txhashset_head.hash();
             debug!(
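In StateSync the unwraps on header lookups become map_err closures that log the detailed chain error and convert it into the p2p error the surrounding function already returns. A toy version of that log-then-convert shape:

#[derive(Debug)]
struct ChainError(String);

#[derive(Debug)]
enum P2pError {
    Internal,
}

fn get_block_header(hash: &str) -> Result<u64, ChainError> {
    Err(ChainError(format!("header {} not found", hash)))
}

fn txhashset_start_height(hash: &str) -> Result<u64, P2pError> {
    let height = get_block_header(hash).map_err(|e| {
        // Log the specific chain error, then hand the caller a coarse p2p error.
        eprintln!("chain error during getting a block header {}: {:?}", hash, e);
        P2pError::Internal
    })?;
    Ok(height)
}

fn main() {
    println!("{:?}", txhashset_start_height("000abc"));
}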
@@ -62,7 +62,7 @@ impl SyncRunner {
         }
     }

-    fn wait_for_min_peers(&self) {
+    fn wait_for_min_peers(&self) -> Result<(), chain::Error> {
         // Initial sleep to give us time to peer with some nodes.
         // Note: Even if we have skip peer wait we need to wait a
         // short period of time for tests to do the right thing.

@@ -72,7 +72,7 @@ impl SyncRunner {
             3
         };

-        let head = self.chain.head().unwrap();
+        let head = self.chain.head()?;

         let mut n = 0;
         const MIN_PEERS: usize = 3;

@@ -95,12 +95,27 @@ impl SyncRunner {
             thread::sleep(time::Duration::from_secs(1));
             n += 1;
         }
+        Ok(())
     }

     /// Starts the syncing loop, just spawns two threads that loop forever
     fn sync_loop(&self) {
+        macro_rules! unwrap_or_restart_loop(
+            ($obj: expr) =>(
+                match $obj {
+                    Ok(v) => v,
+                    Err(e) => {
+                        error!("unexpected error: {:?}", e);
+                        thread::sleep(time::Duration::from_secs(1));
+                        continue;
+                    },
+                }
+            ));
+
         // Wait for connections reach at least MIN_PEERS
-        self.wait_for_min_peers();
+        if let Err(e) = self.wait_for_min_peers() {
+            error!("wait_for_min_peers failed: {:?}", e);
+        }

         // Our 3 main sync stages
         let mut header_sync = HeaderSync::new(
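The unwrap_or_restart_loop! macro only makes sense inside sync_loop's main loop: on error it logs, backs off, and continues the enclosing loop instead of panicking. A standalone sketch of the same macro shape, with a shorter sleep and a bounded loop so it terminates:

use std::{thread, time};

fn flaky(attempt: u32) -> Result<u32, String> {
    if attempt < 2 {
        Err(format!("not ready at attempt {}", attempt))
    } else {
        Ok(attempt)
    }
}

fn main() {
    // Same shape as the macro in sync_loop: log, back off, restart the enclosing loop.
    macro_rules! unwrap_or_restart_loop {
        ($obj:expr) => {
            match $obj {
                Ok(v) => v,
                Err(e) => {
                    eprintln!("unexpected error: {:?}", e);
                    thread::sleep(time::Duration::from_millis(10));
                    continue;
                }
            }
        };
    }

    let mut attempt = 0;
    loop {
        attempt += 1;
        // The `continue` inside the macro targets this loop.
        let value = unwrap_or_restart_loop!(flaky(attempt));
        println!("got {} after {} attempts", value, attempt);
        break;
    }
}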
@@ -132,8 +147,7 @@ impl SyncRunner {
             thread::sleep(time::Duration::from_millis(10));

             // check whether syncing is generally needed, when we compare our state with others
-            let (syncing, most_work_height) = self.needs_syncing();
-
+            let (syncing, most_work_height) = unwrap_or_restart_loop!(self.needs_syncing());
             if most_work_height > 0 {
                 // we can occasionally get a most work height of 0 if read locks fail
                 highest_height = most_work_height;

@@ -147,13 +161,13 @@ impl SyncRunner {
             }

             // if syncing is needed
-            let head = self.chain.head().unwrap();
+            let head = unwrap_or_restart_loop!(self.chain.head());
             let tail = self.chain.tail().unwrap_or_else(|_| head.clone());
-            let header_head = self.chain.header_head().unwrap();
+            let header_head = unwrap_or_restart_loop!(self.chain.header_head());

             // run each sync stage, each of them deciding whether they're needed
             // except for state sync that only runs if body sync return true (means txhashset is needed)
-            header_sync.check_run(&header_head, highest_height);
+            unwrap_or_restart_loop!(header_sync.check_run(&header_head, highest_height));

             let mut check_state_sync = false;
             match self.sync_state.status() {

@@ -168,7 +182,15 @@ impl SyncRunner {
                     continue;
                 }

-                if body_sync.check_run(&head, highest_height) {
+                let check_run = match body_sync.check_run(&head, highest_height) {
+                    Ok(v) => v,
+                    Err(e) => {
+                        error!("check_run failed: {:?}", e);
+                        continue;
+                    }
+                };
+
+                if check_run {
                     check_state_sync = true;
                 }
             }

@@ -182,8 +204,8 @@ impl SyncRunner {

     /// Whether we're currently syncing the chain or we're fully caught up and
     /// just receiving blocks through gossip.
-    fn needs_syncing(&self) -> (bool, u64) {
-        let local_diff = self.chain.head().unwrap().total_difficulty;
+    fn needs_syncing(&self) -> Result<(bool, u64), chain::Error> {
+        let local_diff = self.chain.head()?.total_difficulty;
         let mut is_syncing = self.sync_state.is_syncing();
         let peer = self.peers.most_work_peer();

@@ -191,14 +213,14 @@ impl SyncRunner {
             p.info.clone()
         } else {
             warn!("sync: no peers available, disabling sync");
-            return (false, 0);
+            return Ok((false, 0));
         };

         // if we're already syncing, we're caught up if no peer has a higher
         // difficulty than us
         if is_syncing {
             if peer_info.total_difficulty() <= local_diff {
-                let ch = self.chain.head().unwrap();
+                let ch = self.chain.head()?;
                 info!(
                     "synchronized at {} @ {} [{}]",
                     local_diff.to_num(),

@@ -215,7 +237,7 @@ impl SyncRunner {
                 Err(e) => {
                     error!("failed to get difficulty iterator: {:?}", e);
                     // we handle 0 height in the caller
-                    return (false, 0);
+                    return Ok((false, 0));
                 }
             };
             diff_iter

@@ -235,6 +257,6 @@ impl SyncRunner {
                 is_syncing = true;
             }
         }
-        (is_syncing, peer_info.height())
+        Ok((is_syncing, peer_info.height()))
     }
 }
@@ -179,10 +179,9 @@ fn build_block(
 ///
 fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
     warn!("Burning block fees: {:?}", block_fees);
-    let keychain = ExtKeychain::from_random_seed(global::is_floonet()).unwrap();
+    let keychain = ExtKeychain::from_random_seed(global::is_floonet())?;
     let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
-    let (out, kernel) =
-        crate::core::libtx::reward::output(&keychain, &key_id, block_fees.fees).unwrap();
+    let (out, kernel) = crate::core::libtx::reward::output(&keychain, &key_id, block_fees.fees)?;
     Ok((out, kernel, block_fees))
 }

@@ -199,12 +198,20 @@ fn get_coinbase(
         }
         Some(wallet_listener_url) => {
             let res = wallet::create_coinbase(&wallet_listener_url, &block_fees)?;
-            let out_bin = util::from_hex(res.output).unwrap();
-            let kern_bin = util::from_hex(res.kernel).unwrap();
-            let key_id_bin = util::from_hex(res.key_id).unwrap();
-            let output = ser::deserialize(&mut &out_bin[..]).unwrap();
-            let kernel = ser::deserialize(&mut &kern_bin[..]).unwrap();
-            let key_id = ser::deserialize(&mut &key_id_bin[..]).unwrap();
+            let out_bin = util::from_hex(res.output)
+                .map_err(|_| Error::General("failed to parse hex output".to_owned()))?;
+            let kern_bin = util::from_hex(res.kernel)
+                .map_err(|_| Error::General("failed to parse hex kernel".to_owned()))?;
+
+            let key_id_bin = util::from_hex(res.key_id)
+                .map_err(|_| Error::General("failed to parse hex key id".to_owned()))?;
+            let output = ser::deserialize(&mut &out_bin[..])
+                .map_err(|_| Error::General("failed to deserialize output".to_owned()))?;
+
+            let kernel = ser::deserialize(&mut &kern_bin[..])
+                .map_err(|_| Error::General("failed to deserialize kernel".to_owned()))?;
+            let key_id = ser::deserialize(&mut &key_id_bin[..])
+                .map_err(|_| Error::General("failed to deserialize key id".to_owned()))?;
             let block_fees = BlockFees {
                 key_id: Some(key_id),
                 ..block_fees
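The coinbase path replaces six unwraps with map_err into Error::General, so a malformed wallet response becomes a returned error with a readable message rather than a panic. A reduced sketch of one decode step, with a hypothetical from_hex helper standing in for grin's util::from_hex:

#[derive(Debug)]
enum Error {
    General(String),
}

// Hypothetical stand-in for util::from_hex.
fn from_hex(s: &str) -> Result<Vec<u8>, String> {
    if s.len() % 2 != 0 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err(format!("invalid hex: {}", s));
    }
    Ok((0..s.len())
        .step_by(2)
        // Safe to unwrap here: both chars were just validated as hex digits.
        .map(|i| u8::from_str_radix(&s[i..i + 2], 16).unwrap())
        .collect())
}

fn decode_output(hex: &str) -> Result<Vec<u8>, Error> {
    // Drop the detailed parse error and return something the servers Error enum can carry.
    from_hex(hex).map_err(|_| Error::General("failed to parse hex output".to_owned()))
}

fn main() {
    println!("{:?}", decode_output("deadbeef"));
    println!("{:?}", decode_output("not-hex"));
}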
@@ -229,7 +229,7 @@ fn simulate_block_propagation() {
     loop {
         let mut count = 0;
         for n in 0..5 {
-            if servers[n].head().height > 3 {
+            if servers[n].head().unwrap().height > 3 {
                 count += 1;
             }
         }

@@ -289,13 +289,13 @@ fn simulate_full_sync() {

     // Wait for s2 to sync up to and including the header from s1.
     let mut time_spent = 0;
-    while s2.head().height < s1_header.height {
+    while s2.head().unwrap().height < s1_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         time_spent += 1;
         if time_spent >= 30 {
             info!(
-                "sync fail. s2.head().height: {}, s1_header.height: {}",
-                s2.head().height,
+                "sync fail. s2.head().unwrap().height: {}, s1_header.height: {}",
+                s2.head().unwrap().height,
                 s1_header.height
             );
             break;

@@ -331,7 +331,7 @@ fn simulate_fast_sync() {
     let stop = Arc::new(Mutex::new(StopState::new()));
     s1.start_test_miner(None, stop.clone());

-    while s1.head().height < 20 {
+    while s1.head().unwrap().height < 20 {
         thread::sleep(time::Duration::from_millis(1_000));
     }
     s1.stop_test_miner(stop);

@@ -346,13 +346,13 @@ fn simulate_fast_sync() {

     // Wait for s2 to sync up to and including the header from s1.
     let mut total_wait = 0;
-    while s2.head().height < s1_header.height {
+    while s2.head().unwrap().height < s1_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 30 {
             error!(
                 "simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}",
-                s2.head().height,
+                s2.head().unwrap().height,
                 s1_header.height,
             );
             break;

@@ -463,7 +463,7 @@ fn long_fork_test_preparation() -> Vec<servers::Server> {
     let stop = Arc::new(Mutex::new(StopState::new()));
     s[0].start_test_miner(None, stop.clone());

-    while s[0].head().height < global::cut_through_horizon() as u64 + 10 {
+    while s[0].head().unwrap().height < global::cut_through_horizon() as u64 + 10 {
         thread::sleep(time::Duration::from_millis(1_000));
     }
     s[0].stop_test_miner(stop);

@@ -505,7 +505,7 @@ fn long_fork_test_preparation() -> Vec<servers::Server> {
         }
         min_height = s0_header.height;
         for i in 1..6 {
-            min_height = cmp::min(s[i].head().height, min_height);
+            min_height = cmp::min(s[i].head().unwrap().height, min_height);
         }
     }

@@ -548,17 +548,17 @@ fn long_fork_test_mining(blocks: u64, n: u16, s: &servers::Server) {
     let stop = Arc::new(Mutex::new(StopState::new()));
     s.start_test_miner(None, stop.clone());

-    while s.head().height < sn_header.height + blocks {
+    while s.head().unwrap().height < sn_header.height + blocks {
         thread::sleep(time::Duration::from_millis(1));
     }
     s.stop_test_miner(stop);
    thread::sleep(time::Duration::from_millis(1_000));
     println!(
         "{} blocks mined on s{}. s{}.height: {} (old height: {})",
-        s.head().height - sn_header.height,
+        s.head().unwrap().height - sn_header.height,
         n,
         n,
-        s.head().height,
+        s.head().unwrap().height,
         sn_header.height,
     );

@@ -566,7 +566,7 @@ fn long_fork_test_mining(blocks: u64, n: u16, s: &servers::Server) {
     let sn_header = s.chain.head().unwrap();
     let sn_tail = s.chain.tail().unwrap();
     println!(
-        "after compacting, s{}.head().height: {}, s{}.tail().height: {}",
+        "after compacting, s{}.head().unwrap().height: {}, s{}.tail().height: {}",
         n, sn_header.height, n, sn_tail.height,
     );
 }
@@ -584,7 +584,7 @@ fn long_fork_test_case_1(s: &Vec<servers::Server>) {
     let s0_header = s[0].chain.head().unwrap();
     let s0_tail = s[0].chain.tail().unwrap();
     println!(
-        "test case 1: s0 start syncing with s2... s0.head().height: {}, s2.head().height: {}",
+        "test case 1: s0 start syncing with s2... s0.head().unwrap().height: {}, s2.head().height: {}",
         s0_header.height, s2_header.height,
     );
     s[0].resume();

@@ -592,13 +592,13 @@ fn long_fork_test_case_1(s: &Vec<servers::Server>) {

     // Check server s0 can sync to s2 without txhashset download.
     let mut total_wait = 0;
-    while s[0].head().height < s2_header.height {
+    while s[0].head().unwrap().height < s2_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 1: test fail on timeout! s0 height: {}, s2 height: {}",
-                s[0].head().height,
+                s[0].head().unwrap().height,
                 s2_header.height,
             );
             exit(1);

@@ -607,11 +607,11 @@ fn long_fork_test_case_1(s: &Vec<servers::Server>) {
     let s0_tail_new = s[0].chain.tail().unwrap();
     assert_eq!(s0_tail_new.height, s0_tail.height);
     println!(
-        "test case 1: s0.head().height: {}, s2_header.height: {}",
-        s[0].head().height,
+        "test case 1: s0.head().unwrap().height: {}, s2_header.height: {}",
+        s[0].head().unwrap().height,
         s2_header.height,
     );
-    assert_eq!(s[0].head().last_block_h, s2_header.last_block_h);
+    assert_eq!(s[0].head().unwrap().last_block_h, s2_header.last_block_h);

     s[0].pause();
     s[2].stop();

@@ -630,7 +630,7 @@ fn long_fork_test_case_2(s: &Vec<servers::Server>) {
     let s0_header = s[0].chain.head().unwrap();
     let s0_tail = s[0].chain.tail().unwrap();
     println!(
-        "test case 2: s0 start syncing with s3. s0.head().height: {}, s3.head().height: {}",
+        "test case 2: s0 start syncing with s3. s0.head().unwrap().height: {}, s3.head().height: {}",
         s0_header.height, s3_header.height,
     );
     s[0].resume();

@@ -638,13 +638,13 @@ fn long_fork_test_case_2(s: &Vec<servers::Server>) {

     // Check server s0 can sync to s3 without txhashset download.
     let mut total_wait = 0;
-    while s[0].head().height < s3_header.height {
+    while s[0].head().unwrap().height < s3_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 2: test fail on timeout! s0 height: {}, s3 height: {}",
-                s[0].head().height,
+                s[0].head().unwrap().height,
                 s3_header.height,
             );
             exit(1);

@@ -652,13 +652,13 @@ fn long_fork_test_case_2(s: &Vec<servers::Server>) {
     }
     let s0_tail_new = s[0].chain.tail().unwrap();
     assert_eq!(s0_tail_new.height, s0_tail.height);
-    assert_eq!(s[0].head().hash(), s3_header.hash());
+    assert_eq!(s[0].head().unwrap().hash(), s3_header.hash());

     let _ = s[0].chain.compact();
     let s0_header = s[0].chain.head().unwrap();
     let s0_tail = s[0].chain.tail().unwrap();
     println!(
-        "test case 2: after compacting, s0.head().height: {}, s0.tail().height: {}",
+        "test case 2: after compacting, s0.head().unwrap().height: {}, s0.tail().height: {}",
         s0_header.height, s0_tail.height,
     );

@@ -679,7 +679,7 @@ fn long_fork_test_case_3(s: &Vec<servers::Server>) {
     let s1_header = s[1].chain.head().unwrap();
     let s1_tail = s[1].chain.tail().unwrap();
     println!(
-        "test case 3: s0/1 start syncing with s4. s0.head().height: {}, s0.tail().height: {}, s1.head().height: {}, s1.tail().height: {}, s4.head().height: {}",
+        "test case 3: s0/1 start syncing with s4. s0.head().unwrap().height: {}, s0.tail().height: {}, s1.head().height: {}, s1.tail().height: {}, s4.head().height: {}",
         s0_header.height, s0_tail.height,
         s1_header.height, s1_tail.height,
         s4_header.height,

@@ -689,32 +689,32 @@ fn long_fork_test_case_3(s: &Vec<servers::Server>) {

     // Check server s0 can sync to s4.
     let mut total_wait = 0;
-    while s[0].head().height < s4_header.height {
+    while s[0].head().unwrap().height < s4_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 3: test fail on timeout! s0 height: {}, s4 height: {}",
-                s[0].head().height,
+                s[0].head().unwrap().height,
                 s4_header.height,
             );
             exit(1);
         }
     }
-    assert_eq!(s[0].head().hash(), s4_header.hash());
+    assert_eq!(s[0].head().unwrap().hash(), s4_header.hash());

     s[0].stop();
     s[1].resume();

     // Check server s1 can sync to s4 but with txhashset download.
     let mut total_wait = 0;
-    while s[1].head().height < s4_header.height {
+    while s[1].head().unwrap().height < s4_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 3: test fail on timeout! s1 height: {}, s4 height: {}",
-                s[1].head().height,
+                s[1].head().unwrap().height,
                 s4_header.height,
             );
             exit(1);
@@ -726,7 +726,7 @@
         s1_tail_new.height, s1_tail.height
     );
     assert_ne!(s1_tail_new.height, s1_tail.height);
-    assert_eq!(s[1].head().hash(), s4_header.hash());
+    assert_eq!(s[1].head().unwrap().hash(), s4_header.hash());

     s[1].pause();
     s[4].pause();

@@ -745,7 +745,7 @@ fn long_fork_test_case_4(s: &Vec<servers::Server>) {
     let s1_header = s[1].chain.head().unwrap();
     let s1_tail = s[1].chain.tail().unwrap();
     println!(
-        "test case 4: s1 start syncing with s5. s1.head().height: {}, s1.tail().height: {}, s5.head().height: {}",
+        "test case 4: s1 start syncing with s5. s1.head().unwrap().height: {}, s1.tail().height: {}, s5.head().height: {}",
         s1_header.height, s1_tail.height,
         s5_header.height,
     );

@@ -754,13 +754,13 @@ fn long_fork_test_case_4(s: &Vec<servers::Server>) {

     // Check server s1 can sync to s5 with a new txhashset download.
     let mut total_wait = 0;
-    while s[1].head().height < s5_header.height {
+    while s[1].head().unwrap().height < s5_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 4: test fail on timeout! s1 height: {}, s5 height: {}",
-                s[1].head().height,
+                s[1].head().unwrap().height,
                 s5_header.height,
             );
             exit(1);

@@ -772,7 +772,7 @@ fn long_fork_test_case_4(s: &Vec<servers::Server>) {
         s1_tail_new.height, s1_tail.height
     );
     assert_ne!(s1_tail_new.height, s1_tail.height);
-    assert_eq!(s[1].head().hash(), s5_header.hash());
+    assert_eq!(s[1].head().unwrap().hash(), s5_header.hash());

     s[1].pause();
     s[5].pause();

@@ -792,7 +792,7 @@ fn long_fork_test_case_5(s: &Vec<servers::Server>) {
     let s1_header = s[1].chain.head().unwrap();
     let s1_tail = s[1].chain.tail().unwrap();
     println!(
-        "test case 5: s1 start syncing with s5. s1.head().height: {}, s1.tail().height: {}, s5.head().height: {}",
+        "test case 5: s1 start syncing with s5. s1.head().unwrap().height: {}, s1.tail().height: {}, s5.head().height: {}",
         s1_header.height, s1_tail.height,
         s5_header.height,
     );

@@ -801,13 +801,13 @@ fn long_fork_test_case_5(s: &Vec<servers::Server>) {

     // Check server s1 can sync to s5 without a txhashset download (normal body sync)
     let mut total_wait = 0;
-    while s[1].head().height < s5_header.height {
+    while s[1].head().unwrap().height < s5_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 5: test fail on timeout! s1 height: {}, s5 height: {}",
-                s[1].head().height,
+                s[1].head().unwrap().height,
                 s5_header.height,
             );
             exit(1);

@@ -819,7 +819,7 @@ fn long_fork_test_case_5(s: &Vec<servers::Server>) {
         s1_tail_new.height, s1_tail.height
     );
     assert_eq!(s1_tail_new.height, s1_tail.height);
-    assert_eq!(s[1].head().hash(), s5_header.hash());
+    assert_eq!(s[1].head().unwrap().hash(), s5_header.hash());

     s[1].pause();
     s[5].pause();

@@ -840,7 +840,7 @@ fn long_fork_test_case_6(s: &Vec<servers::Server>) {
     let s1_header = s[1].chain.head().unwrap();
     let s1_tail = s[1].chain.tail().unwrap();
     println!(
-        "test case 6: s1 start syncing with s5. s1.head().height: {}, s1.tail().height: {}, s5.head().height: {}",
+        "test case 6: s1 start syncing with s5. s1.head().unwrap().height: {}, s1.tail().height: {}, s5.head().height: {}",
         s1_header.height, s1_tail.height,
         s5_header.height,
     );

@@ -849,13 +849,13 @@ fn long_fork_test_case_6(s: &Vec<servers::Server>) {

     // Check server s1 can sync to s5 without a txhashset download (normal body sync)
     let mut total_wait = 0;
-    while s[1].head().height < s5_header.height {
+    while s[1].head().unwrap().height < s5_header.height {
         thread::sleep(time::Duration::from_millis(1_000));
         total_wait += 1;
         if total_wait >= 120 {
             println!(
                 "test case 6: test fail on timeout! s1 height: {}, s5 height: {}",
-                s[1].head().height,
+                s[1].head().unwrap().height,
                 s5_header.height,
             );
             exit(1);

@@ -867,7 +867,7 @@ fn long_fork_test_case_6(s: &Vec<servers::Server>) {
         s1_tail_new.height, s1_tail.height
     );
     assert_eq!(s1_tail_new.height, s1_tail.height);
-    assert_eq!(s[1].head().hash(), s5_header.hash());
+    assert_eq!(s[1].head().unwrap().hash(), s5_header.hash());

     s[1].pause();
     s[5].pause();
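In the simulation tests, Server::head() now returns a Result, so the call sites just add .unwrap(): a panic is an acceptable failure mode in tests, unlike in the server's long-running threads. As a usage note, the repeated wait-with-timeout loops could be expressed once with a small helper; this is a hypothetical refactor, not part of the commit:

use std::{thread, time};

// Polls `height` once per second until it reaches `target` or `timeout_secs` elapses.
fn wait_for_height<F>(mut height: F, target: u64, timeout_secs: u64) -> bool
where
    F: FnMut() -> u64,
{
    for _ in 0..timeout_secs {
        if height() >= target {
            return true;
        }
        thread::sleep(time::Duration::from_secs(1));
    }
    false
}

fn main() {
    let mut h = 0;
    // In the real tests the closure would be || s2.head().unwrap().height.
    let reached = wait_for_height(
        || {
            h += 1;
            h
        },
        3,
        30,
    );
    assert!(reached);
}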