better error messages + some cleanup ()

* distinguish select (among futures) from selecting coins. A regex search of the project for `select\b` shows we hardly use futures' select, but maybe we could use it to add timeouts more cleanly ("Want to add a timeout to any future? Just do a select of that future and a timeout future!" from https://aturon.github.io/blog/2016/08/11/futures/); see the sketch after these notes.

* remove a trailing space

* FAQ.md - fix typo

* wallet: display problematic tx

* update FAQ build troubleshooting to cover 

* stdout_log_level = Info
file_log_level = Debug

* sync: show total diff @ height when synchronization is completed

* better wallet send dest format error

* move INFO "Client conn ... lost" and "Connected to peer" down to Debug

* move some level=Info to Debug, add 1000-block outputs
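
A minimal sketch of the timeout-via-select idea from the first note above, written against today's tokio API rather than the 0.1-era futures the linked post describes; `with_timeout` is an illustrative helper and not something that exists in grin:

```rust
use std::future::Future;
use std::time::Duration;

// Illustrative only: race any future against a timer future and
// return None if the timer wins the select.
async fn with_timeout<T>(fut: impl Future<Output = T>, dur: Duration) -> Option<T> {
    tokio::select! {
        val = fut => Some(val),
        _ = tokio::time::sleep(dur) => None,
    }
}

#[tokio::main]
async fn main() {
    // A five-second task raced against a two-second timeout loses.
    let slow = async {
        tokio::time::sleep(Duration::from_secs(5)).await;
        42u32
    };
    assert_eq!(with_timeout(slow, Duration::from_secs(2)).await, None);
}
```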
Simon B 2017-12-18 14:17:11 +01:00 committed by AntiochP
parent 3b5e6d3e1f
commit 99186e90f0
10 changed files with 30 additions and 18 deletions

@@ -369,12 +369,16 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
 			.map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
 		}
 		ctx.head = tip.clone();
-		info!(
+		debug!(
 			LOGGER,
 			"Updated head to {} at {}.",
 			b.hash(),
 			b.header.height
 		);
+		if b.header.height % 500 == 0 {
+			info!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
+				b.header.height, b.header.difficulty, b.hash());
+		}
 		Ok(Some(tip))
 	} else {
 		Ok(None)
@@ -388,12 +392,15 @@ fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<T
 		.save_sync_head(&tip)
 		.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
 	ctx.head = tip.clone();
-	info!(
+	debug!(
 		LOGGER,
 		"pipe: updated sync head to {} at {}.",
 		bh.hash(),
		bh.height,
 	);
+	if bh.height % 1000 == 0 {
+		info!(LOGGER, "pipe: sync head reached {} [{}]", bh.height, bh.hash());
+	}
 	Ok(Some(tip))
 }
@@ -405,7 +412,7 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
 		.save_header_head(&tip)
 		.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
 	ctx.head = tip.clone();
-	info!(
+	debug!(
 		LOGGER,
 		"pipe: updated header head to {} at {}.",
 		bh.hash(),

@@ -31,10 +31,15 @@ Very welcome any solutions to give grin a "watchdog" solution that can restart
 grin in case of trouble.
 ## Build error: Could not compile `tokio-retry`.
-You need the latest rust. rustup, or [reinstall rust as described](build.md)
+You might want to remove any previous rust installations to avoid conflicts.
+Use `rustup` to [reinstall rust and cargo as described](build.md).
+NOTE: If you install rust or cargo with your package manager (most Linuxes
+anno 2017) you'll get too old versions. On Debian, you might have to manually
+compile cmake or get it from non-detault repositories.
 ## Build error: `failed to select a version for 'serde_json'`
-Run `cargo update? to fix this
+Run `cargo update` to fix this
 # Short term plans
 ## Transaction types

@@ -63,13 +63,13 @@ port = 13414
 log_to_stdout = true
 # Log level for stdout: Critical, Error, Warning, Info, Debug, Trace
-stdout_log_level = "Warning"
+stdout_log_level = "Info"
 # Whether to log to a file
 log_to_file = true
 # Log level for file: Critical, Error, Warning, Info, Debug, Trace
-file_log_level = "Trace"
+file_log_level = "Debug"
 # Log file path
 log_file_path = "grin.log"
@@ -95,7 +95,7 @@ use_cuckoo_miner = true
 #Whether to use async mode for cuckoo miner, if the plugin supports it.
 #this allows for many searches to be run in parallel, e.g. if the system
-#has multiple GPUs, or if you want to mine using multiple plugins 
+#has multiple GPUs, or if you want to mine using multiple plugins
 cuckoo_miner_async_mode = false

@@ -208,7 +208,7 @@ pub fn needs_syncing(
 		if let Some(peer) = peer {
 			if let Ok(peer) = peer.try_read() {
 				if peer.info.total_difficulty <= local_diff {
-					info!(LOGGER, "sync: caught up on most worked chain, disabling sync");
+					info!(LOGGER, "synchronize stopped, at {:?} @ {:?}", local_diff, chain.head().unwrap().height);
 					currently_syncing.store(false, Ordering::Relaxed);
 				}
 			}

@@ -90,7 +90,7 @@ fn simulate_seeding() {
 	pool_config.base_name = String::from(test_name_dir);
 	pool_config.run_length_in_seconds = 30;
-	// have to select different ports because of tests being run in parallel
+	// have to use different ports because of tests being run in parallel
 	pool_config.base_api_port = 30020;
 	pool_config.base_p2p_port = 31020;
 	pool_config.base_wallet_port = 32020;
@@ -142,7 +142,7 @@ fn simulate_parallel_mining() {
 	let mut pool_config = LocalServerContainerPoolConfig::default();
 	pool_config.base_name = String::from(test_name_dir);
 	pool_config.run_length_in_seconds = 60;
-	// have to select different ports because of tests being run in parallel
+	// have to use different ports because of tests being run in parallel
 	pool_config.base_api_port = 30040;
 	pool_config.base_p2p_port = 31040;
 	pool_config.base_wallet_port = 32040;

@@ -106,8 +106,7 @@ impl Handshake {
 			total_difficulty: shake.total_difficulty,
 		};
-		info!(LOGGER, "Connected to peer {:?}", peer_info);
+		debug!(LOGGER, "Connected to peer {:?}", peer_info);
 		// when more than one protocol version is supported, choosing should go here
 		Ok((conn, ProtocolV1::new(), peer_info))
 	}

@@ -111,7 +111,7 @@ impl Peer {
 			}
 			Err(e) => {
 				*state = State::Disconnected;
-				info!(LOGGER, "Client {} connection lost: {:?}", addr, e);
+				debug!(LOGGER, "Client {} connection lost: {:?}", addr, e);
 				Ok(())
 			}
 		}

@@ -79,6 +79,7 @@ impl Handler for WalletReceiver {
 		if let Ok(Some(partial_tx)) = struct_body {
 			receive_json_tx(&self.config, &self.keychain, &partial_tx)
 				.map_err(|e| {
+					error!(LOGGER, "Problematic partial tx, looks like this: {:?}", partial_tx);
 					api::Error::Internal(
 						format!("Error processing partial transaction: {:?}", e),
 					)})

@@ -93,7 +93,7 @@ pub fn issue_send_tx(
 			}
 		}
 	} else {
-		panic!("dest not in expected format: {}", dest);
+		panic!("dest formatted as {} but send -d expected stdout or http://IP:port", dest);
 	}
 	Ok(())
 }
@@ -115,7 +115,7 @@ fn build_send_tx(
 	// select some spendable coins from the wallet
 	let coins = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
-		wallet_data.select(
+		wallet_data.select_coins(
 			key_id.clone(),
 			amount,
 			current_height,
@@ -155,7 +155,7 @@ pub fn issue_burn_tx(
 	// select some spendable coins from the wallet
 	let coins = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
-		wallet_data.select(
+		wallet_data.select_coins(
 			key_id.clone(),
 			amount,
 			current_height,

@@ -516,7 +516,7 @@ impl WalletData {
 	/// Default strategy is to spend the maximum number of outputs (up to max_outputs).
 	/// Alternative strategy is to spend smallest outputs first but only as many as necessary.
 	/// When we introduce additional strategies we should pass something other than a bool in.
-	pub fn select(
+	pub fn select_coins(
 		&self,
 		root_key_id: keychain::Identifier,
 		amount: u64,