mirror of
https://github.com/mimblewimble/grin.git
synced 2025-04-30 14:21:14 +03:00
[1.1.0 -> Master] Merge 1.1.0 with master changes back into master
This commit is contained in:
commit
b403ccbeb1
144 changed files with 5223 additions and 20297 deletions
.cargo
.travis.ymlCargo.lockCargo.tomlapi
chain
config
core
doc/wallet
etc/gen_gen
keychain
p2p
pool
servers
src
store
4
.cargo/config
Normal file
4
.cargo/config
Normal file
|
@ -0,0 +1,4 @@
|
|||
[target.x86_64-pc-windows-msvc]
|
||||
rustflags = ["-Ctarget-feature=+crt-static"]
|
||||
[target.i686-pc-windows-msvc]
|
||||
rustflags = ["-Ctarget-feature=+crt-static"]
|
|
@ -58,7 +58,7 @@ matrix:
|
|||
- os: linux
|
||||
env: CI_JOB="test" CI_JOB_ARGS="pool p2p src"
|
||||
- os: linux
|
||||
env: CI_JOB="test" CI_JOB_ARGS="keychain wallet"
|
||||
env: CI_JOB="test" CI_JOB_ARGS="keychain"
|
||||
- os: linux
|
||||
env: CI_JOB="test" CI_JOB_ARGS="api util store"
|
||||
- os: linux
|
||||
|
@ -81,7 +81,7 @@ deploy:
|
|||
api_key:
|
||||
secure: PBTFcoUmiQITkDdtFzrBlNR/5OgYHTCw+xVWGYu205xwTlj/ARBgw7DNt8dIdptLx+jOM2V5SbJqSFxs/CJ2ZcOHQZ6ubwpAJlRfuk3xDAi5JmuHYfcY+4SQ9l/0MgHnGfuml093xP7vTIYm2Vwwgdq8fd3jdWmvwgk9zgaGXB4UIXQA0yIs3EzxZpqiLg629Ouv7edMfyffwlG+rgQ1koe6sqeMCxIs0N3p97GCx19kNe0TV4dC7XAN74HreMdHmwxPKAK4xG/jtA1Snm0pMQ50Z0Kizt+0yrGOPMLnWwO9sS38iosBn3Vh1R8HKle2xBGflTtT/LG9lHdQZ5NF572q6681x6t7str4OjJ5bboy1PtNLFxG7RJCVIpp9gbouzdxIaJWRTxIdlk8UNQMrD8ieiNE6V1vZtbHGtJHRSJN1vO/XxsLlQDCyakLhG/nmSKXgiT9wIsu+zj/3oDe+LBt5QetEGYGBrCwUewjaQ7EP1rsT7alQrHTMad5DPjYftJuvfR+yBtz1qbzQwZVJpQC1KY1c476mXPQsaywuUrj56hH92p7P3vl6aMN2OPJZP+zENOVSURHc56KeTsDS55+KKzcRjCMA2L0LR1hP33+V5kavMHgCRrWIkxAkZ4eRqnermalzp8vlzL6EEoGm0VFLzv4mJmzrY1mC1LyCHo=
|
||||
file_glob: true
|
||||
file: target/release/grin-*.tgz*
|
||||
file: target/release/grin-*.*
|
||||
skip_cleanup: true
|
||||
on:
|
||||
repo: mimblewimble/grin
|
||||
|
|
1599
Cargo.lock
generated
1599
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
32
Cargo.toml
32
Cargo.toml
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -12,7 +12,7 @@ build = "src/build/build.rs"
|
|||
edition = "2018"
|
||||
|
||||
[workspace]
|
||||
members = ["api", "chain", "config", "core", "keychain", "p2p", "servers", "store", "util", "pool", "wallet"]
|
||||
members = ["api", "chain", "config", "core", "keychain", "p2p", "servers", "store", "util", "pool"]
|
||||
exclude = ["etc/gen_gen"]
|
||||
|
||||
[[bin]]
|
||||
|
@ -23,40 +23,34 @@ path = "src/bin/grin.rs"
|
|||
blake2-rfc = "0.2"
|
||||
chrono = "0.4.4"
|
||||
clap = { version = "2.31", features = ["yaml"] }
|
||||
rpassword = "2.0.0"
|
||||
ctrlc = { version = "3.1", features = ["termination"] }
|
||||
humansize = "1.1.0"
|
||||
serde = "1"
|
||||
serde_json = "1"
|
||||
log = "0.4"
|
||||
term = "0.5"
|
||||
linefeed = "0.5"
|
||||
failure = "0.1"
|
||||
failure_derive = "0.1"
|
||||
|
||||
grin_api = { path = "./api", version = "1.0.3" }
|
||||
grin_config = { path = "./config", version = "1.0.3" }
|
||||
grin_core = { path = "./core", version = "1.0.3" }
|
||||
grin_keychain = { path = "./keychain", version = "1.0.3" }
|
||||
grin_p2p = { path = "./p2p", version = "1.0.3" }
|
||||
grin_servers = { path = "./servers", version = "1.0.3" }
|
||||
grin_util = { path = "./util", version = "1.0.3" }
|
||||
grin_wallet = { path = "./wallet", version = "1.0.3" }
|
||||
grin_api = { path = "./api", version = "1.1.0-beta.2" }
|
||||
grin_config = { path = "./config", version = "1.1.0-beta.2" }
|
||||
grin_core = { path = "./core", version = "1.1.0-beta.2" }
|
||||
grin_keychain = { path = "./keychain", version = "1.1.0-beta.2" }
|
||||
grin_p2p = { path = "./p2p", version = "1.1.0-beta.2" }
|
||||
grin_servers = { path = "./servers", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "./util", version = "1.1.0-beta.2" }
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
cursive = { version = "0.10.0", default-features = false, features = ["pancurses-backend"] }
|
||||
cursive = { version = "0.11.0", default-features = false, features = ["pancurses-backend"] }
|
||||
[target.'cfg(windows)'.dependencies.pancurses]
|
||||
version = "0.16.0"
|
||||
features = ["win32"]
|
||||
[target.'cfg(unix)'.dependencies]
|
||||
cursive = "0.10.0"
|
||||
cursive = "0.11.0"
|
||||
|
||||
[build-dependencies]
|
||||
built = "0.3"
|
||||
reqwest = "0.9"
|
||||
flate2 = "1.0"
|
||||
tar = "0.4"
|
||||
|
||||
[dev-dependencies]
|
||||
grin_chain = { path = "./chain", version = "1.0.3" }
|
||||
grin_store = { path = "./store", version = "1.0.3" }
|
||||
grin_chain = { path = "./chain", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "./store", version = "1.1.0-beta.2" }
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_api"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "APIs for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -30,9 +30,9 @@ futures = "0.1.21"
|
|||
rustls = "0.13"
|
||||
url = "1.7.0"
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_chain = { path = "../chain", version = "1.0.3" }
|
||||
grin_p2p = { path = "../p2p", version = "1.0.3" }
|
||||
grin_pool = { path = "../pool", version = "1.0.3" }
|
||||
grin_store = { path = "../store", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_chain = { path = "../chain", version = "1.1.0-beta.2" }
|
||||
grin_p2p = { path = "../p2p", version = "1.1.0-beta.2" }
|
||||
grin_pool = { path = "../pool", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "../store", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_chain"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -12,7 +12,6 @@ edition = "2018"
|
|||
[dependencies]
|
||||
bitflags = "1"
|
||||
byteorder = "1"
|
||||
lmdb-zero = "0.4.4"
|
||||
failure = "0.1"
|
||||
failure_derive = "0.1"
|
||||
croaring = "0.3"
|
||||
|
@ -24,10 +23,10 @@ lru-cache = "0.1"
|
|||
lazy_static = "1"
|
||||
regex = "1"
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_keychain = { path = "../keychain", version = "1.0.3" }
|
||||
grin_store = { path = "../store", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_keychain = { path = "../keychain", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "../store", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
env_logger = "0.5"
|
||||
|
|
|
@ -24,7 +24,6 @@ use crate::core::core::{
|
|||
use crate::core::global;
|
||||
use crate::core::pow;
|
||||
use crate::error::{Error, ErrorKind};
|
||||
use crate::lmdb;
|
||||
use crate::pipe;
|
||||
use crate::store;
|
||||
use crate::txhashset;
|
||||
|
@ -162,7 +161,6 @@ impl Chain {
|
|||
/// based on the genesis block if necessary.
|
||||
pub fn init(
|
||||
db_root: String,
|
||||
db_env: Arc<lmdb::Environment>,
|
||||
adapter: Arc<dyn ChainAdapter + Send + Sync>,
|
||||
genesis: Block,
|
||||
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
|
||||
|
@ -178,7 +176,7 @@ impl Chain {
|
|||
return Err(ErrorKind::Stopped.into());
|
||||
}
|
||||
|
||||
let store = Arc::new(store::ChainStore::new(db_env)?);
|
||||
let store = Arc::new(store::ChainStore::new(&db_root)?);
|
||||
|
||||
// open the txhashset, creating a new one if necessary
|
||||
let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;
|
||||
|
|
|
@ -23,8 +23,6 @@
|
|||
#[macro_use]
|
||||
extern crate bitflags;
|
||||
|
||||
use lmdb_zero as lmdb;
|
||||
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
#[macro_use]
|
||||
|
|
|
@ -18,7 +18,6 @@ use crate::core::consensus::HeaderInfo;
|
|||
use crate::core::core::hash::{Hash, Hashed};
|
||||
use crate::core::core::{Block, BlockHeader, BlockSums};
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::lmdb;
|
||||
use crate::types::Tip;
|
||||
use crate::util::secp::pedersen::Commitment;
|
||||
use croaring::Bitmap;
|
||||
|
@ -45,8 +44,8 @@ pub struct ChainStore {
|
|||
|
||||
impl ChainStore {
|
||||
/// Create new chain store
|
||||
pub fn new(db_env: Arc<lmdb::Environment>) -> Result<ChainStore, Error> {
|
||||
let db = store::Store::open(db_env, STORE_SUBPATH);
|
||||
pub fn new(db_root: &str) -> Result<ChainStore, Error> {
|
||||
let db = store::Store::new(db_root, None, Some(STORE_SUBPATH.clone()), None)?;
|
||||
Ok(ChainStore { db })
|
||||
}
|
||||
}
|
||||
|
|
|
@ -62,6 +62,7 @@ impl<T: PMMRable> PMMRHandle<T> {
|
|||
sub_dir: &str,
|
||||
file_name: &str,
|
||||
prunable: bool,
|
||||
fixed_size: bool,
|
||||
header: Option<&BlockHeader>,
|
||||
) -> Result<PMMRHandle<T>, Error> {
|
||||
let path = Path::new(root_dir).join(sub_dir).join(file_name);
|
||||
|
@ -69,7 +70,7 @@ impl<T: PMMRable> PMMRHandle<T> {
|
|||
let path_str = path.to_str().ok_or(Error::from(ErrorKind::Other(
|
||||
"invalid file path".to_owned(),
|
||||
)))?;
|
||||
let backend = PMMRBackend::new(path_str.to_string(), prunable, header)?;
|
||||
let backend = PMMRBackend::new(path_str.to_string(), prunable, fixed_size, header)?;
|
||||
let last_pos = backend.unpruned_size();
|
||||
Ok(PMMRHandle { backend, last_pos })
|
||||
}
|
||||
|
@ -121,6 +122,7 @@ impl TxHashSet {
|
|||
HEADERHASHSET_SUBDIR,
|
||||
HEADER_HEAD_SUBDIR,
|
||||
false,
|
||||
true,
|
||||
None,
|
||||
)?,
|
||||
sync_pmmr_h: PMMRHandle::new(
|
||||
|
@ -128,6 +130,7 @@ impl TxHashSet {
|
|||
HEADERHASHSET_SUBDIR,
|
||||
SYNC_HEAD_SUBDIR,
|
||||
false,
|
||||
true,
|
||||
None,
|
||||
)?,
|
||||
output_pmmr_h: PMMRHandle::new(
|
||||
|
@ -135,6 +138,7 @@ impl TxHashSet {
|
|||
TXHASHSET_SUBDIR,
|
||||
OUTPUT_SUBDIR,
|
||||
true,
|
||||
true,
|
||||
header,
|
||||
)?,
|
||||
rproof_pmmr_h: PMMRHandle::new(
|
||||
|
@ -142,13 +146,15 @@ impl TxHashSet {
|
|||
TXHASHSET_SUBDIR,
|
||||
RANGE_PROOF_SUBDIR,
|
||||
true,
|
||||
true,
|
||||
header,
|
||||
)?,
|
||||
kernel_pmmr_h: PMMRHandle::new(
|
||||
&root_dir,
|
||||
TXHASHSET_SUBDIR,
|
||||
KERNEL_SUBDIR,
|
||||
false,
|
||||
false, // not prunable
|
||||
false, // variable size kernel data file
|
||||
None,
|
||||
)?,
|
||||
commit_index,
|
||||
|
@ -696,9 +702,7 @@ impl<'a> HeaderExtension<'a> {
|
|||
/// including the genesis block header.
|
||||
pub fn truncate(&mut self) -> Result<(), Error> {
|
||||
debug!("Truncating header extension.");
|
||||
self.pmmr
|
||||
.rewind(0, &Bitmap::create())
|
||||
.map_err(&ErrorKind::TxHashSetErr)?;
|
||||
self.pmmr.truncate().map_err(&ErrorKind::TxHashSetErr)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@ use chrono::Duration;
|
|||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
|
@ -41,10 +40,8 @@ fn setup(dir_name: &str) -> Chain {
|
|||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let genesis_block = pow::mine_genesis_block().unwrap();
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
let db_env = Arc::new(store::new_env(dir_name.to_string()));
|
||||
chain::Chain::init(
|
||||
dir_name.to_string(),
|
||||
db_env,
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis_block,
|
||||
pow::verify_size,
|
||||
|
@ -57,10 +54,8 @@ fn setup(dir_name: &str) -> Chain {
|
|||
|
||||
fn reload_chain(dir_name: &str) -> Chain {
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
let db_env = Arc::new(store::new_env(dir_name.to_string()));
|
||||
chain::Chain::init(
|
||||
dir_name.to_string(),
|
||||
db_env,
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis::genesis_dev(),
|
||||
pow::verify_size,
|
||||
|
@ -83,7 +78,7 @@ fn data_files() {
|
|||
let prev = chain.head_header().unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0, false).unwrap();
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
|
||||
.unwrap();
|
||||
|
@ -161,7 +156,7 @@ fn _prepare_block_nosum(
|
|||
let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();
|
||||
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward = libtx::reward::output(kc, &key_id, fees).unwrap();
|
||||
let reward = libtx::reward::output(kc, &key_id, fees, false).unwrap();
|
||||
let mut b = match core::core::Block::new(
|
||||
prev,
|
||||
txs.into_iter().cloned().collect(),
|
||||
|
|
|
@ -28,7 +28,6 @@ use chrono::Duration;
|
|||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
|
@ -41,10 +40,8 @@ fn setup(dir_name: &str, genesis: Block) -> Chain {
|
|||
util::init_test_logger();
|
||||
clean_output_dir(dir_name);
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
let db_env = Arc::new(store::new_env(dir_name.to_string()));
|
||||
chain::Chain::init(
|
||||
dir_name.to_string(),
|
||||
db_env,
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis,
|
||||
pow::verify_size,
|
||||
|
@ -74,7 +71,7 @@ fn mine_genesis_reward_chain() {
|
|||
let mut genesis = genesis::genesis_dev();
|
||||
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
|
||||
let key_id = keychain::ExtKeychain::derive_key_id(0, 1, 0, 0, 0);
|
||||
let reward = reward::output(&keychain, &key_id, 0).unwrap();
|
||||
let reward = reward::output(&keychain, &key_id, 0, false).unwrap();
|
||||
genesis = genesis.with_reward(reward.0, reward.1);
|
||||
|
||||
let tmp_chain_dir = ".grin.tmp";
|
||||
|
@ -111,7 +108,7 @@ where
|
|||
let prev = chain.head_header().unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
|
||||
let reward = libtx::reward::output(keychain, &pk, 0).unwrap();
|
||||
let reward = libtx::reward::output(keychain, &pk, 0, false).unwrap();
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
|
||||
.unwrap();
|
||||
|
@ -436,7 +433,7 @@ fn output_header_mappings() {
|
|||
let prev = chain.head_header().unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0, false).unwrap();
|
||||
reward_outputs.push(reward.0.clone());
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
|
||||
|
@ -539,7 +536,7 @@ where
|
|||
let key_id = ExtKeychainPath::new(1, diff as u32, 0, 0, 0).to_identifier();
|
||||
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward = libtx::reward::output(kc, &key_id, fees).unwrap();
|
||||
let reward = libtx::reward::output(kc, &key_id, fees, false).unwrap();
|
||||
let mut b = match core::core::Block::new(
|
||||
prev,
|
||||
txs.into_iter().cloned().collect(),
|
||||
|
@ -560,11 +557,9 @@ where
|
|||
fn actual_diff_iter_output() {
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let genesis_block = pow::mine_genesis_block().unwrap();
|
||||
let db_env = Arc::new(store::new_env(".grin".to_string()));
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
let chain = chain::Chain::init(
|
||||
"../.grin".to_string(),
|
||||
db_env,
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis_block,
|
||||
pow::verify_size,
|
||||
|
|
|
@ -23,7 +23,6 @@ use env_logger;
|
|||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_store as store;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
|
||||
|
@ -54,51 +53,47 @@ fn test_various_store_indices() {
|
|||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
|
||||
let chain_store = Arc::new(chain::store::ChainStore::new(chain_dir).unwrap());
|
||||
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let genesis = pow::mine_genesis_block().unwrap();
|
||||
|
||||
setup_chain(&genesis, chain_store.clone()).unwrap();
|
||||
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0, false).unwrap();
|
||||
let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
|
||||
let block_hash = block.hash();
|
||||
|
||||
{
|
||||
let db_env = Arc::new(store::new_env(chain_dir.to_string()));
|
||||
let batch = chain_store.batch().unwrap();
|
||||
batch.save_block_header(&block.header).unwrap();
|
||||
batch.save_block(&block).unwrap();
|
||||
batch.commit().unwrap();
|
||||
}
|
||||
|
||||
let chain_store = Arc::new(chain::store::ChainStore::new(db_env).unwrap());
|
||||
let block_header = chain_store.get_block_header(&block_hash).unwrap();
|
||||
assert_eq!(block_header.hash(), block_hash);
|
||||
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let genesis = pow::mine_genesis_block().unwrap();
|
||||
// Test we can retrive the block from the db and that we can safely delete the
|
||||
// block from the db even though the block_sums are missing.
|
||||
{
|
||||
// Block exists in the db.
|
||||
assert!(chain_store.get_block(&block_hash).is_ok());
|
||||
|
||||
setup_chain(&genesis, chain_store.clone()).unwrap();
|
||||
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
|
||||
let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
|
||||
let block_hash = block.hash();
|
||||
// Block sums do not exist (we never set them up).
|
||||
assert!(chain_store.get_block_sums(&block_hash).is_err());
|
||||
|
||||
{
|
||||
// Start a new batch and delete the block.
|
||||
let batch = chain_store.batch().unwrap();
|
||||
batch.save_block_header(&block.header).unwrap();
|
||||
batch.save_block(&block).unwrap();
|
||||
batch.commit().unwrap();
|
||||
assert!(batch.delete_block(&block_hash).is_ok());
|
||||
|
||||
// Block is deleted within this batch.
|
||||
assert!(batch.get_block(&block_hash).is_err());
|
||||
}
|
||||
|
||||
let block_header = chain_store.get_block_header(&block_hash).unwrap();
|
||||
assert_eq!(block_header.hash(), block_hash);
|
||||
|
||||
// Test we can retrive the block from the db and that we can safely delete the
|
||||
// block from the db even though the block_sums are missing.
|
||||
{
|
||||
// Block exists in the db.
|
||||
assert!(chain_store.get_block(&block_hash).is_ok());
|
||||
|
||||
// Block sums do not exist (we never set them up).
|
||||
assert!(chain_store.get_block_sums(&block_hash).is_err());
|
||||
|
||||
{
|
||||
// Start a new batch and delete the block.
|
||||
let batch = chain_store.batch().unwrap();
|
||||
assert!(batch.delete_block(&block_hash).is_ok());
|
||||
|
||||
// Block is deleted within this batch.
|
||||
assert!(batch.get_block(&block_hash).is_err());
|
||||
}
|
||||
|
||||
// Check the batch did not commit any changes to the store .
|
||||
assert!(chain_store.get_block(&block_hash).is_ok());
|
||||
}
|
||||
// Check the batch did not commit any changes to the store .
|
||||
assert!(chain_store.get_block(&block_hash).is_ok());
|
||||
}
|
||||
// Cleanup chain directory
|
||||
clean_output_dir(chain_dir);
|
||||
|
|
|
@ -26,7 +26,6 @@ use env_logger;
|
|||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
|
@ -47,10 +46,8 @@ fn test_coinbase_maturity() {
|
|||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
|
||||
{
|
||||
let db_env = Arc::new(store::new_env(chain_dir.to_string()));
|
||||
let chain = chain::Chain::init(
|
||||
chain_dir.to_string(),
|
||||
db_env,
|
||||
".grin".to_string(),
|
||||
Arc::new(NoopAdapter {}),
|
||||
genesis_block,
|
||||
pow::verify_size,
|
||||
|
@ -69,7 +66,7 @@ fn test_coinbase_maturity() {
|
|||
let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier();
|
||||
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let reward = libtx::reward::output(&keychain, &key_id1, 0).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id1, 0, false).unwrap();
|
||||
let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
@ -113,7 +110,7 @@ fn test_coinbase_maturity() {
|
|||
|
||||
let txs = vec![coinbase_txn.clone()];
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward = libtx::reward::output(&keychain, &key_id3, fees).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id3, fees, false).unwrap();
|
||||
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
|
@ -145,12 +142,13 @@ fn test_coinbase_maturity() {
|
|||
let prev = chain.head_header().unwrap();
|
||||
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let reward = libtx::reward::output(&keychain, &key_id1, 0, false).unwrap();
|
||||
let mut block =
|
||||
core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
|
@ -164,39 +162,121 @@ fn test_coinbase_maturity() {
|
|||
)
|
||||
.unwrap();
|
||||
|
||||
chain.process_block(block, chain::Options::MINE).unwrap();
|
||||
assert_eq!(block.outputs().len(), 1);
|
||||
let coinbase_output = block.outputs()[0];
|
||||
assert!(coinbase_output.is_coinbase());
|
||||
|
||||
chain
|
||||
.process_block(block.clone(), chain::Options::MINE)
|
||||
.unwrap();
|
||||
|
||||
let prev = chain.head_header().unwrap();
|
||||
|
||||
let amount = consensus::REWARD;
|
||||
|
||||
let lock_height = 1 + global::coinbase_maturity();
|
||||
assert_eq!(lock_height, 4);
|
||||
|
||||
// here we build a tx that attempts to spend the earlier coinbase output
|
||||
// this is not a valid tx as the coinbase output cannot be spent yet
|
||||
let coinbase_txn = build::transaction(
|
||||
vec![
|
||||
build::coinbase_input(amount, key_id1.clone()),
|
||||
build::output(amount - 2, key_id2.clone()),
|
||||
build::with_fee(2),
|
||||
],
|
||||
&keychain,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let txs = vec![coinbase_txn.clone()];
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward = libtx::reward::output(&keychain, &key_id3, fees, false).unwrap();
|
||||
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block).unwrap();
|
||||
|
||||
// Confirm the tx attempting to spend the coinbase output
|
||||
// is not valid at the current block height given the current chain state.
|
||||
match chain.verify_coinbase_maturity(&coinbase_txn) {
|
||||
Ok(_) => {}
|
||||
Err(e) => match e.kind() {
|
||||
ErrorKind::ImmatureCoinbase => {}
|
||||
_ => panic!("Expected transaction error with immature coinbase."),
|
||||
},
|
||||
}
|
||||
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// mine enough blocks to increase the height sufficiently for
|
||||
// coinbase to reach maturity and be spendable in the next block
|
||||
for _ in 0..3 {
|
||||
let prev = chain.head_header().unwrap();
|
||||
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
|
||||
let reward = libtx::reward::output(&keychain, &pk, 0, false).unwrap();
|
||||
let mut block =
|
||||
core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
|
||||
let next_header_info =
|
||||
consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block).unwrap();
|
||||
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
chain.process_block(block, chain::Options::MINE).unwrap();
|
||||
}
|
||||
|
||||
let prev = chain.head_header().unwrap();
|
||||
|
||||
// Confirm the tx spending the coinbase output is now valid.
|
||||
// The coinbase output has matured sufficiently based on current chain state.
|
||||
chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
|
||||
|
||||
let txs = vec![coinbase_txn];
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let reward = libtx::reward::output(&keychain, &key_id4, fees, false).unwrap();
|
||||
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
|
||||
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block).unwrap();
|
||||
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let result = chain.process_block(block, chain::Options::MINE);
|
||||
match result {
|
||||
Ok(_) => (),
|
||||
Err(_) => panic!("we did not expect an error here"),
|
||||
};
|
||||
}
|
||||
|
||||
let prev = chain.head_header().unwrap();
|
||||
|
||||
// Confirm the tx spending the coinbase output is now valid.
|
||||
// The coinbase output has matured sufficiently based on current chain state.
|
||||
chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
|
||||
|
||||
let txs = vec![coinbase_txn];
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let reward = libtx::reward::output(&keychain, &key_id4, fees).unwrap();
|
||||
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
|
||||
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block).unwrap();
|
||||
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let result = chain.process_block(block, chain::Options::MINE);
|
||||
match result {
|
||||
Ok(_) => (),
|
||||
Err(_) => panic!("we did not expect an error here"),
|
||||
};
|
||||
}
|
||||
// Cleanup chain directory
|
||||
clean_output_dir(chain_dir);
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
@ -42,8 +41,7 @@ fn test_unexpected_zip() {
|
|||
let db_root = format!(".grin_txhashset_zip");
|
||||
clean_output_dir(&db_root);
|
||||
{
|
||||
let db_env = Arc::new(store::new_env(db_root.clone()));
|
||||
let chain_store = ChainStore::new(db_env).unwrap();
|
||||
let chain_store = ChainStore::new(&db_root).unwrap();
|
||||
let store = Arc::new(chain_store);
|
||||
txhashset::TxHashSet::open(db_root.clone(), store.clone(), None).unwrap();
|
||||
// First check if everything works out of the box
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_config"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Configuration for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -16,11 +16,10 @@ serde_derive = "1"
|
|||
toml = "0.4"
|
||||
dirs = "1.0.3"
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_servers = { path = "../servers", version = "1.0.3" }
|
||||
grin_p2p = { path = "../p2p", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_wallet = { path = "../wallet", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_servers = { path = "../servers", version = "1.1.0-beta.2" }
|
||||
grin_p2p = { path = "../p2p", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions = "0.5.1"
|
||||
|
|
|
@ -141,9 +141,17 @@ fn comments() -> HashMap<String, String> {
|
|||
);
|
||||
|
||||
retval.insert(
|
||||
"relay_secs".to_string(),
|
||||
"epoch_secs".to_string(),
|
||||
"
|
||||
#dandelion relay time (choose new relay peer every n secs)
|
||||
#dandelion epoch duration
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"aggregation_secs".to_string(),
|
||||
"
|
||||
#dandelion aggregation period in secs
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
@ -156,13 +164,6 @@ fn comments() -> HashMap<String, String> {
|
|||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"patience_secs".to_string(),
|
||||
"
|
||||
#run dandelion stem/fluff processing every n secs (stem tx aggregation in this window)
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"stem_probability".to_string(),
|
||||
"
|
||||
|
@ -345,110 +346,6 @@ fn comments() -> HashMap<String, String> {
|
|||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"[wallet]".to_string(),
|
||||
"
|
||||
#########################################
|
||||
### WALLET CONFIGURATION ###
|
||||
#########################################
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"api_listen_interface".to_string(),
|
||||
"
|
||||
#host IP for wallet listener, change to \"0.0.0.0\" to receive grins
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"api_listen_port".to_string(),
|
||||
"
|
||||
#path of TLS certificate file, self-signed certificates are not supported
|
||||
#tls_certificate_file = \"\"
|
||||
#private key for the TLS certificate
|
||||
#tls_certificate_key = \"\"
|
||||
|
||||
#port for wallet listener
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"owner_api_listen_port".to_string(),
|
||||
"
|
||||
#port for wallet owner api
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"api_secret_path".to_string(),
|
||||
"
|
||||
#path of the secret token used by the API to authenticate the calls
|
||||
#comment it to disable basic auth
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"check_node_api_http_addr".to_string(),
|
||||
"
|
||||
#where the wallet should find a running node
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"node_api_secret_path".to_string(),
|
||||
"
|
||||
#location of the node api secret for basic auth on the Grin API
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"owner_api_include_foreign".to_string(),
|
||||
"
|
||||
#include the foreign API endpoints on the same port as the owner
|
||||
#API. Useful for networking environments like AWS ECS that make
|
||||
#it difficult to access multiple ports on a single service.
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"data_file_dir".to_string(),
|
||||
"
|
||||
#where to find wallet files (seed, data, etc)
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"no_commit_cache".to_string(),
|
||||
"
|
||||
#If true, don't store calculated commits in the database
|
||||
#better privacy, but at a performance cost of having to
|
||||
#re-calculate commits every time they're used
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"dark_background_color_scheme".to_string(),
|
||||
"
|
||||
#Whether to use the black background color scheme for command line
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
retval.insert(
|
||||
"keybase_notify_ttl".to_string(),
|
||||
"
|
||||
#The exploding lifetime for keybase notification on coins received.
|
||||
#Unit: Minute. Default value 1440 minutes for one day.
|
||||
#Refer to https://keybase.io/blog/keybase-exploding-messages for detail.
|
||||
#To disable this notification, set it as 0.
|
||||
"
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
retval.insert(
|
||||
"[logging]".to_string(),
|
||||
"
|
||||
|
|
|
@ -29,24 +29,16 @@ use crate::comments::insert_comments;
|
|||
use crate::core::global;
|
||||
use crate::p2p;
|
||||
use crate::servers::ServerConfig;
|
||||
use crate::types::{
|
||||
ConfigError, ConfigMembers, GlobalConfig, GlobalWalletConfig, GlobalWalletConfigMembers,
|
||||
};
|
||||
use crate::types::{ConfigError, ConfigMembers, GlobalConfig};
|
||||
use crate::util::LoggingConfig;
|
||||
use crate::wallet::WalletConfig;
|
||||
|
||||
/// The default file name to use when trying to derive
|
||||
/// the node config file location
|
||||
pub const SERVER_CONFIG_FILE_NAME: &'static str = "grin-server.toml";
|
||||
/// And a wallet configuration file name
|
||||
pub const WALLET_CONFIG_FILE_NAME: &'static str = "grin-wallet.toml";
|
||||
const SERVER_LOG_FILE_NAME: &'static str = "grin-server.log";
|
||||
const WALLET_LOG_FILE_NAME: &'static str = "grin-wallet.log";
|
||||
const GRIN_HOME: &'static str = ".grin";
|
||||
const GRIN_CHAIN_DIR: &'static str = "chain_data";
|
||||
/// Wallet data directory
|
||||
pub const GRIN_WALLET_DIR: &'static str = "wallet_data";
|
||||
/// API secret file name
|
||||
/// Node API secret
|
||||
pub const API_SECRET_FILE_NAME: &'static str = ".api_secret";
|
||||
|
||||
fn get_grin_path(chain_type: &global::ChainTypes) -> Result<PathBuf, ConfigError> {
|
||||
|
@ -141,34 +133,6 @@ pub fn initial_setup_server(chain_type: &global::ChainTypes) -> Result<GlobalCon
|
|||
}
|
||||
}
|
||||
|
||||
/// Handles setup and detection of paths for wallet
|
||||
pub fn initial_setup_wallet(
|
||||
chain_type: &global::ChainTypes,
|
||||
) -> Result<GlobalWalletConfig, ConfigError> {
|
||||
check_api_secret_file(chain_type)?;
|
||||
// Use config file if current directory if it exists, .grin home otherwise
|
||||
if let Some(p) = check_config_current_dir(WALLET_CONFIG_FILE_NAME) {
|
||||
GlobalWalletConfig::new(p.to_str().unwrap())
|
||||
} else {
|
||||
// Check if grin dir exists
|
||||
let grin_path = get_grin_path(chain_type)?;
|
||||
|
||||
// Get path to default config file
|
||||
let mut config_path = grin_path.clone();
|
||||
config_path.push(WALLET_CONFIG_FILE_NAME);
|
||||
|
||||
// Spit it out if it doesn't exist
|
||||
if !config_path.exists() {
|
||||
let mut default_config = GlobalWalletConfig::for_chain(chain_type);
|
||||
// update paths relative to current dir
|
||||
default_config.update_paths(&grin_path);
|
||||
default_config.write_to_file(config_path.to_str().unwrap())?;
|
||||
}
|
||||
|
||||
GlobalWalletConfig::new(config_path.to_str().unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the defaults, as strewn throughout the code
|
||||
impl Default for ConfigMembers {
|
||||
fn default() -> ConfigMembers {
|
||||
|
@ -188,24 +152,6 @@ impl Default for GlobalConfig {
|
|||
}
|
||||
}
|
||||
|
||||
impl Default for GlobalWalletConfigMembers {
|
||||
fn default() -> GlobalWalletConfigMembers {
|
||||
GlobalWalletConfigMembers {
|
||||
logging: Some(LoggingConfig::default()),
|
||||
wallet: WalletConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for GlobalWalletConfig {
|
||||
fn default() -> GlobalWalletConfig {
|
||||
GlobalWalletConfig {
|
||||
config_file_path: None,
|
||||
members: Some(GlobalWalletConfigMembers::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl GlobalConfig {
|
||||
/// Same as GlobalConfig::default() but further tweaks parameters to
|
||||
/// apply defaults for each chain type
|
||||
|
@ -356,123 +302,3 @@ impl GlobalConfig {
|
|||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: Properly templatize these structs (if it's worth the effort)
|
||||
impl GlobalWalletConfig {
|
||||
/// Same as GlobalConfig::default() but further tweaks parameters to
|
||||
/// apply defaults for each chain type
|
||||
pub fn for_chain(chain_type: &global::ChainTypes) -> GlobalWalletConfig {
|
||||
let mut defaults_conf = GlobalWalletConfig::default();
|
||||
let mut defaults = &mut defaults_conf.members.as_mut().unwrap().wallet;
|
||||
defaults.chain_type = Some(chain_type.clone());
|
||||
|
||||
match *chain_type {
|
||||
global::ChainTypes::Mainnet => {}
|
||||
global::ChainTypes::Floonet => {
|
||||
defaults.api_listen_port = 13415;
|
||||
defaults.check_node_api_http_addr = "http://127.0.0.1:13413".to_owned();
|
||||
}
|
||||
global::ChainTypes::UserTesting => {
|
||||
defaults.api_listen_port = 23415;
|
||||
defaults.check_node_api_http_addr = "http://127.0.0.1:23413".to_owned();
|
||||
}
|
||||
global::ChainTypes::AutomatedTesting => {
|
||||
panic!("Can't run automated testing directly");
|
||||
}
|
||||
}
|
||||
defaults_conf
|
||||
}
|
||||
/// Requires the path to a config file
|
||||
pub fn new(file_path: &str) -> Result<GlobalWalletConfig, ConfigError> {
|
||||
let mut return_value = GlobalWalletConfig::default();
|
||||
return_value.config_file_path = Some(PathBuf::from(&file_path));
|
||||
|
||||
// Config file path is given but not valid
|
||||
let config_file = return_value.config_file_path.clone().unwrap();
|
||||
if !config_file.exists() {
|
||||
return Err(ConfigError::FileNotFoundError(String::from(
|
||||
config_file.to_str().unwrap(),
|
||||
)));
|
||||
}
|
||||
|
||||
// Try to parse the config file if it exists, explode if it does exist but
|
||||
// something's wrong with it
|
||||
return_value.read_config()
|
||||
}
|
||||
|
||||
/// Read config
|
||||
fn read_config(mut self) -> Result<GlobalWalletConfig, ConfigError> {
|
||||
let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)?;
|
||||
let decoded: Result<GlobalWalletConfigMembers, toml::de::Error> = toml::from_str(&contents);
|
||||
match decoded {
|
||||
Ok(gc) => {
|
||||
self.members = Some(gc);
|
||||
return Ok(self);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(ConfigError::ParseError(
|
||||
String::from(
|
||||
self.config_file_path
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.clone(),
|
||||
),
|
||||
String::from(format!("{}", e)),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Update paths
|
||||
pub fn update_paths(&mut self, wallet_home: &PathBuf) {
|
||||
let mut wallet_path = wallet_home.clone();
|
||||
wallet_path.push(GRIN_WALLET_DIR);
|
||||
self.members.as_mut().unwrap().wallet.data_file_dir =
|
||||
wallet_path.to_str().unwrap().to_owned();
|
||||
let mut secret_path = wallet_home.clone();
|
||||
secret_path.push(API_SECRET_FILE_NAME);
|
||||
self.members.as_mut().unwrap().wallet.api_secret_path =
|
||||
Some(secret_path.to_str().unwrap().to_owned());
|
||||
let mut node_secret_path = wallet_home.clone();
|
||||
node_secret_path.push(API_SECRET_FILE_NAME);
|
||||
self.members.as_mut().unwrap().wallet.node_api_secret_path =
|
||||
Some(node_secret_path.to_str().unwrap().to_owned());
|
||||
let mut log_path = wallet_home.clone();
|
||||
log_path.push(WALLET_LOG_FILE_NAME);
|
||||
self.members
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.logging
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.log_file_path = log_path.to_str().unwrap().to_owned();
|
||||
}
|
||||
|
||||
/// Serialize config
|
||||
pub fn ser_config(&mut self) -> Result<String, ConfigError> {
|
||||
let encoded: Result<String, toml::ser::Error> =
|
||||
toml::to_string(self.members.as_mut().unwrap());
|
||||
match encoded {
|
||||
Ok(enc) => return Ok(enc),
|
||||
Err(e) => {
|
||||
return Err(ConfigError::SerializationError(String::from(format!(
|
||||
"{}",
|
||||
e
|
||||
))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Write configuration to a file
|
||||
pub fn write_to_file(&mut self, name: &str) -> Result<(), ConfigError> {
|
||||
let conf_out = self.ser_config()?;
|
||||
let conf_out = insert_comments(conf_out);
|
||||
let mut file = File::create(name)?;
|
||||
file.write_all(conf_out.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,11 +27,10 @@ use grin_core as core;
|
|||
use grin_p2p as p2p;
|
||||
use grin_servers as servers;
|
||||
use grin_util as util;
|
||||
use grin_wallet as wallet;
|
||||
|
||||
mod comments;
|
||||
pub mod config;
|
||||
pub mod types;
|
||||
|
||||
pub use crate::config::{initial_setup_server, initial_setup_wallet, GRIN_WALLET_DIR};
|
||||
pub use crate::types::{ConfigError, ConfigMembers, GlobalConfig, GlobalWalletConfig};
|
||||
pub use crate::config::initial_setup_server;
|
||||
pub use crate::types::{ConfigError, ConfigMembers, GlobalConfig};
|
||||
|
|
|
@ -20,7 +20,6 @@ use std::path::PathBuf;
|
|||
|
||||
use crate::servers::ServerConfig;
|
||||
use crate::util::LoggingConfig;
|
||||
use crate::wallet::WalletConfig;
|
||||
|
||||
/// Error type wrapping config errors.
|
||||
#[derive(Debug)]
|
||||
|
@ -95,22 +94,3 @@ pub struct ConfigMembers {
|
|||
/// Logging config
|
||||
pub logging: Option<LoggingConfig>,
|
||||
}
|
||||
|
||||
/// Wallet should be split into a separate configuration file
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub struct GlobalWalletConfig {
|
||||
/// Keep track of the file we've read
|
||||
pub config_file_path: Option<PathBuf>,
|
||||
/// Wallet members
|
||||
pub members: Option<GlobalWalletConfigMembers>,
|
||||
}
|
||||
|
||||
/// Wallet internal members
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
|
||||
pub struct GlobalWalletConfigMembers {
|
||||
/// Wallet configuration
|
||||
#[serde(default)]
|
||||
pub wallet: WalletConfig,
|
||||
/// Logging config
|
||||
pub logging: Option<LoggingConfig>,
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_core"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -26,10 +26,10 @@ serde_derive = "1"
|
|||
siphasher = "0.2"
|
||||
uuid = { version = "0.6", features = ["serde", "v4"] }
|
||||
log = "0.4"
|
||||
chrono = "0.4.4"
|
||||
chrono = { version = "0.4.4", features = ["serde"] }
|
||||
|
||||
grin_keychain = { path = "../keychain", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_keychain = { path = "../keychain", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
serde_json = "1"
|
||||
|
|
1392
core/fuzz/Cargo.lock
generated
Normal file
1392
core/fuzz/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
|
@ -10,7 +10,6 @@ cargo-fuzz = true
|
|||
[dependencies]
|
||||
grin_core = { path = ".."}
|
||||
grin_keychain = { path = "../../keychain"}
|
||||
grin_wallet = { path = "../../wallet"}
|
||||
[dependencies.libfuzzer-sys]
|
||||
git = "https://github.com/rust-fuzz/libfuzzer-sys.git"
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ To run the tests make sure youre in folder `core` otherwise you may get
|
|||
some misleading errors, then run one of the following tests:
|
||||
|
||||
```
|
||||
cargo fuzz run tx_read
|
||||
cargo fuzz run transaction_read
|
||||
|
||||
cargo fuzz run block_read
|
||||
|
||||
|
|
|
@ -3,10 +3,10 @@ extern crate grin_core;
|
|||
#[macro_use]
|
||||
extern crate libfuzzer_sys;
|
||||
|
||||
use grin_core::core::block;
|
||||
use grin_core::core::Block;
|
||||
use grin_core::ser;
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
let mut d = data.clone();
|
||||
let _t: Result<block::Block, ser::Error> = ser::deserialize(&mut d);
|
||||
let _t: Result<Block, ser::Error> = ser::deserialize(&mut d);
|
||||
});
|
||||
|
|
|
@ -3,10 +3,10 @@ extern crate grin_core;
|
|||
#[macro_use]
|
||||
extern crate libfuzzer_sys;
|
||||
|
||||
use grin_core::core::block;
|
||||
use grin_core::core::CompactBlock;
|
||||
use grin_core::ser;
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
let mut d = data.clone();
|
||||
let _t: Result<block::CompactBlock, ser::Error> = ser::deserialize(&mut d);
|
||||
let _t: Result<CompactBlock, ser::Error> = ser::deserialize(&mut d);
|
||||
});
|
||||
|
|
|
@ -3,10 +3,10 @@ extern crate grin_core;
|
|||
#[macro_use]
|
||||
extern crate libfuzzer_sys;
|
||||
|
||||
use grin_core::core::transaction;
|
||||
use grin_core::core::Transaction;
|
||||
use grin_core::ser;
|
||||
|
||||
fuzz_target!(|data: &[u8]| {
|
||||
let mut d = data.clone();
|
||||
let _t: Result<transaction::Transaction, ser::Error> = ser::deserialize(&mut d);
|
||||
let _t: Result<Transaction, ser::Error> = ser::deserialize(&mut d);
|
||||
});
|
||||
|
|
|
@ -1,21 +1,15 @@
|
|||
extern crate grin_core;
|
||||
extern crate grin_keychain;
|
||||
extern crate grin_wallet;
|
||||
|
||||
use grin_core::core::target::Difficulty;
|
||||
use grin_core::core::{Block, BlockHeader, CompactBlock, Transaction};
|
||||
use grin_core::libtx::build::{input, output, transaction, with_fee};
|
||||
use grin_core::libtx::reward;
|
||||
use grin_core::core::{Block, CompactBlock, Transaction};
|
||||
use grin_core::ser;
|
||||
use grin_keychain::keychain::ExtKeychain;
|
||||
use grin_keychain::Keychain;
|
||||
use std::fs::{self, File};
|
||||
use std::path::Path;
|
||||
|
||||
fn main() {
|
||||
generate("transaction_read", &tx()).unwrap();
|
||||
generate("block_read", &block()).unwrap();
|
||||
generate("compact_block_read", &compact_block()).unwrap();
|
||||
generate("transaction_read", Transaction::default()).unwrap();
|
||||
generate("block_read", Block::default()).unwrap();
|
||||
generate("compact_block_read", CompactBlock::from(Block::default())).unwrap();
|
||||
}
|
||||
|
||||
fn generate<W: ser::Writeable>(target: &str, obj: W) -> Result<(), ser::Error> {
|
||||
|
@ -36,47 +30,3 @@ fn generate<W: ser::Writeable>(target: &str, obj: W) -> Result<(), ser::Error> {
|
|||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn block() -> Block {
|
||||
let keychain = ExtKeychain::from_random_seed().unwrap();
|
||||
let key_id = keychain.derive_key_id(1).unwrap();
|
||||
|
||||
let mut txs = Vec::new();
|
||||
for _ in 1..10 {
|
||||
txs.push(tx());
|
||||
}
|
||||
|
||||
let header = BlockHeader::default();
|
||||
|
||||
let reward = reward::output(&keychain, &key_id, 0, header.height).unwrap();
|
||||
|
||||
Block::new(&header, txs, Difficulty::min(), reward).unwrap()
|
||||
}
|
||||
|
||||
fn compact_block() -> CompactBlock {
|
||||
CompactBlock {
|
||||
header: BlockHeader::default(),
|
||||
nonce: 1,
|
||||
out_full: vec![],
|
||||
kern_full: vec![],
|
||||
kern_ids: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn tx() -> Transaction {
|
||||
let keychain = ExtKeychain::from_random_seed().unwrap();
|
||||
let key_id1 = keychain.derive_key_id(1).unwrap();
|
||||
let key_id2 = keychain.derive_key_id(2).unwrap();
|
||||
let key_id3 = keychain.derive_key_id(3).unwrap();
|
||||
|
||||
transaction(
|
||||
vec![
|
||||
input(10, key_id1),
|
||||
input(11, key_id2),
|
||||
output(19, key_id3),
|
||||
with_fee(2),
|
||||
],
|
||||
&keychain,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
|
|
@ -111,6 +111,7 @@ impl fmt::Display for Error {
|
|||
/// Header entry for storing in the header MMR.
|
||||
/// Note: we hash the block header itself and maintain the hash in the entry.
|
||||
/// This allows us to lookup the original header from the db as necessary.
|
||||
#[derive(Debug)]
|
||||
pub struct HeaderEntry {
|
||||
hash: Hash,
|
||||
timestamp: u64,
|
||||
|
@ -168,7 +169,7 @@ impl Hashed for HeaderEntry {
|
|||
}
|
||||
|
||||
/// Block header, fairly standard compared to other blockchains.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug, PartialEq, Serialize)]
|
||||
pub struct BlockHeader {
|
||||
/// Version of the block
|
||||
pub version: u16,
|
||||
|
@ -346,7 +347,7 @@ impl BlockHeader {
|
|||
/// non-explicit, assumed to be deducible from block height (similar to
|
||||
/// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
|
||||
/// additive to the total of fees ever collected.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct Block {
|
||||
/// The header with metadata and commitments to the rest of the data
|
||||
pub header: BlockHeader,
|
||||
|
|
|
@ -23,7 +23,7 @@ use crate::util::{secp, secp_static, static_secp_instance};
|
|||
use failure::Fail;
|
||||
|
||||
/// Errors from summing and verifying kernel excesses via committed trait.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Fail)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Fail, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
/// Keychain related error.
|
||||
#[fail(display = "Keychain error {}", _0)]
|
||||
|
|
|
@ -217,6 +217,13 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Truncate the MMR by rewinding back to empty state.
|
||||
pub fn truncate(&mut self) -> Result<(), String> {
|
||||
self.backend.rewind(0, &Bitmap::create())?;
|
||||
self.last_pos = 0;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Rewind the PMMR to a previous position, as if all push operations after
|
||||
/// that had been canceled. Expects a position in the PMMR to rewind and
|
||||
/// bitmaps representing the positions added and removed that we want to
|
||||
|
|
|
@ -18,6 +18,7 @@ use crate::core::hash::{DefaultHashable, Hashed};
|
|||
use crate::core::verifier_cache::VerifierCache;
|
||||
use crate::core::{committed, Committed};
|
||||
use crate::keychain::{self, BlindingFactor};
|
||||
use crate::libtx::secp_ser;
|
||||
use crate::ser::{
|
||||
self, read_multi, FixedLength, PMMRable, Readable, Reader, VerifySortedAndUnique, Writeable,
|
||||
Writer,
|
||||
|
@ -67,7 +68,7 @@ impl Readable for KernelFeatures {
|
|||
}
|
||||
|
||||
/// Errors thrown by Transaction validation
|
||||
#[derive(Clone, Eq, Debug, PartialEq)]
|
||||
#[derive(Clone, Eq, Debug, PartialEq, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
/// Underlying Secp256k1 error (signature validation or invalid public key
|
||||
/// typically)
|
||||
|
@ -158,16 +159,23 @@ pub struct TxKernel {
|
|||
/// Options for a kernel's structure or use
|
||||
pub features: KernelFeatures,
|
||||
/// Fee originally included in the transaction this proof is for.
|
||||
#[serde(with = "secp_ser::string_or_u64")]
|
||||
pub fee: u64,
|
||||
/// This kernel is not valid earlier than lock_height blocks
|
||||
/// The max lock_height of all *inputs* to this transaction
|
||||
#[serde(with = "secp_ser::string_or_u64")]
|
||||
pub lock_height: u64,
|
||||
/// Remainder of the sum of all transaction commitments. If the transaction
|
||||
/// is well formed, amounts components should sum to zero and the excess
|
||||
/// is hence a valid public key.
|
||||
#[serde(
|
||||
serialize_with = "secp_ser::as_hex",
|
||||
deserialize_with = "secp_ser::commitment_from_hex"
|
||||
)]
|
||||
pub excess: Commitment,
|
||||
/// The signature proving the excess is a valid public key, which signs
|
||||
/// the transaction fee.
|
||||
#[serde(with = "secp_ser::sig_serde")]
|
||||
pub excess_sig: secp::Signature,
|
||||
}
|
||||
|
||||
|
@ -757,6 +765,10 @@ impl TransactionBody {
|
|||
pub struct Transaction {
|
||||
/// The kernel "offset" k2
|
||||
/// excess is k1G after splitting the key k = k1 + k2
|
||||
#[serde(
|
||||
serialize_with = "secp_ser::as_hex",
|
||||
deserialize_with = "secp_ser::blind_from_hex"
|
||||
)]
|
||||
pub offset: BlindingFactor,
|
||||
/// The transaction body - inputs/outputs/kernels
|
||||
body: TransactionBody,
|
||||
|
@ -1131,6 +1143,10 @@ pub struct Input {
|
|||
/// We will check maturity for coinbase output.
|
||||
pub features: OutputFeatures,
|
||||
/// The commit referencing the output being spent.
|
||||
#[serde(
|
||||
serialize_with = "secp_ser::as_hex",
|
||||
deserialize_with = "secp_ser::commitment_from_hex"
|
||||
)]
|
||||
pub commit: Commitment,
|
||||
}
|
||||
|
||||
|
@ -1232,8 +1248,16 @@ pub struct Output {
|
|||
/// Options for an output's structure or use
|
||||
pub features: OutputFeatures,
|
||||
/// The homomorphic commitment representing the output amount
|
||||
#[serde(
|
||||
serialize_with = "secp_ser::as_hex",
|
||||
deserialize_with = "secp_ser::commitment_from_hex"
|
||||
)]
|
||||
pub commit: Commitment,
|
||||
/// A proof that the commitment is in the right range
|
||||
#[serde(
|
||||
serialize_with = "secp_ser::as_hex",
|
||||
deserialize_with = "secp_ser::rangeproof_from_hex"
|
||||
)]
|
||||
pub proof: RangeProof,
|
||||
}
|
||||
|
||||
|
|
|
@ -251,7 +251,7 @@ pub fn verify_partial_sig(
|
|||
/// let msg = kernel_sig_msg(0, height, KernelFeatures::HeightLocked).unwrap();
|
||||
/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap();
|
||||
/// let pubkey = excess.to_pubkey(&secp).unwrap();
|
||||
/// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, Some(&pubkey)).unwrap();
|
||||
/// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, None, Some(&pubkey)).unwrap();
|
||||
/// ```
|
||||
|
||||
pub fn sign_from_key_id<K>(
|
||||
|
@ -260,13 +260,14 @@ pub fn sign_from_key_id<K>(
|
|||
msg: &Message,
|
||||
value: u64,
|
||||
key_id: &Identifier,
|
||||
s_nonce: Option<&SecretKey>,
|
||||
blind_sum: Option<&PublicKey>,
|
||||
) -> Result<Signature, Error>
|
||||
where
|
||||
K: Keychain,
|
||||
{
|
||||
let skey = k.derive_key(value, key_id)?;
|
||||
let sig = aggsig::sign_single(secp, &msg, &skey, None, None, None, blind_sum, None)?;
|
||||
let sig = aggsig::sign_single(secp, &msg, &skey, s_nonce, None, None, blind_sum, None)?;
|
||||
Ok(sig)
|
||||
}
|
||||
|
||||
|
@ -316,7 +317,7 @@ where
|
|||
/// let msg = kernel_sig_msg(0, height, KernelFeatures::HeightLocked).unwrap();
|
||||
/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap();
|
||||
/// let pubkey = excess.to_pubkey(&secp).unwrap();
|
||||
/// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, Some(&pubkey)).unwrap();
|
||||
/// let sig = aggsig::sign_from_key_id(&secp, &keychain, &msg, value, &key_id, None, Some(&pubkey)).unwrap();
|
||||
///
|
||||
/// // Verify the signature from the excess commit
|
||||
/// let sig_verifies =
|
||||
|
@ -421,14 +422,15 @@ pub fn add_signatures(
|
|||
Ok(sig)
|
||||
}
|
||||
|
||||
/// Just a simple sig, creates its own nonce, etc
|
||||
/// Just a simple sig, creates its own nonce if not provided
|
||||
pub fn sign_single(
|
||||
secp: &Secp256k1,
|
||||
msg: &Message,
|
||||
skey: &SecretKey,
|
||||
snonce: Option<&SecretKey>,
|
||||
pubkey_sum: Option<&PublicKey>,
|
||||
) -> Result<Signature, Error> {
|
||||
let sig = aggsig::sign_single(secp, &msg, skey, None, None, None, pubkey_sum, None)?;
|
||||
let sig = aggsig::sign_single(secp, &msg, skey, snonce, None, None, pubkey_sum, None)?;
|
||||
Ok(sig)
|
||||
}
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ pub struct Error {
|
|||
inner: Context<ErrorKind>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Eq, Fail, PartialEq)]
|
||||
#[derive(Clone, Debug, Eq, Fail, PartialEq, Serialize, Deserialize)]
|
||||
/// Libwallet error types
|
||||
pub enum ErrorKind {
|
||||
/// SECP error
|
||||
|
|
|
@ -20,10 +20,15 @@ use crate::core::{KernelFeatures, Output, OutputFeatures, TxKernel};
|
|||
use crate::keychain::{Identifier, Keychain};
|
||||
use crate::libtx::error::Error;
|
||||
use crate::libtx::{aggsig, proof};
|
||||
use crate::util::static_secp_instance;
|
||||
use crate::util::{secp, static_secp_instance};
|
||||
|
||||
/// output a reward output
|
||||
pub fn output<K>(keychain: &K, key_id: &Identifier, fees: u64) -> Result<(Output, TxKernel), Error>
|
||||
pub fn output<K>(
|
||||
keychain: &K,
|
||||
key_id: &Identifier,
|
||||
fees: u64,
|
||||
test_mode: bool,
|
||||
) -> Result<(Output, TxKernel), Error>
|
||||
where
|
||||
K: Keychain,
|
||||
{
|
||||
|
@ -50,7 +55,23 @@ where
|
|||
// NOTE: Remember we sign the fee *and* the lock_height.
|
||||
// For a coinbase output the fee is 0 and the lock_height is 0
|
||||
let msg = kernel_sig_msg(0, 0, KernelFeatures::Coinbase)?;
|
||||
let sig = aggsig::sign_from_key_id(&secp, keychain, &msg, value, &key_id, Some(&pubkey))?;
|
||||
let sig = match test_mode {
|
||||
true => {
|
||||
let test_nonce = secp::key::SecretKey::from_slice(&secp, &[1; 32])?;
|
||||
aggsig::sign_from_key_id(
|
||||
&secp,
|
||||
keychain,
|
||||
&msg,
|
||||
value,
|
||||
&key_id,
|
||||
Some(&test_nonce),
|
||||
Some(&pubkey),
|
||||
)?
|
||||
}
|
||||
false => {
|
||||
aggsig::sign_from_key_id(&secp, keychain, &msg, value, &key_id, None, Some(&pubkey))?
|
||||
}
|
||||
};
|
||||
|
||||
let proof = TxKernel {
|
||||
features: KernelFeatures::Coinbase,
|
||||
|
|
|
@ -32,7 +32,7 @@ pub mod pubkey_serde {
|
|||
{
|
||||
let static_secp = static_secp_instance();
|
||||
let static_secp = static_secp.lock();
|
||||
serializer.serialize_str(&to_hex(key.serialize_vec(&static_secp, false).to_vec()))
|
||||
serializer.serialize_str(&to_hex(key.serialize_vec(&static_secp, true).to_vec()))
|
||||
}
|
||||
|
||||
///
|
||||
|
@ -56,7 +56,6 @@ pub mod pubkey_serde {
|
|||
pub mod option_sig_serde {
|
||||
use crate::serde::{Deserialize, Deserializer, Serializer};
|
||||
use crate::util::secp;
|
||||
use crate::util::static_secp_instance;
|
||||
use crate::util::{from_hex, to_hex};
|
||||
use serde::de::Error;
|
||||
|
||||
|
@ -66,11 +65,7 @@ pub mod option_sig_serde {
|
|||
S: Serializer,
|
||||
{
|
||||
match sig {
|
||||
Some(sig) => {
|
||||
let static_secp = static_secp_instance();
|
||||
let static_secp = static_secp.lock();
|
||||
serializer.serialize_str(&to_hex(sig.serialize_der(&static_secp)))
|
||||
}
|
||||
Some(sig) => serializer.serialize_str(&to_hex(sig.to_raw_data().to_vec())),
|
||||
None => serializer.serialize_none(),
|
||||
}
|
||||
}
|
||||
|
@ -80,14 +75,13 @@ pub mod option_sig_serde {
|
|||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let static_secp = static_secp_instance();
|
||||
let static_secp = static_secp.lock();
|
||||
|
||||
Option::<&str>::deserialize(deserializer).and_then(|res| match res {
|
||||
Option::<String>::deserialize(deserializer).and_then(|res| match res {
|
||||
Some(string) => from_hex(string.to_string())
|
||||
.map_err(|err| Error::custom(err.to_string()))
|
||||
.and_then(|bytes: Vec<u8>| {
|
||||
secp::Signature::from_der(&static_secp, &bytes)
|
||||
let mut b = [0u8; 64];
|
||||
b.copy_from_slice(&bytes[0..64]);
|
||||
secp::Signature::from_raw_data(&b)
|
||||
.map(|val| Some(val))
|
||||
.map_err(|err| Error::custom(err.to_string()))
|
||||
}),
|
||||
|
@ -101,7 +95,6 @@ pub mod option_sig_serde {
|
|||
pub mod sig_serde {
|
||||
use crate::serde::{Deserialize, Deserializer, Serializer};
|
||||
use crate::util::secp;
|
||||
use crate::util::static_secp_instance;
|
||||
use crate::util::{from_hex, to_hex};
|
||||
use serde::de::Error;
|
||||
|
||||
|
@ -110,9 +103,7 @@ pub mod sig_serde {
|
|||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let static_secp = static_secp_instance();
|
||||
let static_secp = static_secp.lock();
|
||||
serializer.serialize_str(&to_hex(sig.serialize_der(&static_secp)))
|
||||
serializer.serialize_str(&to_hex(sig.to_raw_data().to_vec()))
|
||||
}
|
||||
|
||||
///
|
||||
|
@ -120,13 +111,12 @@ pub mod sig_serde {
|
|||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let static_secp = static_secp_instance();
|
||||
let static_secp = static_secp.lock();
|
||||
String::deserialize(deserializer)
|
||||
.and_then(|string| from_hex(string).map_err(|err| Error::custom(err.to_string())))
|
||||
.and_then(|bytes: Vec<u8>| {
|
||||
secp::Signature::from_der(&static_secp, &bytes)
|
||||
.map_err(|err| Error::custom(err.to_string()))
|
||||
let mut b = [0u8; 64];
|
||||
b.copy_from_slice(&bytes[0..64]);
|
||||
secp::Signature::from_raw_data(&b).map_err(|err| Error::custom(err.to_string()))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -174,6 +164,104 @@ where
|
|||
serializer.serialize_str(&to_hex(bytes.as_ref().to_vec()))
|
||||
}
|
||||
|
||||
/// Used to ensure u64s are serialised in json
|
||||
/// as strings by default, since it can't be guaranteed that consumers
|
||||
/// will know what to do with u64 literals (e.g. Javascript). However,
|
||||
/// fields using this tag can be deserialized from literals or strings.
|
||||
/// From solutions on:
|
||||
/// https://github.com/serde-rs/json/issues/329
|
||||
pub mod string_or_u64 {
|
||||
use std::fmt;
|
||||
|
||||
use serde::{de, Deserializer, Serializer};
|
||||
|
||||
/// serialize into a string
|
||||
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
T: fmt::Display,
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.collect_str(value)
|
||||
}
|
||||
|
||||
/// deserialize from either literal or string
|
||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
struct Visitor;
|
||||
impl<'a> de::Visitor<'a> for Visitor {
|
||||
type Value = u64;
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
formatter,
|
||||
"a string containing digits or an int fitting into u64"
|
||||
)
|
||||
}
|
||||
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
|
||||
Ok(v)
|
||||
}
|
||||
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
|
||||
where
|
||||
E: de::Error,
|
||||
{
|
||||
s.parse().map_err(de::Error::custom)
|
||||
}
|
||||
}
|
||||
deserializer.deserialize_any(Visitor)
|
||||
}
|
||||
}
|
||||
|
||||
/// As above, for Options
|
||||
pub mod opt_string_or_u64 {
|
||||
use std::fmt;
|
||||
|
||||
use serde::{de, Deserializer, Serializer};
|
||||
|
||||
/// serialize into string or none
|
||||
pub fn serialize<T, S>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
T: fmt::Display,
|
||||
S: Serializer,
|
||||
{
|
||||
match value {
|
||||
Some(v) => serializer.collect_str(v),
|
||||
None => serializer.serialize_none(),
|
||||
}
|
||||
}
|
||||
|
||||
/// deser from 'null', literal or string
|
||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<u64>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
struct Visitor;
|
||||
impl<'a> de::Visitor<'a> for Visitor {
|
||||
type Value = Option<u64>;
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
formatter,
|
||||
"null, a string containing digits or an int fitting into u64"
|
||||
)
|
||||
}
|
||||
fn visit_unit<E>(self) -> Result<Self::Value, E> {
|
||||
Ok(None)
|
||||
}
|
||||
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> {
|
||||
Ok(Some(v))
|
||||
}
|
||||
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
|
||||
where
|
||||
E: de::Error,
|
||||
{
|
||||
let val: u64 = s.parse().map_err(de::Error::custom)?;
|
||||
Ok(Some(val))
|
||||
}
|
||||
}
|
||||
deserializer.deserialize_any(Visitor)
|
||||
}
|
||||
}
|
||||
|
||||
// Test serialization methods of components that are being used
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
@ -195,6 +283,10 @@ mod test {
|
|||
pub opt_sig: Option<Signature>,
|
||||
#[serde(with = "sig_serde")]
|
||||
pub sig: Signature,
|
||||
#[serde(with = "string_or_u64")]
|
||||
pub num: u64,
|
||||
#[serde(with = "opt_string_or_u64")]
|
||||
pub opt_num: Option<u64>,
|
||||
}
|
||||
|
||||
impl SerTest {
|
||||
|
@ -205,11 +297,13 @@ mod test {
|
|||
let mut msg = [0u8; 32];
|
||||
thread_rng().fill(&mut msg);
|
||||
let msg = Message::from_slice(&msg).unwrap();
|
||||
let sig = aggsig::sign_single(&secp, &msg, &sk, None).unwrap();
|
||||
let sig = aggsig::sign_single(&secp, &msg, &sk, None, None).unwrap();
|
||||
SerTest {
|
||||
pub_key: PublicKey::from_secret_key(&secp, &sk).unwrap(),
|
||||
opt_sig: Some(sig.clone()),
|
||||
sig: sig.clone(),
|
||||
num: 30,
|
||||
opt_num: Some(33),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -215,7 +215,7 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
|
|||
}
|
||||
|
||||
/// Block header information pertaining to the proof of work
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug, PartialEq, Serialize)]
|
||||
pub struct ProofOfWork {
|
||||
/// Total accumulated difficulty since genesis block
|
||||
pub total_difficulty: Difficulty,
|
||||
|
@ -316,7 +316,7 @@ impl ProofOfWork {
|
|||
/// them at their exact bit size. The resulting bit sequence is padded to be
|
||||
/// byte-aligned.
|
||||
///
|
||||
#[derive(Clone, PartialOrd, PartialEq)]
|
||||
#[derive(Clone, PartialOrd, PartialEq, Serialize)]
|
||||
pub struct Proof {
|
||||
/// Power of 2 used for the size of the cuckoo graph
|
||||
pub edge_bits: u8,
|
||||
|
|
456
core/src/ser.rs
456
core/src/ser.rs
|
@ -35,10 +35,17 @@ use std::time::Duration;
|
|||
use std::{cmp, error, fmt};
|
||||
|
||||
/// Possible errors deriving from serializing or deserializing.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
#[derive(Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
/// Wraps an io error produced when reading or writing
|
||||
IOErr(String, io::ErrorKind),
|
||||
IOErr(
|
||||
String,
|
||||
#[serde(
|
||||
serialize_with = "serialize_error_kind",
|
||||
deserialize_with = "deserialize_error_kind"
|
||||
)]
|
||||
io::ErrorKind,
|
||||
),
|
||||
/// Expected a given value that wasn't found
|
||||
UnexpectedData {
|
||||
/// What we wanted
|
||||
|
@ -709,7 +716,7 @@ pub trait FixedLength {
|
|||
pub trait PMMRable: Writeable + Clone + Debug + DefaultHashable {
|
||||
/// The type of element actually stored in the MMR data file.
|
||||
/// This allows us to store Hash elements in the header MMR for variable size BlockHeaders.
|
||||
type E: FixedLength + Readable + Writeable;
|
||||
type E: FixedLength + Readable + Writeable + Debug;
|
||||
|
||||
/// Convert the pmmrable into the element to be stored in the MMR data file.
|
||||
fn as_elmt(&self) -> Self::E;
|
||||
|
@ -813,3 +820,446 @@ impl AsFixedBytes for crate::keychain::Identifier {
|
|||
IDENTIFIER_SIZE
|
||||
}
|
||||
}
|
||||
|
||||
// serializer for io::Errorkind, originally auto-generated by serde-derive
|
||||
// slightly modified to handle the #[non_exhaustive] tag on io::ErrorKind
|
||||
fn serialize_error_kind<S>(
|
||||
kind: &io::ErrorKind,
|
||||
serializer: S,
|
||||
) -> serde::export::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
match *kind {
|
||||
io::ErrorKind::NotFound => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 0u32, "NotFound")
|
||||
}
|
||||
io::ErrorKind::PermissionDenied => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
1u32,
|
||||
"PermissionDenied",
|
||||
),
|
||||
io::ErrorKind::ConnectionRefused => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
2u32,
|
||||
"ConnectionRefused",
|
||||
),
|
||||
io::ErrorKind::ConnectionReset => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
3u32,
|
||||
"ConnectionReset",
|
||||
),
|
||||
io::ErrorKind::ConnectionAborted => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
4u32,
|
||||
"ConnectionAborted",
|
||||
),
|
||||
io::ErrorKind::NotConnected => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 5u32, "NotConnected")
|
||||
}
|
||||
io::ErrorKind::AddrInUse => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 6u32, "AddrInUse")
|
||||
}
|
||||
io::ErrorKind::AddrNotAvailable => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
7u32,
|
||||
"AddrNotAvailable",
|
||||
),
|
||||
io::ErrorKind::BrokenPipe => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 8u32, "BrokenPipe")
|
||||
}
|
||||
io::ErrorKind::AlreadyExists => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
9u32,
|
||||
"AlreadyExists",
|
||||
),
|
||||
io::ErrorKind::WouldBlock => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 10u32, "WouldBlock")
|
||||
}
|
||||
io::ErrorKind::InvalidInput => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
11u32,
|
||||
"InvalidInput",
|
||||
),
|
||||
io::ErrorKind::InvalidData => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 12u32, "InvalidData")
|
||||
}
|
||||
io::ErrorKind::TimedOut => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 13u32, "TimedOut")
|
||||
}
|
||||
io::ErrorKind::WriteZero => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 14u32, "WriteZero")
|
||||
}
|
||||
io::ErrorKind::Interrupted => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 15u32, "Interrupted")
|
||||
}
|
||||
io::ErrorKind::Other => {
|
||||
serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 16u32, "Other")
|
||||
}
|
||||
io::ErrorKind::UnexpectedEof => serde::Serializer::serialize_unit_variant(
|
||||
serializer,
|
||||
"ErrorKind",
|
||||
17u32,
|
||||
"UnexpectedEof",
|
||||
),
|
||||
// #[non_exhaustive] is used on the definition of ErrorKind for future compatability
|
||||
// That means match statements always need to match on _.
|
||||
// The downside here is that rustc won't be able to warn us if io::ErrorKind another
|
||||
// field is added to io::ErrorKind
|
||||
_ => serde::Serializer::serialize_unit_variant(serializer, "ErrorKind", 16u32, "Other"),
|
||||
}
|
||||
}
|
||||
|
||||
// deserializer for io::Errorkind, originally auto-generated by serde-derive
|
||||
fn deserialize_error_kind<'de, D>(deserializer: D) -> serde::export::Result<io::ErrorKind, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
#[allow(non_camel_case_types)]
|
||||
enum Field {
|
||||
field0,
|
||||
field1,
|
||||
field2,
|
||||
field3,
|
||||
field4,
|
||||
field5,
|
||||
field6,
|
||||
field7,
|
||||
field8,
|
||||
field9,
|
||||
field10,
|
||||
field11,
|
||||
field12,
|
||||
field13,
|
||||
field14,
|
||||
field15,
|
||||
field16,
|
||||
field17,
|
||||
}
|
||||
struct FieldVisitor;
|
||||
impl<'de> serde::de::Visitor<'de> for FieldVisitor {
|
||||
type Value = Field;
|
||||
fn expecting(
|
||||
&self,
|
||||
formatter: &mut serde::export::Formatter,
|
||||
) -> serde::export::fmt::Result {
|
||||
serde::export::Formatter::write_str(formatter, "variant identifier")
|
||||
}
|
||||
fn visit_u64<E>(self, value: u64) -> serde::export::Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
match value {
|
||||
0u64 => serde::export::Ok(Field::field0),
|
||||
1u64 => serde::export::Ok(Field::field1),
|
||||
2u64 => serde::export::Ok(Field::field2),
|
||||
3u64 => serde::export::Ok(Field::field3),
|
||||
4u64 => serde::export::Ok(Field::field4),
|
||||
5u64 => serde::export::Ok(Field::field5),
|
||||
6u64 => serde::export::Ok(Field::field6),
|
||||
7u64 => serde::export::Ok(Field::field7),
|
||||
8u64 => serde::export::Ok(Field::field8),
|
||||
9u64 => serde::export::Ok(Field::field9),
|
||||
10u64 => serde::export::Ok(Field::field10),
|
||||
11u64 => serde::export::Ok(Field::field11),
|
||||
12u64 => serde::export::Ok(Field::field12),
|
||||
13u64 => serde::export::Ok(Field::field13),
|
||||
14u64 => serde::export::Ok(Field::field14),
|
||||
15u64 => serde::export::Ok(Field::field15),
|
||||
16u64 => serde::export::Ok(Field::field16),
|
||||
17u64 => serde::export::Ok(Field::field17),
|
||||
_ => serde::export::Err(serde::de::Error::invalid_value(
|
||||
serde::de::Unexpected::Unsigned(value),
|
||||
&"variant index 0 <= i < 18",
|
||||
)),
|
||||
}
|
||||
}
|
||||
fn visit_str<E>(self, value: &str) -> serde::export::Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
match value {
|
||||
"NotFound" => serde::export::Ok(Field::field0),
|
||||
"PermissionDenied" => serde::export::Ok(Field::field1),
|
||||
"ConnectionRefused" => serde::export::Ok(Field::field2),
|
||||
"ConnectionReset" => serde::export::Ok(Field::field3),
|
||||
"ConnectionAborted" => serde::export::Ok(Field::field4),
|
||||
"NotConnected" => serde::export::Ok(Field::field5),
|
||||
"AddrInUse" => serde::export::Ok(Field::field6),
|
||||
"AddrNotAvailable" => serde::export::Ok(Field::field7),
|
||||
"BrokenPipe" => serde::export::Ok(Field::field8),
|
||||
"AlreadyExists" => serde::export::Ok(Field::field9),
|
||||
"WouldBlock" => serde::export::Ok(Field::field10),
|
||||
"InvalidInput" => serde::export::Ok(Field::field11),
|
||||
"InvalidData" => serde::export::Ok(Field::field12),
|
||||
"TimedOut" => serde::export::Ok(Field::field13),
|
||||
"WriteZero" => serde::export::Ok(Field::field14),
|
||||
"Interrupted" => serde::export::Ok(Field::field15),
|
||||
"Other" => serde::export::Ok(Field::field16),
|
||||
"UnexpectedEof" => serde::export::Ok(Field::field17),
|
||||
_ => serde::export::Err(serde::de::Error::unknown_variant(value, VARIANTS)),
|
||||
}
|
||||
}
|
||||
fn visit_bytes<E>(self, value: &[u8]) -> serde::export::Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
match value {
|
||||
b"NotFound" => serde::export::Ok(Field::field0),
|
||||
b"PermissionDenied" => serde::export::Ok(Field::field1),
|
||||
b"ConnectionRefused" => serde::export::Ok(Field::field2),
|
||||
b"ConnectionReset" => serde::export::Ok(Field::field3),
|
||||
b"ConnectionAborted" => serde::export::Ok(Field::field4),
|
||||
b"NotConnected" => serde::export::Ok(Field::field5),
|
||||
b"AddrInUse" => serde::export::Ok(Field::field6),
|
||||
b"AddrNotAvailable" => serde::export::Ok(Field::field7),
|
||||
b"BrokenPipe" => serde::export::Ok(Field::field8),
|
||||
b"AlreadyExists" => serde::export::Ok(Field::field9),
|
||||
b"WouldBlock" => serde::export::Ok(Field::field10),
|
||||
b"InvalidInput" => serde::export::Ok(Field::field11),
|
||||
b"InvalidData" => serde::export::Ok(Field::field12),
|
||||
b"TimedOut" => serde::export::Ok(Field::field13),
|
||||
b"WriteZero" => serde::export::Ok(Field::field14),
|
||||
b"Interrupted" => serde::export::Ok(Field::field15),
|
||||
b"Other" => serde::export::Ok(Field::field16),
|
||||
b"UnexpectedEof" => serde::export::Ok(Field::field17),
|
||||
_ => {
|
||||
let value = &serde::export::from_utf8_lossy(value);
|
||||
serde::export::Err(serde::de::Error::unknown_variant(value, VARIANTS))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'de> serde::Deserialize<'de> for Field {
|
||||
#[inline]
|
||||
fn deserialize<D>(deserializer: D) -> serde::export::Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
serde::Deserializer::deserialize_identifier(deserializer, FieldVisitor)
|
||||
}
|
||||
}
|
||||
struct Visitor<'de> {
|
||||
marker: serde::export::PhantomData<io::ErrorKind>,
|
||||
lifetime: serde::export::PhantomData<&'de ()>,
|
||||
}
|
||||
impl<'de> serde::de::Visitor<'de> for Visitor<'de> {
|
||||
type Value = io::ErrorKind;
|
||||
fn expecting(
|
||||
&self,
|
||||
formatter: &mut serde::export::Formatter,
|
||||
) -> serde::export::fmt::Result {
|
||||
serde::export::Formatter::write_str(formatter, "enum io::ErrorKind")
|
||||
}
|
||||
fn visit_enum<A>(self, data: A) -> serde::export::Result<Self::Value, A::Error>
|
||||
where
|
||||
A: serde::de::EnumAccess<'de>,
|
||||
{
|
||||
match match serde::de::EnumAccess::variant(data) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
} {
|
||||
(Field::field0, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::NotFound)
|
||||
}
|
||||
(Field::field1, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::PermissionDenied)
|
||||
}
|
||||
(Field::field2, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::ConnectionRefused)
|
||||
}
|
||||
(Field::field3, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::ConnectionReset)
|
||||
}
|
||||
(Field::field4, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::ConnectionAborted)
|
||||
}
|
||||
(Field::field5, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::NotConnected)
|
||||
}
|
||||
(Field::field6, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::AddrInUse)
|
||||
}
|
||||
(Field::field7, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::AddrNotAvailable)
|
||||
}
|
||||
(Field::field8, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::BrokenPipe)
|
||||
}
|
||||
(Field::field9, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::AlreadyExists)
|
||||
}
|
||||
(Field::field10, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::WouldBlock)
|
||||
}
|
||||
(Field::field11, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::InvalidInput)
|
||||
}
|
||||
(Field::field12, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::InvalidData)
|
||||
}
|
||||
(Field::field13, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::TimedOut)
|
||||
}
|
||||
(Field::field14, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::WriteZero)
|
||||
}
|
||||
(Field::field15, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::Interrupted)
|
||||
}
|
||||
(Field::field16, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::Other)
|
||||
}
|
||||
(Field::field17, variant) => {
|
||||
match serde::de::VariantAccess::unit_variant(variant) {
|
||||
serde::export::Ok(val) => val,
|
||||
serde::export::Err(err) => {
|
||||
return serde::export::Err(err);
|
||||
}
|
||||
};
|
||||
serde::export::Ok(io::ErrorKind::UnexpectedEof)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
const VARIANTS: &'static [&'static str] = &[
|
||||
"NotFound",
|
||||
"PermissionDenied",
|
||||
"ConnectionRefused",
|
||||
"ConnectionReset",
|
||||
"ConnectionAborted",
|
||||
"NotConnected",
|
||||
"AddrInUse",
|
||||
"AddrNotAvailable",
|
||||
"BrokenPipe",
|
||||
"AlreadyExists",
|
||||
"WouldBlock",
|
||||
"InvalidInput",
|
||||
"InvalidData",
|
||||
"TimedOut",
|
||||
"WriteZero",
|
||||
"Interrupted",
|
||||
"Other",
|
||||
"UnexpectedEof",
|
||||
];
|
||||
serde::Deserializer::deserialize_enum(
|
||||
deserializer,
|
||||
"ErrorKind",
|
||||
VARIANTS,
|
||||
Visitor {
|
||||
marker: serde::export::PhantomData::<io::ErrorKind>,
|
||||
lifetime: serde::export::PhantomData,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
|
|
@ -91,7 +91,7 @@ where
|
|||
K: Keychain,
|
||||
{
|
||||
let fees = txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward_output = reward::output(keychain, &key_id, fees).unwrap();
|
||||
let reward_output = reward::output(keychain, &key_id, fees, false).unwrap();
|
||||
Block::new(
|
||||
&previous_header,
|
||||
txs.into_iter().cloned().collect(),
|
||||
|
|
|
@ -1,89 +0,0 @@
|
|||
# Grin Wallet + Library Design
|
||||
|
||||

|
||||
|
||||
## High Level Wallet Design Overview
|
||||
|
||||
The current Grin `wallet` crate provides several layers of libraries, services, and traits that can be mixed, matched and reimplemented to support
|
||||
various needs within the default Grin wallet as well as provide a set of useful library functions for 3rd-party implementors. At a very high level,
|
||||
the code is organized into the following components (from highest-level to lowest):
|
||||
|
||||
* **Command Line Client** - The command line client invoked by `grin wallet [command]`, simply instantiates the other components below
|
||||
and parses command line arguments as needed.
|
||||
* **Web Wallet Client** - [Work In Progress] A web wallet client accessible from the local machine only. Current code can be viewed here:
|
||||
https://github.com/mimblewimble/grin-web-wallet
|
||||
* **Static File Server** - [TBD] A means of serving up the web wallet client above to the user (still under consideration)
|
||||
* **libWallet** - A high level wallet library that provides functions for the default grin wallet. The functions in here can be somewhat
|
||||
specific to how the grin wallet does things, but could still be reused by 3rd party implementors following the same basic principles as grin
|
||||
does. Major functionality is split into:
|
||||
* **Owner API** - An API that provides information that should only be viewable by the wallet owner
|
||||
* **Foreign API** - An API to communicate with other wallets and external grin nodes
|
||||
* **Service Controller** - A Controller that instantiates the above APIs (either locally or via web services)
|
||||
* **Internal Functions** Helper functions to perform needed wallet tasks, such as selecting coins, updating wallet outputs with
|
||||
results from a Grin node, etc.
|
||||
* **libTx** - Library that provides lower-level transaction building, rangeproof and signing functions, highly-reusable by wallet implementors.
|
||||
* **Wallet Traits** - A set of generic traits defined within libWallet and the `keychain` crate . A wallet implementation such as Grin's current
|
||||
default only needs to implement these traits in order to provide a wallet:
|
||||
* **NodeClient** - Defines communication between the wallet, a running grin node and/or other wallets
|
||||
* **WalletBackend** - Defines the storage implementation of the wallet
|
||||
* **KeyChain** - Defines key derivation operations
|
||||
|
||||
## Module-Specific Notes
|
||||
|
||||
A full API-Description for each of these parts is still TBD (and should be generated by rustdoc rather than repeated here). However a few design
|
||||
notes on each module are worth mentioning here.
|
||||
|
||||
### Web Wallet Client / Static File Server
|
||||
|
||||
This component is not a 3rd-party hosted 'Web Wallet' , but a client meant to be run on the local machine only by the wallet owner. It should provide
|
||||
a usable browser interface into the wallet, that should be functionally equivalent to using the command line but (hopefully) far easier to use.
|
||||
It is currently not being included by a default grin build, although the required listener is currently being run by default. To build and test this
|
||||
component, see instructions on the [project page](https://github.com/mimblewimble/grin-web-wallet). The 'Static File Server' is still under
|
||||
discussion, and concerns how to provide the web-wallet to the user in a default Grin build.
|
||||
|
||||
### Owner API / Foreign API
|
||||
|
||||
The high-level wallet API has been split into two, to allow for different requirements on each. For instance, the Foreign API would listen on
|
||||
an external-facing port, and therefore potentially has different security requirements from the Owner API, which can simply be bound to localhost
|
||||
only.
|
||||
|
||||
### libTX
|
||||
|
||||
Transactions are built using the concept of a 'Slate', which is a data structure that gets passed around to all participants in a transaction,
|
||||
with each appending their Inputs, Outputs or Signatures to it to build a completed wallet transaction. Although the current mode of operation in
|
||||
the default client only supports single-user - single recipient, an arbitrary number of participants to a transaction is supported within libTX.
|
||||
|
||||
### Wallet Traits
|
||||
|
||||
In the current code, a Wallet implementation is just a combination of these three traits. The vast majority of functions within libwallet
|
||||
and libTX have a signature similar to the following:
|
||||
|
||||
```rust
|
||||
pub fn retrieve_outputs<T: ?Sized, C, K>(
|
||||
!·wallet: &mut T,
|
||||
!·show_spent: bool,
|
||||
!·tx_id: Option<u32>,
|
||||
) -> Result<Vec<OutputData>, Error>
|
||||
where
|
||||
!·T: WalletBackend<C, K>,
|
||||
!·C: NodeClient,
|
||||
!·K: Keychain,
|
||||
{
|
||||
```
|
||||
|
||||
With `T` in this instance being a class that implements the `WalletBackend` trait, which is further parameterized with implementations of
|
||||
`NodeClient` and `Keychain`.
|
||||
|
||||
There is currently only a single implementation of the Keychain trait within the Grin code, in the `keychain` crate exported as `ExtKeyChain`.
|
||||
The `Keychain` trait makes several assumptions about the underlying implementation, particularly that it will adhere to a
|
||||
[BIP-38 style](https://github.com/bitcoin/bips/blob/master/bip-0038.mediawiki) 'master key -> child key' model.
|
||||
|
||||
There are two implementations of `NodeClient` within the code, the main version being the `HTTPNodeClient` found within `wallet/src/client.rs` and
|
||||
the seconds a test client that communicates with an in-process instance of a chain. The NodeClient isolates all network calls, so upgrading wallet
|
||||
communication from the current simple http interaction to a more secure protocol (or allowing for many options) should be a simple
|
||||
matter of dropping in different `NodeClient` implementations.
|
||||
|
||||
There are also two implementations of `WalletBackend` within the code at the base of the `wallet` crate. `LMDBBackend` found within
|
||||
`wallet/src/lmdb_wallet.rs` is the main implementation, and is now used by all grin wallet commands. The earlier `FileWallet` still exists
|
||||
within the code, however it is not invoked, and given there are no real advantages to running it over a DB implementation, development on it
|
||||
has been dropped in favour of the LMDB implementation.
|
|
@ -1,82 +0,0 @@
|
|||
|
||||
Mode of Interactions
|
||||
====================
|
||||
|
||||
There's a variety of ways wallet software can be integrated with, from hardware
|
||||
to automated bots to the more classic desktop wallets. No single implementation
|
||||
can hope to accommodate all possible interactions, especially if it wants to
|
||||
remain user friendly (who or whatever the user may be). With that in mind, Grin
|
||||
needs to provide a healthy base for a more complete wallet ecosystem to
|
||||
develop.
|
||||
|
||||
We propose to achieve this by implementing, as part of the "standard" wallet:
|
||||
|
||||
* A good set of APIs that are flexible enough for most cases.
|
||||
* One or two default main mode of interaction.
|
||||
|
||||
While not being exhaustive, the different ways we can imagine wallet software
|
||||
working with Grin are the following:
|
||||
|
||||
1. A receive-only online wallet server. This should have some well-known network
|
||||
address that can be reached by a client. There should be a spending key kept
|
||||
offline.
|
||||
1. A fully offline interaction. The sender uses her wallet to dump a file that's
|
||||
sent to the receiver in any practical way. The receiver builds upon that file,
|
||||
sending it back to the sender. The sender finalizes the transaction and sends it
|
||||
to a Grin node.
|
||||
1. Fully online interaction through a non-trusted 3rd party. In this mode
|
||||
receiver and sender both connect to a web server that facilitates the
|
||||
interaction. Exchanges can be all be encrypted.
|
||||
1. Hardware wallet. Similar to offline but the hardware wallet interacts with
|
||||
a computer to produce required public keys and signatures.
|
||||
1. Web wallet. A 3rd party runs the required software behind the scenes and
|
||||
handles some of the key generation. This could be done in a custodial,
|
||||
non-custodial and multisig fashion.
|
||||
1. Fully programmatic. Similar to the online server, but both for receiving and
|
||||
sending, most likely by an automated bot of some sorts.
|
||||
|
||||
As part of the Grin project, we will only consider the first 2 modes of
|
||||
interaction. We hope that other projects and businesses will tackle other modes
|
||||
and perhaps even create new ones we haven't considered.
|
||||
|
||||
Design Considerations
|
||||
=====================
|
||||
|
||||
Lower-level APIs
|
||||
----------------
|
||||
|
||||
Rust can easily be [reused by other languages](https://doc.rust-lang.org/1.2.0/book/rust-inside-other-languages.html)
|
||||
like Ruby, Python or node.js, using standard FFI libraries. By providing APIs
|
||||
to build and manipulate commitments, related bulletproofs and aggregate
|
||||
signatures we can kill many birds with one stone:
|
||||
|
||||
* Make the job of wallet implementers easier. The underlying cryptographic
|
||||
concepts can be quite complex.
|
||||
* Make wallet implementations more secure. As we provide a higher level API,
|
||||
there is less risk in misusing lower-level constructs.
|
||||
* Provide some standardization in the way aggregations are done. There are
|
||||
sometimes multiple ways to build a commitment or aggregate signatures or proofs
|
||||
in a multiparty output.
|
||||
* Provide more eyeballs and more security to the standard library. We need to
|
||||
have the wallet APIs thoroughly reviewed regardless.
|
||||
|
||||
Receive-only Online Wallet
|
||||
--------------------------
|
||||
|
||||
To be receive only we need an aggregation between a "hot" receiving key and an
|
||||
offline spending key. To receive, only the receiving key should be required, to
|
||||
spend both keys are needed.
|
||||
|
||||
This can work by forming a multi-party output (multisig) where only the public
|
||||
part of the spending key is known to the receiving server. Practically a master
|
||||
public key that can be derived similarly to Hierarchical Deterministic wallets
|
||||
would provide the best security and privacy.
|
||||
|
||||
TODO figure out what's needed for the bulletproof. Maybe pre-compute multiple
|
||||
of them for ranges of receiving amounts (i.e. 1-10 grins, 10-100 grins, etc).
|
||||
|
||||
Offline Wallet
|
||||
--------------
|
||||
|
||||
This is likely the simplest to implement, with each interaction dumping its
|
||||
intermediate values to a file and building off each other.
|
Binary file not shown.
Before ![]() (image error) Size: 113 KiB |
|
@ -1,110 +0,0 @@
|
|||
@startuml grin-wallet-overview
|
||||
skinparam componentStyle uml2
|
||||
|
||||
[Grin Node] as grin_node
|
||||
|
||||
folder "Provided by Grin" as services {
|
||||
component foreign_api [
|
||||
**Foreign API**
|
||||
External-Facing functions
|
||||
- receive_tx, build coinbase
|
||||
]
|
||||
|
||||
component owner_api [
|
||||
**Owner API**
|
||||
Functions used by wallet owner only
|
||||
- retrieve outputs, retrieve txs,
|
||||
get balances, send, etc. . .
|
||||
|
||||
]
|
||||
component libtx [
|
||||
**Transaction Library (libTx)**
|
||||
Lower-Level transaction functions
|
||||
- Build transaction (via Slate), sign,
|
||||
build reward, fees, etc. . .
|
||||
]
|
||||
component libwallet [
|
||||
**Wallet Library (libWallet) **
|
||||
- Higher level wallet functions (select coins,
|
||||
update wallet from node, etc)
|
||||
- Service Controller
|
||||
(instantiate libs, start listeners)
|
||||
]
|
||||
() "Owner HTTP Listener (localhost only)" as owner_http
|
||||
() "Foreign HTTP Listener" as foreign_http
|
||||
() "Owner Single-Use" as owner_single
|
||||
() "Foreign Single-Use" as foreign_single
|
||||
}
|
||||
|
||||
' Trait definitions
|
||||
package "Traits Implemented by Wallets" as traits {
|
||||
database "WalletBackend" as wallet_backend
|
||||
database "KeyChain" as keychain
|
||||
component "NodeClient" as wallet_client
|
||||
}
|
||||
|
||||
note left of wallet_client
|
||||
- Communication layer implementation
|
||||
- Handles underlying communication with grin node
|
||||
or other wallets
|
||||
- HTTP implementation provided currently, (Other,
|
||||
more secure protocols possible.)
|
||||
end note
|
||||
|
||||
note bottom of keychain
|
||||
- Handles all key derivation operations
|
||||
end note
|
||||
|
||||
note bottom of wallet_backend
|
||||
- Implements underlying storage for wallet data
|
||||
- LMDB storage provided in default client, others
|
||||
possible (Flat-file, other DBs, etc)
|
||||
end note
|
||||
|
||||
libtx <--> traits
|
||||
libwallet <--> traits
|
||||
|
||||
note right of traits
|
||||
**Default Wallet simply a struct that provides**
|
||||
**implementations for these 3 traits**
|
||||
end note
|
||||
|
||||
' Client Side
|
||||
'package "Provided as reference implementation" {
|
||||
[Pure JS Wallet Client Implementation] as js_client
|
||||
[Command Line Wallet Client] as cl_client
|
||||
component web_server [
|
||||
V. Light Rust Web Server - Serve static files (TBD)
|
||||
(Provided by default - localhost only)
|
||||
(Serve up pure JS client)
|
||||
]
|
||||
'}
|
||||
|
||||
[External Wallets] as external_wallets
|
||||
[External Wallets] as external_wallets_2
|
||||
|
||||
wallet_client <--> grin_node
|
||||
wallet_client <--> external_wallets_2
|
||||
|
||||
web_server <--> owner_http
|
||||
js_client <-- web_server
|
||||
cl_client <--> owner_single
|
||||
cl_client <--> foreign_single
|
||||
|
||||
owner_single <--> owner_api
|
||||
foreign_single <--> foreign_api
|
||||
|
||||
libwallet <--> libtx
|
||||
|
||||
foreign_api --> libwallet
|
||||
owner_api --> libwallet
|
||||
|
||||
js_client <--> owner_http
|
||||
owner_http <--> owner_api
|
||||
external_wallets <--> foreign_http
|
||||
foreign_http <--> foreign_api
|
||||
|
||||
'layout fix
|
||||
'grin_node -[hidden]- wallet_backend
|
||||
|
||||
@enduml
|
|
@ -1,88 +0,0 @@
|
|||
# Wallet TLS setup
|
||||
|
||||
## What you need
|
||||
* A server with a static IP address (eg `3.3.3.3`)
|
||||
* A domain name ownership (`example.com`)
|
||||
* DNS configuration for this IP (`grin1.example.com` -> `3.3.3.3`)
|
||||
|
||||
If you don't have a static IP you may want to consider using services like DynDNS which support dynamic IP resolving, this case is not covered by this guide, but all the next steps are equally applicable.
|
||||
|
||||
If you don't have a domain name there is a possibility to get a TLS certificate for your IP, but you have to pay for that (so perhaps it's cheaper to buy a domain name) and it's rarely supported by certificate providers.
|
||||
|
||||
## I have a TLS certificate already
|
||||
Uncomment and update the following lines in wallet config (by default `~/.grin/grin-wallet.toml`):
|
||||
|
||||
```toml
|
||||
tls_certificate_file = "/path/to/my/certificate/fullchain.pem"
|
||||
tls_certificate_key = "/path/to/my/certificate/privkey.pem"
|
||||
```
|
||||
|
||||
And update `api_listen_interface` to your static IP if you want to lock your wallet only to external interface
|
||||
|
||||
```toml
|
||||
api_listen_interface = "3.3.3.3"
|
||||
```
|
||||
|
||||
Or, in case you are using DynDNS or `localhost` in order to communicate with your wallet, just put `0.0.0.0` as mentioned in the inline instruction.
|
||||
|
||||
```toml
|
||||
api_listen_interface = "0.0.0.0"
|
||||
```
|
||||
|
||||
If you have Stratum server enabled (you run a miner) make sure that wallet listener URL starts with `https` in node config (by default `~/.grin/grin-server.toml`):
|
||||
|
||||
```toml
|
||||
wallet_listener_url = "https://grin1.example.com:13415"
|
||||
```
|
||||
|
||||
Make sure your user has read access to the files (see below for how to do it). Restart wallet. If you changed your node configuration restart `grin` too. When you (or someone else) send grins to this wallet the destination (`-d` option) must start with `https://`, not with `http://`.
|
||||
|
||||
## I don't have a TLS certificate
|
||||
You can get it for free from [Let's Encrypt](https://letsencrypt.org/). To simplify the process we need `certbot`.
|
||||
|
||||
### Install certbot
|
||||
Go to [Certbot home page](https://certbot.eff.org/), choose I'm using `None of the above` and your OS (eg `Ubuntu 18.04` which will be used as an example). You will be redirected to a page with instructions like [steps for Ubuntu](https://certbot.eff.org/lets-encrypt/ubuntubionic-other). Follow the instructions from the `Install` section. As a result you should have `certbot` installed.
|
||||
|
||||
### Obtain certificate
|
||||
If you have experience with `certbot` feel free to use any type of challenge. This guide covers the simplest case of HTTP challenge. For this you need to have a web server listening on port `80`, which requires running it as root in the simplest case. We will use the server provided by certbot. **Make sure you have port 80 open**
|
||||
|
||||
```sh
|
||||
sudo certbot certonly --standalone -d grin1.example.com
|
||||
```
|
||||
|
||||
It will ask you some questions; as a result you should see something like:
|
||||
|
||||
```
|
||||
Congratulations! Your certificate and chain have been saved at:
|
||||
/etc/letsencrypt/live/grin1.example.com/fullchain.pem
|
||||
Your key file has been saved at:
|
||||
/etc/letsencrypt/live/grin1.example.com/privkey.pem
|
||||
Your cert will expire on 2019-01-16. To obtain a new or tweaked
|
||||
version of this certificate in the future, simply run certbot
|
||||
again. To non-interactively renew *all* of your certificates, run
|
||||
"certbot renew"
|
||||
```
|
||||
|
||||
### Change permissions
|
||||
Now you have the certificate files, but only the root user can read them. We run grin as the `ubuntu` user. There are different ways to fix this; the simplest one is to create a group which will have access to the `/etc/letsencrypt` directory and add our user to this group.
|
||||
|
||||
```sh
|
||||
sudo groupadd tls-cert
|
||||
sudo usermod -a -G tls-cert ubuntu
|
||||
sudo chgrp -R tls-cert /etc/letsencrypt
|
||||
sudo chmod -R g=rX /etc/letsencrypt
|
||||
sudo chmod 2755 /etc/letsencrypt
|
||||
```
|
||||
|
||||
The last step is needed for renewal, it makes sure that all new files will have the same group ownership.
|
||||
|
||||
Now you need to logout so the user's group membership modification can take place.
|
||||
|
||||
### Update wallet config
|
||||
Refer to `I have a TLS certificate already` because you have it now. Use the following values:
|
||||
|
||||
```toml
|
||||
tls_certificate_file = "/etc/letsencrypt/live/grin1.example.com/fullchain.pem"
|
||||
tls_certificate_key = "/etc/letsencrypt/live/grin1.example.com/privkey.pem"
|
||||
```
|
||||
|
Binary file not shown.
Before ![]() (image error) Size: 154 KiB |
|
@ -1,97 +0,0 @@
|
|||
@startuml grin-transaction
|
||||
|
||||
title
|
||||
**Current Grin Transaction Workflow**
|
||||
Accurate as of Oct 10, 2018 - Master branch only
|
||||
end title
|
||||
|
||||
actor "Sender" as sender
|
||||
actor "Recipient" as recipient
|
||||
entity "Grin Node" as grin_node
|
||||
|
||||
== Round 1 ==
|
||||
|
||||
note left of sender
|
||||
1: Create Transaction **UUID** (for reference and maintaining correct state)
|
||||
2: Set **lock_height** for transaction kernel (current chain height)
|
||||
3: Select **inputs** using desired selection strategy
|
||||
4: Calculate sum **inputs** blinding factors **xI**
|
||||
5: Create **change_output**
|
||||
6: Select blinding factor **xC** for **change_output**
|
||||
7: Create lock function **sF** that locks **inputs** and stores **change_output** in wallet
|
||||
and identifying wallet transaction log entry **TS** linking **inputs + outputs**
|
||||
(Not executed at this point)
|
||||
end note
|
||||
note left of sender
|
||||
8: Calculate **tx_weight**: MAX(-1 * **num_inputs** + 4 * (**num_change_outputs** + 1), 1)
|
||||
(+1 covers a single output on the receiver's side)
|
||||
9: Calculate **fee**: **tx_weight** * 1_000_000 nG
|
||||
10: Calculate total blinding excess sum for all inputs and outputs **xS1** = **xC** - **xI** (private scalar)
|
||||
11: Select a random nonce **kS** (private scalar)
|
||||
12: Subtract random kernel offset **oS** from **xS1**. Calculate **xS** = **xS1** - **oS**
|
||||
13: Multiply **xS** and **kS** by generator G to create public curve points **xSG** and **kSG**
|
||||
14: Add values to **Slate** for passing to other participants: **UUID, inputs, change_outputs,**
|
||||
**fee, amount, lock_height, kSG, xSG, oS**
|
||||
end note
|
||||
sender -> recipient: **Slate**
|
||||
== Round 2 ==
|
||||
note right of recipient
|
||||
1: Check fee against number of **inputs**, **change_outputs** +1 * **receiver_output**)
|
||||
2: Create **receiver_output**
|
||||
3: Choose random blinding factor for **receiver_output** **xR** (private scalar)
|
||||
end note
|
||||
note right of recipient
|
||||
4: Calculate message **M** = **fee | lock_height **
|
||||
5: Choose random nonce **kR** (private scalar)
|
||||
6: Multiply **xR** and **kR** by generator G to create public curve points **xRG** and **kRG**
|
||||
7: Compute Schnorr challenge **e** = SHA256(**kRG** + **kSG** | **xRG** + **xSG** | **M**)
|
||||
8: Compute Recipient Schnorr signature **sR** = **kR** + **e** * **xR**
|
||||
9: Add **sR, xRG, kRG** to **Slate**
|
||||
10: Create wallet output function **rF** that stores **receiver_output** in wallet with status "Unconfirmed"
|
||||
and identifying transaction log entry **TR** linking **receiver_output** with transaction.
|
||||
end note
|
||||
alt All Okay
|
||||
recipient --> sender: Okay - **Slate**
|
||||
recipient -> recipient: execute wallet output function **rF**
|
||||
else Any Failure
|
||||
recipient ->x]: Abort
|
||||
recipient --> sender: Error
|
||||
[x<- sender: Abort
|
||||
end
|
||||
== Finalize Transaction ==
|
||||
note left of sender
|
||||
1: Calculate message **M** = **fee | lock_height **
|
||||
2: Compute Schnorr challenge **e** = SHA256(**kRG** + **kSG** | **xRG** + **xSG** | **M**)
|
||||
3: Verify **sR** by verifying **kRG** + **e** * **xRG** = **sRG**
|
||||
4: Compute Sender Schnorr signature **sS** = **kS** + **e** * **xS**
|
||||
5: Calculate final signature **s** = (**kSG**+**kRG**, **sS**+**sR**)
|
||||
6: Calculate public key for **s**: **xG** = **xRG** + **xSG**
|
||||
7: Verify **s** against excess values in final transaction using **xG**
|
||||
8: Create Transaction Kernel Containing:
|
||||
Excess signature **s**
|
||||
Public excess **xG**
|
||||
**fee**
|
||||
**lock_height**
|
||||
end note
|
||||
sender -> sender: Create final transaction **tx** from **Slate**
|
||||
sender -> grin_node: Post **tx** to mempool
|
||||
grin_node --> recipient: "Ok"
|
||||
alt All Okay
|
||||
recipient --> sender: "Ok" - **UUID**
|
||||
sender -> sender: Execute wallet lock function **sF**
|
||||
...Await confirmation...
|
||||
recipient -> grin_node: Confirm **receiver_output**
|
||||
recipient -> recipient: Change status of **receiver_output** to "Confirmed"
|
||||
sender -> grin_node: Confirm **change_output**
|
||||
sender -> sender: Change status of **inputs** to "Spent"
|
||||
sender -> sender: Change status of **change_output** to "Confirmed"
|
||||
else Any Error
|
||||
recipient -> recipient: Manually remove **receiver_output** from wallet using transaction log entry **TR**
|
||||
recipient ->x]: Abort
|
||||
recipient --> sender: Error
|
||||
sender -> sender: Unlock **inputs** and delete **change_output** identified in transaction log entry **TS**
|
||||
[x<- sender: Abort
|
||||
end
|
||||
|
||||
|
||||
@enduml
|
|
@ -1,5 +1,7 @@
|
|||
# Genesis Genesis
|
||||
|
||||
N.B: This crate's `Cargo.toml` file has been disabled by renaming it to `_Cargo.toml`. It no longer builds due to changes in the project structure.
|
||||
|
||||
This crate isn't strictly part of grin but allows the generation and release of a new Grin Genesis in an automated fashion. The process is the following:
|
||||
|
||||
* Prepare a multisig output and kernel to use as coinbase. In the case of Grin mainnet, this is done and owned by the council treasurers. This can be done a few days prior.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_keychain"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -26,5 +26,4 @@ ripemd160 = "0.7"
|
|||
sha2 = "0.7"
|
||||
pbkdf2 = "0.2"
|
||||
|
||||
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
|
|
@ -295,7 +295,7 @@ impl serde::Serialize for ChildNumber {
|
|||
}
|
||||
|
||||
/// A BIP32 error
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
/// A pk->pk derivation was attempted on a hardened key
|
||||
CannotDeriveFromHardenedKey,
|
||||
|
|
|
@ -29,7 +29,7 @@ lazy_static! {
|
|||
}
|
||||
|
||||
/// An error that might occur during mnemonic decoding
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
/// Invalid word encountered
|
||||
BadWord(String),
|
||||
|
|
|
@ -38,7 +38,7 @@ use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
|
|||
// Size of an identifier in bytes
|
||||
pub const IDENTIFIER_SIZE: usize = 17;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Debug)]
|
||||
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum Error {
|
||||
Secp(secp::Error),
|
||||
KeyDerivation(extkey_bip32::Error),
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_p2p"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -13,7 +13,6 @@ edition = "2018"
|
|||
bitflags = "1"
|
||||
bytes = "0.4"
|
||||
enum_primitive = "0.1"
|
||||
lmdb-zero = "0.4.4"
|
||||
net2 = "0.2"
|
||||
num = "0.1"
|
||||
rand = "0.5"
|
||||
|
@ -22,10 +21,11 @@ serde_derive = "1"
|
|||
log = "0.4"
|
||||
chrono = { version = "0.4.4", features = ["serde"] }
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_store = { path = "../store", version = "1.0.3" }
|
||||
grin_chain = { path = "../chain", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "../store", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
grin_chain = { path = "../chain", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
grin_pool = { path = "../pool", version = "1.0.3" }
|
||||
grin_pool = { path = "../pool", version = "1.1.0-beta.2" }
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ extern crate bitflags;
|
|||
|
||||
#[macro_use]
|
||||
extern crate enum_primitive;
|
||||
use lmdb_zero as lmdb;
|
||||
|
||||
#[macro_use]
|
||||
extern crate grin_core as core;
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
// limitations under the License.
|
||||
|
||||
use crate::util::{Mutex, RwLock};
|
||||
use std::fmt;
|
||||
use std::fs::File;
|
||||
use std::net::{Shutdown, TcpStream};
|
||||
use std::sync::Arc;
|
||||
|
@ -64,6 +65,12 @@ macro_rules! connection {
|
|||
};
|
||||
}
|
||||
|
||||
impl fmt::Debug for Peer {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Peer({:?})", &self.info)
|
||||
}
|
||||
}
|
||||
|
||||
impl Peer {
|
||||
// Only accept and connect can be externally used to build a peer
|
||||
fn new(info: PeerInfo, adapter: Arc<dyn NetAdapter>) -> Peer {
|
||||
|
|
|
@ -38,7 +38,6 @@ pub struct Peers {
|
|||
pub adapter: Arc<dyn ChainAdapter>,
|
||||
store: PeerStore,
|
||||
peers: RwLock<HashMap<PeerAddr, Arc<Peer>>>,
|
||||
dandelion_relay: RwLock<Option<(i64, Arc<Peer>)>>,
|
||||
config: P2PConfig,
|
||||
}
|
||||
|
||||
|
@ -49,7 +48,6 @@ impl Peers {
|
|||
store,
|
||||
config,
|
||||
peers: RwLock::new(HashMap::new()),
|
||||
dandelion_relay: RwLock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -88,39 +86,6 @@ impl Peers {
|
|||
self.save_peer(&peer_data)
|
||||
}
|
||||
|
||||
// Update the dandelion relay
|
||||
pub fn update_dandelion_relay(&self) {
|
||||
let peers = self.outgoing_connected_peers();
|
||||
|
||||
let peer = &self
|
||||
.config
|
||||
.dandelion_peer
|
||||
.and_then(|ip| peers.iter().find(|x| x.info.addr == ip))
|
||||
.or(thread_rng().choose(&peers));
|
||||
|
||||
match peer {
|
||||
Some(peer) => self.set_dandelion_relay(peer),
|
||||
None => debug!("Could not update dandelion relay"),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_dandelion_relay(&self, peer: &Arc<Peer>) {
|
||||
// Clear the map and add new relay
|
||||
let dandelion_relay = &self.dandelion_relay;
|
||||
dandelion_relay
|
||||
.write()
|
||||
.replace((Utc::now().timestamp(), peer.clone()));
|
||||
debug!(
|
||||
"Successfully updated Dandelion relay to: {}",
|
||||
peer.info.addr
|
||||
);
|
||||
}
|
||||
|
||||
// Get the dandelion relay
|
||||
pub fn get_dandelion_relay(&self) -> Option<(i64, Arc<Peer>)> {
|
||||
self.dandelion_relay.read().clone()
|
||||
}
|
||||
|
||||
pub fn is_known(&self, addr: PeerAddr) -> bool {
|
||||
self.peers.read().contains_key(&addr)
|
||||
}
|
||||
|
@ -344,26 +309,6 @@ impl Peers {
|
|||
);
|
||||
}
|
||||
|
||||
/// Relays the provided stem transaction to our single stem peer.
|
||||
pub fn relay_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
|
||||
self.get_dandelion_relay()
|
||||
.or_else(|| {
|
||||
debug!("No dandelion relay, updating.");
|
||||
self.update_dandelion_relay();
|
||||
self.get_dandelion_relay()
|
||||
})
|
||||
// If still return an error, let the caller handle this as they see fit.
|
||||
// The caller will "fluff" at this point as the stem phase is finished.
|
||||
.ok_or(Error::NoDandelionRelay)
|
||||
.map(|(_, relay)| {
|
||||
if relay.is_connected() {
|
||||
if let Err(e) = relay.send_stem_transaction(tx) {
|
||||
debug!("Error sending stem transaction to peer relay: {:?}", e);
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Broadcasts the provided transaction to PEER_PREFERRED_COUNT of our
|
||||
/// peers. We may be connected to PEER_MAX_COUNT peers so we only
|
||||
/// want to broadcast to a random subset of peers.
|
||||
|
|
|
@ -12,13 +12,18 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::fs::File;
|
||||
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::{io, thread};
|
||||
|
||||
use crate::chain;
|
||||
use crate::core::core;
|
||||
use crate::core::core::hash::Hash;
|
||||
use crate::core::global;
|
||||
use crate::core::pow::Difficulty;
|
||||
use crate::handshake::Handshake;
|
||||
use crate::lmdb;
|
||||
use crate::peer::Peer;
|
||||
use crate::peers::Peers;
|
||||
use crate::store::PeerStore;
|
||||
|
@ -27,11 +32,6 @@ use crate::types::{
|
|||
};
|
||||
use crate::util::{Mutex, StopState};
|
||||
use chrono::prelude::{DateTime, Utc};
|
||||
use std::fs::File;
|
||||
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use std::{io, thread};
|
||||
|
||||
/// P2P server implementation, handling bootstrapping to find and connect to
|
||||
/// peers, receiving connections from other peers and keep track of all of them.
|
||||
|
@ -47,7 +47,7 @@ pub struct Server {
|
|||
impl Server {
|
||||
/// Creates a new idle p2p server with no peers
|
||||
pub fn new(
|
||||
db_env: Arc<lmdb::Environment>,
|
||||
db_root: &str,
|
||||
capab: Capabilities,
|
||||
config: P2PConfig,
|
||||
adapter: Arc<dyn ChainAdapter>,
|
||||
|
@ -58,7 +58,7 @@ impl Server {
|
|||
config: config.clone(),
|
||||
capabilities: capab,
|
||||
handshake: Arc::new(Handshake::new(genesis, config.clone())),
|
||||
peers: Arc::new(Peers::new(PeerStore::new(db_env)?, adapter, config)),
|
||||
peers: Arc::new(Peers::new(PeerStore::new(db_root)?, adapter, config)),
|
||||
stop_state,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -17,14 +17,12 @@
|
|||
use chrono::Utc;
|
||||
use num::FromPrimitive;
|
||||
use rand::{thread_rng, Rng};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::lmdb;
|
||||
|
||||
use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
|
||||
use crate::types::{Capabilities, PeerAddr, ReasonForBan};
|
||||
use grin_store::{self, option_to_not_found, to_key, Error};
|
||||
|
||||
const DB_NAME: &'static str = "peer";
|
||||
const STORE_SUBPATH: &'static str = "peers";
|
||||
|
||||
const PEER_PREFIX: u8 = 'P' as u8;
|
||||
|
@ -116,8 +114,8 @@ pub struct PeerStore {
|
|||
|
||||
impl PeerStore {
|
||||
/// Instantiates a new peer store under the provided root path.
|
||||
pub fn new(db_env: Arc<lmdb::Environment>) -> Result<PeerStore, Error> {
|
||||
let db = grin_store::Store::open(db_env, STORE_SUBPATH);
|
||||
pub fn new(db_root: &str) -> Result<PeerStore, Error> {
|
||||
let db = grin_store::Store::new(db_root, Some(DB_NAME), Some(STORE_SUBPATH), None)?;
|
||||
Ok(PeerStore { db: db })
|
||||
}
|
||||
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
use grin_core as core;
|
||||
use grin_p2p as p2p;
|
||||
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use grin_util::{Mutex, StopState};
|
||||
|
||||
|
@ -50,10 +49,9 @@ fn peer_handshake() {
|
|||
..p2p::P2PConfig::default()
|
||||
};
|
||||
let net_adapter = Arc::new(p2p::DummyAdapter {});
|
||||
let db_env = Arc::new(store::new_env(".grin".to_string()));
|
||||
let server = Arc::new(
|
||||
p2p::Server::new(
|
||||
db_env,
|
||||
".grin",
|
||||
p2p::Capabilities::UNKNOWN,
|
||||
p2p_config.clone(),
|
||||
net_adapter.clone(),
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_pool"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Chain implementation for grin, a simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -19,10 +19,10 @@ chrono = "0.4.4"
|
|||
failure = "0.1"
|
||||
failure_derive = "0.1"
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_keychain = { path = "../keychain", version = "1.0.3" }
|
||||
grin_store = { path = "../store", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_keychain = { path = "../keychain", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "../store", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
grin_chain = { path = "../chain", version = "1.0.3" }
|
||||
grin_chain = { path = "../chain", version = "1.1.0-beta.2" }
|
||||
|
|
|
@ -34,7 +34,8 @@ mod pool;
|
|||
pub mod transaction_pool;
|
||||
pub mod types;
|
||||
|
||||
pub use crate::pool::Pool;
|
||||
pub use crate::transaction_pool::TransactionPool;
|
||||
pub use crate::types::{
|
||||
BlockChain, DandelionConfig, PoolAdapter, PoolConfig, PoolEntryState, PoolError, TxSource,
|
||||
BlockChain, DandelionConfig, PoolAdapter, PoolConfig, PoolEntry, PoolError, TxSource,
|
||||
};
|
||||
|
|
|
@ -23,7 +23,7 @@ use self::core::core::{
|
|||
Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel, Weighting,
|
||||
};
|
||||
use self::util::RwLock;
|
||||
use crate::types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
|
||||
use crate::types::{BlockChain, PoolEntry, PoolError};
|
||||
use grin_core as core;
|
||||
use grin_util as util;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
@ -139,7 +139,7 @@ impl Pool {
|
|||
// Verify these txs produce an aggregated tx below max tx weight.
|
||||
// Return a vec of all the valid txs.
|
||||
let txs = self.validate_raw_txs(
|
||||
tx_buckets,
|
||||
&tx_buckets,
|
||||
None,
|
||||
&header,
|
||||
Weighting::AsLimitedTransaction { max_weight },
|
||||
|
@ -167,33 +167,6 @@ impl Pool {
|
|||
Ok(Some(tx))
|
||||
}
|
||||
|
||||
pub fn select_valid_transactions(
|
||||
&self,
|
||||
txs: Vec<Transaction>,
|
||||
extra_tx: Option<Transaction>,
|
||||
header: &BlockHeader,
|
||||
) -> Result<Vec<Transaction>, PoolError> {
|
||||
let valid_txs = self.validate_raw_txs(txs, extra_tx, header, Weighting::NoLimit)?;
|
||||
Ok(valid_txs)
|
||||
}
|
||||
|
||||
pub fn get_transactions_in_state(&self, state: PoolEntryState) -> Vec<Transaction> {
|
||||
self.entries
|
||||
.iter()
|
||||
.filter(|x| x.state == state)
|
||||
.map(|x| x.tx.clone())
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
// Transition the specified pool entries to the new state.
|
||||
pub fn transition_to_state(&mut self, txs: &[Transaction], state: PoolEntryState) {
|
||||
for x in &mut self.entries {
|
||||
if txs.contains(&x.tx) {
|
||||
x.state = state;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate this new tx with all existing txs in the pool.
|
||||
// If we can validate the aggregated tx against the current chain state
|
||||
// then we can safely add the tx to the pool.
|
||||
|
@ -267,9 +240,9 @@ impl Pool {
|
|||
Ok(new_sums)
|
||||
}
|
||||
|
||||
fn validate_raw_txs(
|
||||
pub fn validate_raw_txs(
|
||||
&self,
|
||||
txs: Vec<Transaction>,
|
||||
txs: &[Transaction],
|
||||
extra_tx: Option<Transaction>,
|
||||
header: &BlockHeader,
|
||||
weighting: Weighting,
|
||||
|
@ -289,7 +262,7 @@ impl Pool {
|
|||
|
||||
// We know the tx is valid if the entire aggregate tx is valid.
|
||||
if self.validate_raw_tx(&agg_tx, header, weighting).is_ok() {
|
||||
valid_txs.push(tx);
|
||||
valid_txs.push(tx.clone());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -23,9 +23,7 @@ use self::core::core::verifier_cache::VerifierCache;
|
|||
use self::core::core::{transaction, Block, BlockHeader, Transaction, Weighting};
|
||||
use self::util::RwLock;
|
||||
use crate::pool::Pool;
|
||||
use crate::types::{
|
||||
BlockChain, PoolAdapter, PoolConfig, PoolEntry, PoolEntryState, PoolError, TxSource,
|
||||
};
|
||||
use crate::types::{BlockChain, PoolAdapter, PoolConfig, PoolEntry, PoolError, TxSource};
|
||||
use chrono::prelude::*;
|
||||
use grin_core as core;
|
||||
use grin_util as util;
|
||||
|
@ -76,13 +74,10 @@ impl TransactionPool {
|
|||
self.blockchain.chain_head()
|
||||
}
|
||||
|
||||
// Add tx to stempool (passing in all txs from txpool to validate against).
|
||||
fn add_to_stempool(&mut self, entry: PoolEntry, header: &BlockHeader) -> Result<(), PoolError> {
|
||||
// Add tx to stempool (passing in all txs from txpool to validate against).
|
||||
self.stempool
|
||||
.add_to_pool(entry, self.txpool.all_transactions(), header)?;
|
||||
|
||||
// Note: we do not notify the adapter here,
|
||||
// we let the dandelion monitor handle this.
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -124,8 +119,6 @@ impl TransactionPool {
|
|||
let txpool_tx = self.txpool.all_transactions_aggregate()?;
|
||||
self.stempool.reconcile(txpool_tx, header)?;
|
||||
}
|
||||
|
||||
self.adapter.tx_accepted(&entry.tx);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -159,28 +152,25 @@ impl TransactionPool {
|
|||
self.blockchain.verify_coinbase_maturity(&tx)?;
|
||||
|
||||
let entry = PoolEntry {
|
||||
state: PoolEntryState::Fresh,
|
||||
src,
|
||||
tx_at: Utc::now(),
|
||||
tx,
|
||||
};
|
||||
|
||||
// If we are in "stem" mode then check if this is a new tx or if we have seen it before.
|
||||
// If new tx - add it to our stempool.
|
||||
// If we have seen any of the kernels before then fallback to fluff,
|
||||
// adding directly to txpool.
|
||||
if stem
|
||||
&& self
|
||||
.stempool
|
||||
.find_matching_transactions(entry.tx.kernels())
|
||||
.is_empty()
|
||||
// If not stem then we are fluff.
|
||||
// If this is a stem tx then attempt to stem.
|
||||
// Any problems during stem, fallback to fluff.
|
||||
if !stem
|
||||
|| self
|
||||
.add_to_stempool(entry.clone(), header)
|
||||
.and_then(|_| self.adapter.stem_tx_accepted(&entry.tx))
|
||||
.is_err()
|
||||
{
|
||||
self.add_to_stempool(entry, header)?;
|
||||
return Ok(());
|
||||
self.add_to_txpool(entry.clone(), header)?;
|
||||
self.add_to_reorg_cache(entry.clone());
|
||||
self.adapter.tx_accepted(&entry.tx);
|
||||
}
|
||||
|
||||
self.add_to_txpool(entry.clone(), header)?;
|
||||
self.add_to_reorg_cache(entry);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
@ -27,80 +27,61 @@ use failure::Fail;
|
|||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
|
||||
/// Dandelion relay timer
|
||||
const DANDELION_RELAY_SECS: u64 = 600;
|
||||
/// Dandelion "epoch" length.
|
||||
const DANDELION_EPOCH_SECS: u16 = 600;
|
||||
|
||||
/// Dandelion embargo timer
|
||||
const DANDELION_EMBARGO_SECS: u64 = 180;
|
||||
/// Dandelion embargo timer.
|
||||
const DANDELION_EMBARGO_SECS: u16 = 180;
|
||||
|
||||
/// Dandelion patience timer
|
||||
const DANDELION_PATIENCE_SECS: u64 = 10;
|
||||
/// Dandelion aggregation timer.
|
||||
const DANDELION_AGGREGATION_SECS: u16 = 30;
|
||||
|
||||
/// Dandelion stem probability (stem 90% of the time, fluff 10%).
|
||||
const DANDELION_STEM_PROBABILITY: usize = 90;
|
||||
const DANDELION_STEM_PROBABILITY: u8 = 90;
|
||||
|
||||
/// Configuration for "Dandelion".
|
||||
/// Note: shared between p2p and pool.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct DandelionConfig {
|
||||
/// Choose new Dandelion relay peer every n secs.
|
||||
#[serde = "default_dandelion_relay_secs"]
|
||||
pub relay_secs: Option<u64>,
|
||||
/// Dandelion embargo, fluff and broadcast tx if not seen on network before
|
||||
/// embargo expires.
|
||||
#[serde = "default_dandelion_embargo_secs"]
|
||||
pub embargo_secs: Option<u64>,
|
||||
/// Dandelion patience timer, fluff/stem processing runs every n secs.
|
||||
/// Tx aggregation happens on stem txs received within this window.
|
||||
#[serde = "default_dandelion_patience_secs"]
|
||||
pub patience_secs: Option<u64>,
|
||||
/// Length of each "epoch".
|
||||
#[serde(default = "default_dandelion_epoch_secs")]
|
||||
pub epoch_secs: Option<u16>,
|
||||
/// Dandelion embargo timer. Fluff and broadcast individual txs if not seen
|
||||
/// on network before embargo expires.
|
||||
#[serde(default = "default_dandelion_embargo_secs")]
|
||||
pub embargo_secs: Option<u16>,
|
||||
/// Dandelion aggregation timer.
|
||||
#[serde(default = "default_dandelion_aggregation_secs")]
|
||||
pub aggregation_secs: Option<u16>,
|
||||
/// Dandelion stem probability (stem 90% of the time, fluff 10% etc.)
|
||||
#[serde = "default_dandelion_stem_probability"]
|
||||
pub stem_probability: Option<usize>,
|
||||
}
|
||||
|
||||
impl DandelionConfig {
|
||||
pub fn relay_secs(&self) -> u64 {
|
||||
self.relay_secs.unwrap_or(DANDELION_RELAY_SECS)
|
||||
}
|
||||
|
||||
pub fn embargo_secs(&self) -> u64 {
|
||||
self.embargo_secs.unwrap_or(DANDELION_EMBARGO_SECS)
|
||||
}
|
||||
|
||||
pub fn patience_secs(&self) -> u64 {
|
||||
self.patience_secs.unwrap_or(DANDELION_PATIENCE_SECS)
|
||||
}
|
||||
|
||||
pub fn stem_probability(&self) -> usize {
|
||||
self.stem_probability.unwrap_or(DANDELION_STEM_PROBABILITY)
|
||||
}
|
||||
#[serde(default = "default_dandelion_stem_probability")]
|
||||
pub stem_probability: Option<u8>,
|
||||
}
|
||||
|
||||
impl Default for DandelionConfig {
|
||||
fn default() -> DandelionConfig {
|
||||
DandelionConfig {
|
||||
relay_secs: default_dandelion_relay_secs(),
|
||||
epoch_secs: default_dandelion_epoch_secs(),
|
||||
embargo_secs: default_dandelion_embargo_secs(),
|
||||
patience_secs: default_dandelion_patience_secs(),
|
||||
aggregation_secs: default_dandelion_aggregation_secs(),
|
||||
stem_probability: default_dandelion_stem_probability(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_dandelion_relay_secs() -> Option<u64> {
|
||||
Some(DANDELION_RELAY_SECS)
|
||||
fn default_dandelion_epoch_secs() -> Option<u16> {
|
||||
Some(DANDELION_EPOCH_SECS)
|
||||
}
|
||||
|
||||
fn default_dandelion_embargo_secs() -> Option<u64> {
|
||||
fn default_dandelion_embargo_secs() -> Option<u16> {
|
||||
Some(DANDELION_EMBARGO_SECS)
|
||||
}
|
||||
|
||||
fn default_dandelion_patience_secs() -> Option<u64> {
|
||||
Some(DANDELION_PATIENCE_SECS)
|
||||
fn default_dandelion_aggregation_secs() -> Option<u16> {
|
||||
Some(DANDELION_AGGREGATION_SECS)
|
||||
}
|
||||
|
||||
fn default_dandelion_stem_probability() -> Option<usize> {
|
||||
fn default_dandelion_stem_probability() -> Option<u8> {
|
||||
Some(DANDELION_STEM_PROBABILITY)
|
||||
}
|
||||
|
||||
|
@ -156,8 +137,6 @@ fn default_mineable_max_weight() -> usize {
|
|||
/// A single (possibly aggregated) transaction.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct PoolEntry {
|
||||
/// The state of the pool entry.
|
||||
pub state: PoolEntryState,
|
||||
/// Info on where this tx originated from.
|
||||
pub src: TxSource,
|
||||
/// Timestamp of when this tx was originally added to the pool.
|
||||
|
@ -166,21 +145,6 @@ pub struct PoolEntry {
|
|||
pub tx: Transaction,
|
||||
}
|
||||
|
||||
/// The possible states a pool entry can be in.
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub enum PoolEntryState {
|
||||
/// A new entry, not yet processed.
|
||||
Fresh,
|
||||
/// Tx to be included in the next "stem" run.
|
||||
ToStem,
|
||||
/// Tx previously "stemmed" and propagated.
|
||||
Stemmed,
|
||||
/// Tx to be included in the next "fluff" run.
|
||||
ToFluff,
|
||||
/// Tx previously "fluffed" and broadcast.
|
||||
Fluffed,
|
||||
}
|
||||
|
||||
/// Placeholder: the data representing where we heard about a tx from.
|
||||
///
|
||||
/// Used to make decisions based on transaction acceptance priority from
|
||||
|
@ -285,12 +249,10 @@ pub trait BlockChain: Sync + Send {
|
|||
/// downstream processing of valid transactions by the rest of the system, most
|
||||
/// importantly the broadcasting of transactions to our peers.
|
||||
pub trait PoolAdapter: Send + Sync {
|
||||
/// The transaction pool has accepted this transactions as valid and added
|
||||
/// it to its internal cache.
|
||||
/// The transaction pool has accepted this transaction as valid.
|
||||
fn tx_accepted(&self, tx: &transaction::Transaction);
|
||||
/// The stem transaction pool has accepted this transactions as valid and
|
||||
/// added it to its internal cache, we have waited for the "patience" timer
|
||||
/// to fire and we now want to propagate the tx to the next Dandelion relay.
|
||||
|
||||
/// The stem transaction pool has accepted this transactions as valid.
|
||||
fn stem_tx_accepted(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;
|
||||
}
|
||||
|
||||
|
@ -299,9 +261,8 @@ pub trait PoolAdapter: Send + Sync {
|
|||
pub struct NoopAdapter {}
|
||||
|
||||
impl PoolAdapter for NoopAdapter {
|
||||
fn tx_accepted(&self, _: &transaction::Transaction) {}
|
||||
|
||||
fn stem_tx_accepted(&self, _: &transaction::Transaction) -> Result<(), PoolError> {
|
||||
fn tx_accepted(&self, _tx: &transaction::Transaction) {}
|
||||
fn stem_tx_accepted(&self, _tx: &transaction::Transaction) -> Result<(), PoolError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,6 +34,7 @@ fn test_transaction_pool_block_building() {
|
|||
|
||||
let db_root = ".grin_block_building".to_string();
|
||||
clean_output_dir(db_root.clone());
|
||||
|
||||
{
|
||||
let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
|
||||
|
||||
|
@ -46,7 +47,7 @@ fn test_transaction_pool_block_building() {
|
|||
let height = prev_header.height + 1;
|
||||
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
|
||||
let fee = txs.iter().map(|x| x.fee()).sum();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fee, false).unwrap();
|
||||
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
|
||||
|
||||
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
|
||||
|
|
|
@ -51,7 +51,7 @@ fn test_block_building_max_weight() {
|
|||
let height = prev_header.height + 1;
|
||||
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
|
||||
let fee = txs.iter().map(|x| x.fee()).sum();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fee, false).unwrap();
|
||||
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
|
||||
|
||||
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
|
||||
|
|
|
@ -45,7 +45,7 @@ fn test_transaction_pool_block_reconciliation() {
|
|||
let header = {
|
||||
let height = 1;
|
||||
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0, false).unwrap();
|
||||
let genesis = BlockHeader::default();
|
||||
let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
|
||||
|
||||
|
@ -65,7 +65,7 @@ fn test_transaction_pool_block_reconciliation() {
|
|||
let block = {
|
||||
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
|
||||
let fees = initial_tx.fee();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fees, false).unwrap();
|
||||
let mut block =
|
||||
Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
|
||||
|
||||
|
@ -159,7 +159,7 @@ fn test_transaction_pool_block_reconciliation() {
|
|||
let block = {
|
||||
let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
|
||||
let fees = block_txs.iter().map(|tx| tx.fee()).sum();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, fees, false).unwrap();
|
||||
let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
|
||||
|
||||
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
|
||||
|
|
|
@ -29,7 +29,6 @@ use grin_chain as chain;
|
|||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_pool as pool;
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use std::collections::HashSet;
|
||||
use std::fs;
|
||||
|
@ -37,17 +36,16 @@ use std::sync::Arc;
|
|||
|
||||
#[derive(Clone)]
|
||||
pub struct ChainAdapter {
|
||||
pub store: Arc<ChainStore>,
|
||||
pub store: Arc<RwLock<ChainStore>>,
|
||||
pub utxo: Arc<RwLock<HashSet<Commitment>>>,
|
||||
}
|
||||
|
||||
impl ChainAdapter {
|
||||
pub fn init(db_root: String) -> Result<ChainAdapter, String> {
|
||||
let target_dir = format!("target/{}", db_root);
|
||||
let db_env = Arc::new(store::new_env(target_dir.clone()));
|
||||
let chain_store =
|
||||
ChainStore::new(db_env).map_err(|e| format!("failed to init chain_store, {:?}", e))?;
|
||||
let store = Arc::new(chain_store);
|
||||
let chain_store = ChainStore::new(&target_dir)
|
||||
.map_err(|e| format!("failed to init chain_store, {:?}", e))?;
|
||||
let store = Arc::new(RwLock::new(chain_store));
|
||||
let utxo = Arc::new(RwLock::new(HashSet::new()));
|
||||
|
||||
Ok(ChainAdapter { store, utxo })
|
||||
|
@ -56,7 +54,8 @@ impl ChainAdapter {
|
|||
pub fn update_db_for_block(&self, block: &Block) {
|
||||
let header = &block.header;
|
||||
let tip = Tip::from_header(header);
|
||||
let batch = self.store.batch().unwrap();
|
||||
let s = self.store.write();
|
||||
let batch = s.batch().unwrap();
|
||||
|
||||
batch.save_block_header(header).unwrap();
|
||||
batch.save_head(&tip).unwrap();
|
||||
|
@ -102,20 +101,20 @@ impl ChainAdapter {
|
|||
|
||||
impl BlockChain for ChainAdapter {
|
||||
fn chain_head(&self) -> Result<BlockHeader, PoolError> {
|
||||
self.store
|
||||
.head_header()
|
||||
let s = self.store.read();
|
||||
s.head_header()
|
||||
.map_err(|_| PoolError::Other(format!("failed to get chain head")))
|
||||
}
|
||||
|
||||
fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, PoolError> {
|
||||
self.store
|
||||
.get_block_header(hash)
|
||||
let s = self.store.read();
|
||||
s.get_block_header(hash)
|
||||
.map_err(|_| PoolError::Other(format!("failed to get block header")))
|
||||
}
|
||||
|
||||
fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, PoolError> {
|
||||
self.store
|
||||
.get_block_sums(hash)
|
||||
let s = self.store.read();
|
||||
s.get_block_sums(hash)
|
||||
.map_err(|_| PoolError::Other(format!("failed to get block sums")))
|
||||
}
|
||||
|
||||
|
|
|
@ -33,6 +33,208 @@ fn test_the_transaction_pool() {
|
|||
|
||||
let db_root = ".grin_transaction_pool".to_string();
|
||||
clean_output_dir(db_root.clone());
|
||||
|
||||
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
|
||||
|
||||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
|
||||
// Initialize a new pool with our chain adapter.
|
||||
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
|
||||
|
||||
let header = {
|
||||
let height = 1;
|
||||
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0, false).unwrap();
|
||||
let block = Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
|
||||
|
||||
chain.update_db_for_block(&block);
|
||||
|
||||
block.header
|
||||
};
|
||||
|
||||
// Now create tx to spend a coinbase, giving us some useful outputs for testing
|
||||
// with.
|
||||
let initial_tx = {
|
||||
test_transaction_spending_coinbase(
|
||||
&keychain,
|
||||
&header,
|
||||
vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
|
||||
)
|
||||
};
|
||||
|
||||
// Add this tx to the pool (stem=false, direct to txpool).
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
write_pool
|
||||
.add_to_pool(test_source(), initial_tx, false, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 1);
|
||||
}
|
||||
|
||||
// Test adding a tx that "double spends" an output currently spent by a tx
|
||||
// already in the txpool. In this case we attempt to spend the original coinbase twice.
|
||||
{
|
||||
let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
|
||||
let mut write_pool = pool.write();
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), tx, false, &header)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// tx1 spends some outputs from the initial test tx.
|
||||
let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
|
||||
// tx2 spends some outputs from both tx1 and the initial test tx.
|
||||
let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
|
||||
|
||||
// Take a write lock and add a couple of tx entries to the pool.
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
|
||||
// Check we have a single initial tx in the pool.
|
||||
assert_eq!(write_pool.total_size(), 1);
|
||||
|
||||
// First, add a simple tx directly to the txpool (stem = false).
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx1.clone(), false, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 2);
|
||||
|
||||
// Add another tx spending outputs from the previous tx.
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx2.clone(), false, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 3);
|
||||
}
|
||||
|
||||
// Test adding the exact same tx multiple times (same kernel signature).
|
||||
// This will fail for stem=false during tx aggregation due to duplicate
|
||||
// outputs and duplicate kernels.
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), tx1.clone(), false, &header)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// Test adding a duplicate tx with the same input and outputs.
|
||||
// Note: not the *same* tx, just same underlying inputs/outputs.
|
||||
{
|
||||
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
|
||||
let mut write_pool = pool.write();
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), tx1a, false, &header)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// Test adding a tx attempting to spend a non-existent output.
|
||||
{
|
||||
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
|
||||
let mut write_pool = pool.write();
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), bad_tx, false, &header)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// Test adding a tx that would result in a duplicate output (conflicts with
|
||||
// output from tx2). For reasons of security all outputs in the UTXO set must
|
||||
// be unique. Otherwise spending one will almost certainly cause the other
|
||||
// to be immediately stolen via a "replay" tx.
|
||||
{
|
||||
let tx = test_transaction(&keychain, vec![900], vec![498]);
|
||||
let mut write_pool = pool.write();
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), tx, false, &header)
|
||||
.is_err());
|
||||
}
|
||||
|
||||
// Confirm the tx pool correctly identifies an invalid tx (already spent).
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
|
||||
assert!(write_pool
|
||||
.add_to_pool(test_source(), tx3, false, &header)
|
||||
.is_err());
|
||||
assert_eq!(write_pool.total_size(), 3);
|
||||
}
|
||||
|
||||
// Now add a couple of txs to the stempool (stem = true).
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
let tx = test_transaction(&keychain, vec![599], vec![598]);
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx, true, &header)
|
||||
.unwrap();
|
||||
let tx2 = test_transaction(&keychain, vec![598], vec![597]);
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx2, true, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 3);
|
||||
assert_eq!(write_pool.stempool.size(), 2);
|
||||
}
|
||||
|
||||
// Check we can take some entries from the stempool and "fluff" them into the
|
||||
// txpool. This also exercises multi-kernel txs.
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
let agg_tx = write_pool
|
||||
.stempool
|
||||
.all_transactions_aggregate()
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert_eq!(agg_tx.kernels().len(), 2);
|
||||
write_pool
|
||||
.add_to_pool(test_source(), agg_tx, false, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 4);
|
||||
assert!(write_pool.stempool.is_empty());
|
||||
}
|
||||
|
||||
// Adding a duplicate tx to the stempool will result in it being fluffed.
|
||||
// This handles the case of the stem path having a cycle in it.
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
let tx = test_transaction(&keychain, vec![597], vec![596]);
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx.clone(), true, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 4);
|
||||
assert_eq!(write_pool.stempool.size(), 1);
|
||||
|
||||
// Duplicate stem tx so fluff, adding it to txpool and removing it from stempool.
|
||||
write_pool
|
||||
.add_to_pool(test_source(), tx.clone(), true, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 5);
|
||||
assert!(write_pool.stempool.is_empty());
|
||||
}
|
||||
|
||||
// Now check we can correctly deaggregate a multi-kernel tx based on current
|
||||
// contents of the txpool.
|
||||
// We will do this be adding a new tx to the pool
|
||||
// that is a superset of a tx already in the pool.
|
||||
{
|
||||
let mut write_pool = pool.write();
|
||||
|
||||
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
|
||||
// tx1 and tx2 are already in the txpool (in aggregated form)
|
||||
// tx4 is the "new" part of this aggregated tx that we care about
|
||||
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
|
||||
|
||||
agg_tx
|
||||
.validate(Weighting::AsTransaction, verifier_cache.clone())
|
||||
.unwrap();
|
||||
|
||||
write_pool
|
||||
.add_to_pool(test_source(), agg_tx, false, &header)
|
||||
.unwrap();
|
||||
assert_eq!(write_pool.total_size(), 6);
|
||||
let entry = write_pool.txpool.entries.last().unwrap();
|
||||
assert_eq!(entry.tx.kernels().len(), 1);
|
||||
assert_eq!(entry.src.debug_name, "deagg");
|
||||
}
|
||||
|
||||
// Check we cannot "double spend" an output spent in a previous block.
|
||||
// We use the initial coinbase output here for convenience.
|
||||
{
|
||||
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
|
||||
|
||||
|
@ -44,7 +246,7 @@ fn test_the_transaction_pool() {
|
|||
let header = {
|
||||
let height = 1;
|
||||
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
|
||||
let reward = libtx::reward::output(&keychain, &key_id, 0, false).unwrap();
|
||||
let block =
|
||||
Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_servers"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -11,10 +11,10 @@ edition = "2018"
|
|||
|
||||
[dependencies]
|
||||
hyper = "0.12"
|
||||
hyper-rustls = "0.14"
|
||||
fs2 = "0.4"
|
||||
futures = "0.1"
|
||||
http = "0.1"
|
||||
hyper-staticfile = "0.3"
|
||||
itertools = "0.7"
|
||||
lmdb-zero = "0.4.4"
|
||||
rand = "0.5"
|
||||
|
@ -23,18 +23,13 @@ log = "0.4"
|
|||
serde_derive = "1"
|
||||
serde_json = "1"
|
||||
chrono = "0.4.4"
|
||||
bufstream = "~0.1"
|
||||
jsonrpc-core = "~8.0"
|
||||
tokio = "0.1.11"
|
||||
|
||||
grin_api = { path = "../api", version = "1.0.3" }
|
||||
grin_chain = { path = "../chain", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_keychain = { path = "../keychain", version = "1.0.3" }
|
||||
grin_p2p = { path = "../p2p", version = "1.0.3" }
|
||||
grin_pool = { path = "../pool", version = "1.0.3" }
|
||||
grin_store = { path = "../store", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_wallet = { path = "../wallet", version = "1.0.3" }
|
||||
|
||||
[dev-dependencies]
|
||||
blake2-rfc = "0.2"
|
||||
grin_api = { path = "../api", version = "1.1.0-beta.2" }
|
||||
grin_chain = { path = "../chain", version = "1.1.0-beta.2" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_keychain = { path = "../keychain", version = "1.1.0-beta.2" }
|
||||
grin_p2p = { path = "../p2p", version = "1.1.0-beta.2" }
|
||||
grin_pool = { path = "../pool", version = "1.1.0-beta.2" }
|
||||
grin_store = { path = "../store", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
|
|
@ -17,3 +17,4 @@
|
|||
pub mod adapters;
|
||||
pub mod stats;
|
||||
pub mod types;
|
||||
pub mod hooks;
|
||||
|
|
|
@ -22,7 +22,10 @@ use std::thread;
|
|||
use std::time::Instant;
|
||||
|
||||
use crate::chain::{self, BlockStatus, ChainAdapter, Options};
|
||||
use crate::common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStatus};
|
||||
use crate::common::hooks::{ChainEvents, NetEvents};
|
||||
use crate::common::types::{
|
||||
self, ChainValidationMode, DandelionEpoch, ServerConfig, SyncState, SyncStatus,
|
||||
};
|
||||
use crate::core::core::hash::{Hash, Hashed};
|
||||
use crate::core::core::transaction::Transaction;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
|
@ -32,6 +35,7 @@ use crate::core::{core, global};
|
|||
use crate::p2p;
|
||||
use crate::p2p::types::PeerAddr;
|
||||
use crate::pool;
|
||||
use crate::pool::types::DandelionConfig;
|
||||
use crate::util::OneTime;
|
||||
use chrono::prelude::*;
|
||||
use chrono::Duration;
|
||||
|
@ -47,6 +51,7 @@ pub struct NetToChainAdapter {
|
|||
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
peers: OneTime<Weak<p2p::Peers>>,
|
||||
config: ServerConfig,
|
||||
hooks: Vec<Box<dyn NetEvents + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl p2p::ChainAdapter for NetToChainAdapter {
|
||||
|
@ -91,16 +96,13 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
let tx_hash = tx.hash();
|
||||
let header = self.chain().head_header()?;
|
||||
|
||||
debug!(
|
||||
"Received tx {}, [in/out/kern: {}/{}/{}] going to process.",
|
||||
tx_hash,
|
||||
tx.inputs().len(),
|
||||
tx.outputs().len(),
|
||||
tx.kernels().len(),
|
||||
);
|
||||
for hook in &self.hooks {
|
||||
hook.on_transaction_received(&tx);
|
||||
}
|
||||
|
||||
let tx_hash = tx.hash();
|
||||
|
||||
let mut tx_pool = self.tx_pool.write();
|
||||
match tx_pool.add_to_pool(source, tx, stem, &header) {
|
||||
|
@ -150,7 +152,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
if cb.kern_ids().is_empty() {
|
||||
// push the freshly hydrated block through the chain pipeline
|
||||
match core::Block::hydrate_from(cb, vec![]) {
|
||||
Ok(block) => self.process_block(block, addr, false),
|
||||
Ok(block) => {
|
||||
if !self.sync_state.is_syncing() {
|
||||
for hook in &self.hooks {
|
||||
hook.on_block_received(&block, &addr);
|
||||
}
|
||||
}
|
||||
self.process_block(block, addr, false)
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Invalid hydrated block {}: {:?}", cb_hash, e);
|
||||
return Ok(false);
|
||||
|
@ -184,7 +193,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
|
|||
// 3) we hydrate an invalid block (peer sent us a "bad" compact block) - [TBD]
|
||||
|
||||
let block = match core::Block::hydrate_from(cb.clone(), txs) {
|
||||
Ok(block) => block,
|
||||
Ok(block) => {
|
||||
if !self.sync_state.is_syncing() {
|
||||
for hook in &self.hooks {
|
||||
hook.on_block_received(&block, &addr);
|
||||
}
|
||||
}
|
||||
block
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Invalid hydrated block {}: {:?}", cb.hash(), e);
|
||||
return Ok(false);
|
||||
|
@ -405,6 +421,7 @@ impl NetToChainAdapter {
|
|||
tx_pool: Arc<RwLock<pool::TransactionPool>>,
|
||||
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
config: ServerConfig,
|
||||
hooks: Vec<Box<dyn NetEvents + Send + Sync>>,
|
||||
) -> NetToChainAdapter {
|
||||
NetToChainAdapter {
|
||||
sync_state,
|
||||
|
@ -413,6 +430,7 @@ impl NetToChainAdapter {
|
|||
verifier_cache,
|
||||
peers: OneTime::new(),
|
||||
config,
|
||||
hooks,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -646,39 +664,16 @@ impl NetToChainAdapter {
|
|||
pub struct ChainToPoolAndNetAdapter {
|
||||
tx_pool: Arc<RwLock<pool::TransactionPool>>,
|
||||
peers: OneTime<Weak<p2p::Peers>>,
|
||||
hooks: Vec<Box<dyn ChainEvents + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl ChainAdapter for ChainToPoolAndNetAdapter {
|
||||
fn block_accepted(&self, b: &core::Block, status: BlockStatus, opts: Options) {
|
||||
match status {
|
||||
BlockStatus::Reorg => {
|
||||
warn!(
|
||||
"block_accepted (REORG!): {:?} at {} (diff: {})",
|
||||
b.hash(),
|
||||
b.header.height,
|
||||
b.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
BlockStatus::Fork => {
|
||||
debug!(
|
||||
"block_accepted (fork?): {:?} at {} (diff: {})",
|
||||
b.hash(),
|
||||
b.header.height,
|
||||
b.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
BlockStatus::Next => {
|
||||
debug!(
|
||||
"block_accepted (head+): {:?} at {} (diff: {})",
|
||||
b.hash(),
|
||||
b.header.height,
|
||||
b.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// not broadcasting blocks received through sync
|
||||
if !opts.contains(chain::Options::SYNC) {
|
||||
for hook in &self.hooks {
|
||||
hook.on_block_accepted(b, &status);
|
||||
}
|
||||
// If we mined the block then we want to broadcast the compact block.
|
||||
// If we received the block from another node then broadcast "header first"
|
||||
// to minimize network traffic.
|
||||
|
@ -713,10 +708,14 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
|
|||
|
||||
impl ChainToPoolAndNetAdapter {
|
||||
/// Construct a ChainToPoolAndNetAdapter instance.
|
||||
pub fn new(tx_pool: Arc<RwLock<pool::TransactionPool>>) -> ChainToPoolAndNetAdapter {
|
||||
pub fn new(
|
||||
tx_pool: Arc<RwLock<pool::TransactionPool>>,
|
||||
hooks: Vec<Box<dyn ChainEvents + Send + Sync>>,
|
||||
) -> ChainToPoolAndNetAdapter {
|
||||
ChainToPoolAndNetAdapter {
|
||||
tx_pool,
|
||||
peers: OneTime::new(),
|
||||
hooks: hooks,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -738,26 +737,77 @@ impl ChainToPoolAndNetAdapter {
|
|||
/// transactions that have been accepted.
|
||||
pub struct PoolToNetAdapter {
|
||||
peers: OneTime<Weak<p2p::Peers>>,
|
||||
dandelion_epoch: Arc<RwLock<DandelionEpoch>>,
|
||||
}
|
||||
|
||||
/// Adapter between the Dandelion monitor and the current Dandelion "epoch".
|
||||
pub trait DandelionAdapter: Send + Sync {
|
||||
/// Is the node stemming (or fluffing) transactions in the current epoch?
|
||||
fn is_stem(&self) -> bool;
|
||||
|
||||
/// Is the current Dandelion epoch expired?
|
||||
fn is_expired(&self) -> bool;
|
||||
|
||||
/// Transition to the next Dandelion epoch (new stem/fluff state, select new relay peer).
|
||||
fn next_epoch(&self);
|
||||
}
|
||||
|
||||
impl DandelionAdapter for PoolToNetAdapter {
|
||||
fn is_stem(&self) -> bool {
|
||||
self.dandelion_epoch.read().is_stem()
|
||||
}
|
||||
|
||||
fn is_expired(&self) -> bool {
|
||||
self.dandelion_epoch.read().is_expired()
|
||||
}
|
||||
|
||||
fn next_epoch(&self) {
|
||||
self.dandelion_epoch.write().next_epoch(&self.peers());
|
||||
}
|
||||
}
|
||||
|
||||
impl pool::PoolAdapter for PoolToNetAdapter {
|
||||
fn stem_tx_accepted(&self, tx: &core::Transaction) -> Result<(), pool::PoolError> {
|
||||
self.peers()
|
||||
.relay_stem_transaction(tx)
|
||||
.map_err(|_| pool::PoolError::DandelionError)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn tx_accepted(&self, tx: &core::Transaction) {
|
||||
self.peers().broadcast_transaction(tx);
|
||||
}
|
||||
|
||||
fn stem_tx_accepted(&self, tx: &core::Transaction) -> Result<(), pool::PoolError> {
|
||||
// Take write lock on the current epoch.
|
||||
// We need to be able to update the current relay peer if not currently connected.
|
||||
let mut epoch = self.dandelion_epoch.write();
|
||||
|
||||
// If "stem" epoch attempt to relay the tx to the next Dandelion relay.
|
||||
// Fallback to immediately fluffing the tx if we cannot stem for any reason.
|
||||
// If "fluff" epoch then nothing to do right now (fluff via Dandelion monitor).
|
||||
if epoch.is_stem() {
|
||||
if let Some(peer) = epoch.relay_peer(&self.peers()) {
|
||||
match peer.send_stem_transaction(tx) {
|
||||
Ok(_) => {
|
||||
info!("Stemming this epoch, relaying to next peer.");
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Stemming tx failed. Fluffing. {:?}", e);
|
||||
Err(pool::PoolError::DandelionError)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error!("No relay peer. Fluffing.");
|
||||
Err(pool::PoolError::DandelionError)
|
||||
}
|
||||
} else {
|
||||
info!("Fluff epoch. Aggregating stem tx(s). Will fluff via Dandelion monitor.");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PoolToNetAdapter {
|
||||
/// Create a new pool to net adapter
|
||||
pub fn new() -> PoolToNetAdapter {
|
||||
pub fn new(config: DandelionConfig) -> PoolToNetAdapter {
|
||||
PoolToNetAdapter {
|
||||
peers: OneTime::new(),
|
||||
dandelion_epoch: Arc::new(RwLock::new(DandelionEpoch::new(config))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
327
servers/src/common/hooks.rs
Normal file
327
servers/src/common/hooks.rs
Normal file
|
@ -0,0 +1,327 @@
|
|||
// Copyright 2019 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! This module allows to register callbacks on certain events. To add a custom
|
||||
//! callback simply implement the coresponding trait and add it to the init function
|
||||
|
||||
extern crate hyper;
|
||||
extern crate hyper_rustls;
|
||||
extern crate tokio;
|
||||
|
||||
use crate::chain::BlockStatus;
|
||||
use crate::common::types::{ServerConfig, WebHooksConfig};
|
||||
use crate::core::core;
|
||||
use crate::core::core::hash::Hashed;
|
||||
use crate::p2p::types::PeerAddr;
|
||||
use futures::future::Future;
|
||||
use hyper::client::HttpConnector;
|
||||
use hyper::header::HeaderValue;
|
||||
use hyper::Client;
|
||||
use hyper::{Body, Method, Request};
|
||||
use hyper_rustls::HttpsConnector;
|
||||
use serde::Serialize;
|
||||
use serde_json::{json, to_string};
|
||||
use std::time::Duration;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
/// Returns the list of event hooks that will be initialized for network events
|
||||
pub fn init_net_hooks(config: &ServerConfig) -> Vec<Box<dyn NetEvents + Send + Sync>> {
|
||||
let mut list: Vec<Box<NetEvents + Send + Sync>> = Vec::new();
|
||||
list.push(Box::new(EventLogger));
|
||||
if config.webhook_config.block_received_url.is_some()
|
||||
|| config.webhook_config.tx_received_url.is_some()
|
||||
|| config.webhook_config.header_received_url.is_some()
|
||||
{
|
||||
list.push(Box::new(WebHook::from_config(&config.webhook_config)));
|
||||
}
|
||||
list
|
||||
}
|
||||
|
||||
/// Returns the list of event hooks that will be initialized for chain events
|
||||
pub fn init_chain_hooks(config: &ServerConfig) -> Vec<Box<dyn ChainEvents + Send + Sync>> {
|
||||
let mut list: Vec<Box<ChainEvents + Send + Sync>> = Vec::new();
|
||||
list.push(Box::new(EventLogger));
|
||||
if config.webhook_config.block_accepted_url.is_some() {
|
||||
list.push(Box::new(WebHook::from_config(&config.webhook_config)));
|
||||
}
|
||||
list
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
/// Trait to be implemented by Network Event Hooks
|
||||
pub trait NetEvents {
|
||||
/// Triggers when a new transaction arrives
|
||||
fn on_transaction_received(&self, tx: &core::Transaction) {}
|
||||
|
||||
/// Triggers when a new block arrives
|
||||
fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {}
|
||||
|
||||
/// Triggers when a new block header arrives
|
||||
fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {}
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
/// Trait to be implemented by Chain Event Hooks
|
||||
pub trait ChainEvents {
|
||||
/// Triggers when a new block is accepted by the chain (might be a Reorg or a Fork)
|
||||
fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {}
|
||||
}
|
||||
|
||||
/// Basic Logger
|
||||
struct EventLogger;
|
||||
|
||||
impl NetEvents for EventLogger {
|
||||
fn on_transaction_received(&self, tx: &core::Transaction) {
|
||||
debug!(
|
||||
"Received tx {}, [in/out/kern: {}/{}/{}] going to process.",
|
||||
tx.hash(),
|
||||
tx.inputs().len(),
|
||||
tx.outputs().len(),
|
||||
tx.kernels().len(),
|
||||
);
|
||||
}
|
||||
|
||||
fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {
|
||||
debug!(
|
||||
"Received block {} at {} from {} [in/out/kern: {}/{}/{}] going to process.",
|
||||
block.hash(),
|
||||
block.header.height,
|
||||
addr,
|
||||
block.inputs().len(),
|
||||
block.outputs().len(),
|
||||
block.kernels().len(),
|
||||
);
|
||||
}
|
||||
|
||||
fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {
|
||||
debug!(
|
||||
"Received block header {} at {} from {}, going to process.",
|
||||
header.hash(),
|
||||
header.height,
|
||||
addr
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl ChainEvents for EventLogger {
|
||||
fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
|
||||
match status {
|
||||
BlockStatus::Reorg => {
|
||||
warn!(
|
||||
"block_accepted (REORG!): {:?} at {} (diff: {})",
|
||||
block.hash(),
|
||||
block.header.height,
|
||||
block.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
BlockStatus::Fork => {
|
||||
debug!(
|
||||
"block_accepted (fork?): {:?} at {} (diff: {})",
|
||||
block.hash(),
|
||||
block.header.height,
|
||||
block.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
BlockStatus::Next => {
|
||||
debug!(
|
||||
"block_accepted (head+): {:?} at {} (diff: {})",
|
||||
block.hash(),
|
||||
block.header.height,
|
||||
block.header.total_difficulty(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_url(value: &Option<String>) -> Option<hyper::Uri> {
|
||||
match value {
|
||||
Some(url) => {
|
||||
let uri: hyper::Uri = match url.parse() {
|
||||
Ok(value) => value,
|
||||
Err(_) => panic!("Invalid url : {}", url),
|
||||
};
|
||||
let scheme = uri.scheme_part().map(|s| s.as_str());
|
||||
if (scheme != Some("http")) && (scheme != Some("https")) {
|
||||
panic!(
|
||||
"Invalid url scheme {}, expected one of ['http', https']",
|
||||
url
|
||||
)
|
||||
};
|
||||
Some(uri)
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct that holds the hyper/tokio runtime.
|
||||
struct WebHook {
|
||||
/// url to POST transaction data when a new transaction arrives from a peer
|
||||
tx_received_url: Option<hyper::Uri>,
|
||||
/// url to POST header data when a new header arrives from a peer
|
||||
header_received_url: Option<hyper::Uri>,
|
||||
/// url to POST block data when a new block arrives from a peer
|
||||
block_received_url: Option<hyper::Uri>,
|
||||
/// url to POST block data when a new block is accepted by our node (might be a reorg or a fork)
|
||||
block_accepted_url: Option<hyper::Uri>,
|
||||
/// The hyper client to be used for all requests
|
||||
client: Client<HttpsConnector<HttpConnector>>,
|
||||
/// The tokio event loop
|
||||
runtime: Runtime,
|
||||
}
|
||||
|
||||
impl WebHook {
|
||||
/// Instantiates a Webhook struct
|
||||
fn new(
|
||||
tx_received_url: Option<hyper::Uri>,
|
||||
header_received_url: Option<hyper::Uri>,
|
||||
block_received_url: Option<hyper::Uri>,
|
||||
block_accepted_url: Option<hyper::Uri>,
|
||||
nthreads: u16,
|
||||
timeout: u16,
|
||||
) -> WebHook {
|
||||
let keep_alive = Duration::from_secs(timeout as u64);
|
||||
|
||||
info!(
|
||||
"Spawning {} threads for webhooks (timeout set to {} secs)",
|
||||
nthreads, timeout
|
||||
);
|
||||
|
||||
let https = HttpsConnector::new(nthreads as usize);
|
||||
let client = Client::builder()
|
||||
.keep_alive_timeout(keep_alive)
|
||||
.build::<_, hyper::Body>(https);
|
||||
|
||||
WebHook {
|
||||
tx_received_url,
|
||||
block_received_url,
|
||||
header_received_url,
|
||||
block_accepted_url,
|
||||
client,
|
||||
runtime: Runtime::new().unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Instantiates a Webhook struct from a configuration file
|
||||
fn from_config(config: &WebHooksConfig) -> WebHook {
|
||||
WebHook::new(
|
||||
parse_url(&config.tx_received_url),
|
||||
parse_url(&config.header_received_url),
|
||||
parse_url(&config.block_received_url),
|
||||
parse_url(&config.block_accepted_url),
|
||||
config.nthreads,
|
||||
config.timeout,
|
||||
)
|
||||
}
|
||||
|
||||
fn post(&self, url: hyper::Uri, data: String) {
|
||||
let mut req = Request::new(Body::from(data));
|
||||
*req.method_mut() = Method::POST;
|
||||
*req.uri_mut() = url.clone();
|
||||
req.headers_mut().insert(
|
||||
hyper::header::CONTENT_TYPE,
|
||||
HeaderValue::from_static("application/json"),
|
||||
);
|
||||
|
||||
let future = self
|
||||
.client
|
||||
.request(req)
|
||||
.map(|_res| {})
|
||||
.map_err(move |_res| {
|
||||
warn!("Error sending POST request to {}", url);
|
||||
});
|
||||
|
||||
let handle = self.runtime.executor();
|
||||
handle.spawn(future);
|
||||
}
|
||||
fn make_request<T: Serialize>(&self, payload: &T, uri: &Option<hyper::Uri>) -> bool {
|
||||
if let Some(url) = uri {
|
||||
let payload = match to_string(payload) {
|
||||
Ok(serialized) => serialized,
|
||||
Err(_) => {
|
||||
return false; // print error message
|
||||
}
|
||||
};
|
||||
self.post(url.clone(), payload);
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl ChainEvents for WebHook {
|
||||
fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
|
||||
let status = match status {
|
||||
BlockStatus::Reorg => "reorg",
|
||||
BlockStatus::Fork => "fork",
|
||||
BlockStatus::Next => "head",
|
||||
};
|
||||
let payload = json!({
|
||||
"hash": block.header.hash().to_hex(),
|
||||
"status": status,
|
||||
"data": block
|
||||
});
|
||||
if !self.make_request(&payload, &self.block_accepted_url) {
|
||||
error!(
|
||||
"Failed to serialize block {} at height {}",
|
||||
block.hash(),
|
||||
block.header.height
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NetEvents for WebHook {
|
||||
/// Triggers when a new transaction arrives
|
||||
fn on_transaction_received(&self, tx: &core::Transaction) {
|
||||
let payload = json!({
|
||||
"hash": tx.hash().to_hex(),
|
||||
"data": tx
|
||||
});
|
||||
if !self.make_request(&payload, &self.tx_received_url) {
|
||||
error!("Failed to serialize transaction {}", tx.hash());
|
||||
}
|
||||
}
|
||||
|
||||
/// Triggers when a new block arrives
|
||||
fn on_block_received(&self, block: &core::Block, addr: &PeerAddr) {
|
||||
let payload = json!({
|
||||
"hash": block.header.hash().to_hex(),
|
||||
"peer": addr,
|
||||
"data": block
|
||||
});
|
||||
if !self.make_request(&payload, &self.block_received_url) {
|
||||
error!(
|
||||
"Failed to serialize block {} at height {}",
|
||||
block.hash().to_hex(),
|
||||
block.header.height
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Triggers when a new block header arrives
|
||||
fn on_header_received(&self, header: &core::BlockHeader, addr: &PeerAddr) {
|
||||
let payload = json!({
|
||||
"hash": header.hash().to_hex(),
|
||||
"peer": addr,
|
||||
"data": header
|
||||
});
|
||||
if !self.make_request(&payload, &self.header_received_url) {
|
||||
error!(
|
||||
"Failed to serialize header {} at height {}",
|
||||
header.hash(),
|
||||
header.height
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -13,10 +13,12 @@
|
|||
// limitations under the License.
|
||||
|
||||
//! Server types
|
||||
use crate::util::RwLock;
|
||||
use std::convert::From;
|
||||
use std::sync::Arc;
|
||||
|
||||
use chrono::prelude::{DateTime, Utc};
|
||||
use rand::prelude::*;
|
||||
|
||||
use crate::api;
|
||||
use crate::chain;
|
||||
use crate::core::global::ChainTypes;
|
||||
|
@ -24,9 +26,9 @@ use crate::core::{core, libtx, pow};
|
|||
use crate::keychain;
|
||||
use crate::p2p;
|
||||
use crate::pool;
|
||||
use crate::pool::types::DandelionConfig;
|
||||
use crate::store;
|
||||
use crate::wallet;
|
||||
use chrono::prelude::{DateTime, Utc};
|
||||
use crate::util::RwLock;
|
||||
|
||||
/// Error type wrapping underlying module errors.
|
||||
#[derive(Debug)]
|
||||
|
@ -43,8 +45,6 @@ pub enum Error {
|
|||
P2P(p2p::Error),
|
||||
/// Error originating from HTTP API calls.
|
||||
API(api::Error),
|
||||
/// Error originating from wallet API.
|
||||
Wallet(wallet::Error),
|
||||
/// Error originating from the cuckoo miner
|
||||
Cuckoo(pow::Error),
|
||||
/// Error originating from the transaction pool.
|
||||
|
@ -53,6 +53,8 @@ pub enum Error {
|
|||
Keychain(keychain::Error),
|
||||
/// Invalid Arguments.
|
||||
ArgumentError(String),
|
||||
/// Wallet communication error
|
||||
WalletComm(String),
|
||||
/// Error originating from some I/O operation (likely a file on disk).
|
||||
IOError(std::io::Error),
|
||||
/// Configuration error
|
||||
|
@ -100,12 +102,6 @@ impl From<api::Error> for Error {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<wallet::Error> for Error {
|
||||
fn from(e: wallet::Error) -> Error {
|
||||
Error::Wallet(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<pool::PoolError> for Error {
|
||||
fn from(e: pool::PoolError) -> Error {
|
||||
Error::Pool(e)
|
||||
|
@ -177,9 +173,6 @@ pub struct ServerConfig {
|
|||
/// if enabled, this will disable logging to stdout
|
||||
pub run_tui: Option<bool>,
|
||||
|
||||
/// Whether to use the DB wallet backend implementation
|
||||
pub use_db_wallet: Option<bool>,
|
||||
|
||||
/// Whether to run the test miner (internal, cuckoo 16)
|
||||
pub run_test_miner: Option<bool>,
|
||||
|
||||
|
@ -200,6 +193,10 @@ pub struct ServerConfig {
|
|||
/// Configuration for the mining daemon
|
||||
#[serde(default)]
|
||||
pub stratum_mining_config: Option<StratumServerConfig>,
|
||||
|
||||
/// Configuration for the webhooks that trigger on certain events
|
||||
#[serde(default)]
|
||||
pub webhook_config: WebHooksConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
|
@ -219,9 +216,9 @@ impl Default for ServerConfig {
|
|||
pool_config: pool::PoolConfig::default(),
|
||||
skip_sync_wait: Some(false),
|
||||
run_tui: Some(true),
|
||||
use_db_wallet: None,
|
||||
run_test_miner: Some(false),
|
||||
test_miner_wallet_url: None,
|
||||
webhook_config: WebHooksConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -264,6 +261,46 @@ impl Default for StratumServerConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Web hooks configuration
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
|
||||
pub struct WebHooksConfig {
|
||||
/// url to POST transaction data when a new transaction arrives from a peer
|
||||
pub tx_received_url: Option<String>,
|
||||
/// url to POST header data when a new header arrives from a peer
|
||||
pub header_received_url: Option<String>,
|
||||
/// url to POST block data when a new block arrives from a peer
|
||||
pub block_received_url: Option<String>,
|
||||
/// url to POST block data when a new block is accepted by our node (might be a reorg or a fork)
|
||||
pub block_accepted_url: Option<String>,
|
||||
/// number of worker threads in the tokio runtime
|
||||
#[serde(default = "default_nthreads")]
|
||||
pub nthreads: u16,
|
||||
/// timeout in seconds for the http request
|
||||
#[serde(default = "default_timeout")]
|
||||
pub timeout: u16,
|
||||
}
|
||||
|
||||
fn default_timeout() -> u16 {
|
||||
10
|
||||
}
|
||||
|
||||
fn default_nthreads() -> u16 {
|
||||
4
|
||||
}
|
||||
|
||||
impl Default for WebHooksConfig {
|
||||
fn default() -> WebHooksConfig {
|
||||
WebHooksConfig {
|
||||
tx_received_url: None,
|
||||
header_received_url: None,
|
||||
block_received_url: None,
|
||||
block_accepted_url: None,
|
||||
nthreads: default_nthreads(),
|
||||
timeout: default_timeout(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Various status sync can be in, whether it's fast sync or archival.
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
||||
#[allow(missing_docs)]
|
||||
|
@ -427,3 +464,94 @@ impl chain::TxHashsetWriteStatus for SyncState {
|
|||
self.update(SyncStatus::TxHashsetDone);
|
||||
}
|
||||
}
|
||||
|
||||
/// A node is either "stem" of "fluff" for the duration of a single epoch.
|
||||
/// A node also maintains an outbound relay peer for the epoch.
|
||||
#[derive(Debug)]
|
||||
pub struct DandelionEpoch {
|
||||
config: DandelionConfig,
|
||||
// When did this epoch start?
|
||||
start_time: Option<i64>,
|
||||
// Are we in "stem" mode or "fluff" mode for this epoch?
|
||||
is_stem: bool,
|
||||
// Our current Dandelion relay peer (effective for this epoch).
|
||||
relay_peer: Option<Arc<p2p::Peer>>,
|
||||
}
|
||||
|
||||
impl DandelionEpoch {
|
||||
/// Create a new Dandelion epoch, defaulting to "stem" and no outbound relay peer.
|
||||
pub fn new(config: DandelionConfig) -> DandelionEpoch {
|
||||
DandelionEpoch {
|
||||
config,
|
||||
start_time: None,
|
||||
is_stem: true,
|
||||
relay_peer: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Is the current Dandelion epoch expired?
|
||||
/// It is expired if start_time is older than the configured epoch_secs.
|
||||
pub fn is_expired(&self) -> bool {
|
||||
match self.start_time {
|
||||
None => true,
|
||||
Some(start_time) => {
|
||||
let epoch_secs = self.config.epoch_secs.expect("epoch_secs config missing") as i64;
|
||||
Utc::now().timestamp().saturating_sub(start_time) > epoch_secs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Transition to next Dandelion epoch.
|
||||
/// Select stem/fluff based on configured stem_probability.
|
||||
/// Choose a new outbound stem relay peer.
|
||||
pub fn next_epoch(&mut self, peers: &Arc<p2p::Peers>) {
|
||||
self.start_time = Some(Utc::now().timestamp());
|
||||
self.relay_peer = peers.outgoing_connected_peers().first().cloned();
|
||||
|
||||
// If stem_probability == 90 then we stem 90% of the time.
|
||||
let mut rng = rand::thread_rng();
|
||||
let stem_probability = self
|
||||
.config
|
||||
.stem_probability
|
||||
.expect("stem_probability config missing");
|
||||
self.is_stem = rng.gen_range(0, 100) < stem_probability;
|
||||
|
||||
let addr = self.relay_peer.clone().map(|p| p.info.addr);
|
||||
info!(
|
||||
"DandelionEpoch: next_epoch: is_stem: {} ({}%), relay: {:?}",
|
||||
self.is_stem, stem_probability, addr
|
||||
);
|
||||
}
|
||||
|
||||
/// Are we stemming (or fluffing) transactions in this epoch?
|
||||
pub fn is_stem(&self) -> bool {
|
||||
self.is_stem
|
||||
}
|
||||
|
||||
/// What is our current relay peer?
|
||||
/// If it is not connected then choose a new one.
|
||||
pub fn relay_peer(&mut self, peers: &Arc<p2p::Peers>) -> Option<Arc<p2p::Peer>> {
|
||||
let mut update_relay = false;
|
||||
if let Some(peer) = &self.relay_peer {
|
||||
if !peer.is_connected() {
|
||||
info!(
|
||||
"DandelionEpoch: relay_peer: {:?} not connected, choosing a new one.",
|
||||
peer.info.addr
|
||||
);
|
||||
update_relay = true;
|
||||
}
|
||||
} else {
|
||||
update_relay = true;
|
||||
}
|
||||
|
||||
if update_relay {
|
||||
self.relay_peer = peers.outgoing_connected_peers().first().cloned();
|
||||
info!(
|
||||
"DandelionEpoch: relay_peer: new peer chosen: {:?}",
|
||||
self.relay_peer.clone().map(|p| p.info.addr)
|
||||
);
|
||||
}
|
||||
|
||||
self.relay_peer.clone()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,17 +12,18 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::util::{Mutex, RwLock, StopState};
|
||||
use chrono::prelude::Utc;
|
||||
use rand::{thread_rng, Rng};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::common::adapters::DandelionAdapter;
|
||||
use crate::core::core::hash::Hashed;
|
||||
use crate::core::core::transaction;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
use crate::pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource};
|
||||
use crate::pool::{DandelionConfig, Pool, PoolEntry, PoolError, TransactionPool, TxSource};
|
||||
use crate::util::{Mutex, RwLock, StopState};
|
||||
|
||||
/// A process to monitor transactions in the stempool.
|
||||
/// With Dandelion, transaction can be broadcasted in stem or fluff phase.
|
||||
|
@ -35,6 +36,7 @@ use crate::pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, T
|
|||
pub fn monitor_transactions(
|
||||
dandelion_config: DandelionConfig,
|
||||
tx_pool: Arc<RwLock<TransactionPool>>,
|
||||
adapter: Arc<DandelionAdapter>,
|
||||
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
stop_state: Arc<Mutex<StopState>>,
|
||||
) {
|
||||
|
@ -44,211 +46,142 @@ pub fn monitor_transactions(
|
|||
.name("dandelion".to_string())
|
||||
.spawn(move || {
|
||||
loop {
|
||||
// Halt Dandelion monitor if we have been notified that we are stopping.
|
||||
if stop_state.lock().is_stopped() {
|
||||
break;
|
||||
}
|
||||
|
||||
// This is the patience timer, we loop every n secs.
|
||||
let patience_secs = dandelion_config.patience_secs();
|
||||
thread::sleep(Duration::from_secs(patience_secs));
|
||||
|
||||
// Step 1: find all "ToStem" entries in stempool from last run.
|
||||
// Aggregate them up to give a single (valid) aggregated tx and propagate it
|
||||
// to the next Dandelion relay along the stem.
|
||||
if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
|
||||
error!("dand_mon: Problem with stem phase.");
|
||||
if !adapter.is_stem() {
|
||||
let _ =
|
||||
process_fluff_phase(&dandelion_config, &tx_pool, &adapter, &verifier_cache)
|
||||
.map_err(|e| {
|
||||
error!("dand_mon: Problem processing fluff phase. {:?}", e);
|
||||
});
|
||||
}
|
||||
|
||||
// Step 2: find all "ToFluff" entries in stempool from last run.
|
||||
// Aggregate them up to give a single (valid) aggregated tx and (re)add it
|
||||
// to our pool with stem=false (which will then broadcast it).
|
||||
if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
|
||||
error!("dand_mon: Problem with fluff phase.");
|
||||
// Now find all expired entries based on embargo timer.
|
||||
let _ = process_expired_entries(&dandelion_config, &tx_pool).map_err(|e| {
|
||||
error!("dand_mon: Problem processing expired entries. {:?}", e);
|
||||
});
|
||||
|
||||
// Handle the tx above *before* we transition to next epoch.
|
||||
// This gives us an opportunity to do the final "fluff" before we start
|
||||
// stemming on the subsequent epoch.
|
||||
if adapter.is_expired() {
|
||||
adapter.next_epoch();
|
||||
}
|
||||
|
||||
// Step 3: now find all "Fresh" entries in stempool since last run.
|
||||
// Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff".
|
||||
// We will process these in the next run (waiting patience secs).
|
||||
if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
|
||||
error!("dand_mon: Problem processing fresh pool entries.");
|
||||
}
|
||||
|
||||
// Step 4: now find all expired entries based on embargo timer.
|
||||
if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
|
||||
error!("dand_mon: Problem processing fresh pool entries.");
|
||||
}
|
||||
// Monitor loops every 10s.
|
||||
thread::sleep(Duration::from_secs(10));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
fn process_stem_phase(
|
||||
tx_pool: Arc<RwLock<TransactionPool>>,
|
||||
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
) -> Result<(), PoolError> {
|
||||
let mut tx_pool = tx_pool.write();
|
||||
|
||||
let header = tx_pool.chain_head()?;
|
||||
|
||||
let stem_txs = tx_pool
|
||||
.stempool
|
||||
.get_transactions_in_state(PoolEntryState::ToStem);
|
||||
|
||||
if stem_txs.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Get the aggregate tx representing the entire txpool.
|
||||
let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?;
|
||||
|
||||
let stem_txs = tx_pool
|
||||
.stempool
|
||||
.select_valid_transactions(stem_txs, txpool_tx, &header)?;
|
||||
tx_pool
|
||||
.stempool
|
||||
.transition_to_state(&stem_txs, PoolEntryState::Stemmed);
|
||||
|
||||
if stem_txs.len() > 0 {
|
||||
debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());
|
||||
|
||||
let agg_tx = transaction::aggregate(stem_txs)?;
|
||||
agg_tx.validate(
|
||||
transaction::Weighting::AsTransaction,
|
||||
verifier_cache.clone(),
|
||||
)?;
|
||||
|
||||
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
|
||||
if res.is_err() {
|
||||
debug!("dand_mon: Unable to propagate stem tx. No relay, fluffing instead.");
|
||||
|
||||
let src = TxSource {
|
||||
debug_name: "no_relay".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
tx_pool.add_to_pool(src, agg_tx, false, &header)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
// Query the pool for transactions older than the cutoff.
|
||||
// Used for both periodic fluffing and handling expired embargo timer.
|
||||
fn select_txs_cutoff(pool: &Pool, cutoff_secs: u16) -> Vec<PoolEntry> {
|
||||
let cutoff = Utc::now().timestamp() - cutoff_secs as i64;
|
||||
pool.entries
|
||||
.iter()
|
||||
.filter(|x| x.tx_at.timestamp() < cutoff)
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
fn process_fluff_phase(
|
||||
tx_pool: Arc<RwLock<TransactionPool>>,
|
||||
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
|
||||
dandelion_config: &DandelionConfig,
|
||||
tx_pool: &Arc<RwLock<TransactionPool>>,
|
||||
adapter: &Arc<DandelionAdapter>,
|
||||
verifier_cache: &Arc<RwLock<dyn VerifierCache>>,
|
||||
) -> Result<(), PoolError> {
|
||||
// Take a write lock on the txpool for the duration of this processing.
|
||||
let mut tx_pool = tx_pool.write();
|
||||
|
||||
let header = tx_pool.chain_head()?;
|
||||
|
||||
let stem_txs = tx_pool
|
||||
.stempool
|
||||
.get_transactions_in_state(PoolEntryState::ToFluff);
|
||||
|
||||
if stem_txs.is_empty() {
|
||||
let all_entries = tx_pool.stempool.entries.clone();
|
||||
if all_entries.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Get the aggregate tx representing the entire txpool.
|
||||
let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?;
|
||||
let cutoff_secs = dandelion_config
|
||||
.aggregation_secs
|
||||
.expect("aggregation secs config missing");
|
||||
let cutoff_entries = select_txs_cutoff(&tx_pool.stempool, cutoff_secs);
|
||||
|
||||
let stem_txs = tx_pool
|
||||
.stempool
|
||||
.select_valid_transactions(stem_txs, txpool_tx, &header)?;
|
||||
tx_pool
|
||||
.stempool
|
||||
.transition_to_state(&stem_txs, PoolEntryState::Fluffed);
|
||||
|
||||
if stem_txs.len() > 0 {
|
||||
debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());
|
||||
|
||||
let agg_tx = transaction::aggregate(stem_txs)?;
|
||||
agg_tx.validate(
|
||||
transaction::Weighting::AsTransaction,
|
||||
verifier_cache.clone(),
|
||||
)?;
|
||||
|
||||
let src = TxSource {
|
||||
debug_name: "fluff".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
tx_pool.add_to_pool(src, agg_tx, false, &header)?;
|
||||
// If epoch is expired, fluff *all* outstanding entries in stempool.
|
||||
// If *any* entry older than aggregation_secs (30s) then fluff *all* entries.
|
||||
// Otherwise we are done for now and we can give txs more time to aggregate.
|
||||
if !adapter.is_expired() && cutoff_entries.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_fresh_entries(
|
||||
dandelion_config: DandelionConfig,
|
||||
tx_pool: Arc<RwLock<TransactionPool>>,
|
||||
) -> Result<(), PoolError> {
|
||||
let mut tx_pool = tx_pool.write();
|
||||
let header = tx_pool.chain_head()?;
|
||||
|
||||
let mut rng = thread_rng();
|
||||
let fluffable_txs = {
|
||||
let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?;
|
||||
let txs: Vec<_> = all_entries.into_iter().map(|x| x.tx).collect();
|
||||
tx_pool.stempool.validate_raw_txs(
|
||||
&txs,
|
||||
txpool_tx,
|
||||
&header,
|
||||
transaction::Weighting::NoLimit,
|
||||
)?
|
||||
};
|
||||
|
||||
let fresh_entries = &mut tx_pool
|
||||
.stempool
|
||||
.entries
|
||||
.iter_mut()
|
||||
.filter(|x| x.state == PoolEntryState::Fresh)
|
||||
.collect::<Vec<_>>();
|
||||
debug!(
|
||||
"dand_mon: Found {} txs in local stempool to fluff",
|
||||
fluffable_txs.len()
|
||||
);
|
||||
|
||||
if fresh_entries.len() > 0 {
|
||||
debug!(
|
||||
"dand_mon: Found {} fresh entries in stempool.",
|
||||
fresh_entries.len()
|
||||
);
|
||||
let agg_tx = transaction::aggregate(fluffable_txs)?;
|
||||
agg_tx.validate(
|
||||
transaction::Weighting::AsTransaction,
|
||||
verifier_cache.clone(),
|
||||
)?;
|
||||
|
||||
for x in &mut fresh_entries.iter_mut() {
|
||||
let random = rng.gen_range(0, 101);
|
||||
if random <= dandelion_config.stem_probability() {
|
||||
x.state = PoolEntryState::ToStem;
|
||||
} else {
|
||||
x.state = PoolEntryState::ToFluff;
|
||||
}
|
||||
}
|
||||
}
|
||||
let src = TxSource {
|
||||
debug_name: "fluff".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
tx_pool.add_to_pool(src, agg_tx, false, &header)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn process_expired_entries(
|
||||
dandelion_config: DandelionConfig,
|
||||
tx_pool: Arc<RwLock<TransactionPool>>,
|
||||
dandelion_config: &DandelionConfig,
|
||||
tx_pool: &Arc<RwLock<TransactionPool>>,
|
||||
) -> Result<(), PoolError> {
|
||||
let now = Utc::now().timestamp();
|
||||
let embargo_sec = dandelion_config.embargo_secs() + thread_rng().gen_range(0, 31);
|
||||
let cutoff = now - embargo_sec as i64;
|
||||
// Take a write lock on the txpool for the duration of this processing.
|
||||
let mut tx_pool = tx_pool.write();
|
||||
|
||||
let mut expired_entries = vec![];
|
||||
{
|
||||
let tx_pool = tx_pool.read();
|
||||
for entry in tx_pool
|
||||
.stempool
|
||||
.entries
|
||||
.iter()
|
||||
.filter(|x| x.tx_at.timestamp() < cutoff)
|
||||
{
|
||||
debug!("dand_mon: Embargo timer expired for {:?}", entry.tx.hash());
|
||||
expired_entries.push(entry.clone());
|
||||
}
|
||||
let embargo_secs = dandelion_config
|
||||
.embargo_secs
|
||||
.expect("embargo_secs config missing")
|
||||
+ thread_rng().gen_range(0, 31);
|
||||
let expired_entries = select_txs_cutoff(&tx_pool.stempool, embargo_secs);
|
||||
|
||||
if expired_entries.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if expired_entries.len() > 0 {
|
||||
debug!("dand_mon: Found {} expired txs.", expired_entries.len());
|
||||
debug!("dand_mon: Found {} expired txs.", expired_entries.len());
|
||||
|
||||
{
|
||||
let mut tx_pool = tx_pool.write();
|
||||
let header = tx_pool.chain_head()?;
|
||||
let header = tx_pool.chain_head()?;
|
||||
|
||||
for entry in expired_entries {
|
||||
let src = TxSource {
|
||||
debug_name: "embargo_expired".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
match tx_pool.add_to_pool(src, entry.tx, false, &header) {
|
||||
Ok(_) => debug!("dand_mon: embargo expired, fluffed tx successfully."),
|
||||
Err(e) => debug!("dand_mon: Failed to fluff expired tx - {:?}", e),
|
||||
};
|
||||
}
|
||||
}
|
||||
let src = TxSource {
|
||||
debug_name: "embargo_expired".to_string(),
|
||||
identifier: "?.?.?.?".to_string(),
|
||||
};
|
||||
|
||||
for entry in expired_entries {
|
||||
let txhash = entry.tx.hash();
|
||||
match tx_pool.add_to_pool(src.clone(), entry.tx, false, &header) {
|
||||
Ok(_) => info!(
|
||||
"dand_mon: embargo expired for {}, fluffed successfully.",
|
||||
txhash
|
||||
),
|
||||
Err(e) => warn!("dand_mon: failed to fluff expired tx {}, {:?}", txhash, e),
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -29,7 +29,6 @@ use crate::core::global;
|
|||
use crate::p2p;
|
||||
use crate::p2p::types::PeerAddr;
|
||||
use crate::p2p::ChainAdapter;
|
||||
use crate::pool::DandelionConfig;
|
||||
use crate::util::{Mutex, StopState};
|
||||
|
||||
// DNS Seeds with contact email associated
|
||||
|
@ -52,7 +51,6 @@ const FLOONET_DNS_SEEDS: &'static [&'static str] = &[
|
|||
pub fn connect_and_monitor(
|
||||
p2p_server: Arc<p2p::Server>,
|
||||
capabilities: p2p::Capabilities,
|
||||
dandelion_config: DandelionConfig,
|
||||
seed_list: Box<dyn Fn() -> Vec<PeerAddr> + Send>,
|
||||
preferred_peers: Option<Vec<PeerAddr>>,
|
||||
stop_state: Arc<Mutex<StopState>>,
|
||||
|
@ -119,8 +117,6 @@ pub fn connect_and_monitor(
|
|||
preferred_peers.clone(),
|
||||
);
|
||||
|
||||
update_dandelion_relay(peers.clone(), dandelion_config.clone());
|
||||
|
||||
prev = Utc::now();
|
||||
start_attempt = cmp::min(6, start_attempt + 1);
|
||||
}
|
||||
|
@ -248,21 +244,6 @@ fn monitor_peers(
|
|||
}
|
||||
}
|
||||
|
||||
fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionConfig) {
|
||||
// Dandelion Relay Updater
|
||||
let dandelion_relay = peers.get_dandelion_relay();
|
||||
if let Some((last_added, _)) = dandelion_relay {
|
||||
let dandelion_interval = Utc::now().timestamp() - last_added;
|
||||
if dandelion_interval >= dandelion_config.relay_secs() as i64 {
|
||||
debug!("monitor_peers: updating expired dandelion relay");
|
||||
peers.update_dandelion_relay();
|
||||
}
|
||||
} else {
|
||||
debug!("monitor_peers: no dandelion relay updating");
|
||||
peers.update_dandelion_relay();
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have any pre-existing peer in db. If so, start with those,
|
||||
// otherwise use the seeds provided.
|
||||
fn connect_to_seeds_and_preferred_peers(
|
||||
|
|
|
@ -31,6 +31,7 @@ use crate::chain;
|
|||
use crate::common::adapters::{
|
||||
ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter,
|
||||
};
|
||||
use crate::common::hooks::{init_chain_hooks, init_net_hooks};
|
||||
use crate::common::stats::{DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats};
|
||||
use crate::common::types::{Error, ServerConfig, StratumServerConfig, SyncState, SyncStatus};
|
||||
use crate::core::core::hash::{Hashed, ZERO_HASH};
|
||||
|
@ -42,7 +43,6 @@ use crate::mining::test_miner::Miner;
|
|||
use crate::p2p;
|
||||
use crate::p2p::types::PeerAddr;
|
||||
use crate::pool;
|
||||
use crate::store;
|
||||
use crate::util::file::get_first_line;
|
||||
use crate::util::{Mutex, RwLock, StopState};
|
||||
|
||||
|
@ -154,7 +154,7 @@ impl Server {
|
|||
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
|
||||
|
||||
let pool_adapter = Arc::new(PoolToChainAdapter::new());
|
||||
let pool_net_adapter = Arc::new(PoolToNetAdapter::new());
|
||||
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
|
||||
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
|
||||
config.pool_config.clone(),
|
||||
pool_adapter.clone(),
|
||||
|
@ -164,7 +164,10 @@ impl Server {
|
|||
|
||||
let sync_state = Arc::new(SyncState::new());
|
||||
|
||||
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone()));
|
||||
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(
|
||||
tx_pool.clone(),
|
||||
init_chain_hooks(&config),
|
||||
));
|
||||
|
||||
let genesis = match config.chain_type {
|
||||
global::ChainTypes::AutomatedTesting => genesis::genesis_dev(),
|
||||
|
@ -175,10 +178,8 @@ impl Server {
|
|||
|
||||
info!("Starting server, genesis block: {}", genesis.hash());
|
||||
|
||||
let db_env = Arc::new(store::new_env(config.db_root.clone()));
|
||||
let shared_chain = Arc::new(chain::Chain::init(
|
||||
config.db_root.clone(),
|
||||
db_env,
|
||||
chain_adapter.clone(),
|
||||
genesis.clone(),
|
||||
pow::verify_size,
|
||||
|
@ -195,21 +196,19 @@ impl Server {
|
|||
tx_pool.clone(),
|
||||
verifier_cache.clone(),
|
||||
config.clone(),
|
||||
init_net_hooks(&config),
|
||||
));
|
||||
|
||||
let peer_db_env = Arc::new(store::new_named_env(
|
||||
config.db_root.clone(),
|
||||
"peer".into(),
|
||||
config.p2p_config.peer_max_count,
|
||||
));
|
||||
let p2p_server = Arc::new(p2p::Server::new(
|
||||
peer_db_env,
|
||||
&config.db_root,
|
||||
config.p2p_config.capabilities,
|
||||
config.p2p_config.clone(),
|
||||
net_adapter.clone(),
|
||||
genesis.hash(),
|
||||
stop_state.clone(),
|
||||
)?);
|
||||
|
||||
// Initialize various adapters with our dynamic set of connected peers.
|
||||
chain_adapter.init(p2p_server.peers.clone());
|
||||
pool_net_adapter.init(p2p_server.peers.clone());
|
||||
net_adapter.init(p2p_server.peers.clone());
|
||||
|
@ -235,7 +234,6 @@ impl Server {
|
|||
seed::connect_and_monitor(
|
||||
p2p_server.clone(),
|
||||
config.p2p_config.capabilities,
|
||||
config.dandelion_config.clone(),
|
||||
seeder,
|
||||
config.p2p_config.peers_preferred.clone(),
|
||||
stop_state.clone(),
|
||||
|
@ -289,6 +287,7 @@ impl Server {
|
|||
dandelion_monitor::monitor_transactions(
|
||||
config.dandelion_config.clone(),
|
||||
tx_pool.clone(),
|
||||
pool_net_adapter.clone(),
|
||||
verifier_cache.clone(),
|
||||
stop_state.clone(),
|
||||
);
|
||||
|
@ -339,12 +338,12 @@ impl Server {
|
|||
self.chain.clone(),
|
||||
self.tx_pool.clone(),
|
||||
self.verifier_cache.clone(),
|
||||
self.state_info.stratum_stats.clone(),
|
||||
);
|
||||
let stratum_stats = self.state_info.stratum_stats.clone();
|
||||
let _ = thread::Builder::new()
|
||||
.name("stratum_server".to_string())
|
||||
.spawn(move || {
|
||||
stratum_server.run_loop(stratum_stats, edge_bits as u32, proof_size, sync_state);
|
||||
stratum_server.run_loop(edge_bits as u32, proof_size, sync_state);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -34,14 +34,11 @@ use grin_p2p as p2p;
|
|||
use grin_pool as pool;
|
||||
use grin_store as store;
|
||||
use grin_util as util;
|
||||
use grin_wallet as wallet;
|
||||
|
||||
pub mod common;
|
||||
mod grin;
|
||||
mod mining;
|
||||
mod webwallet;
|
||||
|
||||
pub use crate::common::stats::{DiffBlock, PeerStats, ServerStats, StratumStats, WorkerStats};
|
||||
pub use crate::common::types::{ServerConfig, StratumServerConfig};
|
||||
pub use crate::grin::server::Server;
|
||||
pub use crate::webwallet::server::start_webwallet_server;
|
||||
|
|
|
@ -22,14 +22,47 @@ use std::sync::Arc;
|
|||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::api;
|
||||
use crate::chain;
|
||||
use crate::common::types::Error;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
use crate::core::{consensus, core, global, ser};
|
||||
use crate::core::core::{Output, TxKernel};
|
||||
use crate::core::libtx::secp_ser;
|
||||
use crate::core::{consensus, core, global};
|
||||
use crate::keychain::{ExtKeychain, Identifier, Keychain};
|
||||
use crate::pool;
|
||||
use crate::util;
|
||||
use crate::wallet::{self, BlockFees};
|
||||
|
||||
/// Fees in block to use for coinbase amount calculation
|
||||
/// (Duplicated from Grin wallet project)
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct BlockFees {
|
||||
/// fees
|
||||
#[serde(with = "secp_ser::string_or_u64")]
|
||||
pub fees: u64,
|
||||
/// height
|
||||
#[serde(with = "secp_ser::string_or_u64")]
|
||||
pub height: u64,
|
||||
/// key id
|
||||
pub key_id: Option<Identifier>,
|
||||
}
|
||||
|
||||
impl BlockFees {
|
||||
/// return key id
|
||||
pub fn key_id(&self) -> Option<Identifier> {
|
||||
self.key_id.clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// Response to build a coinbase output.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct CbData {
|
||||
/// Output
|
||||
pub output: Output,
|
||||
/// Kernel
|
||||
pub kernel: TxKernel,
|
||||
/// Key Id
|
||||
pub key_id: Option<Identifier>,
|
||||
}
|
||||
|
||||
// Ensure a block suitable for mining is built and returned
|
||||
// If a wallet listener URL is not provided the reward will be "burnt"
|
||||
|
@ -65,7 +98,7 @@ pub fn get_block(
|
|||
error!("Chain Error: {}", c);
|
||||
}
|
||||
},
|
||||
self::Error::Wallet(_) => {
|
||||
self::Error::WalletComm(_) => {
|
||||
error!(
|
||||
"Error building new block: Can't connect to wallet listener at {:?}; will retry",
|
||||
wallet_listener_url.as_ref().unwrap()
|
||||
|
@ -190,7 +223,8 @@ fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, B
|
|||
warn!("Burning block fees: {:?}", block_fees);
|
||||
let keychain = ExtKeychain::from_random_seed(global::is_floonet())?;
|
||||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
let (out, kernel) = crate::core::libtx::reward::output(&keychain, &key_id, block_fees.fees)?;
|
||||
let (out, kernel) =
|
||||
crate::core::libtx::reward::output(&keychain, &key_id, block_fees.fees, false).unwrap();
|
||||
Ok((out, kernel, block_fees))
|
||||
}
|
||||
|
||||
|
@ -206,23 +240,12 @@ fn get_coinbase(
|
|||
return burn_reward(block_fees);
|
||||
}
|
||||
Some(wallet_listener_url) => {
|
||||
let res = wallet::create_coinbase(&wallet_listener_url, &block_fees)?;
|
||||
let out_bin = util::from_hex(res.output)
|
||||
.map_err(|_| Error::General("failed to parse hex output".to_owned()))?;
|
||||
let kern_bin = util::from_hex(res.kernel)
|
||||
.map_err(|_| Error::General("failed to parse hex kernel".to_owned()))?;
|
||||
|
||||
let key_id_bin = util::from_hex(res.key_id)
|
||||
.map_err(|_| Error::General("failed to parse hex key id".to_owned()))?;
|
||||
let output = ser::deserialize(&mut &out_bin[..])
|
||||
.map_err(|_| Error::General("failed to deserialize output".to_owned()))?;
|
||||
|
||||
let kernel = ser::deserialize(&mut &kern_bin[..])
|
||||
.map_err(|_| Error::General("failed to deserialize kernel".to_owned()))?;
|
||||
let key_id = ser::deserialize(&mut &key_id_bin[..])
|
||||
.map_err(|_| Error::General("failed to deserialize key id".to_owned()))?;
|
||||
let res = create_coinbase(&wallet_listener_url, &block_fees)?;
|
||||
let output = res.output;
|
||||
let kernel = res.kernel;
|
||||
let key_id = res.key_id;
|
||||
let block_fees = BlockFees {
|
||||
key_id: Some(key_id),
|
||||
key_id: key_id,
|
||||
..block_fees
|
||||
};
|
||||
|
||||
|
@ -231,3 +254,19 @@ fn get_coinbase(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Call the wallet API to create a coinbase output for the given block_fees.
|
||||
/// Will retry based on default "retry forever with backoff" behavior.
|
||||
fn create_coinbase(dest: &str, block_fees: &BlockFees) -> Result<CbData, Error> {
|
||||
let url = format!("{}/v1/wallet/foreign/build_coinbase", dest);
|
||||
match api::client::post(&url, None, &block_fees) {
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to get coinbase from {}. Is the wallet listening?",
|
||||
url
|
||||
);
|
||||
Err(Error::WalletComm(format!("{}", e)))
|
||||
}
|
||||
Ok(res) => Ok(res),
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -1,17 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Web wallet application static file server
|
||||
|
||||
pub mod server;
|
|
@ -1,98 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Integrated static file server to serve up a pre-compiled web-wallet
|
||||
//! application locally
|
||||
|
||||
use futures::{future, Async::*, Future, Poll};
|
||||
use http::response::Builder as ResponseBuilder;
|
||||
use http::{header, Request, Response, StatusCode};
|
||||
use hyper::service::Service;
|
||||
use hyper::{rt, Body, Server};
|
||||
use hyper_staticfile::{Static, StaticFuture};
|
||||
use std::env;
|
||||
use std::io::Error;
|
||||
use std::thread;
|
||||
|
||||
/// Future returned from `MainService`.
|
||||
enum MainFuture {
|
||||
Root,
|
||||
Static(StaticFuture<Body>),
|
||||
}
|
||||
|
||||
impl Future for MainFuture {
|
||||
type Item = Response<Body>;
|
||||
type Error = Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match *self {
|
||||
MainFuture::Root => {
|
||||
let res = ResponseBuilder::new()
|
||||
.status(StatusCode::MOVED_PERMANENTLY)
|
||||
.header(header::LOCATION, "/index.html")
|
||||
.body(Body::empty())
|
||||
.expect("unable to build response");
|
||||
Ok(Ready(res))
|
||||
}
|
||||
MainFuture::Static(ref mut future) => future.poll(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Hyper `Service` implementation that serves all requests.
|
||||
struct MainService {
|
||||
static_: Static,
|
||||
}
|
||||
|
||||
impl MainService {
|
||||
fn new() -> MainService {
|
||||
// Set up directory relative to executable for the time being
|
||||
let mut exe_path = env::current_exe().unwrap();
|
||||
exe_path.pop();
|
||||
exe_path.push("grin-wallet");
|
||||
MainService {
|
||||
static_: Static::new(exe_path),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Service for MainService {
|
||||
type ReqBody = Body;
|
||||
type ResBody = Body;
|
||||
type Error = Error;
|
||||
type Future = MainFuture;
|
||||
|
||||
fn call(&mut self, req: Request<Body>) -> MainFuture {
|
||||
if req.uri().path() == "/" {
|
||||
MainFuture::Root
|
||||
} else {
|
||||
MainFuture::Static(self.static_.serve(req))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the webwallet server to serve up static files from the given
|
||||
/// directory
|
||||
pub fn start_webwallet_server() {
|
||||
let _ = thread::Builder::new()
|
||||
.name("webwallet_server".to_string())
|
||||
.spawn(move || {
|
||||
let addr = ([127, 0, 0, 1], 13421).into();
|
||||
let server = Server::bind(&addr)
|
||||
.serve(|| future::ok::<_, Error>(MainService::new()))
|
||||
.map_err(|e| eprintln!("server error: {}", e));
|
||||
warn!("Grin Web-Wallet Application is running at http://{}/", addr);
|
||||
rt::run(server);
|
||||
});
|
||||
}
|
|
@ -1,550 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod framework;
|
||||
|
||||
use self::core::global::{self, ChainTypes};
|
||||
use self::util::{init_test_logger, to_hex, Mutex};
|
||||
use crate::framework::{LocalServerContainer, LocalServerContainerConfig};
|
||||
use grin_api as api;
|
||||
use grin_core as core;
|
||||
use grin_p2p as p2p;
|
||||
use grin_util as util;
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
|
||||
#[test]
|
||||
fn simple_server_wallet() {
|
||||
init_test_logger();
|
||||
info!("starting simple_server_wallet");
|
||||
let _test_name_dir = "test_servers";
|
||||
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
|
||||
|
||||
// Run a separate coinbase wallet for coinbase transactions
|
||||
let coinbase_dir = "coinbase_wallet_api";
|
||||
framework::clean_all_output(coinbase_dir);
|
||||
let mut coinbase_config = LocalServerContainerConfig::default();
|
||||
coinbase_config.name = String::from(coinbase_dir);
|
||||
coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:40001");
|
||||
coinbase_config.wallet_port = 50002;
|
||||
let coinbase_wallet = Arc::new(Mutex::new(
|
||||
LocalServerContainer::new(coinbase_config).unwrap(),
|
||||
));
|
||||
|
||||
let _ = thread::spawn(move || {
|
||||
let mut w = coinbase_wallet.lock();
|
||||
w.run_wallet(0);
|
||||
});
|
||||
|
||||
// Wait for the wallet to start
|
||||
thread::sleep(time::Duration::from_millis(1000));
|
||||
|
||||
let api_server_one_dir = "api_server_one";
|
||||
framework::clean_all_output(api_server_one_dir);
|
||||
let mut server_config = LocalServerContainerConfig::default();
|
||||
server_config.name = String::from(api_server_one_dir);
|
||||
server_config.p2p_server_port = 40000;
|
||||
server_config.api_server_port = 40001;
|
||||
server_config.start_miner = true;
|
||||
server_config.start_wallet = false;
|
||||
server_config.coinbase_wallet_address =
|
||||
String::from(format!("http://{}:{}", server_config.base_addr, 50002));
|
||||
let mut server_one = LocalServerContainer::new(server_config.clone()).unwrap();
|
||||
|
||||
// Spawn server and let it run for a bit
|
||||
let _ = thread::spawn(move || server_one.run_server(120));
|
||||
|
||||
//Wait for chain to build
|
||||
thread::sleep(time::Duration::from_millis(5000));
|
||||
|
||||
// Starting tests
|
||||
let base_addr = server_config.base_addr;
|
||||
let api_server_port = server_config.api_server_port;
|
||||
|
||||
warn!("Testing chain handler");
|
||||
let tip = get_tip(&base_addr, api_server_port);
|
||||
assert!(tip.is_ok());
|
||||
assert!(validate_chain(&base_addr, api_server_port).is_ok());
|
||||
|
||||
warn!("Testing status handler");
|
||||
let status = get_status(&base_addr, api_server_port);
|
||||
assert!(status.is_ok());
|
||||
|
||||
// Be sure that at least a block is mined by Travis
|
||||
let mut current_tip = get_tip(&base_addr, api_server_port).unwrap();
|
||||
while current_tip.height == 0 {
|
||||
thread::sleep(time::Duration::from_millis(1000));
|
||||
current_tip = get_tip(&base_addr, api_server_port).unwrap();
|
||||
}
|
||||
|
||||
warn!("Testing block handler");
|
||||
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
|
||||
assert!(last_block_by_height.is_ok());
|
||||
let block_hash = current_tip.last_block_pushed;
|
||||
let last_block_by_height_compact =
|
||||
get_block_by_height_compact(&base_addr, api_server_port, current_tip.height);
|
||||
assert!(last_block_by_height_compact.is_ok());
|
||||
|
||||
let unspent_commit = get_unspent_output(&last_block_by_height.unwrap()).unwrap();
|
||||
|
||||
let last_block_by_hash = get_block_by_hash(&base_addr, api_server_port, &block_hash);
|
||||
assert!(last_block_by_hash.is_ok());
|
||||
let last_block_by_hash_compact =
|
||||
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
|
||||
assert!(last_block_by_hash_compact.is_ok());
|
||||
|
||||
warn!("Testing header handler");
|
||||
let last_header_by_height =
|
||||
get_header_by_height(&base_addr, api_server_port, current_tip.height);
|
||||
assert!(last_header_by_height.is_ok());
|
||||
|
||||
let last_header_by_hash = get_header_by_hash(&base_addr, api_server_port, &block_hash);
|
||||
assert!(last_header_by_hash.is_ok());
|
||||
|
||||
let last_header_by_commit = get_header_by_commit(&base_addr, api_server_port, &unspent_commit);
|
||||
assert!(last_header_by_commit.is_ok());
|
||||
|
||||
warn!("Testing chain output handler");
|
||||
let start_height = 0;
|
||||
let end_height = current_tip.height;
|
||||
let outputs_by_height =
|
||||
get_outputs_by_height(&base_addr, api_server_port, start_height, end_height);
|
||||
assert!(outputs_by_height.is_ok());
|
||||
let ids = get_ids_from_block_outputs(outputs_by_height.unwrap());
|
||||
let outputs_by_ids1 = get_outputs_by_ids1(&base_addr, api_server_port, ids.clone());
|
||||
assert!(outputs_by_ids1.is_ok());
|
||||
let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone());
|
||||
assert!(outputs_by_ids2.is_ok());
|
||||
|
||||
warn!("Testing txhashset handler");
|
||||
let roots = get_txhashset_roots(&base_addr, api_server_port);
|
||||
assert!(roots.is_ok());
|
||||
let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0);
|
||||
assert!(last_10_outputs.is_ok());
|
||||
let last_5_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 5);
|
||||
assert!(last_5_outputs.is_ok());
|
||||
let last_10_rangeproofs = get_txhashset_lastrangeproofs(&base_addr, api_server_port, 0);
|
||||
assert!(last_10_rangeproofs.is_ok());
|
||||
let last_5_rangeproofs = get_txhashset_lastrangeproofs(&base_addr, api_server_port, 5);
|
||||
assert!(last_5_rangeproofs.is_ok());
|
||||
let last_10_kernels = get_txhashset_lastkernels(&base_addr, api_server_port, 0);
|
||||
assert!(last_10_kernels.is_ok());
|
||||
let last_5_kernels = get_txhashset_lastkernels(&base_addr, api_server_port, 5);
|
||||
assert!(last_5_kernels.is_ok());
|
||||
|
||||
//let some more mining happen, make sure nothing pukes
|
||||
thread::sleep(time::Duration::from_millis(5000));
|
||||
}
|
||||
|
||||
/// Creates 2 servers and test P2P API
|
||||
#[test]
|
||||
fn test_p2p() {
|
||||
init_test_logger();
|
||||
info!("starting test_p2p");
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
|
||||
let _test_name_dir = "test_servers";
|
||||
|
||||
// Spawn server and let it run for a bit
|
||||
let server_one_dir = "p2p_server_one";
|
||||
framework::clean_all_output(server_one_dir);
|
||||
let mut server_config_one = LocalServerContainerConfig::default();
|
||||
server_config_one.name = String::from(server_one_dir);
|
||||
server_config_one.p2p_server_port = 40002;
|
||||
server_config_one.api_server_port = 40003;
|
||||
server_config_one.start_miner = false;
|
||||
server_config_one.start_wallet = false;
|
||||
server_config_one.is_seeding = true;
|
||||
let mut server_one = LocalServerContainer::new(server_config_one.clone()).unwrap();
|
||||
let _ = thread::spawn(move || server_one.run_server(120));
|
||||
|
||||
thread::sleep(time::Duration::from_millis(1000));
|
||||
|
||||
// Spawn server and let it run for a bit
|
||||
let server_two_dir = "p2p_server_two";
|
||||
framework::clean_all_output(server_two_dir);
|
||||
let mut server_config_two = LocalServerContainerConfig::default();
|
||||
server_config_two.name = String::from(server_two_dir);
|
||||
server_config_two.p2p_server_port = 40004;
|
||||
server_config_two.api_server_port = 40005;
|
||||
server_config_two.start_miner = false;
|
||||
server_config_two.start_wallet = false;
|
||||
server_config_two.is_seeding = false;
|
||||
let mut server_two = LocalServerContainer::new(server_config_two.clone()).unwrap();
|
||||
server_two.add_peer(format!(
|
||||
"{}:{}",
|
||||
server_config_one.base_addr, server_config_one.p2p_server_port
|
||||
));
|
||||
let _ = thread::spawn(move || server_two.run_server(120));
|
||||
|
||||
// Let them do the handshake
|
||||
thread::sleep(time::Duration::from_millis(2000));
|
||||
|
||||
// Starting tests
|
||||
warn!("Starting P2P Tests");
|
||||
let base_addr = server_config_one.base_addr;
|
||||
let api_server_port = server_config_one.api_server_port;
|
||||
|
||||
// Check that peer all is also working
|
||||
let mut peers_all = get_all_peers(&base_addr, api_server_port);
|
||||
assert!(peers_all.is_ok());
|
||||
let pall = peers_all.unwrap();
|
||||
assert_eq!(pall.len(), 2);
|
||||
|
||||
// Check that when we get peer connected the peer is here
|
||||
let peers_connected = get_connected_peers(&base_addr, api_server_port);
|
||||
assert!(peers_connected.is_ok());
|
||||
let pc = peers_connected.unwrap();
|
||||
assert_eq!(pc.len(), 1);
|
||||
|
||||
// Check that the peer status is Healthy
|
||||
let addr = format!(
|
||||
"{}:{}",
|
||||
server_config_two.base_addr, server_config_two.p2p_server_port
|
||||
);
|
||||
let peer = get_peer(&base_addr, api_server_port, &addr);
|
||||
assert!(peer.is_ok());
|
||||
assert_eq!(peer.unwrap().flags, p2p::State::Healthy);
|
||||
|
||||
// Ban the peer
|
||||
let ban_result = ban_peer(&base_addr, api_server_port, &addr);
|
||||
assert!(ban_result.is_ok());
|
||||
thread::sleep(time::Duration::from_millis(2000));
|
||||
|
||||
// Check its status is banned with get peer
|
||||
let peer = get_peer(&base_addr, api_server_port, &addr);
|
||||
assert!(peer.is_ok());
|
||||
assert_eq!(peer.unwrap().flags, p2p::State::Banned);
|
||||
|
||||
// Check from peer all
|
||||
peers_all = get_all_peers(&base_addr, api_server_port);
|
||||
assert!(peers_all.is_ok());
|
||||
assert_eq!(peers_all.unwrap().len(), 2);
|
||||
|
||||
// Unban
|
||||
let unban_result = unban_peer(&base_addr, api_server_port, &addr);
|
||||
assert!(unban_result.is_ok());
|
||||
|
||||
// Check from peer connected
|
||||
let peers_connected = get_connected_peers(&base_addr, api_server_port);
|
||||
assert!(peers_connected.is_ok());
|
||||
assert_eq!(peers_connected.unwrap().len(), 0);
|
||||
|
||||
// Check its status is healthy with get peer
|
||||
let peer = get_peer(&base_addr, api_server_port, &addr);
|
||||
assert!(peer.is_ok());
|
||||
assert_eq!(peer.unwrap().flags, p2p::State::Healthy);
|
||||
}
|
||||
|
||||
// Tip handler function
|
||||
fn get_tip(base_addr: &String, api_server_port: u16) -> Result<api::Tip, Error> {
|
||||
let url = format!("http://{}:{}/v1/chain", base_addr, api_server_port);
|
||||
api::client::get::<api::Tip>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// Status handler function
|
||||
fn get_status(base_addr: &String, api_server_port: u16) -> Result<api::Status, Error> {
|
||||
let url = format!("http://{}:{}/v1/status", base_addr, api_server_port);
|
||||
api::client::get::<api::Status>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// Block handler functions
|
||||
fn get_block_by_height(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
height: u64,
|
||||
) -> Result<api::BlockPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/blocks/{}",
|
||||
base_addr, api_server_port, height
|
||||
);
|
||||
api::client::get::<api::BlockPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_block_by_height_compact(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
height: u64,
|
||||
) -> Result<api::CompactBlockPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/blocks/{}?compact",
|
||||
base_addr, api_server_port, height
|
||||
);
|
||||
api::client::get::<api::CompactBlockPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_block_by_hash(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
block_hash: &String,
|
||||
) -> Result<api::BlockPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/blocks/{}",
|
||||
base_addr, api_server_port, block_hash
|
||||
);
|
||||
api::client::get::<api::BlockPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_header_by_commit(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
commit: &api::PrintableCommitment,
|
||||
) -> Result<api::BlockHeaderPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/headers/{}",
|
||||
base_addr,
|
||||
api_server_port,
|
||||
to_hex(commit.to_vec())
|
||||
);
|
||||
api::client::get::<api::BlockHeaderPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_block_by_hash_compact(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
block_hash: &String,
|
||||
) -> Result<api::CompactBlockPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/blocks/{}?compact",
|
||||
base_addr, api_server_port, block_hash
|
||||
);
|
||||
api::client::get::<api::CompactBlockPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// Header handler functions
|
||||
fn get_header_by_height(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
height: u64,
|
||||
) -> Result<api::BlockHeaderPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/headers/{}",
|
||||
base_addr, api_server_port, height
|
||||
);
|
||||
api::client::get::<api::BlockHeaderPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_header_by_hash(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
header_hash: &String,
|
||||
) -> Result<api::BlockHeaderPrintable, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/headers/{}",
|
||||
base_addr, api_server_port, header_hash
|
||||
);
|
||||
api::client::get::<api::BlockHeaderPrintable>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// Chain output handler functions
|
||||
fn get_outputs_by_ids1(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
ids: Vec<String>,
|
||||
) -> Result<Vec<api::Output>, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/chain/outputs/byids?id={}",
|
||||
base_addr,
|
||||
api_server_port,
|
||||
ids.join(",")
|
||||
);
|
||||
api::client::get::<Vec<api::Output>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_outputs_by_ids2(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
ids: Vec<String>,
|
||||
) -> Result<Vec<api::Output>, Error> {
|
||||
let mut ids_string: String = String::from("");
|
||||
for id in ids {
|
||||
ids_string = ids_string + "?id=" + &id;
|
||||
}
|
||||
let ids_string = String::from(&ids_string[1..ids_string.len()]);
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/chain/outputs/byids?{}",
|
||||
base_addr, api_server_port, ids_string
|
||||
);
|
||||
api::client::get::<Vec<api::Output>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_outputs_by_height(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
start_height: u64,
|
||||
end_height: u64,
|
||||
) -> Result<Vec<api::BlockOutputs>, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/chain/outputs/byheight?start_height={}&end_height={}",
|
||||
base_addr, api_server_port, start_height, end_height
|
||||
);
|
||||
api::client::get::<Vec<api::BlockOutputs>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn validate_chain(base_addr: &String, api_server_port: u16) -> Result<(), Error> {
|
||||
let url = format!("http://{}:{}/v1/chain/validate", base_addr, api_server_port);
|
||||
api::client::get_no_ret(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// TxHashSet handler functions
|
||||
fn get_txhashset_roots(base_addr: &String, api_server_port: u16) -> Result<api::TxHashSet, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/txhashset/roots",
|
||||
base_addr, api_server_port
|
||||
);
|
||||
api::client::get::<api::TxHashSet>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_txhashset_lastoutputs(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
n: u64,
|
||||
) -> Result<Vec<api::TxHashSetNode>, Error> {
|
||||
let url: String;
|
||||
if n == 0 {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastoutputs",
|
||||
base_addr, api_server_port
|
||||
);
|
||||
} else {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastoutputs?n={}",
|
||||
base_addr, api_server_port, n
|
||||
);
|
||||
}
|
||||
api::client::get::<Vec<api::TxHashSetNode>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_txhashset_lastrangeproofs(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
n: u64,
|
||||
) -> Result<Vec<api::TxHashSetNode>, Error> {
|
||||
let url: String;
|
||||
if n == 0 {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastrangeproofs",
|
||||
base_addr, api_server_port
|
||||
);
|
||||
} else {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastrangeproofs?n={}",
|
||||
base_addr, api_server_port, n
|
||||
);
|
||||
}
|
||||
api::client::get::<Vec<api::TxHashSetNode>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
fn get_txhashset_lastkernels(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
n: u64,
|
||||
) -> Result<Vec<api::TxHashSetNode>, Error> {
|
||||
let url: String;
|
||||
if n == 0 {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastkernels",
|
||||
base_addr, api_server_port
|
||||
);
|
||||
} else {
|
||||
url = format!(
|
||||
"http://{}:{}/v1/txhashset/lastkernels?n={}",
|
||||
base_addr, api_server_port, n
|
||||
);
|
||||
}
|
||||
api::client::get::<Vec<api::TxHashSetNode>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
pub fn ban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/peers/{}/ban",
|
||||
base_addr, api_server_port, peer_addr
|
||||
);
|
||||
api::client::post_no_ret(url.as_str(), None, &"").map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
pub fn unban_peer(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
peer_addr: &String,
|
||||
) -> Result<(), Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/peers/{}/unban",
|
||||
base_addr, api_server_port, peer_addr
|
||||
);
|
||||
api::client::post_no_ret(url.as_str(), None, &"").map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
pub fn get_peer(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
peer_addr: &String,
|
||||
) -> Result<p2p::PeerData, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/peers/{}",
|
||||
base_addr, api_server_port, peer_addr
|
||||
);
|
||||
api::client::get::<p2p::PeerData>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
pub fn get_connected_peers(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
) -> Result<Vec<p2p::types::PeerInfoDisplay>, Error> {
|
||||
let url = format!(
|
||||
"http://{}:{}/v1/peers/connected",
|
||||
base_addr, api_server_port
|
||||
);
|
||||
api::client::get::<Vec<p2p::types::PeerInfoDisplay>>(url.as_str(), None)
|
||||
.map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
pub fn get_all_peers(
|
||||
base_addr: &String,
|
||||
api_server_port: u16,
|
||||
) -> Result<Vec<p2p::PeerData>, Error> {
|
||||
let url = format!("http://{}:{}/v1/peers/all", base_addr, api_server_port);
|
||||
api::client::get::<Vec<p2p::PeerData>>(url.as_str(), None).map_err(|e| Error::API(e))
|
||||
}
|
||||
|
||||
// Helper function to get a vec of commitment output ids from a vec of block
|
||||
// outputs
|
||||
fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<String> {
|
||||
let mut ids: Vec<String> = Vec::new();
|
||||
for block_output in block_outputs {
|
||||
let outputs = &block_output.outputs;
|
||||
for output in outputs {
|
||||
ids.push(util::to_hex(output.clone().commit.0.to_vec()));
|
||||
}
|
||||
}
|
||||
ids.into_iter().take(100).collect()
|
||||
}
|
||||
|
||||
/// Return the commitment of the first unspent output in the block, if any.
fn get_unspent_output(block: &api::BlockPrintable) -> Option<api::PrintableCommitment> {
	block
		.outputs
		.iter()
		.find(|o| !o.spent)
		.map(|output| api::PrintableCommitment {
			commit: output.commit.clone(),
		})
}
|
||||
/// Error type wrapping underlying module errors.
/// Returned by all of the HTTP helper functions in this module.
#[derive(Debug)]
pub enum Error {
	/// Error originating from HTTP API calls.
	API(api::Error),
}
|
|
@ -1,157 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod framework;
|
||||
|
||||
use self::util::Mutex;
|
||||
use crate::framework::{LocalServerContainer, LocalServerContainerConfig};
|
||||
use grin_core as core;
|
||||
use grin_util as util;
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
|
||||
/// Start 1 node mining, 1 non mining node and two wallets.
|
||||
/// Then send a transaction from one wallet to another and propagate it a stem
|
||||
/// transaction but without stem relay and check if the transaction is still
|
||||
/// broadcasted.
|
||||
// Integration test: verifies that a stem (Dandelion) transaction which is
// never fluffed still gets broadcast after the node's stem timeout expires.
// Heavily timing-dependent (hard-coded sleeps), hence #[ignore]d by default.
#[test]
#[ignore]
fn test_dandelion_timeout() {
	let test_name_dir = "test_dandelion_timeout";
	core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
	// Wipe target/tmp/<test_name_dir> from any previous run.
	framework::clean_all_output(test_name_dir);
	let mut log_config = util::LoggingConfig::default();
	//log_config.stdout_log_level = util::LogLevel::Trace;
	log_config.stdout_log_level = util::LogLevel::Info;
	//init_logger(Some(log_config));
	util::init_test_logger();

	// Run a separate coinbase wallet for coinbase transactions
	let mut coinbase_config = LocalServerContainerConfig::default();
	coinbase_config.name = String::from("coinbase_wallet");
	coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
	coinbase_config.wallet_port = 10002;
	let coinbase_wallet = Arc::new(Mutex::new(
		LocalServerContainer::new(coinbase_config).unwrap(),
	));
	// Clone the wallet config out before the container moves into its thread.
	let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };

	let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);

	// Wallet listener blocks, so it runs on its own thread.
	let _ = thread::spawn(move || {
		let mut w = coinbase_wallet.lock();
		w.run_wallet(0);
	});

	let mut recp_config = LocalServerContainerConfig::default();
	recp_config.name = String::from("target_wallet");
	recp_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
	recp_config.wallet_port = 20002;
	let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
	let target_wallet_cloned = target_wallet.clone();
	let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };

	let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
	//Start up a second wallet, to receive
	let _ = thread::spawn(move || {
		let mut w = target_wallet_cloned.lock();
		w.run_wallet(0);
	});

	// Spawn server and let it run for a bit
	// server_one mines and pays coinbase rewards to the coinbase wallet above.
	let mut server_one_config = LocalServerContainerConfig::default();
	server_one_config.name = String::from("server_one");
	server_one_config.p2p_server_port = 30000;
	server_one_config.api_server_port = 30001;
	server_one_config.start_miner = true;
	server_one_config.start_wallet = false;
	server_one_config.is_seeding = false;
	server_one_config.coinbase_wallet_address =
		String::from(format!("http://{}:{}", server_one_config.base_addr, 10002));
	let mut server_one = LocalServerContainer::new(server_one_config).unwrap();

	// server_two is a non-mining seed node.
	let mut server_two_config = LocalServerContainerConfig::default();
	server_two_config.name = String::from("server_two");
	server_two_config.p2p_server_port = 40000;
	server_two_config.api_server_port = 40001;
	server_two_config.start_miner = false;
	server_two_config.start_wallet = false;
	server_two_config.is_seeding = true;
	let mut server_two = LocalServerContainer::new(server_two_config.clone()).unwrap();

	server_one.add_peer(format!(
		"{}:{}",
		server_two_config.base_addr, server_two_config.p2p_server_port
	));

	// Spawn servers and let them run for a bit
	let _ = thread::spawn(move || {
		server_two.run_server(120);
	});

	// Wait for the first server to start
	thread::sleep(time::Duration::from_millis(5000));

	let _ = thread::spawn(move || {
		server_one.run_server(120);
	});

	// Let them do a handshake and properly update their peer relay
	thread::sleep(time::Duration::from_millis(30000));

	//Wait until we have some funds to send
	// Poll the coinbase wallet until at least one reward has matured
	// (100_000_000_000 nanogrin = 100 grin), or give up after 10s.
	let mut coinbase_info =
		LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
	let mut slept_time = 0;
	while coinbase_info.amount_currently_spendable < 100000000000 {
		thread::sleep(time::Duration::from_millis(500));
		slept_time += 500;
		if slept_time > 10000 {
			panic!("Coinbase not confirming in time");
		}
		coinbase_info =
			LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
	}

	warn!("Sending 50 Grins to recipient wallet");

	// Sending stem transaction
	// fluff = false: the tx should sit in the stempool until the timeout fires.
	LocalServerContainer::send_amount_to(
		&coinbase_wallet_config,
		"50.00",
		1,
		"not_all",
		"http://127.0.0.1:20002",
		false,
	);

	let coinbase_info =
		LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
	println!("Coinbase wallet info: {:?}", coinbase_info);

	let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);

	// The transaction should be waiting in the node stempool thus cannot be mined.
	println!("Recipient wallet info: {:?}", recipient_info);
	assert!(recipient_info.amount_awaiting_confirmation == 50000000000);

	// Wait for stem timeout
	// After the timeout the tx must have been fluffed, mined and confirmed.
	thread::sleep(time::Duration::from_millis(35000));
	println!("Recipient wallet info: {:?}", recipient_info);
	let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
	assert!(recipient_info.amount_currently_spendable == 50000000000);
}
|
|
@ -1,674 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use self::keychain::Keychain;
|
||||
use self::p2p::PeerAddr;
|
||||
use self::util::Mutex;
|
||||
use self::wallet::{HTTPNodeClient, HTTPWalletCommAdapter, LMDBBackend, WalletConfig};
|
||||
use blake2_rfc as blake2;
|
||||
use grin_api as api;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_p2p as p2p;
|
||||
use grin_servers as servers;
|
||||
use grin_util as util;
|
||||
use grin_wallet as wallet;
|
||||
use std::default::Default;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use std::{fs, thread, time};
|
||||
|
||||
/// Just removes all results from previous runs
|
||||
pub fn clean_all_output(test_name_dir: &str) {
|
||||
let target_dir = format!("target/tmp/{}", test_name_dir);
|
||||
if let Err(e) = fs::remove_dir_all(target_dir) {
|
||||
println!("can't remove output from previous test :{}, may be ok", e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that can be returned by LocalServerContainer
#[derive(Debug)]
#[allow(dead_code)]
pub enum Error {
	/// Internal failure, with a detail message.
	Internal(String),
	/// Invalid argument, with a detail message.
	Argument(String),
	/// Requested item was not found.
	NotFound,
}
|
||||
|
||||
/// All-in-one server configuration struct, for convenience
///
#[derive(Clone)]
pub struct LocalServerContainerConfig {
	// user friendly name for the server, also denotes what dir
	// the data files will appear in
	pub name: String,

	// Base IP address
	pub base_addr: String,

	// Port the server (p2p) is running on
	pub p2p_server_port: u16,

	// Port the API server is running on
	pub api_server_port: u16,

	// Port the wallet server is running on
	pub wallet_port: u16,

	// Port the wallet owner API is running on
	pub owner_port: u16,

	// Whether to include the foreign API endpoints in the owner API
	pub owner_api_include_foreign: bool,

	// Whether we're going to mine
	pub start_miner: bool,

	// time in millis by which to artificially slow down the mining loop
	// in this container
	pub miner_slowdown_in_millis: u64,

	// Whether we're going to run a wallet as well,
	// can use same server instance as a validating node for convenience
	pub start_wallet: bool,

	// address of a server to use as a seed
	pub seed_addr: String,

	// keep track of whether this server is supposed to be seeding
	pub is_seeding: bool,

	// Whether to burn mining rewards
	pub burn_mining_rewards: bool,

	// full address to send coinbase rewards to
	pub coinbase_wallet_address: String,

	// When running a wallet, the address to check inputs and send
	// finalised transactions to,
	pub wallet_validating_node_url: String,
}
|
||||
|
||||
/// Default server config
/// Ports chosen to match grin's conventional defaults (13413/13414/13415);
/// pool/test code overrides them per instance to avoid collisions.
impl Default for LocalServerContainerConfig {
	fn default() -> LocalServerContainerConfig {
		LocalServerContainerConfig {
			name: String::from("test_host"),
			base_addr: String::from("127.0.0.1"),
			api_server_port: 13413,
			p2p_server_port: 13414,
			wallet_port: 13415,
			owner_port: 13420,
			owner_api_include_foreign: false,
			seed_addr: String::from(""),
			is_seeding: false,
			start_miner: false,
			start_wallet: false,
			burn_mining_rewards: false,
			coinbase_wallet_address: String::from(""),
			wallet_validating_node_url: String::from(""),
			miner_slowdown_in_millis: 0,
		}
	}
}
|
||||
|
||||
/// A top-level container to hold everything that might be running
/// on a server, i.e. server, wallet in send or receive mode

#[allow(dead_code)]
pub struct LocalServerContainer {
	// Configuration
	config: LocalServerContainerConfig,

	// Structure of references to the
	// internal server data
	pub p2p_server_stats: Option<servers::ServerStats>,

	// The API server instance
	api_server: Option<api::ApiServer>,

	// whether the server is running
	pub server_is_running: bool,

	// Whether the server is mining
	pub server_is_mining: bool,

	// Whether the server is also running a wallet
	// Not used if running wallet without server
	pub wallet_is_running: bool,

	// the list of peers to connect to
	pub peer_list: Vec<String>,

	// base directory for the server instance
	// (target/tmp/{config.name})
	pub working_dir: String,

	// Wallet configuration
	pub wallet_config: WalletConfig,
}
|
||||
|
||||
impl LocalServerContainer {
	/// Create a new local server container with defaults, with the given name
	/// all related files will be created in the directory
	/// target/tmp/{name}
	pub fn new(config: LocalServerContainerConfig) -> Result<LocalServerContainer, Error> {
		let working_dir = format!("target/tmp/{}", config.name);
		let mut wallet_config = WalletConfig::default();

		// Point the wallet at this container's ports/dirs.
		wallet_config.api_listen_port = config.wallet_port;
		wallet_config.check_node_api_http_addr = config.wallet_validating_node_url.clone();
		wallet_config.owner_api_include_foreign = Some(config.owner_api_include_foreign);
		wallet_config.data_file_dir = working_dir.clone();
		Ok(LocalServerContainer {
			config: config,
			p2p_server_stats: None,
			api_server: None,
			server_is_running: false,
			server_is_mining: false,
			wallet_is_running: false,
			working_dir: working_dir,
			peer_list: Vec::new(),
			wallet_config: wallet_config,
		})
	}

	/// Build and start the node server (plus, optionally, its wallet and a
	/// test miner per the config), connect it to the configured peers, and
	/// return the running server handle.
	/// NOTE(review): despite the name, `duration_in_seconds` is only forwarded
	/// to `run_wallet` (+5s); the server itself is not stopped here.
	pub fn run_server(&mut self, duration_in_seconds: u64) -> servers::Server {
		let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port);

		let mut seeding_type = p2p::Seeding::None;
		let mut seeds = Vec::new();

		// A non-empty seed_addr switches seeding to an explicit list.
		if self.config.seed_addr.len() > 0 {
			seeding_type = p2p::Seeding::List;
			seeds = vec![PeerAddr(self.config.seed_addr.parse().unwrap())];
		}

		let s = servers::Server::new(servers::ServerConfig {
			api_http_addr: api_addr,
			api_secret_path: None,
			db_root: format!("{}/.grin", self.working_dir),
			p2p_config: p2p::P2PConfig {
				port: self.config.p2p_server_port,
				seeds: Some(seeds),
				seeding_type: seeding_type,
				..p2p::P2PConfig::default()
			},
			chain_type: core::global::ChainTypes::AutomatedTesting,
			skip_sync_wait: Some(true),
			stratum_mining_config: None,
			..Default::default()
		})
		.unwrap();

		self.p2p_server_stats = Some(s.get_server_stats().unwrap());

		let mut wallet_url = None;

		if self.config.start_wallet == true {
			self.run_wallet(duration_in_seconds + 5);
			// give a second to start wallet before continuing
			thread::sleep(time::Duration::from_millis(1000));
			wallet_url = Some(format!(
				"http://{}:{}",
				self.config.base_addr, self.config.wallet_port
			));
		}

		if self.config.start_miner == true {
			println!(
				"starting test Miner on port {}",
				self.config.p2p_server_port
			);
			// Miner pays rewards to our own wallet if one was started above.
			s.start_test_miner(wallet_url, s.stop_state.clone());
		}

		for p in &self.peer_list {
			println!("{} connecting to peer: {}", self.config.p2p_server_port, p);
			let _ = s.connect_peer(PeerAddr(p.parse().unwrap()));
		}

		if self.wallet_is_running {
			self.stop_wallet();
		}

		s
	}

	/// Make a wallet for use in test endpoints (run_wallet and run_owner).
	/// Resets `self.wallet_config` to defaults rewired to this container's
	/// ports/dirs, (re)initialises the seed file, and opens an LMDB-backed
	/// wallet talking to the configured validating node.
	fn make_wallet_for_tests(
		&mut self,
	) -> Arc<Mutex<LMDBBackend<HTTPNodeClient, keychain::ExtKeychain>>> {
		// URL on which to start the wallet listener (i.e. api server)
		let _url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);

		// Just use the name of the server for a seed for now
		let seed = format!("{}", self.config.name);

		let _seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());

		println!(
			"Starting the Grin wallet receiving daemon on {} ",
			self.config.wallet_port
		);

		self.wallet_config = WalletConfig::default();

		self.wallet_config.api_listen_port = self.config.wallet_port;
		self.wallet_config.check_node_api_http_addr =
			self.config.wallet_validating_node_url.clone();
		self.wallet_config.data_file_dir = self.working_dir.clone();
		self.wallet_config.owner_api_include_foreign = Some(self.config.owner_api_include_foreign);

		let _ = fs::create_dir_all(self.wallet_config.clone().data_file_dir);
		let r = wallet::WalletSeed::init_file(&self.wallet_config, 32, None, "");

		let client_n = HTTPNodeClient::new(&self.wallet_config.check_node_api_http_addr, None);

		// Seed init failure is tolerated here — presumably it means the seed
		// file already exists from a previous call; TODO confirm.
		if let Err(_e) = r {
			//panic!("Error initializing wallet seed: {}", e);
		}

		let wallet: LMDBBackend<HTTPNodeClient, keychain::ExtKeychain> =
			LMDBBackend::new(self.wallet_config.clone(), "", client_n).unwrap_or_else(|e| {
				panic!(
					"Error creating wallet: {:?} Config: {:?}",
					e, self.wallet_config
				)
			});

		Arc::new(Mutex::new(wallet))
	}

	/// Starts a wallet daemon to receive
	/// Blocks the calling thread while the foreign listener runs.
	pub fn run_wallet(&mut self, _duration_in_mills: u64) {
		let wallet = self.make_wallet_for_tests();

		wallet::controller::foreign_listener(wallet, &self.wallet_config.api_listen_addr(), None)
			.unwrap_or_else(|e| {
				panic!(
					"Error creating wallet listener: {:?} Config: {:?}",
					e, self.wallet_config
				)
			});

		self.wallet_is_running = true;
	}

	/// Starts a wallet owner daemon
	/// Blocks the calling thread while the owner listener runs.
	#[allow(dead_code)]
	pub fn run_owner(&mut self) {
		let wallet = self.make_wallet_for_tests();

		// WalletConfig doesn't allow changing the owner API path, so we build
		// the path ourselves
		let owner_listen_addr = format!("127.0.0.1:{}", self.config.owner_port);

		wallet::controller::owner_listener(
			wallet,
			&owner_listen_addr,
			None,
			None,
			self.wallet_config.owner_api_include_foreign.clone(),
		)
		.unwrap_or_else(|e| {
			panic!(
				"Error creating wallet owner listener: {:?} Config: {:?}",
				e, self.wallet_config
			)
		});
	}

	/// Initialise (if needed) and read back the wallet seed for `config`.
	/// Panics if the seed file cannot be created or read.
	#[allow(dead_code)]
	pub fn get_wallet_seed(config: &WalletConfig) -> wallet::WalletSeed {
		let _ = fs::create_dir_all(config.clone().data_file_dir);
		wallet::WalletSeed::init_file(config, 32, None, "").unwrap();
		let wallet_seed =
			wallet::WalletSeed::from_file(config, "").expect("Failed to read wallet seed file.");
		wallet_seed
	}

	/// Refresh outputs against the node and return a summary of the wallet's
	/// balances (spendable / awaiting confirmation / etc).
	#[allow(dead_code)]
	pub fn get_wallet_info(
		config: &WalletConfig,
		wallet_seed: &wallet::WalletSeed,
	) -> wallet::WalletInfo {
		let keychain: keychain::ExtKeychain = wallet_seed
			.derive_keychain(false)
			.expect("Failed to derive keychain from seed file and passphrase.");
		let client_n = HTTPNodeClient::new(&config.check_node_api_http_addr, None);
		let mut wallet = LMDBBackend::new(config.clone(), "", client_n)
			.unwrap_or_else(|e| panic!("Error creating wallet: {:?} Config: {:?}", e, config));
		wallet.keychain = Some(keychain);
		let parent_id = keychain::ExtKeychain::derive_key_id(2, 0, 0, 0, 0);
		// Refresh result deliberately ignored: info is still retrievable from
		// the last known state if the node is unreachable.
		let _ =
			wallet::libwallet::internal::updater::refresh_outputs(&mut wallet, &parent_id, false);
		wallet::libwallet::internal::updater::retrieve_info(&mut wallet, &parent_id, 1).unwrap()
	}

	/// Build, send, finalize and lock a transaction of `amount` grin (as a
	/// human-readable string, e.g. "50.00") from the wallet described by
	/// `config` to the listener at `dest`. Panics on any wallet error.
	#[allow(dead_code)]
	pub fn send_amount_to(
		config: &WalletConfig,
		amount: &str,
		minimum_confirmations: u64,
		selection_strategy: &str,
		dest: &str,
		_fluff: bool,
	) {
		let amount = core::core::amount_from_hr_string(amount)
			.expect("Could not parse amount as a number with optional decimal point.");

		let wallet_seed =
			wallet::WalletSeed::from_file(config, "").expect("Failed to read wallet seed file.");

		let keychain: keychain::ExtKeychain = wallet_seed
			.derive_keychain(false)
			.expect("Failed to derive keychain from seed file and passphrase.");

		let client_n = HTTPNodeClient::new(&config.check_node_api_http_addr, None);
		let client_w = HTTPWalletCommAdapter::new();

		let change_outputs = 1;

		let mut wallet = LMDBBackend::new(config.clone(), "", client_n)
			.unwrap_or_else(|e| panic!("Error creating wallet: {:?} Config: {:?}", e, config));
		wallet.keychain = Some(keychain);
		let _ = wallet::controller::owner_single_use(Arc::new(Mutex::new(wallet)), |api| {
			// "all" selects every spendable output; anything else uses the
			// default smallest-set selection.
			let (mut slate, lock_fn) = api.initiate_tx(
				None,
				amount,
				minimum_confirmations,
				change_outputs,
				selection_strategy == "all",
				None,
			)?;
			slate = client_w.send_tx_sync(dest, &slate)?;
			api.finalize_tx(&mut slate)?;
			api.tx_lock_outputs(&slate, lock_fn)?;
			println!(
				"Tx sent: {} grin to {} (strategy '{}')",
				core::core::amount_to_hr_string(amount, false),
				dest,
				selection_strategy,
			);
			Ok(())
		})
		.unwrap_or_else(|e| panic!("Error creating wallet: {:?} Config: {:?}", e, config));
	}

	/// Stops the running wallet server
	/// Panics if no API server was ever stored on this container.
	pub fn stop_wallet(&mut self) {
		println!("Stop wallet!");
		let api_server = self.api_server.as_mut().unwrap();
		api_server.stop();
	}

	/// Adds a peer to this server to connect to upon running

	#[allow(dead_code)]
	pub fn add_peer(&mut self, addr: String) {
		self.peer_list.push(addr);
	}
}
|
||||
|
||||
/// Configuration values for container pool
/// Each server created by the pool gets base port + its index, so the base
/// ranges must be far enough apart not to collide.

pub struct LocalServerContainerPoolConfig {
	// Base name to append to all the servers in this pool
	pub base_name: String,

	// Base http address for all of the servers in this pool
	pub base_http_addr: String,

	// Base port server for all of the servers in this pool
	// Increment the number by 1 for each new server
	pub base_p2p_port: u16,

	// Base api port for all of the servers in this pool
	// Increment this number by 1 for each new server
	pub base_api_port: u16,

	// Base wallet port for this server
	//
	pub base_wallet_port: u16,

	// Base wallet owner port for this server
	//
	pub base_owner_port: u16,

	// How long the servers in the pool are going to run
	pub run_length_in_seconds: u64,
}
|
||||
|
||||
/// Default server config
/// Non-overlapping port ranges (10000/11000/12000/13000) leave room for up
/// to 1000 servers per pool.
impl Default for LocalServerContainerPoolConfig {
	fn default() -> LocalServerContainerPoolConfig {
		LocalServerContainerPoolConfig {
			base_name: String::from("test_pool"),
			base_http_addr: String::from("127.0.0.1"),
			base_p2p_port: 10000,
			base_api_port: 11000,
			base_wallet_port: 12000,
			base_owner_port: 13000,
			run_length_in_seconds: 30,
		}
	}
}
|
||||
|
||||
/// A convenience pool for running many servers simultaneously
/// without necessarily having to configure each one manually

#[allow(dead_code)]
pub struct LocalServerContainerPool {
	// configuration
	pub config: LocalServerContainerPoolConfig,

	// keep ahold of all the created servers thread-safely
	server_containers: Vec<LocalServerContainer>,

	// Keep track of what the last ports a server was opened on
	next_p2p_port: u16,

	next_api_port: u16,

	next_wallet_port: u16,

	next_owner_port: u16,

	// keep track of whether a seed exists, and pause a bit if so
	is_seeding: bool,
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl LocalServerContainerPool {
|
||||
pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool {
|
||||
(LocalServerContainerPool {
|
||||
next_api_port: config.base_api_port,
|
||||
next_p2p_port: config.base_p2p_port,
|
||||
next_wallet_port: config.base_wallet_port,
|
||||
next_owner_port: config.base_owner_port,
|
||||
config: config,
|
||||
server_containers: Vec::new(),
|
||||
is_seeding: false,
|
||||
})
|
||||
}
|
||||
|
||||
/// adds a single server on the next available port
|
||||
/// overriding passed-in values as necessary. Config object is an OUT value
|
||||
/// with
|
||||
/// ports/addresses filled in
|
||||
///
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
|
||||
// If we're calling it this way, need to override these
|
||||
server_config.p2p_server_port = self.next_p2p_port;
|
||||
server_config.api_server_port = self.next_api_port;
|
||||
server_config.wallet_port = self.next_wallet_port;
|
||||
server_config.owner_port = self.next_owner_port;
|
||||
|
||||
server_config.name = String::from(format!(
|
||||
"{}/{}-{}",
|
||||
self.config.base_name, self.config.base_name, server_config.p2p_server_port
|
||||
));
|
||||
|
||||
// Use self as coinbase wallet
|
||||
server_config.coinbase_wallet_address = String::from(format!(
|
||||
"http://{}:{}",
|
||||
server_config.base_addr, server_config.wallet_port
|
||||
));
|
||||
|
||||
self.next_p2p_port += 1;
|
||||
self.next_api_port += 1;
|
||||
self.next_wallet_port += 1;
|
||||
self.next_owner_port += 1;
|
||||
|
||||
if server_config.is_seeding {
|
||||
self.is_seeding = true;
|
||||
}
|
||||
|
||||
let _server_address = format!(
|
||||
"{}:{}",
|
||||
server_config.base_addr, server_config.p2p_server_port
|
||||
);
|
||||
|
||||
let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
|
||||
// self.server_containers.push(server_arc);
|
||||
|
||||
// Create a future that runs the server for however many seconds
|
||||
// collect them all and run them in the run_all_servers
|
||||
let _run_time = self.config.run_length_in_seconds;
|
||||
|
||||
self.server_containers.push(server_container);
|
||||
}
|
||||
|
||||
/// adds n servers, ready to run
|
||||
///
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
pub fn create_servers(&mut self, number: u16) {
|
||||
for _ in 0..number {
|
||||
// self.create_server();
|
||||
}
|
||||
}
|
||||
|
||||
/// runs all servers, and returns a vector of references to the servers
|
||||
/// once they've all been run
|
||||
///
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn run_all_servers(self) -> Arc<Mutex<Vec<servers::Server>>> {
|
||||
let run_length = self.config.run_length_in_seconds;
|
||||
let mut handles = vec![];
|
||||
|
||||
// return handles to all of the servers, wrapped in mutexes, handles, etc
|
||||
let return_containers = Arc::new(Mutex::new(Vec::new()));
|
||||
|
||||
let is_seeding = self.is_seeding.clone();
|
||||
|
||||
for mut s in self.server_containers {
|
||||
let return_container_ref = return_containers.clone();
|
||||
let handle = thread::spawn(move || {
|
||||
if is_seeding && !s.config.is_seeding {
|
||||
// there's a seed and we're not it, so hang around longer and give the seed
|
||||
// a chance to start
|
||||
thread::sleep(time::Duration::from_millis(2000));
|
||||
}
|
||||
let server_ref = s.run_server(run_length);
|
||||
return_container_ref.lock().push(server_ref);
|
||||
});
|
||||
// Not a big fan of sleeping hack here, but there appears to be a
|
||||
// concurrency issue when creating files in rocksdb that causes
|
||||
// failure if we don't pause a bit before starting the next server
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
for handle in handles {
|
||||
match handle.join() {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
println!("Error starting server thread: {:?}", e);
|
||||
panic!(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// return a much simplified version of the results
|
||||
return_containers.clone()
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn connect_all_peers(&mut self) {
|
||||
// just pull out all currently active servers, build a list,
|
||||
// and feed into all servers
|
||||
let mut server_addresses: Vec<String> = Vec::new();
|
||||
for s in &self.server_containers {
|
||||
let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);
|
||||
server_addresses.push(server_address);
|
||||
}
|
||||
|
||||
for a in server_addresses {
|
||||
for s in &mut self.server_containers {
|
||||
if format!("{}:{}", s.config.base_addr, s.config.p2p_server_port) != a {
|
||||
s.add_peer(a.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn stop_all_servers(servers: Arc<Mutex<Vec<servers::Server>>>) {
|
||||
let locked_servs = servers.lock();
|
||||
for s in locked_servs.deref() {
|
||||
s.stop();
|
||||
}
|
||||
}
|
||||
|
||||
/// Create and return a ServerConfig
/// Server `n` listens on p2p 10000+n / api 20000+n and seeds from the server
/// at index `seed_n`; data lives under target/tmp/{test_name_dir}/grin-sync-{n}.
#[allow(dead_code)]
pub fn config(n: u16, test_name_dir: &str, seed_n: u16) -> servers::ServerConfig {
	servers::ServerConfig {
		api_http_addr: format!("127.0.0.1:{}", 20000 + n),
		api_secret_path: None,
		db_root: format!("target/tmp/{}/grin-sync-{}", test_name_dir, n),
		p2p_config: p2p::P2PConfig {
			port: 10000 + n,
			seeding_type: p2p::Seeding::List,
			seeds: Some(vec![PeerAddr(
				format!("127.0.0.1:{}", 10000 + seed_n).parse().unwrap(),
			)]),
			..p2p::P2PConfig::default()
		},
		chain_type: core::global::ChainTypes::AutomatedTesting,
		archive_mode: Some(true),
		skip_sync_wait: Some(true),
		..Default::default()
	}
}
|
||||
|
||||
/// return stratum mining config
|
||||
#[allow(dead_code)]
|
||||
pub fn stratum_config() -> servers::common::types::StratumServerConfig {
|
||||
servers::common::types::StratumServerConfig {
|
||||
enable_stratum_server: Some(true),
|
||||
stratum_server_addr: Some(String::from("127.0.0.1:13416")),
|
||||
attempt_time_per_block: 60,
|
||||
minimum_share_difficulty: 1,
|
||||
wallet_listener_url: String::from("http://127.0.0.1:13415"),
|
||||
burn_reward: false,
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load diff
|
@ -1,177 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod framework;
|
||||
|
||||
use self::core::global::{self, ChainTypes};
|
||||
use crate::framework::{config, stratum_config};
|
||||
use bufstream::BufStream;
|
||||
use grin_core as core;
|
||||
use grin_servers as servers;
|
||||
use grin_util as util;
|
||||
use grin_util::{Mutex, StopState};
|
||||
use serde_json::Value;
|
||||
use std::io::prelude::{BufRead, Write};
|
||||
use std::net::TcpStream;
|
||||
use std::process;
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
|
||||
// Create a grin server, and a stratum server.
|
||||
// Simulate a few JSONRpc requests and verify the results.
|
||||
// Validate disconnected workers
|
||||
// Validate broadcasting new jobs
|
||||
#[test]
|
||||
fn basic_stratum_server() {
|
||||
util::init_test_logger();
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
|
||||
let test_name_dir = "stratum_server";
|
||||
framework::clean_all_output(test_name_dir);
|
||||
|
||||
// Create a server
|
||||
let s = servers::Server::new(config(4000, test_name_dir, 0)).unwrap();
|
||||
|
||||
// Get mining config with stratumserver enabled
|
||||
let mut stratum_cfg = stratum_config();
|
||||
stratum_cfg.burn_reward = true;
|
||||
stratum_cfg.attempt_time_per_block = 999;
|
||||
stratum_cfg.enable_stratum_server = Some(true);
|
||||
stratum_cfg.stratum_server_addr = Some(String::from("127.0.0.1:11101"));
|
||||
|
||||
// Start stratum server
|
||||
s.start_stratum_server(stratum_cfg);
|
||||
|
||||
// Wait for stratum server to start and
|
||||
// Verify stratum server accepts connections
|
||||
loop {
|
||||
if let Ok(_stream) = TcpStream::connect("127.0.0.1:11101") {
|
||||
break;
|
||||
} else {
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
}
|
||||
// As this stream falls out of scope it will be disconnected
|
||||
}
|
||||
info!("stratum server connected");
|
||||
|
||||
// Create a few new worker connections
|
||||
let mut workers = vec![];
|
||||
for _n in 0..5 {
|
||||
let w = TcpStream::connect("127.0.0.1:11101").unwrap();
|
||||
w.set_nonblocking(true)
|
||||
.expect("Failed to set TcpStream to non-blocking");
|
||||
let stream = BufStream::new(w);
|
||||
workers.push(stream);
|
||||
}
|
||||
assert!(workers.len() == 5);
|
||||
info!("workers length verification ok");
|
||||
|
||||
// Simulate a worker lost connection
|
||||
workers.remove(4);
|
||||
|
||||
// Swallow the genesis block
|
||||
thread::sleep(time::Duration::from_secs(5)); // Wait for the server to broadcast
|
||||
let mut response = String::new();
|
||||
for n in 0..workers.len() {
|
||||
let _result = workers[n].read_line(&mut response);
|
||||
}
|
||||
|
||||
// Verify a few stratum JSONRpc commands
|
||||
// getjobtemplate - expected block template result
|
||||
let mut response = String::new();
|
||||
let job_req = "{\"id\": \"Stratum\", \"jsonrpc\": \"2.0\", \"method\": \"getjobtemplate\"}\n";
|
||||
workers[2].write(job_req.as_bytes()).unwrap();
|
||||
workers[2].flush().unwrap();
|
||||
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
|
||||
match workers[2].read_line(&mut response) {
|
||||
Ok(_) => {
|
||||
let r: Value = serde_json::from_str(&response).unwrap();
|
||||
assert_eq!(r["error"], serde_json::Value::Null);
|
||||
assert_ne!(r["result"], serde_json::Value::Null);
|
||||
}
|
||||
Err(_e) => {
|
||||
assert!(false);
|
||||
}
|
||||
}
|
||||
info!("a few stratum JSONRpc commands verification ok");
|
||||
|
||||
// keepalive - expected "ok" result
|
||||
let mut response = String::new();
|
||||
let job_req = "{\"id\":\"3\",\"jsonrpc\":\"2.0\",\"method\":\"keepalive\"}\n";
|
||||
let ok_resp = "{\"id\":\"3\",\"jsonrpc\":\"2.0\",\"method\":\"keepalive\",\"result\":\"ok\",\"error\":null}\n";
|
||||
workers[2].write(job_req.as_bytes()).unwrap();
|
||||
workers[2].flush().unwrap();
|
||||
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
|
||||
let _st = workers[2].read_line(&mut response);
|
||||
assert_eq!(response.as_str(), ok_resp);
|
||||
info!("keepalive test ok");
|
||||
|
||||
// "doesnotexist" - error expected
|
||||
let mut response = String::new();
|
||||
let job_req = "{\"id\":\"4\",\"jsonrpc\":\"2.0\",\"method\":\"doesnotexist\"}\n";
|
||||
let ok_resp = "{\"id\":\"4\",\"jsonrpc\":\"2.0\",\"method\":\"doesnotexist\",\"result\":null,\"error\":{\"code\":-32601,\"message\":\"Method not found\"}}\n";
|
||||
workers[3].write(job_req.as_bytes()).unwrap();
|
||||
workers[3].flush().unwrap();
|
||||
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
|
||||
let _st = workers[3].read_line(&mut response);
|
||||
assert_eq!(response.as_str(), ok_resp);
|
||||
info!("worker doesnotexist test ok");
|
||||
|
||||
// Verify stratum server and worker stats
|
||||
let stats = s.get_server_stats().unwrap();
|
||||
assert_eq!(stats.stratum_stats.block_height, 1); // just 1 genesis block
|
||||
assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4
|
||||
assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed
|
||||
assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true);
|
||||
info!("stratum server and worker stats verification ok");
|
||||
|
||||
// Start mining blocks
|
||||
let stop = Arc::new(Mutex::new(StopState::new()));
|
||||
s.start_test_miner(None, stop.clone());
|
||||
info!("test miner started");
|
||||
|
||||
// This test is supposed to complete in 3 seconds,
|
||||
// so let's set a timeout on 10s to avoid infinite waiting happened in Travis-CI.
|
||||
let _handler = thread::spawn(|| {
|
||||
thread::sleep(time::Duration::from_secs(10));
|
||||
error!("basic_stratum_server test fail on timeout!");
|
||||
thread::sleep(time::Duration::from_millis(100));
|
||||
process::exit(1);
|
||||
});
|
||||
|
||||
// Simulate a worker lost connection
|
||||
workers.remove(1);
|
||||
|
||||
// Wait for a few mined blocks
|
||||
thread::sleep(time::Duration::from_secs(3));
|
||||
s.stop_test_miner(stop);
|
||||
|
||||
// Verify blocks are being broadcast to workers
|
||||
let expected = String::from("job");
|
||||
let mut jobtemplate = String::new();
|
||||
let _st = workers[2].read_line(&mut jobtemplate);
|
||||
let job_template: Value = serde_json::from_str(&jobtemplate).unwrap();
|
||||
assert_eq!(job_template["method"], expected);
|
||||
info!("blocks broadcasting to workers test ok");
|
||||
|
||||
// Verify stratum server and worker stats
|
||||
let stats = s.get_server_stats().unwrap();
|
||||
assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3
|
||||
assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed
|
||||
assert_ne!(stats.stratum_stats.block_height, 1);
|
||||
info!("basic_stratum_server test done and ok.");
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod framework;
|
||||
|
||||
use self::util::Mutex;
|
||||
use crate::framework::{LocalServerContainer, LocalServerContainerConfig};
|
||||
use grin_core as core;
|
||||
use grin_util as util;
|
||||
use grin_wallet as wallet;
|
||||
use std::sync::Arc;
|
||||
use std::{thread, time};
|
||||
|
||||
/// Start 1 node mining and two wallets, then send a few
|
||||
/// transactions from one to the other
|
||||
#[ignore]
|
||||
#[test]
|
||||
fn basic_wallet_transactions() {
|
||||
let test_name_dir = "test_servers";
|
||||
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
|
||||
framework::clean_all_output(test_name_dir);
|
||||
let mut log_config = util::LoggingConfig::default();
|
||||
//log_config.stdout_log_level = util::LogLevel::Trace;
|
||||
log_config.stdout_log_level = util::LogLevel::Info;
|
||||
//init_logger(Some(log_config));
|
||||
util::init_test_logger();
|
||||
|
||||
// Run a separate coinbase wallet for coinbase transactions
|
||||
let mut coinbase_config = LocalServerContainerConfig::default();
|
||||
coinbase_config.name = String::from("coinbase_wallet");
|
||||
coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
|
||||
coinbase_config.coinbase_wallet_address = String::from("http://127.0.0.1:13415");
|
||||
coinbase_config.wallet_port = 10002;
|
||||
let coinbase_wallet = Arc::new(Mutex::new(
|
||||
LocalServerContainer::new(coinbase_config).unwrap(),
|
||||
));
|
||||
let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };
|
||||
|
||||
let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
|
||||
let _ = thread::spawn(move || {
|
||||
let mut w = coinbase_wallet.lock();
|
||||
w.run_wallet(0);
|
||||
});
|
||||
|
||||
let mut recp_config = LocalServerContainerConfig::default();
|
||||
recp_config.name = String::from("target_wallet");
|
||||
recp_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
|
||||
recp_config.wallet_port = 20002;
|
||||
let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
|
||||
let target_wallet_cloned = target_wallet.clone();
|
||||
let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
|
||||
let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
|
||||
//Start up a second wallet, to receive
|
||||
let _ = thread::spawn(move || {
|
||||
let mut w = target_wallet_cloned.lock();
|
||||
w.run_wallet(0);
|
||||
});
|
||||
|
||||
let mut server_config = LocalServerContainerConfig::default();
|
||||
server_config.name = String::from("server_one");
|
||||
server_config.p2p_server_port = 30000;
|
||||
server_config.api_server_port = 30001;
|
||||
server_config.start_miner = true;
|
||||
server_config.start_wallet = false;
|
||||
server_config.coinbase_wallet_address =
|
||||
String::from(format!("http://{}:{}", server_config.base_addr, 10002));
|
||||
// Spawn server and let it run for a bit
|
||||
let _ = thread::spawn(move || {
|
||||
let mut server_one = LocalServerContainer::new(server_config).unwrap();
|
||||
server_one.run_server(120);
|
||||
});
|
||||
|
||||
//Wait until we have some funds to send
|
||||
let mut coinbase_info =
|
||||
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
|
||||
let mut slept_time = 0;
|
||||
while coinbase_info.amount_currently_spendable < 100000000000 {
|
||||
thread::sleep(time::Duration::from_millis(500));
|
||||
slept_time += 500;
|
||||
if slept_time > 10000 {
|
||||
panic!("Coinbase not confirming in time");
|
||||
}
|
||||
coinbase_info =
|
||||
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
|
||||
}
|
||||
warn!("Sending 50 Grins to recipient wallet");
|
||||
LocalServerContainer::send_amount_to(
|
||||
&coinbase_wallet_config,
|
||||
"50.00",
|
||||
1,
|
||||
"not_all",
|
||||
"http://127.0.0.1:20002",
|
||||
false,
|
||||
);
|
||||
|
||||
//Wait for a confirmation
|
||||
thread::sleep(time::Duration::from_millis(3000));
|
||||
let coinbase_info =
|
||||
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
|
||||
println!("Coinbase wallet info: {:?}", coinbase_info);
|
||||
|
||||
let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
|
||||
println!("Recipient wallet info: {:?}", recipient_info);
|
||||
assert!(recipient_info.amount_currently_spendable == 50000000000);
|
||||
|
||||
warn!("Sending many small transactions to recipient wallet");
|
||||
for _i in 0..10 {
|
||||
LocalServerContainer::send_amount_to(
|
||||
&coinbase_wallet_config,
|
||||
"1.00",
|
||||
1,
|
||||
"not_all",
|
||||
"http://127.0.0.1:20002",
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
thread::sleep(time::Duration::from_millis(10000));
|
||||
let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
|
||||
println!(
|
||||
"Recipient wallet info post little sends: {:?}",
|
||||
recipient_info
|
||||
);
|
||||
|
||||
assert!(recipient_info.amount_currently_spendable == 60000000000);
|
||||
//send some cash right back
|
||||
LocalServerContainer::send_amount_to(
|
||||
&recp_wallet_config,
|
||||
"25.00",
|
||||
1,
|
||||
"all",
|
||||
"http://127.0.0.1:10002",
|
||||
false,
|
||||
);
|
||||
|
||||
thread::sleep(time::Duration::from_millis(5000));
|
||||
|
||||
let coinbase_info =
|
||||
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
|
||||
println!("Coinbase wallet info final: {:?}", coinbase_info);
|
||||
}
|
||||
|
||||
/// Tests the owner_api_include_foreign configuration option.
|
||||
#[test]
|
||||
fn wallet_config_owner_api_include_foreign() {
|
||||
// Test setup
|
||||
let test_name_dir = "test_servers";
|
||||
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
|
||||
framework::clean_all_output(test_name_dir);
|
||||
let mut log_config = util::LoggingConfig::default();
|
||||
log_config.stdout_log_level = util::LogLevel::Info;
|
||||
util::init_test_logger();
|
||||
|
||||
// This is just used for testing whether the API endpoint exists
|
||||
// so we have nonsense values
|
||||
let block_fees = wallet::BlockFees {
|
||||
fees: 1,
|
||||
height: 2,
|
||||
key_id: None,
|
||||
};
|
||||
|
||||
let mut base_config = LocalServerContainerConfig::default();
|
||||
base_config.name = String::from("test_owner_api_include_foreign");
|
||||
base_config.start_wallet = true;
|
||||
|
||||
// Start up the wallet owner API with the default config, and make sure
|
||||
// we get an error when trying to hit the coinbase endpoint
|
||||
let mut default_config = base_config.clone();
|
||||
default_config.owner_port = 20005;
|
||||
let _ = thread::spawn(move || {
|
||||
let mut default_owner = LocalServerContainer::new(default_config).unwrap();
|
||||
default_owner.run_owner();
|
||||
});
|
||||
thread::sleep(time::Duration::from_millis(1000));
|
||||
assert!(wallet::create_coinbase("http://127.0.0.1:20005", &block_fees).is_err());
|
||||
|
||||
// Start up the wallet owner API with the owner_api_include_foreign setting set,
|
||||
// and confirm that we can hit the endpoint
|
||||
let mut foreign_config = base_config.clone();
|
||||
foreign_config.owner_port = 20006;
|
||||
foreign_config.owner_api_include_foreign = true;
|
||||
let _ = thread::spawn(move || {
|
||||
let mut owner_with_foreign = LocalServerContainer::new(foreign_config).unwrap();
|
||||
owner_with_foreign.run_owner();
|
||||
});
|
||||
thread::sleep(time::Duration::from_millis(1000));
|
||||
assert!(wallet::create_coinbase("http://127.0.0.1:20006", &block_fees).is_ok());
|
||||
}
|
|
@ -13,7 +13,7 @@
|
|||
// limitations under the License.
|
||||
|
||||
/// Grin configuration file output command
|
||||
use crate::config::{config, GlobalConfig, GlobalWalletConfig, GRIN_WALLET_DIR};
|
||||
use crate::config::GlobalConfig;
|
||||
use crate::core::global;
|
||||
use std::env;
|
||||
|
||||
|
@ -43,48 +43,3 @@ pub fn config_command_server(chain_type: &global::ChainTypes, file_name: &str) {
|
|||
file_name
|
||||
);
|
||||
}
|
||||
|
||||
/// Create a config file in the current directory
|
||||
pub fn config_command_wallet(chain_type: &global::ChainTypes, file_name: &str) {
|
||||
let mut default_config = GlobalWalletConfig::for_chain(chain_type);
|
||||
let current_dir = env::current_dir().unwrap_or_else(|e| {
|
||||
panic!("Error creating config file: {}", e);
|
||||
});
|
||||
let mut config_file_name = current_dir.clone();
|
||||
config_file_name.push(file_name);
|
||||
|
||||
let mut data_dir_name = current_dir.clone();
|
||||
data_dir_name.push(GRIN_WALLET_DIR);
|
||||
|
||||
if config_file_name.exists() && data_dir_name.exists() {
|
||||
panic!(
|
||||
"{} already exists in the target directory. Please remove it first",
|
||||
file_name
|
||||
);
|
||||
}
|
||||
|
||||
// just leave as is if file exists but there's no data dir
|
||||
if config_file_name.exists() {
|
||||
return;
|
||||
}
|
||||
|
||||
default_config.update_paths(¤t_dir);
|
||||
default_config
|
||||
.write_to_file(config_file_name.to_str().unwrap())
|
||||
.unwrap_or_else(|e| {
|
||||
panic!("Error creating config file: {}", e);
|
||||
});
|
||||
|
||||
println!(
|
||||
"File {} configured and created",
|
||||
config_file_name.to_str().unwrap(),
|
||||
);
|
||||
|
||||
let mut api_secret_path = current_dir.clone();
|
||||
api_secret_path.push(config::API_SECRET_FILE_NAME);
|
||||
if !api_secret_path.exists() {
|
||||
config::init_api_secret(&api_secret_path).unwrap();
|
||||
} else {
|
||||
config::check_api_secret(&api_secret_path).unwrap();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,11 +15,7 @@
|
|||
mod client;
|
||||
mod config;
|
||||
mod server;
|
||||
mod wallet;
|
||||
mod wallet_args;
|
||||
mod wallet_tests;
|
||||
|
||||
pub use self::client::client_command;
|
||||
pub use self::config::{config_command_server, config_command_wallet};
|
||||
pub use self::config::config_command_server;
|
||||
pub use self::server::server_command;
|
||||
pub use self::wallet::{seed_exists, wallet_command};
|
||||
|
|
|
@ -1,68 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::cmd::wallet_args;
|
||||
use crate::config::GlobalWalletConfig;
|
||||
use clap::ArgMatches;
|
||||
use grin_wallet::{self, HTTPNodeClient, WalletConfig, WalletSeed};
|
||||
use std::path::PathBuf;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn _init_wallet_seed(wallet_config: WalletConfig, password: &str) {
|
||||
if let Err(_) = WalletSeed::from_file(&wallet_config, password) {
|
||||
WalletSeed::init_file(&wallet_config, 32, None, password)
|
||||
.expect("Failed to create wallet seed file.");
|
||||
};
|
||||
}
|
||||
|
||||
pub fn seed_exists(wallet_config: WalletConfig) -> bool {
|
||||
let mut data_file_dir = PathBuf::new();
|
||||
data_file_dir.push(wallet_config.data_file_dir);
|
||||
data_file_dir.push(grin_wallet::SEED_FILE);
|
||||
if data_file_dir.exists() {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn wallet_command(wallet_args: &ArgMatches<'_>, config: GlobalWalletConfig) -> i32 {
|
||||
// just get defaults from the global config
|
||||
let wallet_config = config.members.unwrap().wallet;
|
||||
|
||||
// web wallet http server must be started from here
|
||||
// NB: Turned off for the time being
|
||||
/*let _ = match wallet_args.subcommand() {
|
||||
("web", Some(_)) => start_webwallet_server(),
|
||||
_ => {}
|
||||
};*/
|
||||
|
||||
let node_client = HTTPNodeClient::new(&wallet_config.check_node_api_http_addr, None);
|
||||
let res = wallet_args::wallet_command(wallet_args, wallet_config, node_client);
|
||||
|
||||
// we need to give log output a chance to catch up before exiting
|
||||
thread::sleep(Duration::from_millis(100));
|
||||
|
||||
if let Err(e) = res {
|
||||
println!("Wallet command failed: {}", e);
|
||||
1
|
||||
} else {
|
||||
println!(
|
||||
"Command '{}' completed successfully",
|
||||
wallet_args.subcommand().0
|
||||
);
|
||||
0
|
||||
}
|
||||
}
|
|
@ -1,630 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::api::TLSConfig;
|
||||
use crate::util::file::get_first_line;
|
||||
use crate::util::{Mutex, ZeroingString};
|
||||
/// Argument parsing and error handling for wallet commands
|
||||
use clap::ArgMatches;
|
||||
use failure::Fail;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_wallet::{command, instantiate_wallet, NodeClient, WalletConfig, WalletInst, WalletSeed};
|
||||
use grin_wallet::{Error, ErrorKind};
|
||||
use linefeed::terminal::Signal;
|
||||
use linefeed::{Interface, ReadResult};
|
||||
use rpassword;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
// define what to do on argument error
|
||||
macro_rules! arg_parse {
|
||||
( $r:expr ) => {
|
||||
match $r {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
return Err(ErrorKind::ArgumentError(format!("{}", e)).into());
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
/// Simple error definition, just so we can return errors from all commands
|
||||
/// and let the caller figure out what to do
|
||||
#[derive(Clone, Eq, PartialEq, Debug, Fail)]
|
||||
pub enum ParseError {
|
||||
#[fail(display = "Invalid Arguments: {}", _0)]
|
||||
ArgumentError(String),
|
||||
#[fail(display = "Parsing IO error: {}", _0)]
|
||||
IOError(String),
|
||||
#[fail(display = "User Cancelled")]
|
||||
CancelledError,
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for ParseError {
|
||||
fn from(e: std::io::Error) -> ParseError {
|
||||
ParseError::IOError(format!("{}", e))
|
||||
}
|
||||
}
|
||||
|
||||
fn prompt_password_stdout(prompt: &str) -> ZeroingString {
|
||||
ZeroingString::from(rpassword::prompt_password_stdout(prompt).unwrap())
|
||||
}
|
||||
|
||||
pub fn prompt_password(password: &Option<ZeroingString>) -> ZeroingString {
|
||||
match password {
|
||||
None => prompt_password_stdout("Password: "),
|
||||
Some(p) => p.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn prompt_password_confirm() -> ZeroingString {
|
||||
let mut first = ZeroingString::from("first");
|
||||
let mut second = ZeroingString::from("second");
|
||||
while first != second {
|
||||
first = prompt_password_stdout("Password: ");
|
||||
second = prompt_password_stdout("Confirm Password: ");
|
||||
}
|
||||
first
|
||||
}
|
||||
|
||||
fn prompt_replace_seed() -> Result<bool, ParseError> {
|
||||
let interface = Arc::new(Interface::new("replace_seed")?);
|
||||
interface.set_report_signal(Signal::Interrupt, true);
|
||||
interface.set_prompt("Replace seed? (y/n)> ")?;
|
||||
println!();
|
||||
println!("Existing wallet.seed file already exists. Continue?");
|
||||
println!("Continuing will back up your existing 'wallet.seed' file as 'wallet.seed.bak'");
|
||||
println!();
|
||||
loop {
|
||||
let res = interface.read_line()?;
|
||||
match res {
|
||||
ReadResult::Eof => return Ok(false),
|
||||
ReadResult::Signal(sig) => {
|
||||
if sig == Signal::Interrupt {
|
||||
interface.cancel_read_line()?;
|
||||
return Err(ParseError::CancelledError);
|
||||
}
|
||||
}
|
||||
ReadResult::Input(line) => match line.trim() {
|
||||
"Y" | "y" => return Ok(true),
|
||||
"N" | "n" => return Ok(false),
|
||||
_ => println!("Please respond y or n"),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn prompt_recovery_phrase() -> Result<ZeroingString, ParseError> {
|
||||
let interface = Arc::new(Interface::new("recover")?);
|
||||
let mut phrase = ZeroingString::from("");
|
||||
interface.set_report_signal(Signal::Interrupt, true);
|
||||
interface.set_prompt("phrase> ")?;
|
||||
loop {
|
||||
println!("Please enter your recovery phrase:");
|
||||
let res = interface.read_line()?;
|
||||
match res {
|
||||
ReadResult::Eof => break,
|
||||
ReadResult::Signal(sig) => {
|
||||
if sig == Signal::Interrupt {
|
||||
interface.cancel_read_line()?;
|
||||
return Err(ParseError::CancelledError);
|
||||
}
|
||||
}
|
||||
ReadResult::Input(line) => {
|
||||
if WalletSeed::from_mnemonic(&line).is_ok() {
|
||||
phrase = ZeroingString::from(line);
|
||||
break;
|
||||
} else {
|
||||
println!();
|
||||
println!("Recovery word phrase is invalid.");
|
||||
println!();
|
||||
interface.set_buffer(&line)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(phrase)
|
||||
}
|
||||
|
||||
// instantiate wallet (needed by most functions)
|
||||
|
||||
pub fn inst_wallet(
|
||||
config: WalletConfig,
|
||||
g_args: &command::GlobalArgs,
|
||||
node_client: impl NodeClient + 'static,
|
||||
) -> Result<Arc<Mutex<WalletInst<impl NodeClient + 'static, keychain::ExtKeychain>>>, ParseError> {
|
||||
let passphrase = prompt_password(&g_args.password);
|
||||
let res = instantiate_wallet(config.clone(), node_client, &passphrase, &g_args.account);
|
||||
match res {
|
||||
Ok(p) => Ok(p),
|
||||
Err(e) => {
|
||||
let msg = {
|
||||
match e.kind() {
|
||||
ErrorKind::Encryption => {
|
||||
format!("Error decrypting wallet seed (check provided password)")
|
||||
}
|
||||
_ => format!("Error instantiating wallet: {}", e),
|
||||
}
|
||||
};
|
||||
Err(ParseError::ArgumentError(msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parses a required value, or throws error with message otherwise
|
||||
fn parse_required<'a>(args: &'a ArgMatches, name: &str) -> Result<&'a str, ParseError> {
|
||||
let arg = args.value_of(name);
|
||||
match arg {
|
||||
Some(ar) => Ok(ar),
|
||||
None => {
|
||||
let msg = format!("Value for argument '{}' is required in this context", name,);
|
||||
Err(ParseError::ArgumentError(msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parses a number, or throws error with message otherwise
|
||||
fn parse_u64(arg: &str, name: &str) -> Result<u64, ParseError> {
|
||||
let val = arg.parse::<u64>();
|
||||
match val {
|
||||
Ok(v) => Ok(v),
|
||||
Err(e) => {
|
||||
let msg = format!("Could not parse {} as a whole number. e={}", name, e);
|
||||
Err(ParseError::ArgumentError(msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse_global_args(
|
||||
config: &WalletConfig,
|
||||
args: &ArgMatches,
|
||||
) -> Result<command::GlobalArgs, ParseError> {
|
||||
let account = parse_required(args, "account")?;
|
||||
let mut show_spent = false;
|
||||
if args.is_present("show_spent") {
|
||||
show_spent = true;
|
||||
}
|
||||
let node_api_secret = get_first_line(config.node_api_secret_path.clone());
|
||||
let password = match args.value_of("pass") {
|
||||
None => None,
|
||||
Some(p) => Some(ZeroingString::from(p)),
|
||||
};
|
||||
|
||||
let tls_conf = match config.tls_certificate_file.clone() {
|
||||
None => None,
|
||||
Some(file) => {
|
||||
let key = match config.tls_certificate_key.clone() {
|
||||
Some(k) => k,
|
||||
None => {
|
||||
let msg = format!("Private key for certificate is not set");
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
};
|
||||
Some(TLSConfig::new(file, key))
|
||||
}
|
||||
};
|
||||
|
||||
Ok(command::GlobalArgs {
|
||||
account: account.to_owned(),
|
||||
show_spent: show_spent,
|
||||
node_api_secret: node_api_secret,
|
||||
password: password,
|
||||
tls_conf: tls_conf,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_init_args(
|
||||
config: &WalletConfig,
|
||||
g_args: &command::GlobalArgs,
|
||||
args: &ArgMatches,
|
||||
) -> Result<command::InitArgs, ParseError> {
|
||||
if let Err(e) = WalletSeed::seed_file_exists(config) {
|
||||
let msg = format!("Not creating wallet - {}", e.inner);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
let list_length = match args.is_present("short_wordlist") {
|
||||
false => 32,
|
||||
true => 16,
|
||||
};
|
||||
let recovery_phrase = match args.is_present("recover") {
|
||||
true => Some(prompt_recovery_phrase()?),
|
||||
false => None,
|
||||
};
|
||||
|
||||
if recovery_phrase.is_some() {
|
||||
println!("Please provide a new password for the recovered wallet");
|
||||
} else {
|
||||
println!("Please enter a password for your new wallet");
|
||||
}
|
||||
|
||||
let password = match g_args.password.clone() {
|
||||
Some(p) => p,
|
||||
None => prompt_password_confirm(),
|
||||
};
|
||||
|
||||
Ok(command::InitArgs {
|
||||
list_length: list_length,
|
||||
password: password,
|
||||
config: config.clone(),
|
||||
recovery_phrase: recovery_phrase,
|
||||
restore: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_recover_args(
|
||||
config: &WalletConfig,
|
||||
g_args: &command::GlobalArgs,
|
||||
args: &ArgMatches,
|
||||
) -> Result<command::RecoverArgs, ParseError> {
|
||||
let (passphrase, recovery_phrase) = {
|
||||
match args.is_present("display") {
|
||||
true => (prompt_password(&g_args.password), None),
|
||||
false => {
|
||||
let cont = {
|
||||
if command::wallet_seed_exists(config).is_err() {
|
||||
prompt_replace_seed()?
|
||||
} else {
|
||||
true
|
||||
}
|
||||
};
|
||||
if !cont {
|
||||
return Err(ParseError::CancelledError);
|
||||
}
|
||||
let phrase = prompt_recovery_phrase()?;
|
||||
println!("Please provide a new password for the recovered wallet");
|
||||
(prompt_password_confirm(), Some(phrase.to_owned()))
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(command::RecoverArgs {
|
||||
passphrase: passphrase,
|
||||
recovery_phrase: recovery_phrase,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_listen_args(
|
||||
config: &mut WalletConfig,
|
||||
g_args: &mut command::GlobalArgs,
|
||||
args: &ArgMatches,
|
||||
) -> Result<command::ListenArgs, ParseError> {
|
||||
// listen args
|
||||
let pass = match g_args.password.clone() {
|
||||
Some(p) => Some(p.to_owned()),
|
||||
None => Some(prompt_password(&None)),
|
||||
};
|
||||
g_args.password = pass;
|
||||
if let Some(port) = args.value_of("port") {
|
||||
config.api_listen_port = port.parse().unwrap();
|
||||
}
|
||||
let method = parse_required(args, "method")?;
|
||||
Ok(command::ListenArgs {
|
||||
method: method.to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Parse arguments for the `account` subcommand.
///
/// A `-c <name>` flag requests creation of a new account; its absence means
/// "list existing accounts".
pub fn parse_account_args(account_args: &ArgMatches) -> Result<command::AccountArgs, ParseError> {
	let create = account_args.value_of("create").map(|s| s.to_owned());
	Ok(command::AccountArgs { create })
}
|
||||
|
||||
pub fn parse_send_args(args: &ArgMatches) -> Result<command::SendArgs, ParseError> {
|
||||
// amount
|
||||
let amount = parse_required(args, "amount")?;
|
||||
let amount = core::core::amount_from_hr_string(amount);
|
||||
let amount = match amount {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
let msg = format!(
|
||||
"Could not parse amount as a number with optional decimal point. e={}",
|
||||
e
|
||||
);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
};
|
||||
|
||||
// message
|
||||
let message = match args.is_present("message") {
|
||||
true => Some(args.value_of("message").unwrap().to_owned()),
|
||||
false => None,
|
||||
};
|
||||
|
||||
// minimum_confirmations
|
||||
let min_c = parse_required(args, "minimum_confirmations")?;
|
||||
let min_c = parse_u64(min_c, "minimum_confirmations")?;
|
||||
|
||||
// selection_strategy
|
||||
let selection_strategy = parse_required(args, "selection_strategy")?;
|
||||
|
||||
// estimate_selection_strategies
|
||||
let estimate_selection_strategies = args.is_present("estimate_selection_strategies");
|
||||
|
||||
// method
|
||||
let method = parse_required(args, "method")?;
|
||||
|
||||
// dest
|
||||
let dest = {
|
||||
if method == "self" {
|
||||
match args.value_of("dest") {
|
||||
Some(d) => d,
|
||||
None => "default",
|
||||
}
|
||||
} else {
|
||||
if !estimate_selection_strategies {
|
||||
parse_required(args, "dest")?
|
||||
} else {
|
||||
""
|
||||
}
|
||||
}
|
||||
};
|
||||
if !estimate_selection_strategies
|
||||
&& method == "http"
|
||||
&& !dest.starts_with("http://")
|
||||
&& !dest.starts_with("https://")
|
||||
{
|
||||
let msg = format!(
|
||||
"HTTP Destination should start with http://: or https://: {}",
|
||||
dest,
|
||||
);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
|
||||
// change_outputs
|
||||
let change_outputs = parse_required(args, "change_outputs")?;
|
||||
let change_outputs = parse_u64(change_outputs, "change_outputs")? as usize;
|
||||
|
||||
// fluff
|
||||
let fluff = args.is_present("fluff");
|
||||
|
||||
Ok(command::SendArgs {
|
||||
amount: amount,
|
||||
message: message,
|
||||
minimum_confirmations: min_c,
|
||||
selection_strategy: selection_strategy.to_owned(),
|
||||
estimate_selection_strategies,
|
||||
method: method.to_owned(),
|
||||
dest: dest.to_owned(),
|
||||
change_outputs: change_outputs,
|
||||
fluff: fluff,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_receive_args(receive_args: &ArgMatches) -> Result<command::ReceiveArgs, ParseError> {
|
||||
// message
|
||||
let message = match receive_args.is_present("message") {
|
||||
true => Some(receive_args.value_of("message").unwrap().to_owned()),
|
||||
false => None,
|
||||
};
|
||||
|
||||
// input
|
||||
let tx_file = parse_required(receive_args, "input")?;
|
||||
|
||||
// validate input
|
||||
if !Path::new(&tx_file).is_file() {
|
||||
let msg = format!("File {} not found.", &tx_file);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
|
||||
Ok(command::ReceiveArgs {
|
||||
input: tx_file.to_owned(),
|
||||
message: message,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_finalize_args(args: &ArgMatches) -> Result<command::FinalizeArgs, ParseError> {
|
||||
let fluff = args.is_present("fluff");
|
||||
let tx_file = parse_required(args, "input")?;
|
||||
|
||||
if !Path::new(&tx_file).is_file() {
|
||||
let msg = format!("File {} not found.", tx_file);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
Ok(command::FinalizeArgs {
|
||||
input: tx_file.to_owned(),
|
||||
fluff: fluff,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_info_args(args: &ArgMatches) -> Result<command::InfoArgs, ParseError> {
|
||||
// minimum_confirmations
|
||||
let mc = parse_required(args, "minimum_confirmations")?;
|
||||
let mc = parse_u64(mc, "minimum_confirmations")?;
|
||||
Ok(command::InfoArgs {
|
||||
minimum_confirmations: mc,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_txs_args(args: &ArgMatches) -> Result<command::TxsArgs, ParseError> {
|
||||
let tx_id = match args.value_of("id") {
|
||||
None => None,
|
||||
Some(tx) => Some(parse_u64(tx, "id")? as u32),
|
||||
};
|
||||
Ok(command::TxsArgs { id: tx_id })
|
||||
}
|
||||
|
||||
pub fn parse_repost_args(args: &ArgMatches) -> Result<command::RepostArgs, ParseError> {
|
||||
let tx_id = match args.value_of("id") {
|
||||
None => None,
|
||||
Some(tx) => Some(parse_u64(tx, "id")? as u32),
|
||||
};
|
||||
|
||||
let fluff = args.is_present("fluff");
|
||||
let dump_file = match args.value_of("dumpfile") {
|
||||
None => None,
|
||||
Some(d) => Some(d.to_owned()),
|
||||
};
|
||||
|
||||
Ok(command::RepostArgs {
|
||||
id: tx_id.unwrap(),
|
||||
dump_file: dump_file,
|
||||
fluff: fluff,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_cancel_args(args: &ArgMatches) -> Result<command::CancelArgs, ParseError> {
|
||||
let mut tx_id_string = "";
|
||||
let tx_id = match args.value_of("id") {
|
||||
None => None,
|
||||
Some(tx) => Some(parse_u64(tx, "id")? as u32),
|
||||
};
|
||||
let tx_slate_id = match args.value_of("txid") {
|
||||
None => None,
|
||||
Some(tx) => match tx.parse() {
|
||||
Ok(t) => {
|
||||
tx_id_string = tx;
|
||||
Some(t)
|
||||
}
|
||||
Err(e) => {
|
||||
let msg = format!("Could not parse txid parameter. e={}", e);
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
},
|
||||
};
|
||||
if (tx_id.is_none() && tx_slate_id.is_none()) || (tx_id.is_some() && tx_slate_id.is_some()) {
|
||||
let msg = format!("'id' (-i) or 'txid' (-t) argument is required.");
|
||||
return Err(ParseError::ArgumentError(msg));
|
||||
}
|
||||
Ok(command::CancelArgs {
|
||||
tx_id: tx_id,
|
||||
tx_slate_id: tx_slate_id,
|
||||
tx_id_string: tx_id_string.to_owned(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn wallet_command(
|
||||
wallet_args: &ArgMatches,
|
||||
mut wallet_config: WalletConfig,
|
||||
mut node_client: impl NodeClient + 'static,
|
||||
) -> Result<String, Error> {
|
||||
if let Some(t) = wallet_config.chain_type.clone() {
|
||||
core::global::set_mining_mode(t);
|
||||
}
|
||||
|
||||
if wallet_args.is_present("external") {
|
||||
wallet_config.api_listen_interface = "0.0.0.0".to_string();
|
||||
}
|
||||
|
||||
if let Some(dir) = wallet_args.value_of("data_dir") {
|
||||
wallet_config.data_file_dir = dir.to_string().clone();
|
||||
}
|
||||
|
||||
if let Some(sa) = wallet_args.value_of("api_server_address") {
|
||||
wallet_config.check_node_api_http_addr = sa.to_string().clone();
|
||||
}
|
||||
|
||||
let global_wallet_args = arg_parse!(parse_global_args(&wallet_config, &wallet_args));
|
||||
|
||||
node_client.set_node_url(&wallet_config.check_node_api_http_addr);
|
||||
node_client.set_node_api_secret(global_wallet_args.node_api_secret.clone());
|
||||
|
||||
// closure to instantiate wallet as needed by each subcommand
|
||||
let inst_wallet = || {
|
||||
let res = inst_wallet(wallet_config.clone(), &global_wallet_args, node_client);
|
||||
res.unwrap_or_else(|e| {
|
||||
println!("{}", e);
|
||||
std::process::exit(1);
|
||||
})
|
||||
};
|
||||
|
||||
let res = match wallet_args.subcommand() {
|
||||
("init", Some(args)) => {
|
||||
let a = arg_parse!(parse_init_args(&wallet_config, &global_wallet_args, &args));
|
||||
command::init(&global_wallet_args, a)
|
||||
}
|
||||
("recover", Some(args)) => {
|
||||
let a = arg_parse!(parse_recover_args(
|
||||
&wallet_config,
|
||||
&global_wallet_args,
|
||||
&args
|
||||
));
|
||||
command::recover(&wallet_config, a)
|
||||
}
|
||||
("listen", Some(args)) => {
|
||||
let mut c = wallet_config.clone();
|
||||
let mut g = global_wallet_args.clone();
|
||||
let a = arg_parse!(parse_listen_args(&mut c, &mut g, &args));
|
||||
command::listen(&wallet_config, &a, &g)
|
||||
}
|
||||
("owner_api", Some(_)) => {
|
||||
let mut g = global_wallet_args.clone();
|
||||
g.tls_conf = None;
|
||||
command::owner_api(inst_wallet(), &wallet_config, &g)
|
||||
}
|
||||
("web", Some(_)) => command::owner_api(inst_wallet(), &wallet_config, &global_wallet_args),
|
||||
("account", Some(args)) => {
|
||||
let a = arg_parse!(parse_account_args(&args));
|
||||
command::account(inst_wallet(), a)
|
||||
}
|
||||
("send", Some(args)) => {
|
||||
let a = arg_parse!(parse_send_args(&args));
|
||||
command::send(
|
||||
inst_wallet(),
|
||||
a,
|
||||
wallet_config.dark_background_color_scheme.unwrap_or(true),
|
||||
)
|
||||
}
|
||||
("receive", Some(args)) => {
|
||||
let a = arg_parse!(parse_receive_args(&args));
|
||||
command::receive(inst_wallet(), &global_wallet_args, a)
|
||||
}
|
||||
("finalize", Some(args)) => {
|
||||
let a = arg_parse!(parse_finalize_args(&args));
|
||||
command::finalize(inst_wallet(), a)
|
||||
}
|
||||
("info", Some(args)) => {
|
||||
let a = arg_parse!(parse_info_args(&args));
|
||||
command::info(
|
||||
inst_wallet(),
|
||||
&global_wallet_args,
|
||||
a,
|
||||
wallet_config.dark_background_color_scheme.unwrap_or(true),
|
||||
)
|
||||
}
|
||||
("outputs", Some(_)) => command::outputs(
|
||||
inst_wallet(),
|
||||
&global_wallet_args,
|
||||
wallet_config.dark_background_color_scheme.unwrap_or(true),
|
||||
),
|
||||
("txs", Some(args)) => {
|
||||
let a = arg_parse!(parse_txs_args(&args));
|
||||
command::txs(
|
||||
inst_wallet(),
|
||||
&global_wallet_args,
|
||||
a,
|
||||
wallet_config.dark_background_color_scheme.unwrap_or(true),
|
||||
)
|
||||
}
|
||||
("repost", Some(args)) => {
|
||||
let a = arg_parse!(parse_repost_args(&args));
|
||||
command::repost(inst_wallet(), a)
|
||||
}
|
||||
("cancel", Some(args)) => {
|
||||
let a = arg_parse!(parse_cancel_args(&args));
|
||||
command::cancel(inst_wallet(), a)
|
||||
}
|
||||
("restore", Some(_)) => command::restore(inst_wallet()),
|
||||
("check", Some(_)) => command::check_repair(inst_wallet()),
|
||||
_ => {
|
||||
let msg = format!("Unknown wallet command, use 'grin help wallet' for details");
|
||||
return Err(ErrorKind::ArgumentError(msg).into());
|
||||
}
|
||||
};
|
||||
if let Err(e) = res {
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(wallet_args.subcommand().0.to_owned())
|
||||
}
|
||||
}
|
|
@ -1,528 +0,0 @@
|
|||
// Copyright 2018 The Grin Developers
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Test wallet command line works as expected
|
||||
#[cfg(test)]
|
||||
mod wallet_tests {
|
||||
use clap;
|
||||
use grin_util as util;
|
||||
use grin_wallet;
|
||||
|
||||
use grin_wallet::test_framework::{self, LocalWalletClient, WalletProxy};
|
||||
|
||||
use clap::{App, ArgMatches};
|
||||
use grin_util::Mutex;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use std::{env, fs};
|
||||
|
||||
use grin_config::GlobalWalletConfig;
|
||||
use grin_core::global;
|
||||
use grin_core::global::ChainTypes;
|
||||
use grin_keychain::ExtKeychain;
|
||||
use grin_wallet::{LMDBBackend, WalletBackend, WalletConfig, WalletInst, WalletSeed};
|
||||
|
||||
use super::super::wallet_args;
|
||||
|
||||
fn clean_output_dir(test_dir: &str) {
|
||||
let _ = fs::remove_dir_all(test_dir);
|
||||
}
|
||||
|
||||
fn setup(test_dir: &str) {
|
||||
util::init_test_logger();
|
||||
clean_output_dir(test_dir);
|
||||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
}
|
||||
|
||||
/// Create a wallet config file in the given current directory
|
||||
pub fn config_command_wallet(
|
||||
dir_name: &str,
|
||||
wallet_name: &str,
|
||||
) -> Result<(), grin_wallet::Error> {
|
||||
let mut current_dir;
|
||||
let mut default_config = GlobalWalletConfig::default();
|
||||
current_dir = env::current_dir().unwrap_or_else(|e| {
|
||||
panic!("Error creating config file: {}", e);
|
||||
});
|
||||
current_dir.push(dir_name);
|
||||
current_dir.push(wallet_name);
|
||||
let _ = fs::create_dir_all(current_dir.clone());
|
||||
let mut config_file_name = current_dir.clone();
|
||||
config_file_name.push("grin-wallet.toml");
|
||||
if config_file_name.exists() {
|
||||
return Err(grin_wallet::ErrorKind::ArgumentError(
|
||||
"grin-wallet.toml already exists in the target directory. Please remove it first"
|
||||
.to_owned(),
|
||||
))?;
|
||||
}
|
||||
default_config.update_paths(¤t_dir);
|
||||
default_config
|
||||
.write_to_file(config_file_name.to_str().unwrap())
|
||||
.unwrap_or_else(|e| {
|
||||
panic!("Error creating config file: {}", e);
|
||||
});
|
||||
|
||||
println!(
|
||||
"File {} configured and created",
|
||||
config_file_name.to_str().unwrap(),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles setup and detection of paths for wallet
|
||||
pub fn initial_setup_wallet(dir_name: &str, wallet_name: &str) -> WalletConfig {
|
||||
let mut current_dir;
|
||||
current_dir = env::current_dir().unwrap_or_else(|e| {
|
||||
panic!("Error creating config file: {}", e);
|
||||
});
|
||||
current_dir.push(dir_name);
|
||||
current_dir.push(wallet_name);
|
||||
let _ = fs::create_dir_all(current_dir.clone());
|
||||
let mut config_file_name = current_dir.clone();
|
||||
config_file_name.push("grin-wallet.toml");
|
||||
GlobalWalletConfig::new(config_file_name.to_str().unwrap())
|
||||
.unwrap()
|
||||
.members
|
||||
.unwrap()
|
||||
.wallet
|
||||
}
|
||||
|
||||
fn get_wallet_subcommand<'a>(
|
||||
wallet_dir: &str,
|
||||
wallet_name: &str,
|
||||
args: ArgMatches<'a>,
|
||||
) -> ArgMatches<'a> {
|
||||
match args.subcommand() {
|
||||
("wallet", Some(wallet_args)) => {
|
||||
// wallet init command should spit out its config file then continue
|
||||
// (if desired)
|
||||
if let ("init", Some(init_args)) = wallet_args.subcommand() {
|
||||
if init_args.is_present("here") {
|
||||
let _ = config_command_wallet(wallet_dir, wallet_name);
|
||||
}
|
||||
}
|
||||
wallet_args.to_owned()
|
||||
}
|
||||
_ => ArgMatches::new(),
|
||||
}
|
||||
}
|
||||
//
|
||||
// Helper to create an instance of the LMDB wallet
|
||||
fn instantiate_wallet(
|
||||
mut wallet_config: WalletConfig,
|
||||
node_client: LocalWalletClient,
|
||||
passphrase: &str,
|
||||
account: &str,
|
||||
) -> Result<Arc<Mutex<WalletInst<LocalWalletClient, ExtKeychain>>>, grin_wallet::Error> {
|
||||
wallet_config.chain_type = None;
|
||||
// First test decryption, so we can abort early if we have the wrong password
|
||||
let _ = WalletSeed::from_file(&wallet_config, passphrase)?;
|
||||
let mut db_wallet = LMDBBackend::new(wallet_config.clone(), passphrase, node_client)?;
|
||||
db_wallet.set_parent_key_id_by_name(account)?;
|
||||
info!("Using LMDB Backend for wallet");
|
||||
Ok(Arc::new(Mutex::new(db_wallet)))
|
||||
}
|
||||
|
||||
fn execute_command(
|
||||
app: &App,
|
||||
test_dir: &str,
|
||||
wallet_name: &str,
|
||||
client: &LocalWalletClient,
|
||||
arg_vec: Vec<&str>,
|
||||
) -> Result<String, grin_wallet::Error> {
|
||||
let args = app.clone().get_matches_from(arg_vec);
|
||||
let args = get_wallet_subcommand(test_dir, wallet_name, args.clone());
|
||||
let mut config = initial_setup_wallet(test_dir, wallet_name);
|
||||
//unset chain type so it doesn't get reset
|
||||
config.chain_type = None;
|
||||
wallet_args::wallet_command(&args, config.clone(), client.clone())
|
||||
}
|
||||
|
||||
/// command line tests
|
||||
fn command_line_test_impl(test_dir: &str) -> Result<(), grin_wallet::Error> {
|
||||
setup(test_dir);
|
||||
// Create a new proxy to simulate server and wallet responses
|
||||
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> =
|
||||
WalletProxy::new(test_dir);
|
||||
let chain = wallet_proxy.chain.clone();
|
||||
|
||||
// load app yaml. If it don't exist, just say so and exit
|
||||
let yml = load_yaml!("../grin.yml");
|
||||
let app = App::from_yaml(yml);
|
||||
|
||||
// wallet init
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "init", "-h"];
|
||||
// should create new wallet file
|
||||
let client1 = LocalWalletClient::new("wallet1", wallet_proxy.tx.clone());
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec.clone())?;
|
||||
|
||||
// trying to init twice - should fail
|
||||
assert!(execute_command(&app, test_dir, "wallet1", &client1, arg_vec.clone()).is_err());
|
||||
let client1 = LocalWalletClient::new("wallet1", wallet_proxy.tx.clone());
|
||||
|
||||
// add wallet to proxy
|
||||
//let wallet1 = test_framework::create_wallet(&format!("{}/wallet1", test_dir), client1.clone());
|
||||
let config1 = initial_setup_wallet(test_dir, "wallet1");
|
||||
let wallet1 = instantiate_wallet(config1.clone(), client1.clone(), "password", "default")?;
|
||||
wallet_proxy.add_wallet("wallet1", client1.get_send_instance(), wallet1.clone());
|
||||
|
||||
// Create wallet 2
|
||||
let client2 = LocalWalletClient::new("wallet2", wallet_proxy.tx.clone());
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec.clone())?;
|
||||
|
||||
let config2 = initial_setup_wallet(test_dir, "wallet2");
|
||||
let wallet2 = instantiate_wallet(config2.clone(), client2.clone(), "password", "default")?;
|
||||
wallet_proxy.add_wallet("wallet2", client2.get_send_instance(), wallet2.clone());
|
||||
|
||||
// Set the wallet proxy listener running
|
||||
thread::spawn(move || {
|
||||
if let Err(e) = wallet_proxy.run() {
|
||||
error!("Wallet Proxy error: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
// Create some accounts in wallet 1
|
||||
let arg_vec = vec![
|
||||
"grin", "wallet", "-p", "password", "account", "-c", "mining",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"account",
|
||||
"-c",
|
||||
"account_1",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// Create some accounts in wallet 2
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"account",
|
||||
"-c",
|
||||
"account_1",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec.clone())?;
|
||||
// already exists
|
||||
assert!(execute_command(&app, test_dir, "wallet2", &client2, arg_vec).is_err());
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"account",
|
||||
"-c",
|
||||
"account_2",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec)?;
|
||||
|
||||
// let's see those accounts
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "account"];
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec)?;
|
||||
|
||||
// let's see those accounts
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "account"];
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec)?;
|
||||
|
||||
// Mine a bit into wallet 1 so we have something to send
|
||||
// (TODO: Be able to stop listeners so we can test this better)
|
||||
let wallet1 = instantiate_wallet(config1.clone(), client1.clone(), "password", "default")?;
|
||||
grin_wallet::controller::owner_single_use(wallet1.clone(), |api| {
|
||||
api.set_active_account("mining")?;
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
let mut bh = 10u64;
|
||||
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), bh as usize);
|
||||
|
||||
let very_long_message = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef\
|
||||
This part should all be truncated";
|
||||
|
||||
// Update info and check
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "-a", "mining", "info"];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// try a file exchange
|
||||
let file_name = format!("{}/tx1.part_tx", test_dir);
|
||||
let response_file_name = format!("{}/tx1.part_tx.response", test_dir);
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"send",
|
||||
"-m",
|
||||
"file",
|
||||
"-d",
|
||||
&file_name,
|
||||
"-g",
|
||||
very_long_message,
|
||||
"10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"account_1",
|
||||
"receive",
|
||||
"-i",
|
||||
&file_name,
|
||||
"-g",
|
||||
"Thanks, Yeast!",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet2", &client2, arg_vec.clone())?;
|
||||
|
||||
// shouldn't be allowed to receive twice
|
||||
assert!(execute_command(&app, test_dir, "wallet2", &client2, arg_vec).is_err());
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"finalize",
|
||||
"-i",
|
||||
&response_file_name,
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
bh += 1;
|
||||
|
||||
let wallet1 = instantiate_wallet(config1.clone(), client1.clone(), "password", "default")?;
|
||||
|
||||
// Check our transaction log, should have 10 entries
|
||||
grin_wallet::controller::owner_single_use(wallet1.clone(), |api| {
|
||||
api.set_active_account("mining")?;
|
||||
let (refreshed, txs) = api.retrieve_txs(true, None, None)?;
|
||||
assert!(refreshed);
|
||||
assert_eq!(txs.len(), bh as usize);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 10);
|
||||
bh += 10;
|
||||
|
||||
// update info for each
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "-a", "mining", "info"];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"account_1",
|
||||
"info",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet2", &client1, arg_vec)?;
|
||||
|
||||
// check results in wallet 2
|
||||
let wallet2 = instantiate_wallet(config2.clone(), client2.clone(), "password", "default")?;
|
||||
grin_wallet::controller::owner_single_use(wallet2.clone(), |api| {
|
||||
api.set_active_account("account_1")?;
|
||||
let (_, wallet1_info) = api.retrieve_summary_info(true, 1)?;
|
||||
assert_eq!(wallet1_info.last_confirmed_height, bh);
|
||||
assert_eq!(wallet1_info.amount_currently_spendable, 10_000_000_000);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
// Self-send to same account, using smallest strategy
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"send",
|
||||
"-m",
|
||||
"file",
|
||||
"-d",
|
||||
&file_name,
|
||||
"-g",
|
||||
"Love, Yeast, Smallest",
|
||||
"-s",
|
||||
"smallest",
|
||||
"10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"receive",
|
||||
"-i",
|
||||
&file_name,
|
||||
"-g",
|
||||
"Thanks, Yeast!",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec.clone())?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"finalize",
|
||||
"-i",
|
||||
&response_file_name,
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
bh += 1;
|
||||
|
||||
// Check our transaction log, should have bh entries + one for the self receive
|
||||
let wallet1 = instantiate_wallet(config1.clone(), client1.clone(), "password", "default")?;
|
||||
|
||||
grin_wallet::controller::owner_single_use(wallet1.clone(), |api| {
|
||||
api.set_active_account("mining")?;
|
||||
let (refreshed, txs) = api.retrieve_txs(true, None, None)?;
|
||||
assert!(refreshed);
|
||||
assert_eq!(txs.len(), bh as usize + 1);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
// Try using the self-send method, splitting up outputs for the fun of it
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"send",
|
||||
"-m",
|
||||
"self",
|
||||
"-d",
|
||||
"mining",
|
||||
"-g",
|
||||
"Self love",
|
||||
"-o",
|
||||
"3",
|
||||
"-s",
|
||||
"smallest",
|
||||
"10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
bh += 1;
|
||||
|
||||
// Check our transaction log, should have bh entries + 2 for the self receives
|
||||
let wallet1 = instantiate_wallet(config1.clone(), client1.clone(), "password", "default")?;
|
||||
|
||||
grin_wallet::controller::owner_single_use(wallet1.clone(), |api| {
|
||||
api.set_active_account("mining")?;
|
||||
let (refreshed, txs) = api.retrieve_txs(true, None, None)?;
|
||||
assert!(refreshed);
|
||||
assert_eq!(txs.len(), bh as usize + 2);
|
||||
Ok(())
|
||||
})?;
|
||||
|
||||
// Another file exchange, don't send, but unlock with repair command
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"send",
|
||||
"-m",
|
||||
"file",
|
||||
"-d",
|
||||
&file_name,
|
||||
"-g",
|
||||
"Ain't sending",
|
||||
"10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "check"];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// Another file exchange, cancel this time
|
||||
let arg_vec = vec![
|
||||
"grin",
|
||||
"wallet",
|
||||
"-p",
|
||||
"password",
|
||||
"-a",
|
||||
"mining",
|
||||
"send",
|
||||
"-m",
|
||||
"file",
|
||||
"-d",
|
||||
&file_name,
|
||||
"-g",
|
||||
"Ain't sending 2",
|
||||
"10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
let arg_vec = vec![
|
||||
"grin", "wallet", "-p", "password", "-a", "mining", "cancel", "-i", "26",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// txs and outputs (mostly spit out for a visual in test logs)
|
||||
let arg_vec = vec!["grin", "wallet", "-p", "password", "-a", "mining", "txs"];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// message output (mostly spit out for a visual in test logs)
|
||||
let arg_vec = vec![
|
||||
"grin", "wallet", "-p", "password", "-a", "mining", "txs", "-i", "10",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// txs and outputs (mostly spit out for a visual in test logs)
|
||||
let arg_vec = vec![
|
||||
"grin", "wallet", "-p", "password", "-a", "mining", "outputs",
|
||||
];
|
||||
execute_command(&app, test_dir, "wallet1", &client1, arg_vec)?;
|
||||
|
||||
// let logging finish
|
||||
thread::sleep(Duration::from_millis(200));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wallet_command_line() {
|
||||
let test_dir = "target/test_output/command_line";
|
||||
if let Err(e) = command_line_test_impl(test_dir) {
|
||||
panic!("Libwallet Error: {} - {}", e, e.backtrace().unwrap());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -19,7 +19,7 @@ extern crate clap;
|
|||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
use crate::config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME};
|
||||
use crate::config::config::SERVER_CONFIG_FILE_NAME;
|
||||
use crate::core::global;
|
||||
use crate::util::init_logger;
|
||||
use clap::App;
|
||||
|
@ -29,7 +29,6 @@ use grin_core as core;
|
|||
use grin_p2p as p2p;
|
||||
use grin_servers as servers;
|
||||
use grin_util as util;
|
||||
use std::process::exit;
|
||||
|
||||
mod cmd;
|
||||
pub mod tui;
|
||||
|
@ -72,8 +71,7 @@ fn main() {
|
|||
fn real_main() -> i32 {
|
||||
let yml = load_yaml!("grin.yml");
|
||||
let args = App::from_yaml(yml).get_matches();
|
||||
let mut wallet_config = None;
|
||||
let mut node_config = None;
|
||||
let node_config;
|
||||
|
||||
let chain_type = if args.is_present("floonet") {
|
||||
global::ChainTypes::Floonet
|
||||
|
@ -92,41 +90,11 @@ fn real_main() -> i32 {
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
("wallet", Some(wallet_args)) => {
|
||||
// wallet init command should spit out its config file then continue
|
||||
// (if desired)
|
||||
if let ("init", Some(init_args)) = wallet_args.subcommand() {
|
||||
if init_args.is_present("here") {
|
||||
cmd::config_command_wallet(&chain_type, WALLET_CONFIG_FILE_NAME);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
// Load relevant config
|
||||
match args.subcommand() {
|
||||
// If it's a wallet command, try and load a wallet config file
|
||||
("wallet", Some(wallet_args)) => {
|
||||
let mut w = config::initial_setup_wallet(&chain_type).unwrap_or_else(|e| {
|
||||
panic!("Error loading wallet configuration: {}", e);
|
||||
});
|
||||
if !cmd::seed_exists(w.members.as_ref().unwrap().wallet.clone()) {
|
||||
if "init" == wallet_args.subcommand().0 || "recover" == wallet_args.subcommand().0 {
|
||||
} else {
|
||||
println!("Wallet seed file doesn't exist. Run `grin wallet init` first");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
let mut l = w.members.as_mut().unwrap().logging.clone().unwrap();
|
||||
l.tui_running = Some(false);
|
||||
init_logger(Some(l));
|
||||
info!(
|
||||
"Using wallet configuration file at {}",
|
||||
w.config_file_path.as_ref().unwrap().to_str().unwrap()
|
||||
);
|
||||
wallet_config = Some(w);
|
||||
}
|
||||
// When the subscommand is 'server' take into account the 'config_file' flag
|
||||
("server", Some(server_args)) => {
|
||||
if let Some(_path) = server_args.value_of("config_file") {
|
||||
|
@ -184,9 +152,6 @@ fn real_main() -> i32 {
|
|||
// client commands and options
|
||||
("client", Some(client_args)) => cmd::client_command(client_args, node_config.unwrap()),
|
||||
|
||||
// client commands and options
|
||||
("wallet", Some(wallet_args)) => cmd::wallet_command(wallet_args, wallet_config.unwrap()),
|
||||
|
||||
// If nothing is specified, try to just use the config file instead
|
||||
// this could possibly become the way to configure most things
|
||||
// with most command line options being phased out
|
||||
|
|
230
src/bin/grin.yml
230
src/bin/grin.yml
|
@ -71,233 +71,3 @@ subcommands:
|
|||
long: peer
|
||||
required: true
|
||||
takes_value: true
|
||||
- wallet:
|
||||
about: Wallet software for Grin
|
||||
args:
|
||||
- pass:
|
||||
help: Wallet passphrase used to encrypt wallet seed
|
||||
short: p
|
||||
long: pass
|
||||
takes_value: true
|
||||
- account:
|
||||
help: Wallet account to use for this operation
|
||||
short: a
|
||||
long: account
|
||||
takes_value: true
|
||||
default_value: default
|
||||
- data_dir:
|
||||
help: Directory in which to store wallet files
|
||||
short: dd
|
||||
long: data_dir
|
||||
takes_value: true
|
||||
- external:
|
||||
help: Listen on 0.0.0.0 interface to allow external connections (default is 127.0.0.1)
|
||||
short: e
|
||||
long: external
|
||||
takes_value: false
|
||||
- show_spent:
|
||||
help: Show spent outputs on wallet output commands
|
||||
short: s
|
||||
long: show_spent
|
||||
takes_value: false
|
||||
- api_server_address:
|
||||
help: Api address of running node on which to check inputs and post transactions
|
||||
short: r
|
||||
long: api_server_address
|
||||
takes_value: true
|
||||
subcommands:
|
||||
- account:
|
||||
about: List wallet accounts or create a new account
|
||||
args:
|
||||
- create:
|
||||
help: Create a new wallet account with provided name
|
||||
short: c
|
||||
long: create
|
||||
takes_value: true
|
||||
- listen:
|
||||
about: Runs the wallet in listening mode waiting for transactions
|
||||
args:
|
||||
- port:
|
||||
help: Port on which to run the wallet listener
|
||||
short: l
|
||||
long: port
|
||||
takes_value: true
|
||||
- method:
|
||||
help: Which method to use for communication
|
||||
short: m
|
||||
long: method
|
||||
possible_values:
|
||||
- http
|
||||
- keybase
|
||||
default_value: http
|
||||
takes_value: true
|
||||
- owner_api:
|
||||
about: Runs the wallet's local web API
|
||||
# Turned off, for now
|
||||
# - web:
|
||||
# about: Runs the local web wallet which can be accessed through a browser
|
||||
- send:
|
||||
about: Builds a transaction to send coins and sends to the specified listener directly
|
||||
args:
|
||||
- amount:
|
||||
help: Number of coins to send with optional fraction, e.g. 12.423
|
||||
index: 1
|
||||
- minimum_confirmations:
|
||||
help: Minimum number of confirmations required for an output to be spendable
|
||||
short: c
|
||||
long: min_conf
|
||||
default_value: "10"
|
||||
takes_value: true
|
||||
- selection_strategy:
|
||||
help: Coin/Output selection strategy.
|
||||
short: s
|
||||
long: selection
|
||||
possible_values:
|
||||
- all
|
||||
- smallest
|
||||
default_value: all
|
||||
takes_value: true
|
||||
- estimate_selection_strategies:
|
||||
help: Estimates all possible Coin/Output selection strategies.
|
||||
short: e
|
||||
long: estimate-selection
|
||||
- change_outputs:
|
||||
help: Number of change outputs to generate (mainly for testing)
|
||||
short: o
|
||||
long: change_outputs
|
||||
default_value: "1"
|
||||
takes_value: true
|
||||
- method:
|
||||
help: Method for sending this transaction
|
||||
short: m
|
||||
long: method
|
||||
possible_values:
|
||||
- http
|
||||
- file
|
||||
- self
|
||||
- keybase
|
||||
default_value: http
|
||||
takes_value: true
|
||||
- dest:
|
||||
help: Send the transaction to the provided server (start with http://) or save as file.
|
||||
short: d
|
||||
long: dest
|
||||
takes_value: true
|
||||
- fluff:
|
||||
help: Fluff the transaction (ignore Dandelion relay protocol)
|
||||
short: f
|
||||
long: fluff
|
||||
- message:
|
||||
help: Optional participant message to include
|
||||
short: g
|
||||
long: message
|
||||
takes_value: true
|
||||
- stored_tx:
|
||||
help: If present, use the previously stored Unconfirmed transaction with given id
|
||||
short: t
|
||||
long: stored_tx
|
||||
takes_value: true
|
||||
- receive:
|
||||
about: Processes a transaction file to accept a transfer from a sender
|
||||
args:
|
||||
- message:
|
||||
help: Optional participant message to include
|
||||
short: g
|
||||
long: message
|
||||
takes_value: true
|
||||
- input:
|
||||
help: Partial transaction to process, expects the sender's transaction file.
|
||||
short: i
|
||||
long: input
|
||||
takes_value: true
|
||||
- finalize:
|
||||
about: Processes a receiver's transaction file to finalize a transfer.
|
||||
args:
|
||||
- input:
|
||||
help: Partial transaction to process, expects the receiver's transaction file.
|
||||
short: i
|
||||
long: input
|
||||
takes_value: true
|
||||
- fluff:
|
||||
help: Fluff the transaction (ignore Dandelion relay protocol)
|
||||
short: f
|
||||
long: fluff
|
||||
- outputs:
|
||||
about: Raw wallet output info (list of outputs)
|
||||
- txs:
|
||||
about: Display transaction information
|
||||
args:
|
||||
- id:
|
||||
help: If specified, display transaction with given Id and all associated Inputs/Outputs
|
||||
short: i
|
||||
long: id
|
||||
takes_value: true
|
||||
- repost:
|
||||
about: Reposts a stored, completed but unconfirmed transaction to the chain, or dumps it to a file
|
||||
args:
|
||||
- id:
|
||||
help: Transaction ID containing the stored completed transaction
|
||||
short: i
|
||||
long: id
|
||||
takes_value: true
|
||||
- dumpfile:
|
||||
help: File name to duMp the transaction to instead of posting
|
||||
short: m
|
||||
long: dumpfile
|
||||
takes_value: true
|
||||
- fluff:
|
||||
help: Fluff the transaction (ignore Dandelion relay protocol)
|
||||
short: f
|
||||
long: fluff
|
||||
- cancel:
|
||||
about: Cancels an previously created transaction, freeing previously locked outputs for use again
|
||||
args:
|
||||
- id:
|
||||
help: The ID of the transaction to cancel
|
||||
short: i
|
||||
long: id
|
||||
takes_value: true
|
||||
- txid:
|
||||
help: The TxID UUID of the transaction to cancel
|
||||
short: t
|
||||
long: txid
|
||||
takes_value: true
|
||||
- info:
|
||||
about: Basic wallet contents summary
|
||||
args:
|
||||
- minimum_confirmations:
|
||||
help: Minimum number of confirmations required for an output to be spendable
|
||||
short: c
|
||||
long: min_conf
|
||||
default_value: "10"
|
||||
takes_value: true
|
||||
- init:
|
||||
about: Initialize a new wallet seed file and database
|
||||
args:
|
||||
- here:
|
||||
help: Create wallet files in the current directory instead of the default ~/.grin directory
|
||||
short: h
|
||||
long: here
|
||||
takes_value: false
|
||||
- short_wordlist:
|
||||
help: Generate a 12-word recovery phrase/seed instead of default 24
|
||||
short: s
|
||||
long: short_wordlist
|
||||
takes_value: false
|
||||
- recover:
|
||||
help: Initialize new wallet using a recovery phrase
|
||||
short: r
|
||||
long: recover
|
||||
takes_value: false
|
||||
- recover:
|
||||
about: Recover a wallet.seed file from a recovery phrase (default) or displays a recovery phrase for an existing seed file
|
||||
args:
|
||||
- display:
|
||||
help: Display wallet recovery phrase
|
||||
short: d
|
||||
long: display
|
||||
takes_value: false
|
||||
- restore:
|
||||
about: Restores a wallet contents from a seed file
|
||||
- check:
|
||||
about: Checks a wallet's outputs against a live node, repairing and restoring missing outputs if required
|
||||
|
|
|
@ -113,7 +113,7 @@ impl UI {
|
|||
.send(ControllerMessage::Shutdown)
|
||||
.unwrap();
|
||||
});
|
||||
grin_ui.cursive.set_fps(4);
|
||||
grin_ui.cursive.set_autorefresh(true);
|
||||
grin_ui
|
||||
}
|
||||
|
||||
|
|
|
@ -16,20 +16,10 @@
|
|||
|
||||
use built;
|
||||
|
||||
use reqwest;
|
||||
|
||||
use flate2::read::GzDecoder;
|
||||
use std::env;
|
||||
use std::fs::{self, File};
|
||||
use std::io::prelude::*;
|
||||
use std::io::Read;
|
||||
use std::path::{self, Path, PathBuf};
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
|
||||
use tar::Archive;
|
||||
|
||||
const _WEB_WALLET_TAG: &str = "0.3.0.1";
|
||||
|
||||
fn main() {
|
||||
// Setting up git hooks in the project: rustfmt and so on.
|
||||
let git_hooks = format!(
|
||||
|
@ -58,93 +48,4 @@ fn main() {
|
|||
env!("CARGO_MANIFEST_DIR"),
|
||||
format!("{}{}", env::var("OUT_DIR").unwrap(), "/built.rs"),
|
||||
);
|
||||
|
||||
// NB: Removed for the time being
|
||||
/*let web_wallet_install = install_web_wallet();
|
||||
match web_wallet_install {
|
||||
Ok(true) => {}
|
||||
_ => println!(
|
||||
"WARNING : Web wallet could not be installed due to {:?}",
|
||||
web_wallet_install
|
||||
),
|
||||
}*/
|
||||
}
|
||||
|
||||
fn _download_and_decompress(target_file: &str) -> Result<bool, Box<std::error::Error>> {
|
||||
let req_path = format!("https://github.com/mimblewimble/grin-web-wallet/releases/download/{}/grin-web-wallet.tar.gz", _WEB_WALLET_TAG);
|
||||
let mut resp = reqwest::get(&req_path)?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// read response
|
||||
let mut out: Vec<u8> = vec![];
|
||||
resp.read_to_end(&mut out)?;
|
||||
|
||||
// Gunzip
|
||||
let mut d = GzDecoder::new(&out[..]);
|
||||
let mut decomp: Vec<u8> = vec![];
|
||||
d.read_to_end(&mut decomp)?;
|
||||
|
||||
// write temp file
|
||||
let mut buffer = File::create(target_file.clone())?;
|
||||
buffer.write_all(&decomp)?;
|
||||
buffer.flush()?;
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Download and unzip tagged web-wallet build
|
||||
fn _install_web_wallet() -> Result<bool, Box<std::error::Error>> {
|
||||
let target_file = format!(
|
||||
"{}/grin-web-wallet-{}.tar",
|
||||
env::var("OUT_DIR")?,
|
||||
_WEB_WALLET_TAG
|
||||
);
|
||||
let out_dir = env::var("OUT_DIR")?;
|
||||
let mut out_path = PathBuf::from(&out_dir);
|
||||
out_path.pop();
|
||||
out_path.pop();
|
||||
out_path.pop();
|
||||
|
||||
// only re-download if needed
|
||||
println!("{}", target_file);
|
||||
if !Path::new(&target_file).is_file() {
|
||||
let success = _download_and_decompress(&target_file)?;
|
||||
if !success {
|
||||
return Ok(false); // could not download and decompress
|
||||
}
|
||||
}
|
||||
|
||||
// remove old version
|
||||
let mut remove_path = out_path.clone();
|
||||
remove_path.push("grin-wallet");
|
||||
let _ = fs::remove_dir_all(remove_path);
|
||||
|
||||
// Untar
|
||||
let file = File::open(target_file)?;
|
||||
let mut a = Archive::new(file);
|
||||
|
||||
for file in a.entries()? {
|
||||
let mut file = file?;
|
||||
let h = file.header().clone();
|
||||
let path = h.path()?.clone().into_owned();
|
||||
let is_dir = path.to_str().unwrap().ends_with(path::MAIN_SEPARATOR);
|
||||
let path = path.strip_prefix("dist")?;
|
||||
let mut final_path = out_path.clone();
|
||||
final_path.push(path);
|
||||
|
||||
let mut tmp: Vec<u8> = vec![];
|
||||
file.read_to_end(&mut tmp)?;
|
||||
if is_dir {
|
||||
fs::create_dir_all(final_path)?;
|
||||
} else {
|
||||
let mut buffer = File::create(final_path)?;
|
||||
buffer.write_all(&tmp)?;
|
||||
buffer.flush()?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(true)
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "grin_store"
|
||||
version = "1.0.3"
|
||||
version = "1.1.0-beta.2"
|
||||
authors = ["Grin Developers <mimblewimble@lists.launchpad.net>"]
|
||||
description = "Simple, private and scalable cryptocurrency implementation based on the MimbleWimble chain format."
|
||||
license = "Apache-2.0"
|
||||
|
@ -22,8 +22,8 @@ serde = "1"
|
|||
serde_derive = "1"
|
||||
log = "0.4"
|
||||
|
||||
grin_core = { path = "../core", version = "1.0.3" }
|
||||
grin_util = { path = "../util", version = "1.0.3" }
|
||||
grin_core = { path = "../core", version = "1.1.0-beta.2" }
|
||||
grin_util = { path = "../util", version = "1.1.0-beta.2" }
|
||||
|
||||
[dev-dependencies]
|
||||
chrono = "0.4.4"
|
||||
|
|
|
@ -27,11 +27,12 @@ use failure;
|
|||
extern crate failure_derive;
|
||||
#[macro_use]
|
||||
extern crate grin_core as core;
|
||||
extern crate grin_util as util;
|
||||
|
||||
//use grin_core as core;
|
||||
|
||||
pub mod leaf_set;
|
||||
mod lmdb;
|
||||
pub mod lmdb;
|
||||
pub mod pmmr;
|
||||
pub mod prune_list;
|
||||
pub mod types;
|
||||
|
|
|
@ -23,6 +23,14 @@ use lmdb_zero::traits::CreateCursor;
|
|||
use lmdb_zero::LmdbResultExt;
|
||||
|
||||
use crate::core::ser;
|
||||
use crate::util::{RwLock, RwLockReadGuard};
|
||||
|
||||
/// number of bytes to grow the database by when needed
|
||||
pub const ALLOC_CHUNK_SIZE: usize = 134_217_728; //128 MB
|
||||
const RESIZE_PERCENT: f32 = 0.9;
|
||||
/// Want to ensure that each resize gives us at least this %
|
||||
/// of total space free
|
||||
const RESIZE_MIN_TARGET_PERCENT: f32 = 0.65;
|
||||
|
||||
/// Main error type for this lmdb
|
||||
#[derive(Clone, Eq, PartialEq, Debug, Fail)]
|
||||
|
@ -54,77 +62,152 @@ pub fn option_to_not_found<T>(res: Result<Option<T>, Error>, field_name: &str) -
|
|||
}
|
||||
}
|
||||
|
||||
/// Create a new LMDB env under the provided directory.
|
||||
/// By default creates an environment named "lmdb".
|
||||
/// Be aware of transactional semantics in lmdb
|
||||
/// (transactions are per environment, not per database).
|
||||
pub fn new_env(path: String) -> lmdb::Environment {
|
||||
new_named_env(path, "lmdb".into(), None)
|
||||
}
|
||||
|
||||
/// TODO - We probably need more flexibility here, 500GB probably too big for peers...
|
||||
/// Create a new LMDB env under the provided directory with the provided name.
|
||||
pub fn new_named_env(path: String, name: String, max_readers: Option<u32>) -> lmdb::Environment {
|
||||
let full_path = [path, name].join("/");
|
||||
fs::create_dir_all(&full_path)
|
||||
.expect("Unable to create directory 'db_root' to store chain_data");
|
||||
|
||||
let mut env_builder = lmdb::EnvBuilder::new().unwrap();
|
||||
env_builder.set_maxdbs(8).unwrap();
|
||||
// half a TB should give us plenty room, will be an issue on 32 bits
|
||||
// (which we don't support anyway)
|
||||
|
||||
#[cfg(not(target_os = "windows"))]
|
||||
env_builder.set_mapsize(5_368_709_120).unwrap_or_else(|e| {
|
||||
panic!("Unable to allocate LMDB space: {:?}", e);
|
||||
});
|
||||
//TODO: This is temporary to support (beta) windows support
|
||||
//Windows allocates the entire file at once, so this needs to
|
||||
//be changed to allocate as little as possible and increase as needed
|
||||
#[cfg(target_os = "windows")]
|
||||
env_builder.set_mapsize(524_288_000).unwrap_or_else(|e| {
|
||||
panic!("Unable to allocate LMDB space: {:?}", e);
|
||||
});
|
||||
|
||||
if let Some(max_readers) = max_readers {
|
||||
env_builder
|
||||
.set_maxreaders(max_readers)
|
||||
.expect("Unable set max_readers");
|
||||
}
|
||||
unsafe {
|
||||
env_builder
|
||||
.open(&full_path, lmdb::open::NOTLS, 0o600)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// LMDB-backed store facilitating data access and serialization. All writes
|
||||
/// are done through a Batch abstraction providing atomicity.
|
||||
pub struct Store {
|
||||
env: Arc<lmdb::Environment>,
|
||||
db: Arc<lmdb::Database<'static>>,
|
||||
db: RwLock<Option<Arc<lmdb::Database<'static>>>>,
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl Store {
|
||||
/// Creates a new store with the provided name under the specified
|
||||
/// environment
|
||||
pub fn open(env: Arc<lmdb::Environment>, name: &str) -> Store {
|
||||
let db = Arc::new(
|
||||
lmdb::Database::open(
|
||||
env.clone(),
|
||||
Some(name),
|
||||
&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
|
||||
)
|
||||
.unwrap(),
|
||||
/// Create a new LMDB env under the provided directory.
|
||||
/// By default creates an environment named "lmdb".
|
||||
/// Be aware of transactional semantics in lmdb
|
||||
/// (transactions are per environment, not per database).
|
||||
pub fn new(
|
||||
root_path: &str,
|
||||
env_name: Option<&str>,
|
||||
db_name: Option<&str>,
|
||||
max_readers: Option<u32>,
|
||||
) -> Result<Store, Error> {
|
||||
let name = match env_name {
|
||||
Some(n) => n.to_owned(),
|
||||
None => "lmdb".to_owned(),
|
||||
};
|
||||
let db_name = match db_name {
|
||||
Some(n) => n.to_owned(),
|
||||
None => "lmdb".to_owned(),
|
||||
};
|
||||
let full_path = [root_path.to_owned(), name.clone()].join("/");
|
||||
fs::create_dir_all(&full_path)
|
||||
.expect("Unable to create directory 'db_root' to store chain_data");
|
||||
|
||||
let mut env_builder = lmdb::EnvBuilder::new().unwrap();
|
||||
env_builder.set_maxdbs(8)?;
|
||||
|
||||
if let Some(max_readers) = max_readers {
|
||||
env_builder.set_maxreaders(max_readers)?;
|
||||
}
|
||||
|
||||
let env = unsafe { env_builder.open(&full_path, lmdb::open::NOTLS, 0o600)? };
|
||||
|
||||
debug!(
|
||||
"DB Mapsize for {} is {}",
|
||||
full_path,
|
||||
env.info().as_ref().unwrap().mapsize
|
||||
);
|
||||
Store { env, db }
|
||||
let res = Store {
|
||||
env: Arc::new(env),
|
||||
db: RwLock::new(None),
|
||||
name: db_name,
|
||||
};
|
||||
|
||||
{
|
||||
let mut w = res.db.write();
|
||||
*w = Some(Arc::new(lmdb::Database::open(
|
||||
res.env.clone(),
|
||||
Some(&res.name),
|
||||
&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
|
||||
)?));
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Opens the database environment
|
||||
pub fn open(&self) -> Result<(), Error> {
|
||||
let mut w = self.db.write();
|
||||
*w = Some(Arc::new(lmdb::Database::open(
|
||||
self.env.clone(),
|
||||
Some(&self.name),
|
||||
&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
|
||||
)?));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Determines whether the environment needs a resize based on a simple percentage threshold
|
||||
pub fn needs_resize(&self) -> Result<bool, Error> {
|
||||
let env_info = self.env.info()?;
|
||||
let stat = self.env.stat()?;
|
||||
|
||||
let size_used = stat.psize as usize * env_info.last_pgno;
|
||||
trace!("DB map size: {}", env_info.mapsize);
|
||||
trace!("Space used: {}", size_used);
|
||||
trace!("Space remaining: {}", env_info.mapsize - size_used);
|
||||
let resize_percent = RESIZE_PERCENT;
|
||||
trace!(
|
||||
"Percent used: {:.*} Percent threshold: {:.*}",
|
||||
4,
|
||||
size_used as f64 / env_info.mapsize as f64,
|
||||
4,
|
||||
resize_percent
|
||||
);
|
||||
|
||||
if size_used as f32 / env_info.mapsize as f32 > resize_percent
|
||||
|| env_info.mapsize < ALLOC_CHUNK_SIZE
|
||||
{
|
||||
trace!("Resize threshold met (percent-based)");
|
||||
Ok(true)
|
||||
} else {
|
||||
trace!("Resize threshold not met (percent-based)");
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// Increments the database size by as many ALLOC_CHUNK_SIZES
|
||||
/// to give a minimum threshold of free space
|
||||
pub fn do_resize(&self) -> Result<(), Error> {
|
||||
let env_info = self.env.info()?;
|
||||
let stat = self.env.stat()?;
|
||||
let size_used = stat.psize as usize * env_info.last_pgno;
|
||||
|
||||
let new_mapsize = if env_info.mapsize < ALLOC_CHUNK_SIZE {
|
||||
ALLOC_CHUNK_SIZE
|
||||
} else {
|
||||
let mut tot = env_info.mapsize;
|
||||
while size_used as f32 / tot as f32 > RESIZE_MIN_TARGET_PERCENT {
|
||||
tot += ALLOC_CHUNK_SIZE;
|
||||
}
|
||||
tot
|
||||
};
|
||||
|
||||
// close
|
||||
let mut w = self.db.write();
|
||||
*w = None;
|
||||
|
||||
unsafe {
|
||||
self.env.set_mapsize(new_mapsize)?;
|
||||
}
|
||||
|
||||
*w = Some(Arc::new(lmdb::Database::open(
|
||||
self.env.clone(),
|
||||
Some(&self.name),
|
||||
&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
|
||||
)?));
|
||||
|
||||
info!(
|
||||
"Resized database from {} to {}",
|
||||
env_info.mapsize, new_mapsize
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets a value from the db, provided its key
|
||||
pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
||||
let db = self.db.read();
|
||||
let txn = lmdb::ReadTransaction::new(self.env.clone())?;
|
||||
let access = txn.access();
|
||||
let res = access.get(&self.db, key);
|
||||
let res = access.get(&db.as_ref().unwrap(), key);
|
||||
res.map(|res: &[u8]| res.to_vec())
|
||||
.to_opt()
|
||||
.map_err(From::from)
|
||||
|
@ -133,17 +216,19 @@ impl Store {
|
|||
/// Gets a `Readable` value from the db, provided its key. Encapsulates
|
||||
/// serialization.
|
||||
pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
|
||||
let db = self.db.read();
|
||||
let txn = lmdb::ReadTransaction::new(self.env.clone())?;
|
||||
let access = txn.access();
|
||||
self.get_ser_access(key, &access)
|
||||
self.get_ser_access(key, &access, db)
|
||||
}
|
||||
|
||||
fn get_ser_access<T: ser::Readable>(
|
||||
&self,
|
||||
key: &[u8],
|
||||
access: &lmdb::ConstAccessor<'_>,
|
||||
db: RwLockReadGuard<'_, Option<Arc<lmdb::Database<'static>>>>,
|
||||
) -> Result<Option<T>, Error> {
|
||||
let res: lmdb::error::Result<&[u8]> = access.get(&self.db, key);
|
||||
let res: lmdb::error::Result<&[u8]> = access.get(&db.as_ref().unwrap(), key);
|
||||
match res.to_opt() {
|
||||
Ok(Some(mut res)) => match ser::deserialize(&mut res) {
|
||||
Ok(res) => Ok(Some(res)),
|
||||
|
@ -156,17 +241,19 @@ impl Store {
|
|||
|
||||
/// Whether the provided key exists
|
||||
pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
|
||||
let db = self.db.read();
|
||||
let txn = lmdb::ReadTransaction::new(self.env.clone())?;
|
||||
let access = txn.access();
|
||||
let res: lmdb::error::Result<&lmdb::Ignore> = access.get(&self.db, key);
|
||||
let res: lmdb::error::Result<&lmdb::Ignore> = access.get(&db.as_ref().unwrap(), key);
|
||||
res.to_opt().map(|r| r.is_some()).map_err(From::from)
|
||||
}
|
||||
|
||||
/// Produces an iterator of (key, value) pairs, where values are `Readable` types
|
||||
/// moving forward from the provided key.
|
||||
pub fn iter<T: ser::Readable>(&self, from: &[u8]) -> Result<SerIterator<T>, Error> {
|
||||
let db = self.db.read();
|
||||
let tx = Arc::new(lmdb::ReadTransaction::new(self.env.clone())?);
|
||||
let cursor = Arc::new(tx.cursor(self.db.clone())?);
|
||||
let cursor = Arc::new(tx.cursor(db.as_ref().unwrap().clone()).unwrap());
|
||||
Ok(SerIterator {
|
||||
tx,
|
||||
cursor,
|
||||
|
@ -178,6 +265,10 @@ impl Store {
|
|||
|
||||
/// Builds a new batch to be used with this store.
|
||||
pub fn batch(&self) -> Result<Batch<'_>, Error> {
|
||||
// check if the db needs resizing before returning the batch
|
||||
if self.needs_resize()? {
|
||||
self.do_resize()?;
|
||||
}
|
||||
let txn = lmdb::WriteTransaction::new(self.env.clone())?;
|
||||
Ok(Batch {
|
||||
store: self,
|
||||
|
@ -195,9 +286,10 @@ pub struct Batch<'a> {
|
|||
impl<'a> Batch<'a> {
|
||||
/// Writes a single key/value pair to the db
|
||||
pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), Error> {
|
||||
let db = self.store.db.read();
|
||||
self.tx
|
||||
.access()
|
||||
.put(&self.store.db, key, value, lmdb::put::Flags::empty())?;
|
||||
.put(&db.as_ref().unwrap(), key, value, lmdb::put::Flags::empty())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -231,12 +323,14 @@ impl<'a> Batch<'a> {
|
|||
/// content of the current batch into account.
|
||||
pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
|
||||
let access = self.tx.access();
|
||||
self.store.get_ser_access(key, &access)
|
||||
let db = self.store.db.read();
|
||||
self.store.get_ser_access(key, &access, db)
|
||||
}
|
||||
|
||||
/// Deletes a key/value pair from the db
|
||||
pub fn delete(&self, key: &[u8]) -> Result<(), Error> {
|
||||
self.tx.access().del_key(&self.store.db, key)?;
|
||||
let db = self.store.db.read();
|
||||
self.tx.access().del_key(&db.as_ref().unwrap(), key)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue