Fix 658 and compiler complaints (#661)

* Tried but failed to fix `cargo build` complaint about unused #[macro_use]. See also 7a803a8dc1
* Compiler complaints be-gone
* Give sumtree tests method-tagged folder names so they don't overwrite each other's files
Fixes #658
This commit is contained in:
Simon B 2018-01-28 07:12:33 +01:00 committed by Ignotus Peverell
parent 783e4c250d
commit 86ff4e5bd0
6 changed files with 40 additions and 30 deletions

View file

@ -174,7 +174,7 @@ impl LocalServerContainer {
wallet_config.check_node_api_http_addr = config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = working_dir.clone();
Ok(
(LocalServerContainer {
LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
@ -184,7 +184,7 @@ impl LocalServerContainer {
working_dir: working_dir,
peer_list: Vec::new(),
wallet_config:wallet_config,
}),
},
)
}
@ -327,7 +327,7 @@ impl LocalServerContainer {
minimum_confirmations,
dest.to_string(),
max_outputs,
(selection_strategy == "all"),
selection_strategy == "all",
);
match result {
Ok(_) => {
@ -349,7 +349,7 @@ impl LocalServerContainer {
}
};
}
/// Stops the running wallet server
pub fn stop_wallet(&mut self) {
println!("Stop wallet!");
@ -483,7 +483,7 @@ impl LocalServerContainerPool {
// self.server_containers.push(server_arc);
// Create a future that runs the server for however many seconds
// collect them all and run them in the run_all_servers
// collect them all and run them in the run_all_servers
let _run_time = self.config.run_length_in_seconds;
self.server_containers.push(server_container);
@ -517,15 +517,15 @@ impl LocalServerContainerPool {
let handle = thread::spawn(move || {
if is_seeding && !s.config.is_seeding {
// there's a seed and we're not it, so hang around longer and give the seed
// a chance to start
// a chance to start
thread::sleep(time::Duration::from_millis(2000));
}
let server_ref = s.run_server(run_length);
return_container_ref.lock().unwrap().push(server_ref);
});
// Not a big fan of sleeping hack here, but there appears to be a
// concurrency issue when creating files in rocksdb that causes
// failure if we don't pause a bit before starting the next server
// concurrency issue when creating files in rocksdb that causes
// failure if we don't pause a bit before starting the next server
thread::sleep(time::Duration::from_millis(500));
handles.push(handle);
}

View file

@ -31,11 +31,9 @@ mod framework;
use std::{thread, time};
use std::sync::{Arc, Mutex};
use framework::{LocalServerContainer,
LocalServerContainerConfig,
LocalServerContainerPoolConfig};
use framework::{LocalServerContainer,LocalServerContainerConfig};
use util::{init_logger, LOGGER};
use util::LOGGER;
/// Start 1 node mining and two wallets, then send a few
/// transactions from one to the other
@ -110,4 +108,3 @@ fn basic_wallet_transactions() {
LocalServerContainer::send_amount_to(&recp_wallet_config, "25.00", 1, "all", "http://127.0.0.1:10002");
thread::sleep(time::Duration::from_millis(5000));
}

View file

@ -107,7 +107,7 @@ pub fn write_msg<T>(
where
T: Writeable + 'static,
{
let write_msg = ok((conn)).and_then(move |conn| {
let write_msg = ok(conn).and_then(move |conn| {
// prepare the body first so we know its serialized length
let mut body_buf = vec![];
ser::serialize(&mut body_buf, &msg).unwrap();

View file

@ -32,7 +32,8 @@ extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate rand;
extern crate serde;
#[macro_use]
#[allow(unused_imports)]
#[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug.
extern crate serde_derive;
#[macro_use]
extern crate slog;

View file

@ -499,7 +499,7 @@ fn wallet_command(wallet_args: &ArgMatches, global_config: GlobalConfig) {
minimum_confirmations,
dest.to_string(),
max_outputs,
(selection_strategy == "all"),
selection_strategy == "all",
);
match result {
Ok(_) => info!(

View file

@ -25,8 +25,8 @@ use core::core::hash::Hashed;
#[test]
fn sumtree_append() {
let (data_dir, elems) = setup();
let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap();
let (data_dir, elems) = setup("append");
let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
// adding first set of 4 elements and sync
let mut mmr_size = load(0, &elems[0..4], &mut backend);
@ -64,14 +64,16 @@ fn sumtree_append() {
let pmmr = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.root(), sum9);
}
teardown(data_dir);
}
#[test]
fn sumtree_prune_compact() {
let (data_dir, elems) = setup();
let (data_dir, elems) = setup("prune_compact");
// setup the mmr store with all elements
let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap();
let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
@ -105,17 +107,19 @@ fn sumtree_prune_compact() {
let pmmr = PMMR::at(&mut backend, mmr_size);
assert_eq!(root, pmmr.root());
}
teardown(data_dir);
}
#[test]
fn sumtree_reload() {
let (data_dir, elems) = setup();
let (data_dir, elems) = setup("reload");
// set everything up with a first backend
let mmr_size: u64;
let root: HashSum<TestElem>;
{
let mut backend = store::sumtree::PMMRBackend::new(data_dir.clone()).unwrap();
let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
@ -129,7 +133,7 @@ fn sumtree_reload() {
backend.sync().unwrap();
backend.check_compact(1).unwrap();
backend.sync().unwrap();
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
// prune some more to get rm log data
{
@ -137,25 +141,27 @@ fn sumtree_reload() {
pmmr.prune(5, 1).unwrap();
}
backend.sync().unwrap();
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
}
// create a new backend and check everything is kosher
{
let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap();
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
{
let pmmr = PMMR::at(&mut backend, mmr_size);
assert_eq!(root, pmmr.root());
}
assert_eq!(backend.get(5), None);
}
teardown(data_dir);
}
#[test]
fn sumtree_rewind() {
let (data_dir, elems) = setup();
let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap();
let (data_dir, elems) = setup("rewind");
let mut backend = store::sumtree::PMMRBackend::new(data_dir.clone()).unwrap();
// adding elements and keeping the corresponding root
let mut mmr_size = load(0, &elems[0..4], &mut backend);
@ -208,12 +214,14 @@ fn sumtree_rewind() {
let pmmr = PMMR::at(&mut backend, 7);
assert_eq!(pmmr.root(), root1);
}
teardown(data_dir);
}
fn setup() -> (String, Vec<TestElem>) {
fn setup(tag: &str) -> (String, Vec<TestElem>) {
let _ = env_logger::init();
let t = time::get_time();
let data_dir = format!("./target/{}.{}", t.sec, t.nsec);
let data_dir = format!("./target/{}.{}-{}", t.sec, t.nsec, tag);
fs::create_dir_all(data_dir.clone()).unwrap();
let elems = vec![
@ -230,6 +238,10 @@ fn setup() -> (String, Vec<TestElem>) {
(data_dir, elems)
}
/// Removes the per-test data directory created by `setup`, panicking if
/// deletion fails so a leaked directory is surfaced in the test run.
fn teardown(data_dir: String) {
fs::remove_dir_all(data_dir).unwrap();
}
fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
let mut pmmr = PMMR::at(backend, pos);
for elem in elems {