replace stdlib RwLock and Mutex with parking_lot (#1793)

* replace all stdlib RwLock with parking_lot RwLock

* replace stdlib Mutex with parking_lot Mutex

* rustfmt
Gary Yu 2018-10-20 08:13:07 +08:00 committed by GitHub
parent f2949efbfd
commit 0d06561a91
57 changed files with 324 additions and 280 deletions
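The change is almost entirely mechanical. std::sync::RwLock::read()/write() and Mutex::lock() return a Result so that lock poisoning can be surfaced, which forced an .unwrap() at every call site; the parking_lot equivalents have no poisoning and hand the guard back directly. A minimal sketch of the call-site pattern before and after (illustrative names, not from this diff):

use std::collections::HashMap;

fn len_std(map: &std::sync::RwLock<HashMap<u64, String>>) -> usize {
    // stdlib: acquisition is fallible because a panic while holding
    // the lock poisons it, hence the unwrap().
    map.read().unwrap().len()
}

fn len_parking_lot(map: &parking_lot::RwLock<HashMap<u64, String>>) -> usize {
    // parking_lot: no poisoning; read() returns the guard directly,
    // so the unwrap() disappears at every call site.
    map.read().len()
}

parking_lot locks are also smaller and generally faster than the stdlib ones, which is the usual motivation for this swap.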

Cargo.lock (generated)

@@ -888,6 +888,7 @@ dependencies = [
 "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
 "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+"parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "secp256k1zkp 0.7.1 (git+https://github.com/mimblewimble/rust-secp256k1-zkp?tag=grin_integration_28)",
 "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",


@@ -14,7 +14,8 @@
 use std::collections::HashMap;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, Weak};
+use util::RwLock;

 use failure::ResultExt;
 use futures::future::ok;
@@ -695,7 +696,7 @@ struct PoolInfoHandler {
 impl Handler for PoolInfoHandler {
     fn get(&self, _req: Request<Body>) -> ResponseFuture {
         let pool_arc = w(&self.tx_pool);
-        let pool = pool_arc.read().unwrap();
+        let pool = pool_arc.read();

         json_response(&PoolInfo {
             pool_size: pool.total_size(),
@@ -753,7 +754,7 @@ impl PoolPushHandler {
             );

             // Push to tx pool.
-            let mut tx_pool = pool_arc.write().unwrap();
+            let mut tx_pool = pool_arc.write();
             let header = tx_pool.blockchain.chain_head().unwrap();
             tx_pool
                 .add_to_pool(source, tx, !fluff, &header)
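Every file below gains a `use util::RwLock;` import (and, elsewhere in the diff, `use util::Mutex;`), which suggests grin_util now re-exports the parking_lot types so the rest of the workspace has a single switch point. The util side of the change is not shown in this excerpt; a plausible shape for the re-export, assuming parking_lot is a dependency of the util crate:

// util/src/lib.rs (assumed, not part of the diff shown above)
extern crate parking_lot;

// Downstream crates write `use util::RwLock;` / `use util::Mutex;`
// and transparently get the parking_lot implementations.
pub use parking_lot::{Mutex, RwLock};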


@@ -18,8 +18,9 @@
 use std::collections::HashMap;
 use std::fs::File;
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::{Duration, Instant};
+use util::RwLock;

 use lmdb;
 use lru_cache::LruCache;
@@ -75,7 +76,7 @@ impl OrphanBlockPool {
     }

     fn len(&self) -> usize {
-        let orphans = self.orphans.read().unwrap();
+        let orphans = self.orphans.read();
         orphans.len()
     }
@@ -84,8 +85,8 @@
     }

     fn add(&self, orphan: Orphan) {
-        let mut orphans = self.orphans.write().unwrap();
-        let mut height_idx = self.height_idx.write().unwrap();
+        let mut orphans = self.orphans.write();
+        let mut height_idx = self.height_idx.write();
         {
             let height_hashes = height_idx
                 .entry(orphan.block.header.height)
@@ -125,15 +126,15 @@
     /// Get an orphan from the pool indexed by the hash of its parent, removing
     /// it at the same time, preventing clone
     fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
-        let mut orphans = self.orphans.write().unwrap();
-        let mut height_idx = self.height_idx.write().unwrap();
+        let mut orphans = self.orphans.write();
+        let mut height_idx = self.height_idx.write();
         height_idx
             .remove(height)
             .map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
     }

     pub fn contains(&self, hash: &Hash) -> bool {
-        let orphans = self.orphans.read().unwrap();
+        let orphans = self.orphans.read();
         orphans.contains_key(hash)
     }
 }
@@ -221,7 +222,7 @@ impl Chain {
     fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
         let maybe_new_head: Result<Option<Tip>, Error>;
         {
-            let mut txhashset = self.txhashset.write().unwrap();
+            let mut txhashset = self.txhashset.write();
             let batch = self.store.batch()?;
             let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
@@ -235,7 +236,7 @@ impl Chain {
         let add_to_hash_cache = |hash: Hash| {
             // only add to hash cache below if block is definitively accepted
             // or rejected
-            let mut cache = self.block_hashes_cache.write().unwrap();
+            let mut cache = self.block_hashes_cache.write();
             cache.insert(hash, true);
         };
@@ -299,7 +300,7 @@ impl Chain {
     /// Process a block header received during "header first" propagation.
     pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let batch = self.store.batch()?;
         let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
         pipe::process_block_header(bh, &mut ctx)?;
@@ -315,7 +316,7 @@ impl Chain {
         headers: &Vec<BlockHeader>,
         opts: Options,
     ) -> Result<(), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let batch = self.store.batch()?;
         let mut ctx = self.new_ctx(opts, batch, &mut txhashset)?;
@@ -417,7 +418,7 @@ impl Chain {
     /// current chain state, specifically the current winning (valid, most
     /// work) fork.
     pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let res = txhashset.is_unspent(output_ref);
         match res {
             Err(e) => Err(e),
@@ -427,7 +428,7 @@ impl Chain {
     /// Validate the tx against the current UTXO set.
     pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
-        let txhashset = self.txhashset.read().unwrap();
+        let txhashset = self.txhashset.read();
         txhashset::utxo_view(&txhashset, |utxo| {
             utxo.validate_tx(tx)?;
             Ok(())
@@ -443,7 +444,7 @@ impl Chain {
     /// that has not yet sufficiently matured.
     pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
         let height = self.next_block_height()?;
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset::extending_readonly(&mut txhashset, |extension| {
             extension.verify_coinbase_maturity(&tx.inputs(), height)?;
             Ok(())
@@ -470,7 +471,7 @@ impl Chain {
             return Ok(());
         }

-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();

         // Now create an extension from the txhashset and validate against the
         // latest block header. Rewind the extension to the specified header to
@@ -485,7 +486,7 @@ impl Chain {
     /// Sets the txhashset roots on a brand new block by applying the block on
     /// the current txhashset state.
     pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let (prev_root, roots, sizes) =
             txhashset::extending_readonly(&mut txhashset, |extension| {
                 if is_fork {
@@ -526,7 +527,7 @@ impl Chain {
         output: &OutputIdentifier,
         block_header: &BlockHeader,
     ) -> Result<MerkleProof, Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();

         let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
             extension.rewind(&block_header)?;
@@ -539,13 +540,13 @@ impl Chain {
     /// Return a merkle proof valid for the current output pmmr state at the
     /// given pos
     pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset.merkle_proof(commit)
     }

     /// Returns current txhashset roots
     pub fn get_txhashset_roots(&self) -> TxHashSetRoots {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset.roots()
     }
@@ -560,7 +561,7 @@ impl Chain {
         // to rewind after receiving the txhashset zip.
         let header = self.get_block_header(&h)?;
         {
-            let mut txhashset = self.txhashset.write().unwrap();
+            let mut txhashset = self.txhashset.write();
             txhashset::extending_readonly(&mut txhashset, |extension| {
                 extension.rewind(&header)?;
                 extension.snapshot()?;
@@ -617,7 +618,7 @@ impl Chain {
     /// have an MMR we can safely rewind based on the headers received from a peer.
     /// TODO - think about how to optimize this.
     pub fn rebuild_sync_mmr(&self, head: &Tip) -> Result<(), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let mut batch = self.store.batch()?;
         txhashset::sync_extending(&mut txhashset, &mut batch, |extension| {
             extension.rebuild(head, &self.genesis)?;
@@ -733,7 +734,7 @@ impl Chain {
         // Replace the chain txhashset with the newly built one.
         {
-            let mut txhashset_ref = self.txhashset.write().unwrap();
+            let mut txhashset_ref = self.txhashset.write();
             *txhashset_ref = txhashset;
         }
@@ -772,7 +773,7 @@ impl Chain {
         debug!(LOGGER, "Starting blockchain compaction.");
         // Compact the txhashset via the extension.
         {
-            let mut txhashset = self.txhashset.write().unwrap();
+            let mut txhashset = self.txhashset.write();
             txhashset.compact()?;

             // print out useful debug info after compaction
@@ -836,19 +837,19 @@ impl Chain {
     /// returns the last n nodes inserted into the output sum tree
     pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset.last_n_output(distance)
     }

     /// as above, for rangeproofs
     pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset.last_n_rangeproof(distance)
     }

     /// as above, for kernels
     pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         txhashset.last_n_kernel(distance)
     }
@@ -858,7 +859,7 @@ impl Chain {
         start_index: u64,
         max: u64,
     ) -> Result<(u64, u64, Vec<Output>), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let max_index = txhashset.highest_output_insertion_index();
         let outputs = txhashset.outputs_by_insertion_index(start_index, max);
         let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
@@ -945,7 +946,7 @@ impl Chain {
         &self,
         output_ref: &OutputIdentifier,
     ) -> Result<BlockHeader, Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
+        let mut txhashset = self.txhashset.write();
         let (_, pos) = txhashset.is_unspent(output_ref)?;
         let mut min = 1;
         let mut max = {
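Throughout chain.rs the write guard is taken inside a tight `{ ... }` block; that scoping matters just as much with parking_lot, since the guard releases the lock when it is dropped at the end of the block. A small sketch of the idiom (simplified types):

use parking_lot::RwLock;

struct Chain {
    txhashset: RwLock<Vec<u8>>, // stand-in for the real TxHashSet
}

impl Chain {
    fn process(&self) {
        {
            // The write guard lives only for this block...
            let mut txhashset = self.txhashset.write();
            txhashset.push(1);
        } // ...and is dropped here, releasing the lock.

        // Readers (or another writer) can take the lock again below.
        let _len = self.txhashset.read().len();
    }
}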


@@ -14,7 +14,8 @@
 //! Implementation of the chain block acceptance (or refusal) pipeline.

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::Utc;
 use chrono::Duration;
@@ -288,7 +289,7 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(),
 /// Keeps duplicates from the network in check.
 /// Checks against the cache of recently processed block hashes.
 fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-    let mut cache = ctx.block_hashes_cache.write().unwrap();
+    let mut cache = ctx.block_hashes_cache.write();
     if cache.contains_key(&header.hash()) {
         return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
     }


@@ -14,7 +14,8 @@
 //! Implements storage primitives required by the chain

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use croaring::Bitmap;
 use lmdb;
@@ -96,7 +97,7 @@ impl ChainStore {
     pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
         {
-            let mut block_sums_cache = self.block_sums_cache.write().unwrap();
+            let mut block_sums_cache = self.block_sums_cache.write();

             // cache hit - return the value from the cache
             if let Some(block_sums) = block_sums_cache.get_mut(h) {
@@ -112,7 +113,7 @@ impl ChainStore {
         // cache miss - so adding to the cache for next time
         if let Ok(block_sums) = block_sums {
             {
-                let mut block_sums_cache = self.block_sums_cache.write().unwrap();
+                let mut block_sums_cache = self.block_sums_cache.write();
                 block_sums_cache.insert(*h, block_sums.clone());
             }
             Ok(block_sums)
@@ -123,7 +124,7 @@ impl ChainStore {
     pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
         {
-            let mut header_cache = self.header_cache.write().unwrap();
+            let mut header_cache = self.header_cache.write();

             // cache hit - return the value from the cache
             if let Some(header) = header_cache.get_mut(h) {
@@ -140,7 +141,7 @@ impl ChainStore {
         // cache miss - so adding to the cache for next time
         if let Ok(header) = header {
             {
-                let mut header_cache = self.header_cache.write().unwrap();
+                let mut header_cache = self.header_cache.write();
                 header_cache.insert(*h, header.clone());
             }
             Ok(header)
@@ -310,7 +311,7 @@ impl<'a> Batch<'a> {
         let hash = header.hash();
         {
-            let mut header_cache = self.header_cache.write().unwrap();
+            let mut header_cache = self.header_cache.write();
             header_cache.insert(hash, header.clone());
         }
@@ -350,7 +351,7 @@ impl<'a> Batch<'a> {
     pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
         {
-            let mut header_cache = self.header_cache.write().unwrap();
+            let mut header_cache = self.header_cache.write();

             // cache hit - return the value from the cache
             if let Some(header) = header_cache.get_mut(h) {
@@ -367,7 +368,7 @@ impl<'a> Batch<'a> {
         // cache miss - so adding to the cache for next time
         if let Ok(header) = header {
             {
-                let mut header_cache = self.header_cache.write().unwrap();
+                let mut header_cache = self.header_cache.write();
                 header_cache.insert(*h, header.clone());
             }
             Ok(header)
@@ -390,7 +391,7 @@ impl<'a> Batch<'a> {
     pub fn save_block_sums(&self, h: &Hash, sums: &BlockSums) -> Result<(), Error> {
         {
-            let mut block_sums_cache = self.block_sums_cache.write().unwrap();
+            let mut block_sums_cache = self.block_sums_cache.write();
             block_sums_cache.insert(*h, sums.clone());
         }
@@ -400,7 +401,7 @@ impl<'a> Batch<'a> {
     pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
         {
-            let mut block_sums_cache = self.block_sums_cache.write().unwrap();
+            let mut block_sums_cache = self.block_sums_cache.write();

             // cache hit - return the value from the cache
             if let Some(block_sums) = block_sums_cache.get_mut(h) {
@@ -416,7 +417,7 @@ impl<'a> Batch<'a> {
         // cache miss - so adding to the cache for next time
         if let Ok(block_sums) = block_sums {
             {
-                let mut block_sums_cache = self.block_sums_cache.write().unwrap();
+                let mut block_sums_cache = self.block_sums_cache.write();
                 block_sums_cache.insert(*h, block_sums.clone());
             }
             Ok(block_sums)
@@ -511,7 +512,7 @@ impl<'a> Batch<'a> {
         self.save_block_input_bitmap(&block.hash(), &bitmap)?;

         // Finally cache it locally for use later.
-        let mut cache = self.block_input_bitmap_cache.write().unwrap();
+        let mut cache = self.block_input_bitmap_cache.write();
         cache.insert(block.hash(), bitmap.serialize());

         Ok(bitmap)
@@ -519,7 +520,7 @@ impl<'a> Batch<'a> {
     pub fn get_block_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
         {
-            let mut cache = self.block_input_bitmap_cache.write().unwrap();
+            let mut cache = self.block_input_bitmap_cache.write();

             // cache hit - return the value from the cache
             if let Some(bytes) = cache.get_mut(bh) {


@@ -24,7 +24,8 @@ extern crate rand;

 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::Chain;


@@ -23,7 +23,8 @@ extern crate rand;

 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::Chain;


@@ -18,12 +18,14 @@ extern crate grin_chain as chain;
 extern crate grin_core as core;
 extern crate grin_keychain as keychain;
 extern crate grin_store as store;
+extern crate grin_util as util;
 extern crate grin_wallet as wallet;
 extern crate rand;

 use chrono::Duration;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chain::types::NoopAdapter;
 use chain::ErrorKind;


@@ -20,7 +20,8 @@ use std::collections::HashSet;
 use std::fmt;
 use std::iter::FromIterator;
 use std::mem;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use consensus::{self, reward, REWARD};
 use core::committed::{self, Committed};
@@ -648,7 +649,7 @@ impl Block {
         {
             let secp = static_secp_instance();
-            let secp = secp.lock().unwrap();
+            let secp = secp.lock();
             let over_commit = secp.commit_value(reward(self.total_fees()))?;
             let out_adjust_sum = secp.commit_sum(


@@ -66,7 +66,7 @@ pub trait Committed {
         // commit to zero built from the offset
         let kernel_sum_plus_offset = {
             let secp = static_secp_instance();
-            let secp = secp.lock().unwrap();
+            let secp = secp.lock();
             let mut commits = vec![kernel_sum];
             if *offset != BlindingFactor::zero() {
                 let key = offset.secret_key(&secp)?;
@@ -90,7 +90,7 @@ pub trait Committed {
         if overage != 0 {
             let over_commit = {
                 let secp = static_secp_instance();
-                let secp = secp.lock().unwrap();
+                let secp = secp.lock();
                 let overage_abs = overage.checked_abs().ok_or_else(|| Error::InvalidValue)? as u64;
                 secp.commit_value(overage_abs).unwrap()
             };
@@ -144,7 +144,7 @@ pub fn sum_commits(
     positive.retain(|x| *x != zero_commit);
     negative.retain(|x| *x != zero_commit);
     let secp = static_secp_instance();
-    let secp = secp.lock().unwrap();
+    let secp = secp.lock();
     Ok(secp.commit_sum(positive, negative)?)
 }
@@ -156,7 +156,7 @@ pub fn sum_kernel_offsets(
     negative: Vec<BlindingFactor>,
 ) -> Result<BlindingFactor, Error> {
     let secp = static_secp_instance();
-    let secp = secp.lock().unwrap();
+    let secp = secp.lock();
     let positive = to_secrets(positive, &secp);
     let negative = to_secrets(negative, &secp);
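`static_secp_instance()` hands out a shared secp256k1 context behind an `Arc<Mutex<_>>`; with parking_lot, `lock()` yields the guard directly rather than a `Result`. A hedged sketch of the pattern (the real function lives in grin_util and wraps a secp context; a plain counter stands in here):

#[macro_use]
extern crate lazy_static;
extern crate parking_lot;

use parking_lot::Mutex;
use std::sync::Arc;

lazy_static! {
    // Stand-in for the shared secp256k1 context object.
    static ref SECP: Arc<Mutex<u64>> = Arc::new(Mutex::new(0));
}

fn static_secp_instance() -> Arc<Mutex<u64>> {
    SECP.clone()
}

fn main() {
    let secp = static_secp_instance();
    // parking_lot::Mutex::lock() returns the guard directly,
    // so `.lock().unwrap()` becomes just `.lock()`.
    let mut guard = secp.lock();
    *guard += 1;
}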


@@ -17,8 +17,9 @@
 use std::cmp::max;
 use std::cmp::Ordering;
 use std::collections::HashSet;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::{error, fmt};
+use util::RwLock;

 use consensus::{self, VerifySortOrder};
 use core::hash::Hashed;
@@ -197,7 +198,7 @@ impl TxKernel {
     pub fn verify(&self) -> Result<(), secp::Error> {
         let msg = Message::from_slice(&kernel_sig_msg(self.fee, self.lock_height))?;
         let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
+        let secp = secp.lock();
         let sig = &self.excess_sig;
         // Verify aggsig directly in libsecp
         let pubkey = &self.excess.to_pubkey(&secp)?;
@@ -553,7 +554,7 @@ impl TransactionBody {
         // Find all the outputs that have not had their rangeproofs verified.
         let outputs = {
-            let mut verifier = verifier.write().unwrap();
+            let mut verifier = verifier.write();
             verifier.filter_rangeproof_unverified(&self.outputs)
         };
@@ -570,7 +571,7 @@ impl TransactionBody {
         // Find all the kernels that have not yet been verified.
         let kernels = {
-            let mut verifier = verifier.write().unwrap();
+            let mut verifier = verifier.write();
             verifier.filter_kernel_sig_unverified(&self.kernels)
         };
@@ -583,7 +584,7 @@ impl TransactionBody {
         // Cache the successful verification results for the new outputs and kernels.
         {
-            let mut verifier = verifier.write().unwrap();
+            let mut verifier = verifier.write();
             verifier.add_rangeproof_verified(outputs);
             verifier.add_kernel_sig_verified(kernels);
         }
@@ -911,7 +912,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
     // now compute the total kernel offset
     let total_kernel_offset = {
         let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
+        let secp = secp.lock();
         let mut positive_key = vec![mk_tx.offset]
             .into_iter()
             .filter(|x| *x != BlindingFactor::zero())
@@ -1092,7 +1093,7 @@ impl Output {
     /// Validates the range proof using the commitment
     pub fn verify_proof(&self) -> Result<(), secp::Error> {
         let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
+        let secp = secp.lock();
         match secp.verify_bullet_proof(self.commit, self.proof, None) {
             Ok(_) => Ok(()),
             Err(e) => Err(e),
@@ -1105,7 +1106,7 @@ impl Output {
         proofs: &Vec<RangeProof>,
     ) -> Result<(), secp::Error> {
         let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
+        let secp = secp.lock();
         match secp.verify_bullet_proof_multi(commits.clone(), proofs.clone(), None) {
             Ok(_) => Ok(()),
             Err(e) => Err(e),
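The verifier cache is shared as an `Arc<RwLock<_>>`, and each verification pass takes the write lock twice, briefly: once to ask which items still need verifying and once to record the results, leaving the expensive verification itself outside the lock. A minimal sketch of that shape, with a simplified stand-in for the cache trait:

extern crate parking_lot;

use parking_lot::RwLock;
use std::sync::Arc;

// Simplified stand-in for the real VerifierCache trait.
trait VerifierCache: Send + Sync {
    fn filter_unverified(&mut self, items: &[u64]) -> Vec<u64>;
    fn add_verified(&mut self, items: Vec<u64>);
}

fn verify(cache: Arc<RwLock<VerifierCache>>, items: Vec<u64>) {
    // Short write lock: just query the cache.
    let unverified = {
        let mut cache = cache.write();
        cache.filter_unverified(&items)
    };

    // ...expensive verification of `unverified` runs without the lock...

    // Short write lock again: record the successes.
    let mut cache = cache.write();
    cache.add_verified(unverified);
}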


@@ -26,7 +26,7 @@ use pow::{self, CuckatooContext, EdgeType, PoWContext};
 /// code wherever mining is needed. This should allow for
 /// different sets of parameters for different purposes,
 /// e.g. CI, User testing, production values
-use std::sync::RwLock;
+use util::RwLock;

 /// Define these here, as they should be developer-set, not really tweakable
 /// by users
@@ -125,7 +125,7 @@ lazy_static!{
 /// Set the mining mode
 pub fn set_mining_mode(mode: ChainTypes) {
-    let mut param_ref = CHAIN_TYPE.write().unwrap();
+    let mut param_ref = CHAIN_TYPE.write();
     *param_ref = mode;
 }
@@ -149,7 +149,7 @@ pub fn pow_type() -> PoWContextTypes {

 /// The minimum acceptable edge_bits
 pub fn min_edge_bits() -> u8 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
         ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
@@ -162,7 +162,7 @@ pub fn min_edge_bits() -> u8 {
 /// while the min_edge_bits can be changed on a soft fork, changing
 /// base_edge_bits is a hard fork.
 pub fn base_edge_bits() -> u8 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
         ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
@@ -173,7 +173,7 @@ pub fn base_edge_bits() -> u8 {

 /// The proofsize
 pub fn proofsize() -> usize {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
         ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
@@ -183,7 +183,7 @@ pub fn proofsize() -> usize {

 /// Coinbase maturity for coinbases to be spent
 pub fn coinbase_maturity() -> u64 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
         ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
@@ -193,7 +193,7 @@ pub fn coinbase_maturity() -> u64 {

 /// Initial mining difficulty
 pub fn initial_block_difficulty() -> u64 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
         ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
@@ -206,7 +206,7 @@ pub fn initial_block_difficulty() -> u64 {
 }

 /// Initial mining secondary scale
 pub fn initial_graph_weight() -> u32 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => TESTING_INITIAL_GRAPH_WEIGHT,
         ChainTypes::UserTesting => TESTING_INITIAL_GRAPH_WEIGHT,
@@ -220,7 +220,7 @@ pub fn initial_graph_weight() -> u32 {

 /// Horizon at which we can cut-through and do full local pruning
 pub fn cut_through_horizon() -> u32 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         ChainTypes::AutomatedTesting => TESTING_CUT_THROUGH_HORIZON,
         ChainTypes::UserTesting => TESTING_CUT_THROUGH_HORIZON,
@@ -230,19 +230,19 @@ pub fn cut_through_horizon() -> u32 {

 /// Are we in automated testing mode?
 pub fn is_automated_testing_mode() -> bool {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     ChainTypes::AutomatedTesting == *param_ref
 }

 /// Are we in user testing mode?
 pub fn is_user_testing_mode() -> bool {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     ChainTypes::UserTesting == *param_ref
 }

 /// Are we in production mode (a live public network)?
 pub fn is_production_mode() -> bool {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     ChainTypes::Testnet1 == *param_ref
         || ChainTypes::Testnet2 == *param_ref
         || ChainTypes::Testnet3 == *param_ref
@@ -255,7 +255,7 @@ pub fn is_production_mode() -> bool {
 /// as the genesis block POW solution turns out to be the same for every new
 /// block chain at the moment
 pub fn get_genesis_nonce() -> u64 {
-    let param_ref = CHAIN_TYPE.read().unwrap();
+    let param_ref = CHAIN_TYPE.read();
     match *param_ref {
         // won't make a difference
         ChainTypes::AutomatedTesting => 0,
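global.rs keeps the chain type in a lazy_static RwLock that is written once at startup and then read everywhere, so every accessor shrinks from `CHAIN_TYPE.read().unwrap()` to `CHAIN_TYPE.read()`. The pattern in miniature, with an abbreviated ChainTypes enum:

#[macro_use]
extern crate lazy_static;
extern crate parking_lot;

use parking_lot::RwLock;

#[derive(PartialEq)]
pub enum ChainTypes {
    AutomatedTesting,
    UserTesting,
}

lazy_static! {
    static ref CHAIN_TYPE: RwLock<ChainTypes> = RwLock::new(ChainTypes::UserTesting);
}

/// Set the mining mode (single write, typically at startup).
pub fn set_mining_mode(mode: ChainTypes) {
    let mut param_ref = CHAIN_TYPE.write();
    *param_ref = mode;
}

/// Readers take a shared lock; no unwrap(), as parking_lot
/// locks cannot be poisoned.
pub fn is_user_testing_mode() -> bool {
    ChainTypes::UserTesting == *CHAIN_TYPE.read()
}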


@@ -18,8 +18,9 @@ extern crate grin_keychain as keychain;
 extern crate grin_util as util;
 extern crate grin_wallet as wallet;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::Instant;
+use util::RwLock;

 pub mod common;


@@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
 extern crate grin_util as util;
 extern crate grin_wallet as wallet;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 pub mod common;
@@ -350,7 +351,7 @@ fn blind_tx() {
     let Output { proof, .. } = btx.outputs()[0];

     let secp = static_secp_instance();
-    let secp = secp.lock().unwrap();
+    let secp = secp.lock();
     let info = secp.range_proof_info(proof);

     assert!(info.min == 0);


@@ -18,7 +18,8 @@ extern crate grin_keychain as keychain;
 extern crate grin_util as util;
 extern crate grin_wallet as wallet;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 pub mod common;
@@ -48,20 +49,20 @@ fn test_verifier_cache_rangeproofs() {
     // Check our output is not verified according to the cache.
     {
-        let mut cache = cache.write().unwrap();
+        let mut cache = cache.write();
         let unverified = cache.filter_rangeproof_unverified(&vec![out]);
         assert_eq!(unverified, vec![out]);
     }

     // Add our output to the cache.
     {
-        let mut cache = cache.write().unwrap();
+        let mut cache = cache.write();
         cache.add_rangeproof_verified(vec![out]);
     }

     // Check it shows as verified according to the cache.
     {
-        let mut cache = cache.write().unwrap();
+        let mut cache = cache.write();
         let unverified = cache.filter_rangeproof_unverified(&vec![out]);
         assert_eq!(unverified, vec![]);
     }


@@ -244,7 +244,7 @@ impl Add for BlindingFactor {
     //
     fn add(self, other: BlindingFactor) -> Self::Output {
         let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
+        let secp = secp.lock();
         let keys = vec![self, other]
             .into_iter()
             .filter(|x| *x != BlindingFactor::zero())


@@ -24,8 +24,9 @@ use std::fs::File;
 use std::io::{self, Read, Write};
 use std::mem::size_of;
 use std::net::TcpStream;
-use std::sync::{mpsc, Arc, RwLock};
+use std::sync::{mpsc, Arc};
 use std::{cmp, thread, time};
+use util::RwLock;

 use core::ser;
 use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
@@ -168,9 +169,8 @@ impl Tracker {
         self.send_channel.try_send(buf)?;

         // Increase sent bytes counter
-        if let Ok(mut sent_bytes) = self.sent_bytes.write() {
-            *sent_bytes += buf_len as u64;
-        }
+        let mut sent_bytes = self.sent_bytes.write();
+        *sent_bytes += buf_len as u64;

         Ok(())
     }
@@ -241,7 +241,8 @@ fn poll<H>(
         );

         // Increase received bytes counter
-        if let Ok(mut received_bytes) = received_bytes.write() {
+        {
+            let mut received_bytes = received_bytes.write();
             let header_size = size_of::<MsgHeader>() as u64;
             *received_bytes += header_size + msg.header.msg_len;
         }
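conn.rs is one of the few places where the swap is more than cosmetic. With the stdlib lock, `write()` returns `Err` when the lock is poisoned, and the old `if let Ok(..)` silently skipped the byte-counter update in that case; parking_lot acquisition cannot fail, so the conditional disappears and the update always runs. The two shapes side by side (simplified):

fn add_std(counter: &std::sync::RwLock<u64>, n: u64) {
    // Old shape: a poisoned lock silently skipped the update.
    if let Ok(mut bytes) = counter.write() {
        *bytes += n;
    }
}

fn add_parking_lot(counter: &parking_lot::RwLock<u64>, n: u64) {
    // New shape: acquisition is infallible; the update always runs.
    let mut bytes = counter.write();
    *bytes += n;
}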


@@ -14,7 +14,8 @@
 use std::collections::VecDeque;
 use std::net::{SocketAddr, TcpStream};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::*;
 use rand::{thread_rng, Rng};
@@ -146,7 +147,7 @@ impl Handshake {
             });
         } else {
             // check the nonce to see if we are trying to connect to ourselves
-            let nonces = self.nonces.read().unwrap();
+            let nonces = self.nonces.read();
             if nonces.contains(&hand.nonce) {
                 return Err(Error::PeerWithSelf);
             }
@@ -195,7 +196,7 @@ impl Handshake {
     fn next_nonce(&self) -> u64 {
         let nonce = thread_rng().gen();

-        let mut nonces = self.nonces.write().unwrap();
+        let mut nonces = self.nonces.write();
         nonces.push_back(nonce);
         if nonces.len() >= NONCES_CAP {
             nonces.pop_front();


@@ -14,7 +14,8 @@
 use std::fs::File;
 use std::net::{SocketAddr, TcpStream};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::{DateTime, Utc};
 use conn;
@@ -137,12 +138,12 @@ impl Peer {
     /// Whether this peer has been banned.
     pub fn is_banned(&self) -> bool {
-        State::Banned == *self.state.read().unwrap()
+        State::Banned == *self.state.read()
     }

     /// Whether this peer is stuck on sync.
     pub fn is_stuck(&self) -> (bool, Difficulty) {
-        let peer_live_info = self.info.live_info.read().unwrap();
+        let peer_live_info = self.info.live_info.read();
         let now = Utc::now().timestamp_millis();
         // if last updated difficulty is 2 hours ago, we're sure this peer is a stuck node.
         if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
@@ -155,26 +156,24 @@ impl Peer {
     /// Number of bytes sent to the peer
     pub fn sent_bytes(&self) -> Option<u64> {
         if let Some(ref tracker) = self.connection {
-            if let Ok(sent_bytes) = tracker.sent_bytes.read() {
-                return Some(*sent_bytes);
-            }
+            let sent_bytes = tracker.sent_bytes.read();
+            return Some(*sent_bytes);
         }

         None
     }

     /// Number of bytes received from the peer
     pub fn received_bytes(&self) -> Option<u64> {
         if let Some(ref tracker) = self.connection {
-            if let Ok(received_bytes) = tracker.received_bytes.read() {
-                return Some(*received_bytes);
-            }
+            let received_bytes = tracker.received_bytes.read();
+            return Some(*received_bytes);
         }

         None
     }

     /// Set this peer status to banned
     pub fn set_banned(&self) {
-        *self.state.write().unwrap() = State::Banned;
+        *self.state.write() = State::Banned;
     }

     /// Send a ping to the remote peer, providing our local difficulty and
@@ -369,7 +368,7 @@ impl Peer {
         match self.connection.as_ref().unwrap().error_channel.try_recv() {
             Ok(Error::Serialization(e)) => {
                 let need_stop = {
-                    let mut state = self.state.write().unwrap();
+                    let mut state = self.state.write();
                     if State::Banned != *state {
                         *state = State::Disconnected;
                         true
@@ -388,7 +387,7 @@
             }
             Ok(e) => {
                 let need_stop = {
-                    let mut state = self.state.write().unwrap();
+                    let mut state = self.state.write();
                     if State::Disconnected != *state {
                         *state = State::Disconnected;
                         true
@@ -403,7 +402,7 @@
                 false
             }
             Err(_) => {
-                let state = self.state.read().unwrap();
+                let state = self.state.read();
                 State::Connected == *state
             }
         }
@@ -427,14 +426,14 @@ impl TrackingAdapter {
     }

     fn has(&self, hash: Hash) -> bool {
-        let known = self.known.read().unwrap();
+        let known = self.known.read();
         // may become too slow, an ordered set (by timestamp for eviction) may
         // end up being a better choice
         known.contains(&hash)
     }

     fn push(&self, hash: Hash) {
-        let mut known = self.known.write().unwrap();
+        let mut known = self.known.write();
         if known.len() > MAX_TRACK_SIZE {
             known.truncate(MAX_TRACK_SIZE);
         }


@@ -15,7 +15,8 @@
 use std::collections::HashMap;
 use std::fs::File;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use rand::{thread_rng, Rng};
@@ -74,7 +75,7 @@ impl Peers {
         self.save_peer(&peer_data)?;

         {
-            let mut peers = self.peers.write().unwrap();
+            let mut peers = self.peers.write();
             peers.insert(addr, peer.clone());
         }
         Ok(())
@@ -88,10 +89,9 @@ impl Peers {
             Some(peer) => {
                 // Clear the map and add new relay
                 let dandelion_relay = &self.dandelion_relay;
-                dandelion_relay.write().unwrap().clear();
+                dandelion_relay.write().clear();
                 dandelion_relay
                     .write()
-                    .unwrap()
                     .insert(Utc::now().timestamp(), peer.clone());
                 debug!(
                     LOGGER,
@@ -104,11 +104,11 @@ impl Peers {

     // Get the dandelion relay
     pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
-        self.dandelion_relay.read().unwrap().clone()
+        self.dandelion_relay.read().clone()
     }

     pub fn is_known(&self, addr: &SocketAddr) -> bool {
-        self.peers.read().unwrap().contains_key(addr)
+        self.peers.read().contains_key(addr)
     }

     /// Get vec of peers we are currently connected to.
@@ -116,7 +116,6 @@ impl Peers {
         let mut res = self
             .peers
             .read()
-            .unwrap()
             .values()
             .filter(|p| p.is_connected())
             .cloned()
@@ -136,14 +135,13 @@ impl Peers {
     /// Get a peer we're connected to by address.
     pub fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<Peer>> {
-        self.peers.read().unwrap().get(addr).map(|p| p.clone())
+        self.peers.read().get(addr).map(|p| p.clone())
     }

     /// Number of peers we're currently connected to.
     pub fn peer_count(&self) -> u32 {
         self.peers
             .read()
-            .unwrap()
             .values()
             .filter(|x| x.is_connected())
             .count() as u32
@@ -370,7 +368,7 @@ impl Peers {
     /// Ping all our connected peers. Always automatically expects a pong back
     /// or disconnects. This acts as a liveness test.
     pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
-        let peers_map = self.peers.read().unwrap();
+        let peers_map = self.peers.read();
         for p in peers_map.values() {
             if p.is_connected() {
                 let _ = p.send_ping(total_difficulty, height);
@@ -417,7 +415,7 @@ impl Peers {
         let mut rm = vec![];

         // build a list of peers to be cleaned up
-        for peer in self.peers.read().unwrap().values() {
+        for peer in self.peers.read().values() {
             if peer.is_banned() {
                 debug!(LOGGER, "clean_peers {:?}, peer banned", peer.info.addr);
                 rm.push(peer.clone());
@@ -437,7 +435,7 @@ impl Peers {
         // now clean up peer map based on the list to remove
         {
-            let mut peers = self.peers.write().unwrap();
+            let mut peers = self.peers.write();
             for p in rm {
                 peers.remove(&p.info.addr);
             }
@@ -463,13 +461,13 @@ impl Peers {
         // now remove them taking a short-lived write lock each time
         // maybe better to take write lock once and remove them all?
         for x in addrs.iter().take(excess_count) {
-            let mut peers = self.peers.write().unwrap();
+            let mut peers = self.peers.write();
             peers.remove(x);
         }
     }

     pub fn stop(&self) {
-        let mut peers = self.peers.write().unwrap();
+        let mut peers = self.peers.write();
         for (_, peer) in peers.drain() {
             peer.stop();
         }
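With the guard returned directly, whole-expression uses such as `self.peers.read().contains_key(addr)` read cleanly: the temporary read guard is created, used, and dropped within the one statement. A short sketch of the peers-map accessors (simplified types):

extern crate parking_lot;

use parking_lot::RwLock;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;

struct Peer;

struct Peers {
    peers: RwLock<HashMap<SocketAddr, Arc<Peer>>>,
}

impl Peers {
    fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<Peer>> {
        // The read guard is a temporary, dropped at the end of
        // this expression statement.
        self.peers.read().get(addr).map(|p| p.clone())
    }

    fn peer_count(&self) -> usize {
        self.peers.read().len()
    }
}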


@@ -17,7 +17,8 @@ use std::fs::File;
 use std::io;
 use std::net::{IpAddr, SocketAddr};
 use std::sync::mpsc;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::*;
@@ -258,23 +259,23 @@ pub struct PeerInfo {
 impl PeerInfo {
     /// The current total_difficulty of the peer.
     pub fn total_difficulty(&self) -> Difficulty {
-        self.live_info.read().unwrap().total_difficulty
+        self.live_info.read().total_difficulty
     }

     /// The current height of the peer.
     pub fn height(&self) -> u64 {
-        self.live_info.read().unwrap().height
+        self.live_info.read().height
     }

     /// Time of last_seen for this peer (via ping/pong).
     pub fn last_seen(&self) -> DateTime<Utc> {
-        self.live_info.read().unwrap().last_seen
+        self.live_info.read().last_seen
     }

     /// Update the total_difficulty, height and last_seen of the peer.
     /// Takes a write lock on the live_info.
     pub fn update(&self, height: u64, total_difficulty: Difficulty) {
-        let mut live_info = self.live_info.write().unwrap();
+        let mut live_info = self.live_info.write();
         if total_difficulty != live_info.total_difficulty {
             live_info.stuck_detector = Utc::now();
         }


@@ -16,7 +16,8 @@
 //! Used for both the txpool and stempool layers in the pool.

 use std::collections::{HashMap, HashSet};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::consensus;
 use core::core::hash::{Hash, Hashed};


@@ -17,7 +17,8 @@
 //! resulting tx pool can be added to the current chain state to produce a
 //! valid chain state.

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use chrono::prelude::Utc;


@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::core::verifier_cache::LruVerifierCache;
 use core::core::{Block, BlockHeader, Transaction};
@@ -80,7 +81,7 @@ fn test_transaction_pool_block_building() {
     let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);

     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();

         // Add the three root txs to the pool.
         write_pool
@@ -105,7 +106,7 @@ fn test_transaction_pool_block_building() {
     }

     let txs = {
-        let read_pool = pool.read().unwrap();
+        let read_pool = pool.read();
         read_pool.prepare_mineable_transactions().unwrap()
     };
     // children should have been aggregated into parents
@@ -123,7 +124,7 @@ fn test_transaction_pool_block_building() {
     // Now reconcile the transaction pool with the new block
     // and check the resulting contents of the pool are what we expect.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         write_pool.reconcile_block(&block).unwrap();

         assert_eq!(write_pool.total_size(), 0);


@@ -25,7 +25,8 @@ extern crate rand;

 pub mod common;

-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;

 use core::core::{Block, BlockHeader};
@@ -127,7 +128,7 @@ fn test_transaction_pool_block_reconciliation() {
     // First we add the above transactions to the pool.
     // All should be accepted.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         assert_eq!(write_pool.total_size(), 0);

         for tx in &txs_to_add {
@@ -165,13 +166,13 @@ fn test_transaction_pool_block_reconciliation() {

     // Check the pool still contains everything we expect at this point.
     {
-        let write_pool = pool.write().unwrap();
+        let write_pool = pool.write();
         assert_eq!(write_pool.total_size(), txs_to_add.len());
     }

     // And reconcile the pool with this latest block.
     {
-        let mut write_pool = pool.write().unwrap();
+        let mut write_pool = pool.write();
         write_pool.reconcile_block(&block).unwrap();

         assert_eq!(write_pool.total_size(), 4);

View file

@@ -25,7 +25,8 @@ extern crate rand;
 pub mod common;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use common::*;
 use core::core::hash::Hash;
@@ -83,7 +84,7 @@ fn test_coinbase_maturity() {
 	let pool = RwLock::new(test_setup(chain, verifier_cache));
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		let tx = test_transaction(&keychain, vec![50], vec![49]);
 		match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
 			Err(PoolError::ImmatureCoinbase) => {}

View file

@@ -28,7 +28,8 @@ extern crate rand;
 use std::collections::HashSet;
 use std::fs;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use core::core::hash::{Hash, Hashed};
 use core::core::verifier_cache::VerifierCache;
@@ -98,7 +99,7 @@ impl ChainAdapter {
 		batch.commit().unwrap();
 		{
-			let mut utxo = self.utxo.write().unwrap();
+			let mut utxo = self.utxo.write();
 			for x in block.inputs() {
 				utxo.remove(&x.commitment());
 			}
@@ -129,7 +130,7 @@ impl BlockChain for ChainAdapter {
 	}
 	fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
-		let utxo = self.utxo.read().unwrap();
+		let utxo = self.utxo.read();
 		for x in tx.outputs() {
 			if utxo.contains(&x.commitment()) {

View file

@@ -25,7 +25,8 @@ extern crate rand;
 pub mod common;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use common::*;
 use core::core::verifier_cache::LruVerifierCache;
@@ -72,7 +73,7 @@ fn test_the_transaction_pool() {
 	// Add this tx to the pool (stem=false, direct to txpool).
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		write_pool
 			.add_to_pool(test_source(), initial_tx, false, &header)
 			.unwrap();
@@ -86,7 +87,7 @@ fn test_the_transaction_pool() {
 	// Take a write lock and add a couple of tx entries to the pool.
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		// Check we have a single initial tx in the pool.
 		assert_eq!(write_pool.total_size(), 1);
@@ -110,7 +111,7 @@ fn test_the_transaction_pool() {
 	// This will fail during tx aggregation due to duplicate outputs and duplicate
 	// kernels.
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		assert!(
 			write_pool
 				.add_to_pool(test_source(), tx1.clone(), true, &header)
@@ -122,7 +123,7 @@ fn test_the_transaction_pool() {
 	// tx).
 	{
 		let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		assert!(
 			write_pool
 				.add_to_pool(test_source(), tx1a, true, &header)
@@ -133,7 +134,7 @@ fn test_the_transaction_pool() {
 	// Test adding a tx attempting to spend a non-existent output.
 	{
 		let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		assert!(
 			write_pool
 				.add_to_pool(test_source(), bad_tx, true, &header)
@@ -147,7 +148,7 @@ fn test_the_transaction_pool() {
 	// to be immediately stolen via a "replay" tx.
 	{
 		let tx = test_transaction(&keychain, vec![900], vec![498]);
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		assert!(
 			write_pool
 				.add_to_pool(test_source(), tx, true, &header)
@@ -157,7 +158,7 @@ fn test_the_transaction_pool() {
 	// Confirm the tx pool correctly identifies an invalid tx (already spent).
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		let tx3 = test_transaction(&keychain, vec![500], vec![497]);
 		assert!(
 			write_pool
@@ -171,7 +172,7 @@ fn test_the_transaction_pool() {
 	// Check we can take some entries from the stempool and "fluff" them into the
 	// txpool. This also exercises multi-kernel txs.
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		let agg_tx = write_pool
 			.stempool
 			.aggregate_transaction()
@@ -189,7 +190,7 @@ fn test_the_transaction_pool() {
 	// We will do this be adding a new tx to the pool
 	// that is a superset of a tx already in the pool.
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		let tx4 = test_transaction(&keychain, vec![800], vec![799]);
 		// tx1 and tx2 are already in the txpool (in aggregated form)
@@ -210,7 +211,7 @@ fn test_the_transaction_pool() {
 	// Check we cannot "double spend" an output spent in a previous block.
 	// We use the initial coinbase output here for convenience.
 	{
-		let mut write_pool = pool.write().unwrap();
+		let mut write_pool = pool.write();
 		let double_spend_tx =
 			{ test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };

View file

@@ -17,9 +17,10 @@
 use std::fs::File;
 use std::net::SocketAddr;
-use std::sync::{Arc, RwLock, Weak};
+use std::sync::{Arc, Weak};
 use std::thread;
 use std::time::Instant;
+use util::RwLock;
 use chain::{self, ChainAdapter, Options};
 use chrono::prelude::{DateTime, Utc};
@@ -82,7 +83,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		);
 		let res = {
-			let mut tx_pool = self.tx_pool.write().unwrap();
+			let mut tx_pool = self.tx_pool.write();
 			tx_pool.add_to_pool(source, tx, stem, &header)
 		};
@@ -139,7 +140,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		}
 		let (txs, missing_short_ids) = {
-			let tx_pool = self.tx_pool.read().unwrap();
+			let tx_pool = self.tx_pool.read();
 			tx_pool.retrieve_transactions(cb.hash(), cb.nonce, cb.kern_ids())
 		};
@@ -640,7 +641,7 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
 		debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());
-		if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
+		if let Err(e) = self.tx_pool.write().reconcile_block(b) {
 			error!(
 				LOGGER,
 				"Pool could not update itself at block {}: {:?}",

View file

@@ -16,8 +16,9 @@
 //! to collect information about server status
 use std::sync::atomic::AtomicBool;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::time::SystemTime;
+use util::RwLock;
 use core::pow::Difficulty;

View file

@@ -14,7 +14,8 @@
 //! Server types
 use std::convert::From;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use api;
 use chain;
@@ -297,12 +298,12 @@ impl SyncState {
 	/// Whether the current state matches any active syncing operation.
 	/// Note: This includes our "initial" state.
 	pub fn is_syncing(&self) -> bool {
-		*self.current.read().unwrap() != SyncStatus::NoSync
+		*self.current.read() != SyncStatus::NoSync
 	}
 	/// Current syncing status
 	pub fn status(&self) -> SyncStatus {
-		*self.current.read().unwrap()
+		*self.current.read()
 	}
 	/// Update the syncing status
@@ -311,7 +312,7 @@ impl SyncState {
 			return;
 		}
-		let mut status = self.current.write().unwrap();
+		let mut status = self.current.write();
 		debug!(
 			LOGGER,
@@ -324,7 +325,7 @@ impl SyncState {
 	/// Update txhashset downloading progress
 	pub fn update_txhashset_download(&self, new_status: SyncStatus) -> bool {
 		if let SyncStatus::TxHashsetDownload { .. } = new_status {
-			let mut status = self.current.write().unwrap();
+			let mut status = self.current.write();
 			*status = new_status;
 			true
 		} else {
@@ -334,7 +335,7 @@ impl SyncState {
 	/// Communicate sync error
 	pub fn set_sync_error(&self, error: Error) {
-		*self.sync_error.write().unwrap() = Some(error);
+		*self.sync_error.write() = Some(error);
 	}
 	/// Get sync error
@@ -344,7 +345,7 @@ impl SyncState {
 	/// Clear sync error
 	pub fn clear_sync_error(&self) {
-		*self.sync_error.write().unwrap() = None;
+		*self.sync_error.write() = None;
 	}
 }
@@ -354,7 +355,7 @@ impl chain::TxHashsetWriteStatus for SyncState {
 	}
 	fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
-		let mut status = self.current.write().unwrap();
+		let mut status = self.current.write();
 		match *status {
 			SyncStatus::TxHashsetValidation {
 				kernels,
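SyncStatus is a Copy enum, so dereferencing the read guard copies the value out and the lock is released when the expression ends; writes go through `*…write() = …` the same way. A compressed stand-in for the pattern (the real enum carries many more variants and payloads):

    extern crate parking_lot;

    use parking_lot::RwLock;

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum SyncStatus {
        NoSync,
        BodySync,
    }

    struct SyncState {
        current: RwLock<SyncStatus>,
    }

    impl SyncState {
        fn is_syncing(&self) -> bool {
            // Copies the enum out; the read lock ends with the expression.
            *self.current.read() != SyncStatus::NoSync
        }

        fn update(&self, new_status: SyncStatus) {
            *self.current.write() = new_status;
        }
    }

    fn main() {
        let s = SyncState {
            current: RwLock::new(SyncStatus::NoSync),
        };
        assert!(!s.is_syncing());
        s.update(SyncStatus::BodySync);
        assert!(s.is_syncing());
    }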

View file

@@ -15,9 +15,10 @@
 use chrono::prelude::Utc;
 use rand::{thread_rng, Rng};
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
+use util::RwLock;
 use core::core::hash::Hashed;
 use core::core::transaction;
@@ -86,7 +87,7 @@ fn process_stem_phase(
 	tx_pool: Arc<RwLock<TransactionPool>>,
 	verifier_cache: Arc<RwLock<VerifierCache>>,
 ) -> Result<(), PoolError> {
-	let mut tx_pool = tx_pool.write().unwrap();
+	let mut tx_pool = tx_pool.write();
 	let header = tx_pool.chain_head()?;
@@ -133,7 +134,7 @@ fn process_fluff_phase(
 	tx_pool: Arc<RwLock<TransactionPool>>,
 	verifier_cache: Arc<RwLock<VerifierCache>>,
 ) -> Result<(), PoolError> {
-	let mut tx_pool = tx_pool.write().unwrap();
+	let mut tx_pool = tx_pool.write();
 	let header = tx_pool.chain_head()?;
@@ -172,7 +173,7 @@ fn process_fresh_entries(
 	dandelion_config: DandelionConfig,
 	tx_pool: Arc<RwLock<TransactionPool>>,
 ) -> Result<(), PoolError> {
-	let mut tx_pool = tx_pool.write().unwrap();
+	let mut tx_pool = tx_pool.write();
 	let mut rng = thread_rng();
@@ -212,7 +213,7 @@ fn process_expired_entries(
 	let mut expired_entries = vec![];
 	{
-		let tx_pool = tx_pool.read().unwrap();
+		let tx_pool = tx_pool.read();
 		for entry in tx_pool
 			.stempool
 			.entries
@@ -236,7 +237,7 @@ fn process_expired_entries(
 		);
 	{
-		let mut tx_pool = tx_pool.write().unwrap();
+		let mut tx_pool = tx_pool.write();
 		let header = tx_pool.chain_head()?;
 		for entry in expired_entries {

View file

@@ -18,8 +18,9 @@
 use std::net::SocketAddr;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::{thread, time};
+use util::RwLock;
 use api;
 use chain;
@@ -79,7 +80,7 @@ impl Server {
 		if let Some(s) = enable_stratum_server {
 			if s {
 				{
-					let mut stratum_stats = serv.state_info.stratum_stats.write().unwrap();
+					let mut stratum_stats = serv.state_info.stratum_stats.write();
 					stratum_stats.is_enabled = true;
 				}
 				serv.start_stratum_server(c.clone());
@@ -388,7 +389,7 @@ impl Server {
 	/// other
 	/// consumers
 	pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
-		let stratum_stats = self.state_info.stratum_stats.read().unwrap().clone();
+		let stratum_stats = self.state_info.stratum_stats.read().clone();
 		let awaiting_peers = self.state_info.awaiting_peers.load(Ordering::Relaxed);
 		// Fill out stats on our current difficulty calculation

View file

@@ -76,7 +76,7 @@ impl StateSync {
 		// check sync error
 		{
			let clone = self.sync_state.sync_error();
-			if let Some(ref sync_error) = *clone.read().unwrap() {
+			if let Some(ref sync_error) = *clone.read() {
 				error!(
 					LOGGER,
 					"fast_sync: error = {:?}. restart fast sync", sync_error

View file

@@ -17,9 +17,10 @@
 use chrono::prelude::{DateTime, NaiveDateTime, Utc};
 use rand::{thread_rng, Rng};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
+use util::RwLock;
 use chain;
 use common::types::Error;
@@ -110,11 +111,7 @@ fn build_block(
 	// extract current transaction from the pool
 	// TODO - we have a lot of unwrap() going on in this fn...
-	let txs = tx_pool
-		.read()
-		.unwrap()
-		.prepare_mineable_transactions()
-		.unwrap();
+	let txs = tx_pool.read().prepare_mineable_transactions().unwrap();
 	// build the coinbase and the block itself
 	let fees = txs.iter().map(|tx| tx.fee()).sum();
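The five-line chain collapses because there is no longer a LockResult in the middle: `tx_pool.read()` yields the guard itself, and the temporary guard lives to the end of the statement. A sketch with stand-in types (not the real TransactionPool):

    extern crate parking_lot;

    use std::sync::Arc;
    use parking_lot::RwLock;

    fn main() {
        // Stand-in for Arc<RwLock<TransactionPool>>.
        let tx_pool = Arc::new(RwLock::new(vec!["tx1", "tx2"]));

        // The temporary read guard is created and dropped within the
        // one statement, so the whole extraction fits on one line.
        let txs = tx_pool.read().clone();
        assert_eq!(txs.len(), 2);
    }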

View file

@@ -21,9 +21,10 @@ use serde_json::Value;
 use std::error::Error;
 use std::io::{BufRead, ErrorKind, Write};
 use std::net::{TcpListener, TcpStream};
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::Arc;
 use std::time::{Duration, SystemTime};
 use std::{cmp, thread};
+use util::{Mutex, RwLock};
 use chain;
 use common::stats::{StratumStats, WorkerStats};
@@ -122,14 +123,14 @@ fn accept_workers(
 					.set_nonblocking(true)
 					.expect("set_nonblocking call failed");
 				let mut worker = Worker::new(worker_id.to_string(), BufStream::new(stream));
-				workers.lock().unwrap().push(worker);
+				workers.lock().push(worker);
 				// stats for this worker (worker stat objects are added and updated but never
 				// removed)
 				let mut worker_stats = WorkerStats::default();
 				worker_stats.is_connected = true;
 				worker_stats.id = worker_id.to_string();
 				worker_stats.pow_difficulty = 1; // XXX TODO
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.worker_stats.push(worker_stats);
 				worker_id = worker_id + 1;
 			}
@@ -285,7 +286,7 @@ impl StratumServer {
 	// Handle an RPC request message from the worker(s)
 	fn handle_rpc_requests(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) {
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		for num in 0..workers_l.len() {
 			match workers_l[num].read_message() {
 				Some(the_message) => {
@@ -306,7 +307,7 @@ impl StratumServer {
 					}
 				};
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				let worker_stats_id = stratum_stats
 					.worker_stats
 					.iter()
@@ -582,7 +583,7 @@ impl StratumServer {
 	// Purge dead/sick workers - remove all workers marked in error state
 	fn clean_workers(&mut self, stratum_stats: &mut Arc<RwLock<StratumStats>>) -> usize {
 		let mut start = 0;
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		loop {
 			for num in start..workers_l.len() {
 				if workers_l[num].error == true {
@@ -593,7 +594,7 @@ impl StratumServer {
 						workers_l[num].id;
 					);
 					// Update worker stats
-					let mut stratum_stats = stratum_stats.write().unwrap();
+					let mut stratum_stats = stratum_stats.write();
 					let worker_stats_id = stratum_stats
 						.worker_stats
 						.iter()
@@ -607,7 +608,7 @@ impl StratumServer {
 				start = num + 1;
 			}
 			if start >= workers_l.len() {
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.num_workers = workers_l.len();
 				return stratum_stats.num_workers;
 			}
@@ -639,7 +640,7 @@ impl StratumServer {
 		// Push the new block to all connected clients
 		// NOTE: We do not give a unique nonce (should we?) so miners need
 		// to choose one for themselves
-		let mut workers_l = self.workers.lock().unwrap();
+		let mut workers_l = self.workers.lock();
 		for num in 0..workers_l.len() {
 			workers_l[num].write_message(job_request_json.clone());
 		}
@@ -691,7 +692,7 @@ impl StratumServer {
 		// We have started
 		{
-			let mut stratum_stats = stratum_stats.write().unwrap();
+			let mut stratum_stats = stratum_stats.write();
 			stratum_stats.is_running = true;
 			stratum_stats.edge_bits = edge_bits as u16;
 		}
@@ -753,7 +754,7 @@ impl StratumServer {
 			deadline = Utc::now().timestamp() + attempt_time_per_block as i64;
 			{
-				let mut stratum_stats = stratum_stats.write().unwrap();
+				let mut stratum_stats = stratum_stats.write();
 				stratum_stats.block_height = new_block.header.height;
 				stratum_stats.network_difficulty = self.current_difficulty;
 			}
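The stratum server shares its worker list between the accept thread and the main loop. With parking_lot, `lock()` blocks until the mutex is free and returns the guard directly, and a worker thread that panics while holding it simply unlocks rather than poisoning the mutex for everyone else. A minimal cross-thread sketch (illustrative names):

    extern crate parking_lot;

    use std::sync::Arc;
    use std::thread;

    use parking_lot::Mutex;

    fn main() {
        let workers: Arc<Mutex<Vec<String>>> = Arc::new(Mutex::new(vec![]));

        let w = workers.clone();
        let handle = thread::spawn(move || {
            // No LockResult, no unwrap: the guard comes back directly.
            w.lock().push("worker-1".to_owned());
        });
        handle.join().unwrap();

        assert_eq!(workers.lock().len(), 1);
    }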

View file

@@ -19,7 +19,8 @@
 use chrono::prelude::Utc;
 use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use chain;
 use common::types::StratumServerConfig;

View file

@@ -26,8 +26,9 @@ extern crate grin_wallet as wallet;
 mod framework;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;
 use core::global::{self, ChainTypes};
@@ -52,7 +53,7 @@ fn simple_server_wallet() {
 	));
 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});

View file

@@ -27,8 +27,9 @@ extern crate grin_wallet as wallet;
 mod framework;
 use framework::{LocalServerContainer, LocalServerContainerConfig};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;
 use util::LOGGER;
@@ -56,12 +57,12 @@ fn test_dandelion_timeout() {
 	let coinbase_wallet = Arc::new(Mutex::new(
 		LocalServerContainer::new(coinbase_config).unwrap(),
 	));
-	let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
+	let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };
 	let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});
@@ -71,12 +72,12 @@ fn test_dandelion_timeout() {
 	recp_config.wallet_port = 20002;
 	let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
 	let target_wallet_cloned = target_wallet.clone();
-	let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
+	let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
 	let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
 	//Start up a second wallet, to receive
 	let _ = thread::spawn(move || {
-		let mut w = target_wallet_cloned.lock().unwrap();
+		let mut w = target_wallet_cloned.lock();
 		w.run_wallet(0);
 	});

View file

@@ -25,8 +25,9 @@ extern crate blake2_rfc as blake2;
 use std::default::Default;
 use std::ops::Deref;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{fs, thread, time};
+use util::Mutex;
 use framework::keychain::Keychain;
 use wallet::{HTTPWalletClient, LMDBBackend, WalletConfig};
@@ -532,7 +533,7 @@ impl LocalServerContainerPool {
 				thread::sleep(time::Duration::from_millis(2000));
 			}
 			let server_ref = s.run_server(run_length);
-			return_container_ref.lock().unwrap().push(server_ref);
+			return_container_ref.lock().push(server_ref);
 		});
 		// Not a big fan of sleeping hack here, but there appears to be a
 		// concurrency issue when creating files in rocksdb that causes
@@ -575,7 +576,7 @@ impl LocalServerContainerPool {
 }
 pub fn stop_all_servers(servers: Arc<Mutex<Vec<servers::Server>>>) {
-	let locked_servs = servers.lock().unwrap();
+	let locked_servs = servers.lock();
 	for s in locked_servs.deref() {
 		s.stop();
 	}

View file

@@ -27,8 +27,9 @@ mod framework;
 use std::default::Default;
 use std::sync::atomic::AtomicBool;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;
 use core::core::hash::Hashed;
 use core::global::{self, ChainTypes};

View file

@@ -27,8 +27,9 @@ extern crate grin_wallet as wallet;
 mod framework;
 use framework::{LocalServerContainer, LocalServerContainerConfig};
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::{thread, time};
+use util::Mutex;
 use util::LOGGER;
@@ -55,11 +56,11 @@ fn basic_wallet_transactions() {
 	let coinbase_wallet = Arc::new(Mutex::new(
 		LocalServerContainer::new(coinbase_config).unwrap(),
 	));
-	let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
+	let coinbase_wallet_config = { coinbase_wallet.lock().wallet_config.clone() };
 	let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
 	let _ = thread::spawn(move || {
-		let mut w = coinbase_wallet.lock().unwrap();
+		let mut w = coinbase_wallet.lock();
 		w.run_wallet(0);
 	});
@@ -69,11 +70,11 @@ fn basic_wallet_transactions() {
 	recp_config.wallet_port = 20002;
 	let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
 	let target_wallet_cloned = target_wallet.clone();
-	let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
+	let recp_wallet_config = { target_wallet.lock().wallet_config.clone() };
 	let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
 	//Start up a second wallet, to receive
 	let _ = thread::spawn(move || {
-		let mut w = target_wallet_cloned.lock().unwrap();
+		let mut w = target_wallet_cloned.lock();
 		w.run_wallet(0);
 	});

View file

@@ -18,9 +18,10 @@ use std::io::Read;
 use std::path::PathBuf;
 /// Wallet commands processing
 use std::process::exit;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use std::time::Duration;
 use std::{process, thread};
+use util::Mutex;
 use clap::ArgMatches;

View file

@@ -18,6 +18,7 @@ slog-term = "~2.4"
 slog-async = "~2.3"
 walkdir = "2"
 zip = "0.4"
+parking_lot = {version = "0.6"}
 [dependencies.secp256k1zkp]
 git = "https://github.com/mimblewimble/rust-secp256k1-zkp"

View file

@@ -38,6 +38,10 @@ extern crate serde;
 extern crate serde_derive;
 extern crate walkdir;
 extern crate zip as zip_rs;
+// Re-export so only has to be included once
+extern crate parking_lot;
+pub use parking_lot::Mutex;
+pub use parking_lot::RwLock;
 // Re-export so only has to be included once
 pub extern crate secp256k1zkp as secp;
@@ -59,7 +63,7 @@ pub mod macros;
 use byteorder::{BigEndian, ByteOrder};
 #[allow(unused_imports)]
 use std::ops::Deref;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
 mod hex;
 pub use hex::*;
@@ -93,7 +97,7 @@ where
 	/// Initializes the OneTime, should only be called once after construction.
 	/// Will panic (via assert) if called more than once.
 	pub fn init(&self, value: T) {
-		let mut inner = self.inner.write().unwrap();
+		let mut inner = self.inner.write();
 		assert!(inner.is_none());
 		*inner = Some(value);
 	}
@@ -101,7 +105,7 @@ where
 	/// Borrows the OneTime, should only be called after initialization.
 	/// Will panic (via expect) if called before initialization.
 	pub fn borrow(&self) -> T {
-		let inner = self.inner.read().unwrap();
+		let inner = self.inner.read();
 		inner
 			.clone()
 			.expect("Cannot borrow one_time before initialization.")

View file

@@ -17,7 +17,7 @@ use slog_async;
 use slog_term;
 use std::fs::OpenOptions;
 use std::ops::Deref;
-use std::sync::Mutex;
+use Mutex;
 use backtrace::Backtrace;
 use std::{panic, thread};
@@ -46,12 +46,12 @@ lazy_static! {
 	/// And a static reference to the logger itself, accessible from all crates
 	pub static ref LOGGER: Logger = {
-		let was_init = WAS_INIT.lock().unwrap().clone();
-		let config = LOGGING_CONFIG.lock().unwrap();
+		let was_init = WAS_INIT.lock().clone();
+		let config = LOGGING_CONFIG.lock();
 		let slog_level_stdout = convert_log_level(&config.stdout_log_level);
 		let slog_level_file = convert_log_level(&config.file_log_level);
 		if config.tui_running.is_some() && config.tui_running.unwrap() {
-			let mut tui_running_ref = TUI_RUNNING.lock().unwrap();
+			let mut tui_running_ref = TUI_RUNNING.lock();
 			*tui_running_ref = true;
 		}
@@ -91,10 +91,10 @@ lazy_static! {
 /// Initialize the logger with the given configuration
 pub fn init_logger(config: Option<LoggingConfig>) {
 	if let Some(c) = config {
-		let mut config_ref = LOGGING_CONFIG.lock().unwrap();
+		let mut config_ref = LOGGING_CONFIG.lock();
 		*config_ref = c.clone();
 		// Logger configuration successfully injected into LOGGING_CONFIG...
-		let mut was_init_ref = WAS_INIT.lock().unwrap();
+		let mut was_init_ref = WAS_INIT.lock();
 		*was_init_ref = true;
 		// .. allow logging, having ensured that paths etc are immutable
 	}
@@ -103,14 +103,14 @@ pub fn init_logger(config: Option<LoggingConfig>) {
 /// Initializes the logger for unit and integration tests
 pub fn init_test_logger() {
-	let mut was_init_ref = WAS_INIT.lock().unwrap();
+	let mut was_init_ref = WAS_INIT.lock();
 	if *was_init_ref.deref() {
 		return;
 	}
 	let mut logger = LoggingConfig::default();
 	logger.log_to_file = false;
 	logger.stdout_log_level = LogLevel::Debug;
-	let mut config_ref = LOGGING_CONFIG.lock().unwrap();
+	let mut config_ref = LOGGING_CONFIG.lock();
 	*config_ref = logger;
 	*was_init_ref = true;
 }
@@ -149,7 +149,7 @@ fn send_panic_to_log() {
 		),
 	}
 	//also print to stderr
-	let tui_running = TUI_RUNNING.lock().unwrap().clone();
+	let tui_running = TUI_RUNNING.lock().clone();
 	if !tui_running {
 		eprintln!(
 			"Thread '{}' panicked with message:\n\"{}\"\nSee grin.log for further details.",

View file

@@ -17,7 +17,8 @@
 use rand::thread_rng;
 use secp;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
+use Mutex;
 lazy_static! {
 	/// Static reference to secp instance
@@ -28,7 +29,7 @@ lazy_static! {
 /// Returns the static instance, but calls randomize on it as well
 /// (Recommended to avoid side channel attacks
 pub fn static_secp_instance() -> Arc<Mutex<secp::Secp256k1>> {
-	let mut secp_inst = SECP256K1.lock().unwrap();
+	let mut secp_inst = SECP256K1.lock();
 	secp_inst.randomize(&mut thread_rng());
 	SECP256K1.clone()
 }
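The function mutates the shared context under the lock and then returns a fresh Arc handle; the guard drops when the function returns. A stand-in sketch of the same shape (the real code guards secp::Secp256k1, so the type and names here are illustrative):

    extern crate parking_lot;

    use std::sync::Arc;
    use parking_lot::Mutex;

    struct Ctx {
        seed: u64,
    }

    fn randomized_instance(shared: &Arc<Mutex<Ctx>>, seed: u64) -> Arc<Mutex<Ctx>> {
        let mut ctx = shared.lock();
        ctx.seed = seed; // mutate under the lock ("randomize")
        shared.clone() // hand back another Arc handle; guard drops at return
    }

    fn main() {
        let shared = Arc::new(Mutex::new(Ctx { seed: 0 }));
        let handle = randomized_instance(&shared, 42);
        assert_eq!(handle.lock().seed, 42);
    }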

View file

@@ -255,7 +255,8 @@ where
 // Just a simple test, most exhaustive tests in the core mod.rs.
 #[cfg(test)]
 mod test {
-	use std::sync::{Arc, RwLock};
+	use std::sync::Arc;
+	use util::RwLock;
 	use super::*;
 	use core::core::verifier_cache::{LruVerifierCache, VerifierCache};

View file

@@ -47,7 +47,7 @@ where
 	};
 	let secp = static_secp_instance();
-	let secp = secp.lock().unwrap();
+	let secp = secp.lock();
 	let over_commit = secp.commit_value(reward(fees))?;
 	let out_commit = output.commitment();
 	let excess = secp.commit_sum(vec![out_commit], vec![over_commit])?;

View file

@@ -16,7 +16,8 @@
 //! around during an interactive wallet exchange
 use rand::thread_rng;
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use uuid::Uuid;
 use core::core::committed::Committed;

View file

@@ -20,7 +20,8 @@
 use std::fs::File;
 use std::io::{Read, Write};
 use std::marker::PhantomData;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
+use util::Mutex;
 use serde_json as json;
@@ -77,7 +78,7 @@ where
 		refresh_from_node: bool,
 		tx_id: Option<u32>,
 	) -> Result<(bool, Vec<(OutputData, pedersen::Commitment)>), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
@@ -102,7 +103,7 @@ where
 		refresh_from_node: bool,
 		tx_id: Option<u32>,
 	) -> Result<(bool, Vec<TxLogEntry>), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
@@ -125,7 +126,7 @@ where
 		&mut self,
 		refresh_from_node: bool,
 	) -> Result<(bool, WalletInfo), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
@@ -143,13 +144,13 @@ where
 	/// Return list of existing account -> Path mappings
 	pub fn accounts(&mut self) -> Result<Vec<AcctPathMapping>, Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		keys::accounts(&mut **w)
 	}
 	/// Create a new account path
 	pub fn new_account_path(&mut self, label: &str) -> Result<Identifier, Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		keys::new_acct_path(&mut **w, label)
 	}
@@ -163,7 +164,7 @@ where
 		num_change_outputs: usize,
 		selection_strategy_is_use_all: bool,
 	) -> Result<Slate, Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
@@ -216,7 +217,7 @@ where
 		num_change_outputs: usize,
 		selection_strategy_is_use_all: bool,
 	) -> Result<Slate, Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
@@ -254,7 +255,7 @@ where
 	/// Builds the complete transaction and sends it to a grin node for
 	/// propagation.
 	pub fn finalize_tx(&mut self, slate: &mut Slate) -> Result<(), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let context = w.get_private_context(slate.id.as_bytes())?;
@@ -275,7 +276,7 @@ where
 	/// with the transaction used when a transaction is created but never
 	/// posted
 	pub fn cancel_tx(&mut self, tx_id: u32) -> Result<(), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
 		if !self.update_outputs(&mut w) {
@@ -295,7 +296,7 @@ where
 		minimum_confirmations: u64,
 		max_outputs: usize,
 	) -> Result<(), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
 		let tx_burn = tx::issue_burn_tx(
@@ -315,7 +316,7 @@ where
 	pub fn post_tx(&self, slate: &Slate, fluff: bool) -> Result<(), Error> {
 		let tx_hex = util::to_hex(ser::ser_vec(&slate.tx).unwrap());
 		let client = {
-			let mut w = self.wallet.lock().unwrap();
+			let mut w = self.wallet.lock();
 			w.client().clone()
 		};
 		let res = client.post_tx(&TxWrapper { tx_hex: tx_hex }, fluff);
@@ -341,7 +342,7 @@ where
 		dest: &str,
 	) -> Result<Transaction, Error> {
 		let (confirmed, tx_hex) = {
-			let mut w = self.wallet.lock().unwrap();
+			let mut w = self.wallet.lock();
 			w.open_with_credentials()?;
 			let parent_key_id = w.parent_key_id();
 			let res = tx::retrieve_tx_hex(&mut **w, &parent_key_id, tx_id)?;
@@ -375,7 +376,7 @@ where
 	pub fn post_stored_tx(&self, tx_id: u32, fluff: bool) -> Result<(), Error> {
 		let client;
 		let (confirmed, tx_hex) = {
-			let mut w = self.wallet.lock().unwrap();
+			let mut w = self.wallet.lock();
 			w.open_with_credentials()?;
 			let parent_key_id = w.parent_key_id();
 			client = w.client().clone();
@@ -418,7 +419,7 @@ where
 	/// Attempt to restore contents of wallet
 	pub fn restore(&mut self) -> Result<(), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let res = w.restore();
 		w.close()?;
@@ -428,7 +429,7 @@ where
 	/// Retrieve current height from node
 	pub fn node_height(&mut self) -> Result<(u64, bool), Error> {
 		let res = {
-			let mut w = self.wallet.lock().unwrap();
+			let mut w = self.wallet.lock();
 			w.open_with_credentials()?;
 			w.client().get_chain_height()
 		};
@@ -487,7 +488,7 @@ where
 	/// Build a new (potential) coinbase transaction in the wallet
 	pub fn build_coinbase(&mut self, block_fees: &BlockFees) -> Result<CbData, Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let res = updater::build_coinbase(&mut **w, block_fees);
 		w.close()?;
@@ -503,7 +504,7 @@ where
 		pub_tx_f.read_to_string(&mut content)?;
 		let mut slate: Slate = json::from_str(&content).map_err(|_| ErrorKind::Format)?;
-		let mut wallet = self.wallet.lock().unwrap();
+		let mut wallet = self.wallet.lock();
 		wallet.open_with_credentials()?;
 		let parent_key_id = wallet.parent_key_id();
@@ -533,7 +534,7 @@ where
 	/// Receive a transaction from a sender
 	pub fn receive_tx(&mut self, slate: &mut Slate) -> Result<(), Error> {
-		let mut w = self.wallet.lock().unwrap();
+		let mut w = self.wallet.lock();
 		w.open_with_credentials()?;
 		let parent_key_id = w.parent_key_id();
 		let res = tx::receive_tx(&mut **w, slate, &parent_key_id);
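Every owner-API method follows the same shape: take the mutex once at the API boundary, open the wallet through the guard, do the work, close. `w` is a MutexGuard over a boxed backend, so `&mut **w` peels the guard and the box to reach the underlying trait object. A reduced sketch with hypothetical names (the real trait and wrapper types differ):

    extern crate parking_lot;

    use std::sync::Arc;
    use parking_lot::Mutex;

    // Illustrative stand-in for the wallet backend trait.
    trait WalletBackend {
        fn open_with_credentials(&mut self) -> Result<(), String>;
        fn close(&mut self) -> Result<(), String>;
    }

    struct Backend;

    impl WalletBackend for Backend {
        fn open_with_credentials(&mut self) -> Result<(), String> {
            Ok(())
        }
        fn close(&mut self) -> Result<(), String> {
            Ok(())
        }
    }

    fn with_wallet(wallet: &Arc<Mutex<Box<WalletBackend>>>) -> Result<(), String> {
        let mut w = wallet.lock(); // one lock per API call
        w.open_with_credentials()?;
        {
            // &mut **w: guard -> Box -> trait object, as in keys::accounts(&mut **w)
            let backend: &mut WalletBackend = &mut **w;
            backend.close()?;
        }
        Ok(())
    }

    fn main() {
        let wallet: Arc<Mutex<Box<WalletBackend>>> = Arc::new(Mutex::new(Box::new(Backend)));
        assert!(with_wallet(&wallet).is_ok());
    }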

View file

@@ -33,9 +33,10 @@ use serde_json;
 use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::net::SocketAddr;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
 use url::form_urlencoded;
 use util::secp::pedersen;
+use util::Mutex;
 use util::{to_base64, LOGGER};
 /// Instantiate wallet Owner API for a single-use (command line) call

View file

@@ -14,7 +14,8 @@
 //! Transaction building functions
-use std::sync::{Arc, RwLock};
+use std::sync::Arc;
+use util::RwLock;
 use core::core::verifier_cache::LruVerifierCache;
 use core::core::Transaction;

View file

@@ -108,20 +108,20 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	// Default wallet 2 to listen on that account
 	{
-		let mut w = wallet2.lock().unwrap();
+		let mut w = wallet2.lock();
 		w.set_parent_key_id_by_name("listener_account")?;
 	}
 	// Mine into two different accounts in the same wallet
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("account1")?;
 		assert_eq!(w.parent_key_id(), ExtKeychain::derive_key_id(2, 1, 0, 0, 0));
 	}
 	let _ = common::award_blocks_to_wallet(&chain, wallet1.clone(), 7);
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("account2")?;
 		assert_eq!(w.parent_key_id(), ExtKeychain::derive_key_id(2, 2, 0, 0, 0));
 	}
@@ -141,7 +141,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	})?;
 	// now check second account
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("account1")?;
 	}
 	wallet::controller::owner_single_use(wallet1.clone(), |api| {
@@ -161,7 +161,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	// should be nothing in default account
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("default")?;
 	}
 	wallet::controller::owner_single_use(wallet1.clone(), |api| {
@@ -180,7 +180,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	// Send a tx to another wallet
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("account1")?;
 	}
@@ -208,7 +208,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	// other account should be untouched
 	{
-		let mut w = wallet1.lock().unwrap();
+		let mut w = wallet1.lock();
 		w.set_parent_key_id_by_name("account2")?;
 	}
 	wallet::controller::owner_single_use(wallet1.clone(), |api| {
@@ -233,7 +233,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
 	})?;
 	// Default account on wallet 2 should be untouched
 	{
-		let mut w = wallet2.lock().unwrap();
+		let mut w = wallet2.lock();
 		w.set_parent_key_id_by_name("default")?;
 	}
 	wallet::controller::owner_single_use(wallet2.clone(), |api| {

View file

@@ -22,7 +22,8 @@ extern crate grin_wallet as wallet;
 extern crate serde_json;
 use chrono::Duration;
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;
+use util::Mutex;
 use chain::Chain;
 use core::core::{OutputFeatures, OutputIdentifier, Transaction};
@@ -38,13 +39,13 @@ use util::secp::pedersen;
 pub mod testclient;
 /// types of backends tests should iterate through
-#[derive(Clone)]
-pub enum BackendType {
-	/// File
-	FileBackend,
-	/// LMDB
-	LMDBBackend,
-}
+//#[derive(Clone)]
+//pub enum BackendType {
+//	/// File
+//	FileBackend,
+//	/// LMDB
+//	LMDBBackend,
+//}
 /// Get an output from the chain locally and present it back as an API output
 fn get_output_local(chain: &chain::Chain, commit: &pedersen::Commitment) -> Option<api::Output> {

View file

@@ -20,9 +20,10 @@ use std::collections::HashMap;
 use std::marker::PhantomData;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::mpsc::{channel, Receiver, Sender};
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::Arc;
 use std::thread;
 use std::time::Duration;
+use util::{Mutex, RwLock};
 use common::api;
 use common::serde_json;
@@ -306,7 +307,7 @@ impl LocalWalletClient {
 	/// get an instance of the send queue for other senders
 	pub fn get_send_instance(&self) -> Sender<WalletProxyMessage> {
-		self.tx.lock().unwrap().clone()
+		self.tx.lock().clone()
 	}
 }
@@ -338,11 +339,11 @@ impl WalletClient for LocalWalletClient {
 			body: serde_json::to_string(slate).unwrap(),
 		};
 		{
-			let p = self.proxy_tx.lock().unwrap();
+			let p = self.proxy_tx.lock();
 			p.send(m)
 				.context(libwallet::ErrorKind::ClientCallback("Send TX Slate"))?;
 		}
-		let r = self.rx.lock().unwrap();
+		let r = self.rx.lock();
 		let m = r.recv().unwrap();
 		trace!(LOGGER, "Received send_tx_slate response: {:?}", m.clone());
 		Ok(
@@ -362,11 +363,11 @@ impl WalletClient for LocalWalletClient {
 			body: serde_json::to_string(tx).unwrap(),
 		};
 		{
-			let p = self.proxy_tx.lock().unwrap();
+			let p = self.proxy_tx.lock();
 			p.send(m)
 				.context(libwallet::ErrorKind::ClientCallback("post_tx send"))?;
 		}
-		let r = self.rx.lock().unwrap();
+		let r = self.rx.lock();
 		let m = r.recv().unwrap();
 		trace!(LOGGER, "Received post_tx response: {:?}", m.clone());
 		Ok(())
@@ -381,12 +382,12 @@ impl WalletClient for LocalWalletClient {
 			body: "".to_owned(),
 		};
 		{
-			let p = self.proxy_tx.lock().unwrap();
+			let p = self.proxy_tx.lock();
 			p.send(m).context(libwallet::ErrorKind::ClientCallback(
 				"Get chain height send",
 			))?;
 		}
-		let r = self.rx.lock().unwrap();
+		let r = self.rx.lock();
 		let m = r.recv().unwrap();
 		trace!(
 			LOGGER,
@@ -417,12 +418,12 @@ impl WalletClient for LocalWalletClient {
 			body: query_str,
 		};
 		{
-			let p = self.proxy_tx.lock().unwrap();
+			let p = self.proxy_tx.lock();
 			p.send(m).context(libwallet::ErrorKind::ClientCallback(
 				"Get outputs from node send",
 			))?;
 		}
-		let r = self.rx.lock().unwrap();
+		let r = self.rx.lock();
 		let m = r.recv().unwrap();
 		let outputs: Vec<api::Output> = serde_json::from_str(&m.body).unwrap();
 		let mut api_outputs: HashMap<pedersen::Commitment, (String, u64)> = HashMap::new();
@@ -456,13 +457,13 @@ impl WalletClient for LocalWalletClient {
 			body: query_str,
 		};
 		{
-			let p = self.proxy_tx.lock().unwrap();
+			let p = self.proxy_tx.lock();
 			p.send(m).context(libwallet::ErrorKind::ClientCallback(
 				"Get outputs from node by PMMR index send",
 			))?;
 		}
-		let r = self.rx.lock().unwrap();
+		let r = self.rx.lock();
 		let m = r.recv().unwrap();
 		let o: api::OutputListing = serde_json::from_str(&m.body).unwrap();
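mpsc's Sender is Clone but not Sync, so the test client parks it behind a Mutex and each caller clones its own handle out through the guard; the Receiver is guarded the same way. A reduced sketch of the idiom (simplified message type):

    extern crate parking_lot;

    use std::sync::mpsc::{channel, Receiver, Sender};
    use parking_lot::Mutex;

    struct Client {
        tx: Mutex<Sender<String>>,
        rx: Mutex<Receiver<String>>,
    }

    impl Client {
        fn get_send_instance(&self) -> Sender<String> {
            // Autoderef through the guard, then Sender::clone.
            self.tx.lock().clone()
        }
    }

    fn main() {
        let (tx, rx) = channel();
        let client = Client {
            tx: Mutex::new(tx),
            rx: Mutex::new(rx),
        };

        client.get_send_instance().send("ping".to_owned()).unwrap();
        assert_eq!(client.rx.lock().recv().unwrap(), "ping");
    }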

View file

@@ -109,12 +109,12 @@ fn compare_wallet_restore(
 	);
 	{
-		let mut w = wallet_source.lock().unwrap();
+		let mut w = wallet_source.lock();
 		w.set_parent_key_id(account_path.clone());
 	}
 	{
-		let mut w = wallet_dest.lock().unwrap();
+		let mut w = wallet_dest.lock();
 		w.set_parent_key_id(account_path.clone());
 	}
@@ -206,7 +206,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
 	// Default wallet 2 to listen on that account
 	{
-		let mut w = wallet2.lock().unwrap();
+		let mut w = wallet2.lock();
 		w.set_parent_key_id_by_name("account1")?;
 	}
@@ -281,7 +281,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
 	// Another listener account on wallet 2
 	{
-		let mut w = wallet2.lock().unwrap();
+		let mut w = wallet2.lock();
 		w.set_parent_key_id_by_name("account2")?;
 	}