Rework pool tests to use real chain (was mock chain) (#3342)

* rework pool tests to use real chain (was mock chain) to better reflect reality (tx/block validation rules etc.)

* cleanup
Author: Antioch Peverell, 2020-06-07 09:26:08 +01:00 (committed by GitHub)
Parent: c7c9a32b9b
Commit: c54568e69f
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)

11 changed files with 677 additions and 1093 deletions
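
At a glance, the rework replaces the hand-rolled mock chain (a raw `ChainStore` plus an in-memory UTXO set, with coinbase maturity and lock-height checks stubbed out) with a thin adapter over a real `chain::Chain`, so the pool tests now run against genuine block processing. A condensed sketch of the new setup pattern, assembled from the helpers introduced in this diff (see the rewritten `common.rs` below; `target/.some_pool_test` is a placeholder path):

```rust
// Condensed sketch of the reworked setup; all helpers come from the
// rewritten pool/tests/common.rs shown further down in this diff.
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

// A real chain, seeded with a genesis block carrying a real coinbase reward.
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain("target/.some_pool_test", genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

// The pool's BlockChain adapter now simply delegates to the real chain,
// so tx validation, coinbase maturity and lock heights are all enforced.
let mut pool = init_transaction_pool(
    Arc::new(ChainAdapter { chain: chain.clone() }),
    verifier_cache,
);

// Blocks are mined with real (test-sized) PoW and run through process_block.
add_some_blocks(&chain, 3, &keychain);
```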

core/src/global.rs

@@ -78,7 +78,7 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1;
 pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
 /// Testing max_block_weight (artifically low, just enough to support a few txs).
-pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150;
+pub const TESTING_MAX_BLOCK_WEIGHT: usize = 250;
 /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
 /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.

pool/src/types.rs

@@ -287,9 +287,9 @@ pub trait PoolAdapter: Send + Sync {
 /// Dummy adapter used as a placeholder for real implementations
 #[allow(dead_code)]
-pub struct NoopAdapter {}
+pub struct NoopPoolAdapter {}

-impl PoolAdapter for NoopAdapter {
+impl PoolAdapter for NoopPoolAdapter {
 	fn tx_accepted(&self, _entry: &PoolEntry) {}
 	fn stem_tx_accepted(&self, _entry: &PoolEntry) -> Result<(), PoolError> {
 		Ok(())
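
One side effect worth calling out: the `NoopAdapter` to `NoopPoolAdapter` rename avoids a name clash, because the reworked test harness imports the chain crate's own `NoopAdapter` (for chain events) alongside the pool's no-op adapter. A minimal illustration of the two now-distinct types, assuming the crate paths used elsewhere in this diff:

```rust
use grin_chain::types::NoopAdapter; // chain-events adapter passed to Chain::init
use grin_pool::types::NoopPoolAdapter; // pool-events adapter passed to the TransactionPool

fn main() {
    // Both "no-op" adapters can now coexist in one test module without aliasing.
    let _chain_adapter = NoopAdapter {};
    let _pool_adapter = NoopPoolAdapter {};
}
```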

pool/tests/block_building.rs

@@ -16,69 +16,50 @@ pub mod common;
 use self::core::core::hash::Hashed;
 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{Block, BlockHeader, Transaction};
-use self::core::pow::Difficulty;
-use self::core::{global, libtx};
+use self::core::global;
 use self::keychain::{ExtKeychain, Keychain};
+use self::pool::PoolError;
 use self::util::RwLock;
 use crate::common::*;
 use grin_core as core;
 use grin_keychain as keychain;
+use grin_pool as pool;
 use grin_util as util;
 use std::sync::Arc;

 #[test]
-fn test_transaction_pool_block_building() {
+fn test_transaction_pool_block_building() -> Result<(), PoolError> {
 	util::init_test_logger();
 	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
 	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

-	let db_root = ".grin_block_building".to_string();
-	clean_output_dir(db_root.clone());
-	{
-		let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-
-		// Initialize the chain/txhashset with an initial block
-		// so we have a non-empty UTXO set.
-		let add_block =
-			|prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
-				let height = prev_header.height + 1;
-				let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
-				let fee = txs.iter().map(|x| x.fee()).sum();
-				let reward = libtx::reward::output(
-					&keychain,
-					&libtx::ProofBuilder::new(&keychain),
-					&key_id,
-					fee,
-					false,
-				)
-				.unwrap();
-				let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
-				// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-				block.header.prev_root = prev_header.hash();
-				chain.update_db_for_block(&block);
-				block
-			};
-		let block = add_block(BlockHeader::default(), vec![], &mut chain);
-		let header = block.header;
-
-		// Now create tx to spend that first coinbase (now matured).
-		// Provides us with some useful outputs to test with.
-		let initial_tx =
-			test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
-
-		// Mine that initial tx so we can spend it with multiple txs
-		let block = add_block(header, vec![initial_tx], &mut chain);
-		let header = block.header;
-
-		// Initialize a new pool with our chain adapter.
-		let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
+	let db_root = "target/.block_building";
+	clean_output_dir(db_root.into());
+
+	let genesis = genesis_block(&keychain);
+	let chain = Arc::new(init_chain(db_root, genesis));
+
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+
+	// Initialize a new pool with our chain adapter.
+	let mut pool = init_transaction_pool(
+		Arc::new(ChainAdapter {
+			chain: chain.clone(),
+		}),
+		verifier_cache,
+	);
+
+	add_some_blocks(&chain, 3, &keychain);
+	let header_1 = chain.get_header_by_height(1).unwrap();
+
+	// Now create tx to spend an early coinbase (now matured).
+	// Provides us with some useful outputs to test with.
+	let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
+
+	// Mine that initial tx so we can spend it with multiple txs.
+	add_block(&chain, vec![initial_tx], &keychain);
+	let header = chain.head_header().unwrap();

 	let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
 	let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
@@ -88,33 +69,24 @@ fn test_transaction_pool_block_building() {
 	let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);

 	{
-		let mut write_pool = pool.write();
-
 		// Add the three root txs to the pool.
-		write_pool
-			.add_to_pool(test_source(), root_tx_1.clone(), false, &header)
-			.unwrap();
-		write_pool
-			.add_to_pool(test_source(), root_tx_2.clone(), false, &header)
-			.unwrap();
-		write_pool
-			.add_to_pool(test_source(), root_tx_3.clone(), false, &header)
-			.unwrap();
+		pool.add_to_pool(test_source(), root_tx_1.clone(), false, &header)?;
+		pool.add_to_pool(test_source(), root_tx_2.clone(), false, &header)?;
+		pool.add_to_pool(test_source(), root_tx_3.clone(), false, &header)?;

 		// Now add the two child txs to the pool.
-		write_pool
-			.add_to_pool(test_source(), child_tx_1.clone(), false, &header)
-			.unwrap();
-		write_pool
-			.add_to_pool(test_source(), child_tx_2.clone(), false, &header)
-			.unwrap();
+		pool.add_to_pool(test_source(), child_tx_1.clone(), false, &header)?;
+		pool.add_to_pool(test_source(), child_tx_2.clone(), false, &header)?;

-		assert_eq!(write_pool.total_size(), 5);
+		assert_eq!(pool.total_size(), 5);
 	}

-	let txs = pool.read().prepare_mineable_transactions().unwrap();
+	let txs = pool.prepare_mineable_transactions()?;

-	let block = add_block(header, txs, &mut chain);
+	add_block(&chain, txs, &keychain);
+
+	// Get full block from head of the chain (block we just processed).
+	let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();

 	// Check the block contains what we expect.
 	assert_eq!(block.inputs().len(), 4);
@@ -130,12 +102,12 @@ fn test_transaction_pool_block_building() {
 	// Now reconcile the transaction pool with the new block
 	// and check the resulting contents of the pool are what we expect.
 	{
-		let mut write_pool = pool.write();
-		write_pool.reconcile_block(&block).unwrap();
-
-		assert_eq!(write_pool.total_size(), 0);
-	}
+		pool.reconcile_block(&block)?;
+		assert_eq!(pool.total_size(), 0);
 	}

 	// Cleanup db directory
-	clean_output_dir(db_root.clone());
+	clean_output_dir(db_root.into());
+	Ok(())
 }

pool/tests/block_max_weight.rs

@@ -15,13 +15,9 @@
 //! Test coverage for block building at the limit of max_block_weight.

 pub mod common;

 use self::core::core::hash::Hashed;
 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{Block, BlockHeader, Transaction};
 use self::core::global;
-use self::core::libtx;
-use self::core::pow::Difficulty;
 use self::keychain::{ExtKeychain, Keychain};
 use self::util::RwLock;
 use crate::common::*;
@@ -37,57 +33,39 @@ fn test_block_building_max_weight() {
 	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

-	let db_root = ".grin_block_building_max_weight".to_string();
-	clean_output_dir(db_root.clone());
-	{
-		let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-
-		// Convenient was to add a new block to the chain.
-		let add_block =
-			|prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
-				let height = prev_header.height + 1;
-				let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
-				let fee = txs.iter().map(|x| x.fee()).sum();
-				let reward = libtx::reward::output(
-					&keychain,
-					&libtx::ProofBuilder::new(&keychain),
-					&key_id,
-					fee,
-					false,
-				)
-				.unwrap();
-				let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
-				// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-				block.header.prev_root = prev_header.hash();
-				chain.update_db_for_block(&block);
-				block
-			};
-
-		// Initialize the chain/txhashset with an initial block
-		// so we have a non-empty UTXO set.
-		let block = add_block(BlockHeader::default(), vec![], &mut chain);
-		let header = block.header;
-
-		// Now create tx to spend that first coinbase (now matured).
-		// Provides us with some useful outputs to test with.
-		let initial_tx =
-			test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]);
-
-		// Mine that initial tx so we can spend it with multiple txs
-		let block = add_block(header, vec![initial_tx], &mut chain);
-		let header = block.header;
-
-		// Initialize a new pool with our chain adapter.
-		let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
+	let db_root = "target/.block_max_weight";
+	clean_output_dir(db_root.into());
+
+	let genesis = genesis_block(&keychain);
+	let chain = Arc::new(init_chain(db_root, genesis));
+
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+
+	// Initialize a new pool with our chain adapter.
+	let mut pool = init_transaction_pool(
+		Arc::new(ChainAdapter {
+			chain: chain.clone(),
+		}),
+		verifier_cache,
+	);
+
+	add_some_blocks(&chain, 3, &keychain);
+
+	let header_1 = chain.get_header_by_height(1).unwrap();
+
+	// Now create tx to spend an early coinbase (now matured).
+	// Provides us with some useful outputs to test with.
+	let initial_tx =
+		test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]);
+
+	// Mine that initial tx so we can spend it with multiple txs.
+	add_block(&chain, vec![initial_tx], &keychain);
+
+	let header = chain.head_header().unwrap();

 	// Build some dependent txs to add to the txpool.
 	// We will build a block from a subset of these.
 	let txs = vec![
+		test_transaction(&keychain, vec![1000], vec![390, 130, 120, 110]),
 		test_transaction(&keychain, vec![100], vec![90, 1]),
 		test_transaction(&keychain, vec![90], vec![80, 2]),
 		test_transaction(&keychain, vec![200], vec![199]),
@@ -98,65 +76,60 @@ fn test_block_building_max_weight() {
 	// Fees and weights of our original txs in insert order.
 	assert_eq!(
 		txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
-		[9, 8, 1, 7, 6]
+		[250, 9, 8, 1, 7, 6]
 	);
 	assert_eq!(
 		txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
-		[8, 8, 4, 8, 8]
+		[16, 8, 8, 4, 8, 8]
 	);
 	assert_eq!(
 		txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
-		[1125, 1000, 250, 875, 750]
+		[15625, 1125, 1000, 250, 875, 750]
 	);

 	// Populate our txpool with the txs.
-	{
-		let mut write_pool = pool.write();
-		for tx in txs {
-			println!("***** {}", tx.fee_to_weight());
-			write_pool
-				.add_to_pool(test_source(), tx, false, &header)
-				.unwrap();
-		}
+	for tx in txs {
+		pool.add_to_pool(test_source(), tx, false, &header).unwrap();
 	}

 	// Check we added them all to the txpool successfully.
-	assert_eq!(pool.read().total_size(), 5);
+	assert_eq!(pool.total_size(), 6);

 	// Prepare some "mineable" txs from the txpool.
 	// Note: We cannot fit all the txs from the txpool into a block.
-	let txs = pool.read().prepare_mineable_transactions().unwrap();
+	let txs = pool.prepare_mineable_transactions().unwrap();

 	// Fees and weights of the "mineable" txs.
-	assert_eq!(txs.iter().map(|x| x.fee()).collect::<Vec<_>>(), [9, 8, 7]);
+	assert_eq!(
+		txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
+		[250, 9, 8, 7]
+	);
 	assert_eq!(
 		txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
-		[8, 8, 8]
+		[16, 8, 8, 8]
 	);
 	assert_eq!(
 		txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
-		[1125, 1000, 875]
+		[15625, 1125, 1000, 875]
 	);

-	let block = add_block(header, txs, &mut chain);
+	add_block(&chain, txs, &keychain);
+	let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();

 	// Check contents of the block itself (including coinbase reward).
-	assert_eq!(block.inputs().len(), 2);
-	assert_eq!(block.outputs().len(), 6);
-	assert_eq!(block.kernels().len(), 4);
+	assert_eq!(block.inputs().len(), 3);
+	assert_eq!(block.outputs().len(), 10);
+	assert_eq!(block.kernels().len(), 5);

 	// Now reconcile the transaction pool with the new block
 	// and check the resulting contents of the pool are what we expect.
-	{
-		let mut write_pool = pool.write();
-		write_pool.reconcile_block(&block).unwrap();
+	pool.reconcile_block(&block).unwrap();

-		// We should still have 2 tx in the pool after accepting the new block.
-		// This one exceeded the max block weight when building the block so
-		// remained in the txpool.
-		assert_eq!(write_pool.total_size(), 2);
-	}
-	}
+	// We should still have 2 tx in the pool after accepting the new block.
+	// This one exceeded the max block weight when building the block so
+	// remained in the txpool.
+	assert_eq!(pool.total_size(), 2);

 	// Cleanup db directory
-	clean_output_dir(db_root.clone());
+	clean_output_dir(db_root.into());
 }
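
The new assertions are internally consistent with a fee-to-weight ratio scaled by 1000: the heavy tx added at the top of the list pays fee 250 at weight 16, and 250 × 1000 / 16 = 15625, which is why it sorts first, while the lowest-ratio tx (1 × 1000 / 4 = 250) is the one left out of the block. A quick standalone check of that arithmetic (the scaling factor is inferred from the asserted values, not quoted from the pool code):

```rust
// Verifies the fee/weight/ratio triples asserted in the test above,
// assuming fee_to_weight == fee * 1000 / tx_weight (inferred, not quoted).
fn fee_to_weight(fee: u64, tx_weight: u64) -> u64 {
    fee * 1_000 / tx_weight
}

fn main() {
    let fees = [250u64, 9, 8, 1, 7, 6];
    let weights = [16u64, 8, 8, 4, 8, 8];
    let ratios: Vec<u64> = fees
        .iter()
        .zip(weights.iter())
        .map(|(&f, &w)| fee_to_weight(f, w))
        .collect();
    assert_eq!(ratios, vec![15625, 1125, 1000, 250, 875, 750]);
}
```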

pool/tests/block_reconciliation.rs

@@ -16,9 +16,7 @@ pub mod common;
 use self::core::core::hash::Hashed;
 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{Block, BlockHeader};
-use self::core::pow::Difficulty;
-use self::core::{global, libtx};
+use self::core::global;
 use self::keychain::{ExtKeychain, Keychain};
 use self::util::RwLock;
 use crate::common::ChainAdapter;
@@ -30,69 +28,37 @@ use std::sync::Arc;
 #[test]
 fn test_transaction_pool_block_reconciliation() {
+	util::init_test_logger();
 	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
 	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

-	let db_root = ".grin_block_reconciliation".to_string();
-	clean_output_dir(db_root.clone());
-	{
-		let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
-
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-
-		// Initialize a new pool with our chain adapter.
-		let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
-
-		let header = {
-			let height = 1;
-			let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
-			let reward = libtx::reward::output(
-				&keychain,
-				&libtx::ProofBuilder::new(&keychain),
-				&key_id,
-				0,
-				false,
-			)
-			.unwrap();
-			let genesis = BlockHeader::default();
-			let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
-			// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-			block.header.prev_root = genesis.hash();
-			chain.update_db_for_block(&block);
-			block.header
-		};
-
-		// Now create tx to spend that first coinbase (now matured).
-		// Provides us with some useful outputs to test with.
-		let initial_tx =
-			test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
-
-		let block = {
-			let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
-			let fees = initial_tx.fee();
-			let reward = libtx::reward::output(
-				&keychain,
-				&libtx::ProofBuilder::new(&keychain),
-				&key_id,
-				fees,
-				false,
-			)
-			.unwrap();
-			let mut block =
-				Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
-			// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-			block.header.prev_root = header.hash();
-			chain.update_db_for_block(&block);
-			block
-		};
-
-		let header = block.header;
+	let db_root = "target/.block_reconciliation";
+	clean_output_dir(db_root.into());
+
+	let genesis = genesis_block(&keychain);
+	let chain = Arc::new(init_chain(db_root, genesis));
+
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+
+	// Initialize a new pool with our chain adapter.
+	let mut pool = init_transaction_pool(
+		Arc::new(ChainAdapter {
+			chain: chain.clone(),
+		}),
+		verifier_cache,
+	);
+
+	add_some_blocks(&chain, 3, &keychain);
+
+	let header_1 = chain.get_header_by_height(1).unwrap();
+
+	// Now create tx to spend an early coinbase (now matured).
+	// Provides us with some useful outputs to test with.
+	let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
+
+	// Mine that initial tx so we can spend it with multiple txs.
+	add_block(&chain, vec![initial_tx], &keychain);
+
+	let header = chain.head_header().unwrap();

 	// Preparation: We will introduce three root pool transactions.
 	// 1. A transaction that should be invalidated because it is exactly
@@ -144,18 +110,14 @@ fn test_transaction_pool_block_reconciliation() {
 	// First we add the above transactions to the pool.
 	// All should be accepted.
-	{
-		let mut write_pool = pool.write();
-		assert_eq!(write_pool.total_size(), 0);
+	assert_eq!(pool.total_size(), 0);

-		for tx in &txs_to_add {
-			write_pool
-				.add_to_pool(test_source(), tx.clone(), false, &header)
-				.unwrap();
-		}
+	for tx in &txs_to_add {
+		pool.add_to_pool(test_source(), tx.clone(), false, &header)
+			.unwrap();
+	}

-		assert_eq!(write_pool.total_size(), txs_to_add.len());
-	}
+	assert_eq!(pool.total_size(), txs_to_add.len());

 	// Now we prepare the block that will cause the above conditions to be met.
 	// First, the transactions we want in the block:
@@ -169,46 +131,21 @@ fn test_transaction_pool_block_reconciliation() {
 	let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);

 	let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];
+	add_block(&chain, block_txs, &keychain);
+	let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();

-	// Now apply this block.
-	let block = {
-		let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
-		let fees = block_txs.iter().map(|tx| tx.fee()).sum();
-		let reward = libtx::reward::output(
-			&keychain,
-			&libtx::ProofBuilder::new(&keychain),
-			&key_id,
-			fees,
-			false,
-		)
-		.unwrap();
-		let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
-		// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-		block.header.prev_root = header.hash();
-		chain.update_db_for_block(&block);
-		block
-	};

 	// Check the pool still contains everything we expect at this point.
-	{
-		let write_pool = pool.write();
-		assert_eq!(write_pool.total_size(), txs_to_add.len());
-	}
+	assert_eq!(pool.total_size(), txs_to_add.len());

 	// And reconcile the pool with this latest block.
-	{
-		let mut write_pool = pool.write();
-		write_pool.reconcile_block(&block).unwrap();
+	pool.reconcile_block(&block).unwrap();

-		assert_eq!(write_pool.total_size(), 4);
-		assert_eq!(write_pool.txpool.entries[0].tx, valid_transaction);
-		assert_eq!(write_pool.txpool.entries[1].tx, pool_child);
-		assert_eq!(write_pool.txpool.entries[2].tx, conflict_valid_child);
-		assert_eq!(write_pool.txpool.entries[3].tx, valid_child_valid);
-	}
-	}
+	assert_eq!(pool.total_size(), 4);
+	assert_eq!(pool.txpool.entries[0].tx, valid_transaction);
+	assert_eq!(pool.txpool.entries[1].tx, pool_child);
+	assert_eq!(pool.txpool.entries[2].tx, conflict_valid_child);
+	assert_eq!(pool.txpool.entries[3].tx, valid_child_valid);

 	// Cleanup db directory
-	clean_output_dir(db_root.clone());
+	clean_output_dir(db_root.into());
 }

pool/tests/coinbase_maturity.rs

@@ -14,12 +14,10 @@
 pub mod common;

-use self::core::core::hash::Hash;
 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{BlockHeader, BlockSums, Transaction};
 use self::core::global;
 use self::keychain::{ExtKeychain, Keychain};
-use self::pool::types::{BlockChain, PoolError};
+use self::pool::types::PoolError;
 use self::util::RwLock;
 use crate::common::*;
 use grin_core as core;
@@ -28,61 +26,49 @@ use grin_pool as pool;
 use grin_util as util;
 use std::sync::Arc;

-#[derive(Clone)]
-pub struct CoinbaseMaturityErrorChainAdapter {}
-
-impl CoinbaseMaturityErrorChainAdapter {
-	pub fn new() -> CoinbaseMaturityErrorChainAdapter {
-		CoinbaseMaturityErrorChainAdapter {}
-	}
-}
-
-impl BlockChain for CoinbaseMaturityErrorChainAdapter {
-	fn chain_head(&self) -> Result<BlockHeader, PoolError> {
-		unimplemented!();
-	}
-
-	fn get_block_header(&self, _hash: &Hash) -> Result<BlockHeader, PoolError> {
-		unimplemented!();
-	}
-
-	fn get_block_sums(&self, _hash: &Hash) -> Result<BlockSums, PoolError> {
-		unimplemented!();
-	}
-
-	fn validate_tx(&self, _tx: &Transaction) -> Result<(), PoolError> {
-		unimplemented!();
-	}
-
-	// Returns an ImmatureCoinbase for every tx we pass in.
-	fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> {
-		Err(PoolError::ImmatureCoinbase)
-	}
-
-	// Mocking this out for these tests.
-	fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> {
-		Ok(())
-	}
-}
-
 /// Test we correctly verify coinbase maturity when adding txs to the pool.
 #[test]
 fn test_coinbase_maturity() {
+	util::init_test_logger();
 	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
 	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

-	// Mocking this up with an adapter that will raise an error for coinbase
-	// maturity.
-	let chain = Arc::new(CoinbaseMaturityErrorChainAdapter::new());
-	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-	let pool = RwLock::new(test_setup(chain, verifier_cache));
-
-	{
-		let mut write_pool = pool.write();
-		let tx = test_transaction(&keychain, vec![50], vec![49]);
-		match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
-			Err(PoolError::ImmatureCoinbase) => {}
-			_ => panic!("Expected an immature coinbase error here."),
-		}
-	}
+	let db_root = "target/.coinbase_maturity";
+	clean_output_dir(db_root.into());
+
+	let genesis = genesis_block(&keychain);
+	let chain = Arc::new(init_chain(db_root, genesis));
+
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+
+	// Initialize a new pool with our chain adapter.
+	let mut pool = init_transaction_pool(
+		Arc::new(ChainAdapter {
+			chain: chain.clone(),
+		}),
+		verifier_cache,
+	);
+
+	// Add a single block, introducing coinbase output to be spent later.
+	add_block(&chain, vec![], &keychain);
+	let header_1 = chain.get_header_by_height(1).unwrap();
+	let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![100]);
+
+	// Coinbase is not yet matured and cannot be spent.
+	let header = chain.head_header().unwrap();
+	assert_eq!(
+		pool.add_to_pool(test_source(), tx.clone(), true, &header)
+			.err(),
+		Some(PoolError::ImmatureCoinbase)
+	);
+
+	// Add 2 more blocks. Original coinbase output is now matured and can be spent.
+	add_some_blocks(&chain, 2, &keychain);
+	let header = chain.head_header().unwrap();
+	assert_eq!(
+		pool.add_to_pool(test_source(), tx.clone(), true, &header),
+		Ok(())
+	);
+
+	clean_output_dir(db_root.into());
 }
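
The heights here line up with a coinbase maturity of 3 blocks under `AutomatedTesting` (an inference from the test, not a value quoted in this diff): the coinbase is minted at height 1, a spend building on head height 1 (i.e. for inclusion at height 2) is immature, and after two more blocks a spend for inclusion at height 4 is accepted. A small check under that assumption:

```rust
// Maturity arithmetic implied by the test above, assuming a testing
// coinbase maturity of 3 blocks (assumption, not quoted from the PR).
const TESTING_COINBASE_MATURITY: u64 = 3;

// A coinbase minted at `coinbase_height` is spendable in a block at
// `spend_height` once the maturity window has passed.
fn is_mature(coinbase_height: u64, spend_height: u64) -> bool {
    spend_height >= coinbase_height + TESTING_COINBASE_MATURITY
}

fn main() {
    // Head at height 1 -> candidate spend at height 2: immature.
    assert!(!is_mature(1, 2));
    // Two more blocks, head at height 3 -> candidate spend at height 4: mature.
    assert!(is_mature(1, 4));
}
```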

pool/tests/common.rs

@@ -14,144 +14,141 @@
 //! Common test functions

-use self::chain::store::ChainStore;
-use self::chain::types::Tip;
-use self::core::core::hash::{Hash, Hashed};
-use self::core::core::verifier_cache::VerifierCache;
-use self::core::core::{Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction};
-use self::core::libtx;
-use self::keychain::{ExtKeychain, Keychain};
+use self::chain::types::{NoopAdapter, Options};
+use self::chain::Chain;
+use self::core::consensus;
+use self::core::core::hash::Hash;
+use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
+use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction};
+use self::core::genesis;
+use self::core::global;
+use self::core::libtx::{build, reward, ProofBuilder};
+use self::core::pow;
+use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
 use self::pool::types::*;
 use self::pool::TransactionPool;
-use self::util::secp::pedersen::Commitment;
 use self::util::RwLock;
+use chrono::Duration;
 use grin_chain as chain;
 use grin_core as core;
 use grin_keychain as keychain;
 use grin_pool as pool;
 use grin_util as util;
-use std::collections::HashSet;
 use std::fs;
 use std::sync::Arc;

-#[derive(Clone)]
-pub struct ChainAdapter {
-	pub store: Arc<RwLock<ChainStore>>,
-	pub utxo: Arc<RwLock<HashSet<Commitment>>>,
-}
+/// Build genesis block with reward (non-empty, like we have in mainnet).
+pub fn genesis_block<K>(keychain: &K) -> Block
+where
+	K: Keychain,
+{
+	let key_id = keychain::ExtKeychain::derive_key_id(1, 0, 0, 0, 0);
+	let reward = reward::output(keychain, &ProofBuilder::new(keychain), &key_id, 0, false).unwrap();
+
+	genesis::genesis_dev().with_reward(reward.0, reward.1)
+}

-impl ChainAdapter {
-	pub fn init(db_root: String) -> Result<ChainAdapter, String> {
-		let target_dir = format!("target/{}", db_root);
-		let chain_store = ChainStore::new(&target_dir)
-			.map_err(|e| format!("failed to init chain_store, {:?}", e))?;
-		let store = Arc::new(RwLock::new(chain_store));
-		let utxo = Arc::new(RwLock::new(HashSet::new()));
-
-		Ok(ChainAdapter { store, utxo })
-	}
+pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
+	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
+	Chain::init(
+		dir_name.to_string(),
+		Arc::new(NoopAdapter {}),
+		genesis,
+		pow::verify_size,
+		verifier_cache,
+		false,
+	)
+	.unwrap()
+}

-	pub fn update_db_for_block(&self, block: &Block) {
-		let header = &block.header;
-		let tip = Tip::from_header(header);
-		let s = self.store.write();
-		let batch = s.batch().unwrap();
-
-		batch.save_block_header(header).unwrap();
-		batch.save_body_head(&tip).unwrap();
-
-		// Retrieve previous block_sums from the db.
-		let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
-			prev_sums
-		} else {
-			BlockSums::default()
-		};
-
-		// Overage is based purely on the new block.
-		// Previous block_sums have taken all previous overage into account.
-		let overage = header.overage();
-
-		// Offset on the other hand is the total kernel offset from the new block.
-		let offset = header.total_kernel_offset();
-
-		// Verify the kernel sums for the block_sums with the new block applied.
-		let (utxo_sum, kernel_sum) = (prev_sums, block as &dyn Committed)
-			.verify_kernel_sums(overage, offset)
-			.unwrap();
-
-		let block_sums = BlockSums {
-			utxo_sum,
-			kernel_sum,
-		};
-		batch.save_block_sums(&header.hash(), block_sums).unwrap();
-
-		batch.commit().unwrap();
-
-		{
-			let mut utxo = self.utxo.write();
-			for x in block.inputs() {
-				utxo.remove(&x.commitment());
-			}
-			for x in block.outputs() {
-				utxo.insert(x.commitment());
-			}
-		}
-	}
-}
+pub fn add_some_blocks<K>(chain: &Chain, count: u64, keychain: &K)
+where
+	K: Keychain,
+{
+	for _ in 0..count {
+		add_block(chain, vec![], keychain);
+	}
+}
+
+pub fn add_block<K>(chain: &Chain, txs: Vec<Transaction>, keychain: &K)
+where
+	K: Keychain,
+{
+	let prev = chain.head_header().unwrap();
+	let height = prev.height + 1;
+	let next_header_info = consensus::next_difficulty(height, chain.difficulty_iter().unwrap());
+	let fee = txs.iter().map(|x| x.fee()).sum();
+	let key_id = ExtKeychainPath::new(1, height as u32, 0, 0, 0).to_identifier();
+	let reward =
+		reward::output(keychain, &ProofBuilder::new(keychain), &key_id, fee, false).unwrap();
+
+	let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward).unwrap();
+
+	block.header.timestamp = prev.timestamp + Duration::seconds(60);
+	block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
+
+	chain.set_txhashset_roots(&mut block).unwrap();
+
+	let edge_bits = global::min_edge_bits();
+	block.header.pow.proof.edge_bits = edge_bits;
+	pow::pow_size(
+		&mut block.header,
+		next_header_info.difficulty,
+		global::proofsize(),
+		edge_bits,
+	)
+	.unwrap();
+
+	chain.process_block(block, Options::NONE).unwrap();
+}
+
+#[derive(Clone)]
+pub struct ChainAdapter {
+	pub chain: Arc<Chain>,
+}

 impl BlockChain for ChainAdapter {
 	fn chain_head(&self) -> Result<BlockHeader, PoolError> {
-		let s = self.store.read();
-		s.head_header()
-			.map_err(|_| PoolError::Other(format!("failed to get chain head")))
+		self.chain
+			.head_header()
+			.map_err(|_| PoolError::Other("failed to get chain head".into()))
 	}

 	fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, PoolError> {
-		let s = self.store.read();
-		s.get_block_header(hash)
-			.map_err(|_| PoolError::Other(format!("failed to get block header")))
+		self.chain
+			.get_block_header(hash)
+			.map_err(|_| PoolError::Other("failed to get block header".into()))
 	}

 	fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, PoolError> {
-		let s = self.store.read();
-		s.get_block_sums(hash)
-			.map_err(|_| PoolError::Other(format!("failed to get block sums")))
+		self.chain
+			.get_block_sums(hash)
+			.map_err(|_| PoolError::Other("failed to get block sums".into()))
 	}

 	fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
-		let utxo = self.utxo.read();
-
-		for x in tx.outputs() {
-			if utxo.contains(&x.commitment()) {
-				return Err(PoolError::Other(format!("output commitment not unique")));
-			}
-		}
-
-		for x in tx.inputs() {
-			if !utxo.contains(&x.commitment()) {
-				return Err(PoolError::Other(format!("not in utxo set")));
-			}
-		}
-
-		Ok(())
+		self.chain
+			.validate_tx(tx)
+			.map_err(|_| PoolError::Other("failed to validate tx".into()))
 	}

-	// Mocking this check out for these tests.
-	// We will test the Merkle proof verification logic elsewhere.
-	fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> {
-		Ok(())
+	fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> {
+		self.chain
+			.verify_coinbase_maturity(tx)
+			.map_err(|_| PoolError::ImmatureCoinbase)
 	}

-	// Mocking this out for these tests.
-	fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> {
-		Ok(())
+	fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), PoolError> {
+		self.chain
+			.verify_tx_lock_height(tx)
+			.map_err(|_| PoolError::ImmatureTransaction)
 	}
 }

-pub fn test_setup<B, V>(
+pub fn init_transaction_pool<B, V>(
 	chain: Arc<B>,
 	verifier_cache: Arc<RwLock<V>>,
-) -> TransactionPool<B, NoopAdapter, V>
+) -> TransactionPool<B, NoopPoolAdapter, V>
 where
 	B: BlockChain,
 	V: VerifierCache + 'static,
@@ -165,7 +162,7 @@ where
 		},
 		chain.clone(),
 		verifier_cache.clone(),
-		Arc::new(NoopAdapter {}),
+		Arc::new(NoopPoolAdapter {}),
 	)
 }
@@ -189,19 +186,19 @@ where
 	// single input spending a single coinbase (deterministic key_id aka height)
 	{
 		let key_id = ExtKeychain::derive_key_id(1, header.height as u32, 0, 0, 0);
-		tx_elements.push(libtx::build::coinbase_input(coinbase_reward, key_id));
+		tx_elements.push(build::coinbase_input(coinbase_reward, key_id));
 	}

 	for output_value in output_values {
 		let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
-		tx_elements.push(libtx::build::output(output_value, key_id));
+		tx_elements.push(build::output(output_value, key_id));
 	}

-	libtx::build::transaction(
+	build::transaction(
 		KernelFeatures::Plain { fee: fees as u64 },
 		tx_elements,
 		keychain,
-		&libtx::ProofBuilder::new(keychain),
+		&ProofBuilder::new(keychain),
 	)
 	.unwrap()
 }
@@ -240,19 +237,19 @@ where
 	for input_value in input_values {
 		let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0);
-		tx_elements.push(libtx::build::input(input_value, key_id));
+		tx_elements.push(build::input(input_value, key_id));
 	}

 	for output_value in output_values {
 		let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
-		tx_elements.push(libtx::build::output(output_value, key_id));
+		tx_elements.push(build::output(output_value, key_id));
 	}

-	libtx::build::transaction(
+	build::transaction(
 		kernel_features,
 		tx_elements,
 		keychain,
-		&libtx::ProofBuilder::new(keychain),
+		&ProofBuilder::new(keychain),
 	)
 	.unwrap()
 }
@@ -262,7 +259,7 @@ pub fn test_source() -> TxSource {
 }

 pub fn clean_output_dir(db_root: String) {
-	if let Err(e) = fs::remove_dir_all(format!("target/{}", db_root)) {
+	if let Err(e) = fs::remove_dir_all(db_root) {
 		println!("cleaning output dir failed - {:?}", e)
 	}
 }
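
Most of the reworked tests open with the same moves built from these helpers: mine a few empty blocks so the height-1 coinbase matures, split that coinbase into convenient denominations, and mine the splitting tx. A condensed sketch of that shared preamble (a composition of the helpers above; the named function itself is hypothetical and does not exist in the PR, where each test inlines this sequence):

```rust
// Hypothetical helper composing the functions defined above.
fn setup_spendable_outputs(chain: &Chain, keychain: &ExtKeychain) -> BlockHeader {
    // Mine empty blocks so the coinbase at height 1 passes maturity checks.
    add_some_blocks(chain, 3, keychain);

    // Split the matured height-1 coinbase into useful output denominations.
    let header_1 = chain.get_header_by_height(1).unwrap();
    let initial_tx =
        test_transaction_spending_coinbase(keychain, &header_1, vec![10, 20, 30, 40]);

    // Mine the splitting tx so later test txs can spend its outputs.
    add_block(chain, vec![initial_tx], keychain);
    chain.head_header().unwrap()
}
```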

pool/tests/nrd_kernels.rs (deleted; its two tests are reworked into the two new files below)

@@ -1,216 +0,0 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{
Block, BlockHeader, HeaderVersion, KernelFeatures, NRDRelativeHeight, Transaction,
};
use self::core::global;
use self::core::pow::Difficulty;
use self::core::{consensus, libtx};
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernel_verification_block_version() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_nrd_kernels";
clean_output_dir(db_root.into());
let mut chain = ChainAdapter::init(db_root.into()).unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let mut block = add_block(header, vec![initial_tx], &mut chain);
let mut header = block.header;
// Initialize a new pool with our chain adapter.
let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelPreHF3)
);
// Now mine several more blocks out to HF3
for _ in 0..7 {
block = add_block(header, vec![], &mut chain);
header = block.header;
}
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// Now confirm we can successfully add transaction with NRD kernel to txpool.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Ok(()),
);
assert_eq!(pool.total_size(), 1);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 1);
// Cleanup db directory
clean_output_dir(db_root.into());
}
#[test]
fn test_nrd_kernel_verification_nrd_disabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_nrd_kernel_disabled";
clean_output_dir(db_root.into());
let mut chain = ChainAdapter::init(db_root.into()).unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let mut block = add_block(header, vec![initial_tx], &mut chain);
let mut header = block.header;
// Initialize a new pool with our chain adapter.
let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
// Now mine several more blocks out to HF3
for _ in 0..7 {
block = add_block(header, vec![], &mut chain);
header = block.header;
}
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support not enabled via feature flag, so not valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
assert_eq!(pool.total_size(), 0);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 0);
// Cleanup db directory
clean_output_dir(db_root.into());
}

pool/tests/nrd_kernels_disabled.rs (new file)

@@ -0,0 +1,98 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernels_disabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(false);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.nrd_kernels_disabled";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
let header = chain.head_header().unwrap();
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
// Now mine several more blocks out to HF3
add_some_blocks(&chain, 5, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support not enabled via feature flag, so not valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
assert_eq!(pool.total_size(), 0);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 0);
// Cleanup db directory
clean_output_dir(db_root.into());
}

pool/tests/nrd_kernels_enabled.rs (new file)

@@ -0,0 +1,98 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernels_enabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.nrd_kernels_enabled";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
let header = chain.head_header().unwrap();
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelPreHF3)
);
// Now mine several more blocks out to HF3
add_some_blocks(&chain, 5, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support enabled via feature flag, and we have reached HF3, so valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Ok(())
);
assert_eq!(pool.total_size(), 1);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 1);
// Cleanup db directory
clean_output_dir(db_root.into());
}
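
In both NRD tests the block counts are chosen so the head lands exactly on the testing HF3 height: 3 setup blocks, 1 block carrying the coinbase-splitting tx, then 5 more, for a head height of 9, which is what the `assert_eq!` against `consensus::TESTING_THIRD_HARD_FORK` requires. A trivial check of that bookkeeping (9 is inferred from the height assertion; the constant itself lives in grin core):

```rust
// Height bookkeeping implied by the two NRD tests above.
fn main() {
    let head_height = 3 + 1 + 5; // add_some_blocks(3) + add_block(tx) + add_some_blocks(5)
    assert_eq!(head_height, 9); // consensus::TESTING_THIRD_HARD_FORK must equal this
}
```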

pool/tests/transaction_pool.rs

@ -15,9 +15,8 @@
pub mod common; pub mod common;
use self::core::core::verifier_cache::LruVerifierCache; use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{transaction, Block, BlockHeader, Weighting}; use self::core::core::{transaction, Weighting};
use self::core::pow::Difficulty; use self::core::global;
use self::core::{global, libtx};
use self::keychain::{ExtKeychain, Keychain}; use self::keychain::{ExtKeychain, Keychain};
use self::pool::TxSource; use self::pool::TxSource;
use self::util::RwLock; use self::util::RwLock;
@ -31,65 +30,47 @@ use std::sync::Arc;
/// Test we can add some txs to the pool (both stempool and txpool). /// Test we can add some txs to the pool (both stempool and txpool).
#[test] #[test]
fn test_the_transaction_pool() { fn test_the_transaction_pool() {
// Use mainnet config to allow for reasonably large block weights. util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::Mainnet); global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap(); let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_transaction_pool".to_string(); let db_root = "target/.transaction_pool";
clean_output_dir(db_root.clone()); clean_output_dir(db_root.into());
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter. // Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone())); let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache.clone(),
);
let header = { add_some_blocks(&chain, 3, &keychain);
let height = 1; let header = chain.head_header().unwrap();
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output( let header_1 = chain.get_header_by_height(1).unwrap();
let initial_tx = test_transaction_spending_coinbase(
&keychain, &keychain,
&libtx::ProofBuilder::new(&keychain), &header_1,
&key_id,
0,
false,
)
.unwrap();
let block = Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
chain.update_db_for_block(&block);
block.header
};
// Now create tx to spend a coinbase, giving us some useful outputs for testing
// with.
let initial_tx = {
test_transaction_spending_coinbase(
&keychain,
&header,
vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400], vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
) );
};
// Add this tx to the pool (stem=false, direct to txpool). // Add this tx to the pool (stem=false, direct to txpool).
{ {
let mut write_pool = pool.write(); pool.add_to_pool(test_source(), initial_tx, false, &header)
write_pool
.add_to_pool(test_source(), initial_tx, false, &header)
.unwrap(); .unwrap();
assert_eq!(write_pool.total_size(), 1); assert_eq!(pool.total_size(), 1);
} }
// Test adding a tx that "double spends" an output currently spent by a tx // Test adding a tx that "double spends" an output currently spent by a tx
// already in the txpool. In this case we attempt to spend the original coinbase twice. // already in the txpool. In this case we attempt to spend the original coinbase twice.
{ {
let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]); let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
let mut write_pool = pool.write(); assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err());
assert!(write_pool
.add_to_pool(test_source(), tx, false, &header)
.is_err());
} }
// tx1 spends some outputs from the initial test tx. // tx1 spends some outputs from the initial test tx.
@ -97,32 +78,26 @@ fn test_the_transaction_pool() {
// tx2 spends some outputs from both tx1 and the initial test tx. // tx2 spends some outputs from both tx1 and the initial test tx.
let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]); let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
// Take a write lock and add a couple of tx entries to the pool.
{ {
let mut write_pool = pool.write();
// Check we have a single initial tx in the pool. // Check we have a single initial tx in the pool.
assert_eq!(write_pool.total_size(), 1); assert_eq!(pool.total_size(), 1);
// First, add a simple tx directly to the txpool (stem = false). // First, add a simple tx directly to the txpool (stem = false).
write_pool pool.add_to_pool(test_source(), tx1.clone(), false, &header)
.add_to_pool(test_source(), tx1.clone(), false, &header)
.unwrap(); .unwrap();
assert_eq!(write_pool.total_size(), 2); assert_eq!(pool.total_size(), 2);
// Add another tx spending outputs from the previous tx. // Add another tx spending outputs from the previous tx.
write_pool pool.add_to_pool(test_source(), tx2.clone(), false, &header)
.add_to_pool(test_source(), tx2.clone(), false, &header)
.unwrap(); .unwrap();
assert_eq!(write_pool.total_size(), 3); assert_eq!(pool.total_size(), 3);
} }
// Test adding the exact same tx multiple times (same kernel signature). // Test adding the exact same tx multiple times (same kernel signature).
// This will fail for stem=false during tx aggregation due to duplicate // This will fail for stem=false during tx aggregation due to duplicate
// outputs and duplicate kernels. // outputs and duplicate kernels.
{ {
let mut write_pool = pool.write(); assert!(pool
assert!(write_pool
.add_to_pool(test_source(), tx1.clone(), false, &header) .add_to_pool(test_source(), tx1.clone(), false, &header)
.is_err()); .is_err());
} }
@ -131,8 +106,7 @@ fn test_the_transaction_pool() {
// Note: not the *same* tx, just same underlying inputs/outputs. // Note: not the *same* tx, just same underlying inputs/outputs.
{ {
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]); let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let mut write_pool = pool.write(); assert!(pool
assert!(write_pool
.add_to_pool(test_source(), tx1a, false, &header) .add_to_pool(test_source(), tx1a, false, &header)
.is_err()); .is_err());
} }
@ -140,8 +114,7 @@ fn test_the_transaction_pool() {
// Test adding a tx attempting to spend a non-existent output. // Test adding a tx attempting to spend a non-existent output.
{ {
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]); let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
let mut write_pool = pool.write(); assert!(pool
assert!(write_pool
.add_to_pool(test_source(), bad_tx, false, &header) .add_to_pool(test_source(), bad_tx, false, &header)
.is_err()); .is_err());
} }
@ -152,71 +125,53 @@ fn test_the_transaction_pool() {
 	// to be immediately stolen via a "replay" tx.
 	{
 		let tx = test_transaction(&keychain, vec![900], vec![498]);
-		let mut write_pool = pool.write();
-		assert!(write_pool
-			.add_to_pool(test_source(), tx, false, &header)
-			.is_err());
+		assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err());
 	}
 	// Confirm the tx pool correctly identifies an invalid tx (already spent).
 	{
-		let mut write_pool = pool.write();
 		let tx3 = test_transaction(&keychain, vec![500], vec![497]);
-		assert!(write_pool
+		assert!(pool
 			.add_to_pool(test_source(), tx3, false, &header)
 			.is_err());
-		assert_eq!(write_pool.total_size(), 3);
+		assert_eq!(pool.total_size(), 3);
 	}
 	// Now add a couple of txs to the stempool (stem = true).
 	{
-		let mut write_pool = pool.write();
 		let tx = test_transaction(&keychain, vec![599], vec![598]);
-		write_pool
-			.add_to_pool(test_source(), tx, true, &header)
-			.unwrap();
+		pool.add_to_pool(test_source(), tx, true, &header).unwrap();
 		let tx2 = test_transaction(&keychain, vec![598], vec![597]);
-		write_pool
-			.add_to_pool(test_source(), tx2, true, &header)
-			.unwrap();
-		assert_eq!(write_pool.total_size(), 3);
-		assert_eq!(write_pool.stempool.size(), 2);
+		pool.add_to_pool(test_source(), tx2, true, &header).unwrap();
+		assert_eq!(pool.total_size(), 3);
+		assert_eq!(pool.stempool.size(), 2);
 	}
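One thing the asserts above leave implicit: adding stem txs never moves total_size(), which counts txpool entries only. A small sketch of the invariant, using the pool fields this test already reaches into:

	// Sketch only: total_size() tracks the txpool; the stempool is sized
	// separately, which is why two stem adds leave total_size() at 3.
	assert_eq!(pool.total_size(), pool.txpool.size());
	assert_eq!(pool.stempool.size(), 2);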
 	// Check we can take some entries from the stempool and "fluff" them into the
 	// txpool. This also exercises multi-kernel txs.
 	{
-		let mut write_pool = pool.write();
-		let agg_tx = write_pool
-			.stempool
-			.all_transactions_aggregate()
-			.unwrap()
-			.unwrap();
+		let agg_tx = pool.stempool.all_transactions_aggregate().unwrap().unwrap();
 		assert_eq!(agg_tx.kernels().len(), 2);
-		write_pool
-			.add_to_pool(test_source(), agg_tx, false, &header)
+		pool.add_to_pool(test_source(), agg_tx, false, &header)
 			.unwrap();
-		assert_eq!(write_pool.total_size(), 4);
-		assert!(write_pool.stempool.is_empty());
+		assert_eq!(pool.total_size(), 4);
+		assert!(pool.stempool.is_empty());
 	}
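The double unwrap above relies on all_transactions_aggregate() returning Ok(Some(tx)) while entries remain; a hedged sketch of the empty case, with the Result<Option<_>> shape assumed from that usage:

	// Sketch only: once the stempool has been fluffed empty, aggregation
	// yields Ok(None) rather than an error.
	assert!(pool.stempool.all_transactions_aggregate().unwrap().is_none());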
 	// Adding a duplicate tx to the stempool will result in it being fluffed.
 	// This handles the case of the stem path having a cycle in it.
 	{
-		let mut write_pool = pool.write();
 		let tx = test_transaction(&keychain, vec![597], vec![596]);
-		write_pool
-			.add_to_pool(test_source(), tx.clone(), true, &header)
+		pool.add_to_pool(test_source(), tx.clone(), true, &header)
 			.unwrap();
-		assert_eq!(write_pool.total_size(), 4);
-		assert_eq!(write_pool.stempool.size(), 1);
+		assert_eq!(pool.total_size(), 4);
+		assert_eq!(pool.stempool.size(), 1);
 		// Duplicate stem tx so fluff, adding it to txpool and removing it from stempool.
-		write_pool
-			.add_to_pool(test_source(), tx.clone(), true, &header)
+		pool.add_to_pool(test_source(), tx.clone(), true, &header)
 			.unwrap();
-		assert_eq!(write_pool.total_size(), 5);
-		assert!(write_pool.stempool.is_empty());
+		assert_eq!(pool.total_size(), 5);
+		assert!(pool.stempool.is_empty());
 	}
 	// Now check we can correctly deaggregate a multi-kernel tx based on current
 	// contents of the txpool.
@@ -224,8 +179,6 @@ fn test_the_transaction_pool() {
 	// We will do this by adding a new tx to the pool
 	// that is a superset of a tx already in the pool.
 	{
-		let mut write_pool = pool.write();
 		let tx4 = test_transaction(&keychain, vec![800], vec![799]);
 		// tx1 and tx2 are already in the txpool (in aggregated form)
 		// tx4 is the "new" part of this aggregated tx that we care about
@@ -235,11 +188,10 @@ fn test_the_transaction_pool() {
 			.validate(Weighting::AsTransaction, verifier_cache.clone())
 			.unwrap();
-		write_pool
-			.add_to_pool(test_source(), agg_tx, false, &header)
+		pool.add_to_pool(test_source(), agg_tx, false, &header)
 			.unwrap();
-		assert_eq!(write_pool.total_size(), 6);
-		let entry = write_pool.txpool.entries.last().unwrap();
+		assert_eq!(pool.total_size(), 6);
+		let entry = pool.txpool.entries.last().unwrap();
 		assert_eq!(entry.tx.kernels().len(), 1);
 		assert_eq!(entry.src, TxSource::Deaggregate);
 	}
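The TxSource::Deaggregate assert above reflects what the pool does internally on add. The same step can be sketched with the core deaggregate helper, assuming agg_tx were cloned before being handed to add_to_pool:

	// Sketch only: subtracting the txs the pool already holds (tx1, tx2)
	// from the incoming aggregate leaves just the new single-kernel tx4 part.
	let deagg = transaction::deaggregate(agg_tx.clone(), vec![tx1.clone(), tx2.clone()]).unwrap();
	assert_eq!(deagg.kernels().len(), 1);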
@@ -247,232 +199,19 @@ fn test_the_transaction_pool() {
 	// Check we cannot "double spend" an output spent in a previous block.
 	// We use the initial coinbase output here for convenience.
 	{
-		let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-		// Initialize a new pool with our chain adapter.
-		let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
-		let header = {
-			let height = 1;
-			let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
-			let reward = libtx::reward::output(
-				&keychain,
-				&libtx::ProofBuilder::new(&keychain),
-				&key_id,
-				0,
-				false,
-			)
-			.unwrap();
-			let block =
-				Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
-			chain.update_db_for_block(&block);
-			block.header
-		};
-		// Now create tx to spend a coinbase, giving us some useful outputs for testing
-		// with.
-		let initial_tx = {
-			test_transaction_spending_coinbase(
-				&keychain,
-				&header,
-				vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
-			)
-		};
-		// Add this tx to the pool (stem=false, direct to txpool).
-		{
-			let mut write_pool = pool.write();
-			write_pool
-				.add_to_pool(test_source(), initial_tx, false, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 1);
-		}
-		// Test adding a tx that "double spends" an output currently spent by a tx
-		// already in the txpool. In this case we attempt to spend the original coinbase twice.
-		{
-			let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
-			let mut write_pool = pool.write();
-			assert!(write_pool
-				.add_to_pool(test_source(), tx, false, &header)
-				.is_err());
-		}
-		// tx1 spends some outputs from the initial test tx.
-		let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
-		// tx2 spends some outputs from both tx1 and the initial test tx.
-		let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
-		// Take a write lock and add a couple of tx entries to the pool.
-		{
-			let mut write_pool = pool.write();
-			// Check we have a single initial tx in the pool.
-			assert_eq!(write_pool.total_size(), 1);
-			// First, add a simple tx directly to the txpool (stem = false).
-			write_pool
-				.add_to_pool(test_source(), tx1.clone(), false, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 2);
-			// Add another tx spending outputs from the previous tx.
-			write_pool
-				.add_to_pool(test_source(), tx2.clone(), false, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 3);
-		}
-		// Test adding the exact same tx multiple times (same kernel signature).
-		// This will fail for stem=false during tx aggregation due to duplicate
-		// outputs and duplicate kernels.
-		{
-			let mut write_pool = pool.write();
-			assert!(write_pool
-				.add_to_pool(test_source(), tx1.clone(), false, &header)
-				.is_err());
-		}
-		// Test adding a duplicate tx with the same input and outputs.
-		// Note: not the *same* tx, just same underlying inputs/outputs.
-		{
-			let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
-			let mut write_pool = pool.write();
-			assert!(write_pool
-				.add_to_pool(test_source(), tx1a, false, &header)
-				.is_err());
-		}
-		// Test adding a tx attempting to spend a non-existent output.
-		{
-			let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
-			let mut write_pool = pool.write();
-			assert!(write_pool
-				.add_to_pool(test_source(), bad_tx, false, &header)
-				.is_err());
-		}
-		// Test adding a tx that would result in a duplicate output (conflicts with
-		// output from tx2). For reasons of security all outputs in the UTXO set must
-		// be unique. Otherwise spending one will almost certainly cause the other
-		// to be immediately stolen via a "replay" tx.
-		{
-			let tx = test_transaction(&keychain, vec![900], vec![498]);
-			let mut write_pool = pool.write();
-			assert!(write_pool
-				.add_to_pool(test_source(), tx, false, &header)
-				.is_err());
-		}
-		// Confirm the tx pool correctly identifies an invalid tx (already spent).
-		{
-			let mut write_pool = pool.write();
-			let tx3 = test_transaction(&keychain, vec![500], vec![497]);
-			assert!(write_pool
-				.add_to_pool(test_source(), tx3, false, &header)
-				.is_err());
-			assert_eq!(write_pool.total_size(), 3);
-		}
-		// Now add a couple of txs to the stempool (stem = true).
-		{
-			let mut write_pool = pool.write();
-			let tx = test_transaction(&keychain, vec![599], vec![598]);
-			write_pool
-				.add_to_pool(test_source(), tx, true, &header)
-				.unwrap();
-			let tx2 = test_transaction(&keychain, vec![598], vec![597]);
-			write_pool
-				.add_to_pool(test_source(), tx2, true, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 3);
-			assert_eq!(write_pool.stempool.size(), 2);
-		}
-		// Check we can take some entries from the stempool and "fluff" them into the
-		// txpool. This also exercises multi-kernel txs.
-		{
-			let mut write_pool = pool.write();
-			let agg_tx = write_pool
-				.stempool
-				.all_transactions_aggregate()
-				.unwrap()
-				.unwrap();
-			assert_eq!(agg_tx.kernels().len(), 2);
-			write_pool
-				.add_to_pool(test_source(), agg_tx, false, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 4);
-			assert!(write_pool.stempool.is_empty());
-		}
-		// Adding a duplicate tx to the stempool will result in it being fluffed.
-		// This handles the case of the stem path having a cycle in it.
-		{
-			let mut write_pool = pool.write();
-			let tx = test_transaction(&keychain, vec![597], vec![596]);
-			write_pool
-				.add_to_pool(test_source(), tx.clone(), true, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 4);
-			assert_eq!(write_pool.stempool.size(), 1);
-			// Duplicate stem tx so fluff, adding it to txpool and removing it from stempool.
-			write_pool
-				.add_to_pool(test_source(), tx.clone(), true, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 5);
-			assert!(write_pool.stempool.is_empty());
-		}
-		// Now check we can correctly deaggregate a multi-kernel tx based on current
-		// contents of the txpool.
-		// We will do this be adding a new tx to the pool
-		// that is a superset of a tx already in the pool.
-		{
-			let mut write_pool = pool.write();
-			let tx4 = test_transaction(&keychain, vec![800], vec![799]);
-			// tx1 and tx2 are already in the txpool (in aggregated form)
-			// tx4 is the "new" part of this aggregated tx that we care about
-			let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
-			agg_tx
-				.validate(Weighting::AsTransaction, verifier_cache.clone())
-				.unwrap();
-			write_pool
-				.add_to_pool(test_source(), agg_tx, false, &header)
-				.unwrap();
-			assert_eq!(write_pool.total_size(), 6);
-			let entry = write_pool.txpool.entries.last().unwrap();
-			assert_eq!(entry.tx.kernels().len(), 1);
-			assert_eq!(entry.src, TxSource::Deaggregate);
-		}
-		// Check we cannot "double spend" an output spent in a previous block.
-		// We use the initial coinbase output here for convenience.
-		{
-			let mut write_pool = pool.write();
-			let double_spend_tx =
-				{ test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };
+		let double_spend_tx = test_transaction_spending_coinbase(&keychain, &header, vec![1000]);
 		// check we cannot add a double spend to the stempool
-		assert!(write_pool
+		assert!(pool
 			.add_to_pool(test_source(), double_spend_tx.clone(), true, &header)
 			.is_err());
 		// check we cannot add a double spend to the txpool
-		assert!(write_pool
+		assert!(pool
 			.add_to_pool(test_source(), double_spend_tx.clone(), false, &header)
 			.is_err());
 	}
-	}
 	// Cleanup db directory
-	clean_output_dir(db_root.clone());
+	clean_output_dir(db_root.into());
 }
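As a usage note, the reworked test can be run on its own; the package and test names below come from this file, the flags are standard cargo:

cargo test -p grin_pool test_the_transaction_pool -- --nocapture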