Rework pool tests to use real chain (was mock chain) (#3342)

* rework pool tests to use real chain (was mock chain) to better reflect reality (tx/block validation rules etc.)

* cleanup
Antioch Peverell 2020-06-07 09:26:08 +01:00 committed by GitHub
parent c7c9a32b9b
commit c54568e69f
11 changed files with 677 additions and 1093 deletions
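The essence of the rework shows up in the new `common` helpers below: every test now spins up a real `Chain`, mines real blocks (with valid test-chain PoW), and wires the pool to it, so real tx/block validation rules apply. A minimal sketch of the shared setup pattern, assuming the test-crate imports used throughout this commit (`grin_core as core`, `grin_keychain as keychain`, `grin_util as util`, `crate::common::*`):

```rust
use std::sync::Arc;

fn setup_real_chain_pool() {
    util::init_test_logger();
    global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
    let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

    // A real chain seeded with a non-empty genesis (coinbase reward included).
    let genesis = genesis_block(&keychain);
    let chain = Arc::new(init_chain("target/.example", genesis));

    // The pool talks to the real chain through the reworked ChainAdapter.
    let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
    let pool = init_transaction_pool(
        Arc::new(ChainAdapter {
            chain: chain.clone(),
        }),
        verifier_cache,
    );

    // Real blocks mature an early coinbase so the tests have outputs to spend.
    add_some_blocks(&chain, 3, &keychain);
    assert_eq!(pool.total_size(), 0);
}
```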


@@ -78,7 +78,7 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1;
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artificially low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150;
pub const TESTING_MAX_BLOCK_WEIGHT: usize = 250;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty is lower than ours,
/// we consider this peer a stuck node and will kick it out.


@@ -287,9 +287,9 @@ pub trait PoolAdapter: Send + Sync {
/// Dummy adapter used as a placeholder for real implementations
#[allow(dead_code)]
pub struct NoopAdapter {}
pub struct NoopPoolAdapter {}
impl PoolAdapter for NoopAdapter {
impl PoolAdapter for NoopPoolAdapter {
fn tx_accepted(&self, _entry: &PoolEntry) {}
fn stem_tx_accepted(&self, _entry: &PoolEntry) -> Result<(), PoolError> {
Ok(())


@@ -16,126 +16,98 @@ pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, Transaction};
use self::core::pow::Difficulty;
use self::core::{global, libtx};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_transaction_pool_block_building() {
fn test_transaction_pool_block_building() -> Result<(), PoolError> {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_block_building".to_string();
clean_output_dir(db_root.clone());
let db_root = "target/.block_building";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
add_some_blocks(&chain, 3, &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
let header = chain.head_header().unwrap();
let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]);
let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]);
let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);
{
let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
// Add the three root txs to the pool.
pool.add_to_pool(test_source(), root_tx_1.clone(), false, &header)?;
pool.add_to_pool(test_source(), root_tx_2.clone(), false, &header)?;
pool.add_to_pool(test_source(), root_tx_3.clone(), false, &header)?;
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Now add the two child txs to the pool.
pool.add_to_pool(test_source(), child_tx_1.clone(), false, &header)?;
pool.add_to_pool(test_source(), child_tx_2.clone(), false, &header)?;
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let add_block =
|prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let block = add_block(header, vec![initial_tx], &mut chain);
let header = block.header;
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]);
let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]);
let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);
{
let mut write_pool = pool.write();
// Add the three root txs to the pool.
write_pool
.add_to_pool(test_source(), root_tx_1.clone(), false, &header)
.unwrap();
write_pool
.add_to_pool(test_source(), root_tx_2.clone(), false, &header)
.unwrap();
write_pool
.add_to_pool(test_source(), root_tx_3.clone(), false, &header)
.unwrap();
// Now add the two child txs to the pool.
write_pool
.add_to_pool(test_source(), child_tx_1.clone(), false, &header)
.unwrap();
write_pool
.add_to_pool(test_source(), child_tx_2.clone(), false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 5);
}
let txs = pool.read().prepare_mineable_transactions().unwrap();
let block = add_block(header, txs, &mut chain);
// Check the block contains what we expect.
assert_eq!(block.inputs().len(), 4);
assert_eq!(block.outputs().len(), 4);
assert_eq!(block.kernels().len(), 6);
assert!(block.kernels().contains(&root_tx_1.kernels()[0]));
assert!(block.kernels().contains(&root_tx_2.kernels()[0]));
assert!(block.kernels().contains(&root_tx_3.kernels()[0]));
assert!(block.kernels().contains(&child_tx_1.kernels()[0]));
assert!(block.kernels().contains(&child_tx_2.kernels()[0]));
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
{
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
assert_eq!(write_pool.total_size(), 0);
}
assert_eq!(pool.total_size(), 5);
}
let txs = pool.prepare_mineable_transactions()?;
add_block(&chain, txs, &keychain);
// Get full block from head of the chain (block we just processed).
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();
// Check the block contains what we expect.
assert_eq!(block.inputs().len(), 4);
assert_eq!(block.outputs().len(), 4);
assert_eq!(block.kernels().len(), 6);
assert!(block.kernels().contains(&root_tx_1.kernels()[0]));
assert!(block.kernels().contains(&root_tx_2.kernels()[0]));
assert!(block.kernels().contains(&root_tx_3.kernels()[0]));
assert!(block.kernels().contains(&child_tx_1.kernels()[0]));
assert!(block.kernels().contains(&child_tx_2.kernels()[0]));
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
{
pool.reconcile_block(&block)?;
assert_eq!(pool.total_size(), 0);
}
// Cleanup db directory
clean_output_dir(db_root.clone());
clean_output_dir(db_root.into());
Ok(())
}
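Also worth flagging: this test's signature changed to return `Result<(), PoolError>`, so the repeated `.unwrap()` calls become `?` and the happy path ends in `Ok(())`. A self-contained illustration of the pattern (types stubbed here; not the pool API itself):

```rust
#[derive(Debug)]
struct PoolError;

fn add_to_pool() -> Result<(), PoolError> {
    Ok(())
}

#[test]
fn pool_test_returns_result() -> Result<(), PoolError> {
    add_to_pool()?; // propagates PoolError instead of panicking via unwrap()
    Ok(())
}
```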


@@ -15,13 +15,9 @@
//! Test coverage for block building at the limit of max_block_weight.
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, Transaction};
use self::core::global;
use self::core::libtx;
use self::core::pow::Difficulty;
use self::keychain::{ExtKeychain, Keychain};
use self::util::RwLock;
use crate::common::*;
@@ -37,126 +33,103 @@ fn test_block_building_max_weight() {
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_block_building_max_weight".to_string();
clean_output_dir(db_root.clone());
let db_root = "target/.block_max_weight";
clean_output_dir(db_root.into());
{
let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Convenient way to add a new block to the chain.
let add_block =
|prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
add_some_blocks(&chain, 3, &keychain);
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
let header_1 = chain.get_header_by_height(1).unwrap();
chain.update_db_for_block(&block);
block
};
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]);
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]);
let header = chain.head_header().unwrap();
// Mine that initial tx so we can spend it with multiple txs
let block = add_block(header, vec![initial_tx], &mut chain);
let header = block.header;
// Build some dependent txs to add to the txpool.
// We will build a block from a subset of these.
let txs = vec![
test_transaction(&keychain, vec![1000], vec![390, 130, 120, 110]),
test_transaction(&keychain, vec![100], vec![90, 1]),
test_transaction(&keychain, vec![90], vec![80, 2]),
test_transaction(&keychain, vec![200], vec![199]),
test_transaction(&keychain, vec![300], vec![290, 3]),
test_transaction(&keychain, vec![290], vec![280, 4]),
];
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
// Fees and weights of our original txs in insert order.
assert_eq!(
txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
[250, 9, 8, 1, 7, 6]
);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[16, 8, 8, 4, 8, 8]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[15625, 1125, 1000, 250, 875, 750]
);
// Build some dependent txs to add to the txpool.
// We will build a block from a subset of these.
let txs = vec![
test_transaction(&keychain, vec![100], vec![90, 1]),
test_transaction(&keychain, vec![90], vec![80, 2]),
test_transaction(&keychain, vec![200], vec![199]),
test_transaction(&keychain, vec![300], vec![290, 3]),
test_transaction(&keychain, vec![290], vec![280, 4]),
];
// Fees and weights of our original txs in insert order.
assert_eq!(
txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
[9, 8, 1, 7, 6]
);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[8, 8, 4, 8, 8]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[1125, 1000, 250, 875, 750]
);
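The asserted numbers above are internally consistent and imply the underlying formulas: each weight matches 4 per output plus 1 per kernel minus 1 per input, and each `fee_to_weight` matches the fee-per-weight ratio scaled by 1000. A sketch of the implied arithmetic (illustrative; the canonical formulas live in `grin_core`):

```rust
// Implied arithmetic behind the asserted values (illustrative only).
fn implied_tx_weight(inputs: i64, outputs: i64, kernels: i64) -> i64 {
    // Consistent with the asserts: 1 input, 4 outputs, 1 kernel -> 16.
    4 * outputs + kernels - inputs
}

fn implied_fee_to_weight(fee: u64, weight: u64) -> u64 {
    // Consistent with the asserts: 250 * 1000 / 16 = 15625.
    fee * 1000 / weight
}

fn main() {
    assert_eq!(implied_tx_weight(1, 4, 1), 16);
    assert_eq!(implied_tx_weight(1, 2, 1), 8);
    assert_eq!(implied_tx_weight(1, 1, 1), 4);
    assert_eq!(implied_fee_to_weight(250, 16), 15_625);
    assert_eq!(implied_fee_to_weight(9, 8), 1_125);
    assert_eq!(implied_fee_to_weight(1, 4), 250);
}
```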
// Populate our txpool with the txs.
{
let mut write_pool = pool.write();
for tx in txs {
println!("***** {}", tx.fee_to_weight());
write_pool
.add_to_pool(test_source(), tx, false, &header)
.unwrap();
}
}
// Check we added them all to the txpool successfully.
assert_eq!(pool.read().total_size(), 5);
// Prepare some "mineable" txs from the txpool.
// Note: We cannot fit all the txs from the txpool into a block.
let txs = pool.read().prepare_mineable_transactions().unwrap();
// Fees and weights of the "mineable" txs.
assert_eq!(txs.iter().map(|x| x.fee()).collect::<Vec<_>>(), [9, 8, 7]);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[8, 8, 8]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[1125, 1000, 875]
);
let block = add_block(header, txs, &mut chain);
// Check contents of the block itself (including coinbase reward).
assert_eq!(block.inputs().len(), 2);
assert_eq!(block.outputs().len(), 6);
assert_eq!(block.kernels().len(), 4);
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
{
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
// We should still have 2 txs in the pool after accepting the new block.
// These txs exceeded the max block weight when building the block and so
// remained in the txpool.
assert_eq!(write_pool.total_size(), 2);
}
// Populate our txpool with the txs.
for tx in txs {
pool.add_to_pool(test_source(), tx, false, &header).unwrap();
}
// Check we added them all to the txpool successfully.
assert_eq!(pool.total_size(), 6);
// Prepare some "mineable" txs from the txpool.
// Note: We cannot fit all the txs from the txpool into a block.
let txs = pool.prepare_mineable_transactions().unwrap();
// Fees and weights of the "mineable" txs.
assert_eq!(
txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
[250, 9, 8, 7]
);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[16, 8, 8, 8]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[15625, 1125, 1000, 875]
);
add_block(&chain, txs, &keychain);
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();
// Check contents of the block itself (including coinbase reward).
assert_eq!(block.inputs().len(), 3);
assert_eq!(block.outputs().len(), 10);
assert_eq!(block.kernels().len(), 5);
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
pool.reconcile_block(&block).unwrap();
// We should still have 2 txs in the pool after accepting the new block.
// These txs exceeded the max block weight when building the block and so
// remained in the txpool.
assert_eq!(pool.total_size(), 2);
// Cleanup db directory
clean_output_dir(db_root.clone());
clean_output_dir(db_root.into());
}
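For the curious, the selection in this test lands exactly on the new limit. Assuming Grin's block-weight costing of 1 per input, 21 per output, and 3 per kernel, the four chosen txs (fees 250, 9, 8, 7) plus the coinbase weigh exactly `TESTING_MAX_BLOCK_WEIGHT = 250`:

```rust
// Hypothetical budget check for the selected txs, assuming the block-weight
// constants (input = 1, output = 21, kernel = 3) and the bumped limit of 250.
fn block_weight(inputs: u64, outputs: u64, kernels: u64) -> u64 {
    inputs * 1 + outputs * 21 + kernels * 3
}

fn main() {
    // Selection-time totals for the four chosen txs (fees 250, 9, 8, 7):
    // 4 inputs, 10 outputs, 4 kernels, plus 1 coinbase output and kernel.
    assert_eq!(block_weight(4, 10 + 1, 4 + 1), 250); // exactly at the limit
    // Either remaining candidate (weight 46 or 25) would overflow the budget,
    // which is why 2 txs are still in the pool after reconciliation.
    // Note: the stored block shows 3 inputs / 10 outputs because the
    // intermediate output (90) is cut through within the block.
}
```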


@@ -16,9 +16,7 @@ pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader};
use self::core::pow::Difficulty;
use self::core::{global, libtx};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::util::RwLock;
use crate::common::ChainAdapter;
@@ -30,185 +28,124 @@ use std::sync::Arc;
#[test]
fn test_transaction_pool_block_reconciliation() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_block_reconciliation".to_string();
clean_output_dir(db_root.clone());
{
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
let db_root = "target/.block_reconciliation";
clean_output_dir(db_root.into());
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let header = {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
0,
false,
)
add_some_blocks(&chain, 3, &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
let header = chain.head_header().unwrap();
// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);
// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(&keychain, vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);
let txs_to_add = vec![
block_transaction,
conflict_transaction,
valid_transaction.clone(),
block_child,
pool_child.clone(),
conflict_child,
conflict_valid_child.clone(),
valid_child_conflict.clone(),
valid_child_valid.clone(),
mixed_child,
];
// First we add the above transactions to the pool.
// All should be accepted.
assert_eq!(pool.total_size(), 0);
for tx in &txs_to_add {
pool.add_to_pool(test_source(), tx.clone(), false, &header)
.unwrap();
let genesis = BlockHeader::default();
let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = genesis.hash();
chain.update_db_for_block(&block);
block.header
};
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
let block = {
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = initial_tx.fee();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fees,
false,
)
.unwrap();
let mut block =
Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block
};
let header = block.header;
// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);
// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(&keychain, vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);
let txs_to_add = vec![
block_transaction,
conflict_transaction,
valid_transaction.clone(),
block_child,
pool_child.clone(),
conflict_child,
conflict_valid_child.clone(),
valid_child_conflict.clone(),
valid_child_valid.clone(),
mixed_child,
];
// First we add the above transactions to the pool.
// All should be accepted.
{
let mut write_pool = pool.write();
assert_eq!(write_pool.total_size(), 0);
for tx in &txs_to_add {
write_pool
.add_to_pool(test_source(), tx.clone(), false, &header)
.unwrap();
}
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
// Now we prepare the block that will cause the above conditions to be met.
// First, the transactions we want in the block:
// - Copy of 1
let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]);
// - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]);
// - Copy of 4
let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
// - Output conflict w/ 8
let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);
let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];
// Now apply this block.
let block = {
let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let fees = block_txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fees,
false,
)
.unwrap();
let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block
};
// Check the pool still contains everything we expect at this point.
{
let write_pool = pool.write();
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
// And reconcile the pool with this latest block.
{
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
assert_eq!(write_pool.total_size(), 4);
assert_eq!(write_pool.txpool.entries[0].tx, valid_transaction);
assert_eq!(write_pool.txpool.entries[1].tx, pool_child);
assert_eq!(write_pool.txpool.entries[2].tx, conflict_valid_child);
assert_eq!(write_pool.txpool.entries[3].tx, valid_child_valid);
}
}
assert_eq!(pool.total_size(), txs_to_add.len());
// Now we prepare the block that will cause the above conditions to be met.
// First, the transactions we want in the block:
// - Copy of 1
let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]);
// - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]);
// - Copy of 4
let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
// - Output conflict w/ 8
let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);
let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];
add_block(&chain, block_txs, &keychain);
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();
// Check the pool still contains everything we expect at this point.
assert_eq!(pool.total_size(), txs_to_add.len());
// And reconcile the pool with this latest block.
pool.reconcile_block(&block).unwrap();
assert_eq!(pool.total_size(), 4);
assert_eq!(pool.txpool.entries[0].tx, valid_transaction);
assert_eq!(pool.txpool.entries[1].tx, pool_child);
assert_eq!(pool.txpool.entries[2].tx, conflict_valid_child);
assert_eq!(pool.txpool.entries[3].tx, valid_child_valid);
// Cleanup db directory
clean_output_dir(db_root.clone());
clean_output_dir(db_root.into());
}
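The survivor set asserted above falls out of a simple replay: walk the pool in insertion order against the post-block UTXO, keep a tx only if all its inputs are currently available and none of its outputs collides with an existing output, and apply kept txs as you go (so orphans of evicted parents drop out too). A toy simulation over this test's tx graph reproduces the expected entries (plain integers stand in for commitments; the real reconcile logic also revalidates against the chain):

```rust
use std::collections::HashSet;

// Each tx: (name, inputs, outputs); "commitments" modeled as u64 values.
fn reconcile<'a>(
    utxo_after_block: &HashSet<u64>,
    pool: &[(&'a str, Vec<u64>, Vec<u64>)],
) -> Vec<&'a str> {
    let mut avail = utxo_after_block.clone();
    let mut kept = Vec::new();
    for (name, ins, outs) in pool {
        let spendable = ins.iter().all(|i| avail.contains(i));
        let unique = outs.iter().all(|o| !avail.contains(o));
        if spendable && unique {
            for i in ins {
                avail.remove(i);
            }
            for o in outs {
                avail.insert(*o);
            }
            kept.push(*name);
        }
    }
    kept
}

fn main() {
    // UTXO after the block: 30 untouched; the block created 6, 5, 1, 9, 31.
    let utxo: HashSet<u64> = [30, 6, 5, 1, 9, 31].into_iter().collect();
    let pool = vec![
        ("block_transaction", vec![10], vec![8]),
        ("conflict_transaction", vec![20], vec![12, 6]),
        ("valid_transaction", vec![30], vec![13, 15]),
        ("block_child", vec![8], vec![5, 1]),
        ("pool_child", vec![5], vec![3]),
        ("conflict_child", vec![12], vec![2]),
        ("conflict_valid_child", vec![6], vec![4]),
        ("valid_child_conflict", vec![13], vec![9]),
        ("valid_child_valid", vec![15], vec![11]),
        ("mixed_child", vec![2, 11], vec![7]),
    ];
    assert_eq!(
        reconcile(&utxo, &pool),
        [
            "valid_transaction",
            "pool_child",
            "conflict_valid_child",
            "valid_child_valid"
        ]
    );
}
```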


@@ -14,12 +14,10 @@
pub mod common;
use self::core::core::hash::Hash;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{BlockHeader, BlockSums, Transaction};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::{BlockChain, PoolError};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
@@ -28,61 +26,49 @@ use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[derive(Clone)]
pub struct CoinbaseMaturityErrorChainAdapter {}
impl CoinbaseMaturityErrorChainAdapter {
pub fn new() -> CoinbaseMaturityErrorChainAdapter {
CoinbaseMaturityErrorChainAdapter {}
}
}
impl BlockChain for CoinbaseMaturityErrorChainAdapter {
fn chain_head(&self) -> Result<BlockHeader, PoolError> {
unimplemented!();
}
fn get_block_header(&self, _hash: &Hash) -> Result<BlockHeader, PoolError> {
unimplemented!();
}
fn get_block_sums(&self, _hash: &Hash) -> Result<BlockSums, PoolError> {
unimplemented!();
}
fn validate_tx(&self, _tx: &Transaction) -> Result<(), PoolError> {
unimplemented!();
}
// Returns an ImmatureCoinbase for every tx we pass in.
fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> {
Err(PoolError::ImmatureCoinbase)
}
// Mocking this out for these tests.
fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> {
Ok(())
}
}
/// Test we correctly verify coinbase maturity when adding txs to the pool.
#[test]
fn test_coinbase_maturity() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
// Mocking this up with an adapter that will raise an error for coinbase
// maturity.
let chain = Arc::new(CoinbaseMaturityErrorChainAdapter::new());
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let pool = RwLock::new(test_setup(chain, verifier_cache));
let db_root = "target/.coinbase_maturity";
clean_output_dir(db_root.into());
{
let mut write_pool = pool.write();
let tx = test_transaction(&keychain, vec![50], vec![49]);
match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
Err(PoolError::ImmatureCoinbase) => {}
_ => panic!("Expected an immature coinbase error here."),
}
}
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Add a single block, introducing coinbase output to be spent later.
add_block(&chain, vec![], &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![100]);
// Coinbase is not yet matured and cannot be spent.
let header = chain.head_header().unwrap();
assert_eq!(
pool.add_to_pool(test_source(), tx.clone(), true, &header)
.err(),
Some(PoolError::ImmatureCoinbase)
);
// Add 2 more blocks. Original coinbase output is now matured and can be spent.
add_some_blocks(&chain, 2, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(
pool.add_to_pool(test_source(), tx.clone(), true, &header),
Ok(())
);
clean_output_dir(db_root.into());
}
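The flow above implies a coinbase maturity of three blocks on the AutomatedTesting chain: the coinbase minted at height 1 is rejected while the head sits at height 1, and accepted once two more blocks land. A compact restatement of the implied rule (the maturity constant of 3 is an assumption here; the real check lives in `grin_chain`):

```rust
// Implied maturity rule (illustrative; not the actual chain code).
fn coinbase_spendable(coinbase_height: u64, spend_height: u64, maturity: u64) -> bool {
    spend_height.saturating_sub(coinbase_height) >= maturity
}

fn main() {
    let maturity = 3; // assumed coinbase maturity for the AutomatedTesting chain
    // Coinbase minted at height 1, attempted spend in the block at height 2: immature.
    assert!(!coinbase_spendable(1, 2, maturity));
    // After two more blocks the spend would land at height 4: now mature.
    assert!(coinbase_spendable(1, 4, maturity));
}
```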


@@ -14,144 +14,141 @@
//! Common test functions
use self::chain::store::ChainStore;
use self::chain::types::Tip;
use self::core::core::hash::{Hash, Hashed};
use self::core::core::verifier_cache::VerifierCache;
use self::core::core::{Block, BlockHeader, BlockSums, Committed, KernelFeatures, Transaction};
use self::core::libtx;
use self::keychain::{ExtKeychain, Keychain};
use self::chain::types::{NoopAdapter, Options};
use self::chain::Chain;
use self::core::consensus;
use self::core::core::hash::Hash;
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction};
use self::core::genesis;
use self::core::global;
use self::core::libtx::{build, reward, ProofBuilder};
use self::core::pow;
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use self::pool::types::*;
use self::pool::TransactionPool;
use self::util::secp::pedersen::Commitment;
use self::util::RwLock;
use chrono::Duration;
use grin_chain as chain;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::collections::HashSet;
use std::fs;
use std::sync::Arc;
#[derive(Clone)]
pub struct ChainAdapter {
pub store: Arc<RwLock<ChainStore>>,
pub utxo: Arc<RwLock<HashSet<Commitment>>>,
/// Build genesis block with reward (non-empty, like we have in mainnet).
pub fn genesis_block<K>(keychain: &K) -> Block
where
K: Keychain,
{
let key_id = keychain::ExtKeychain::derive_key_id(1, 0, 0, 0, 0);
let reward = reward::output(keychain, &ProofBuilder::new(keychain), &key_id, 0, false).unwrap();
genesis::genesis_dev().with_reward(reward.0, reward.1)
}
impl ChainAdapter {
pub fn init(db_root: String) -> Result<ChainAdapter, String> {
let target_dir = format!("target/{}", db_root);
let chain_store = ChainStore::new(&target_dir)
.map_err(|e| format!("failed to init chain_store, {:?}", e))?;
let store = Arc::new(RwLock::new(chain_store));
let utxo = Arc::new(RwLock::new(HashSet::new()));
pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis,
pow::verify_size,
verifier_cache,
false,
)
.unwrap()
}
Ok(ChainAdapter { store, utxo })
pub fn add_some_blocks<K>(chain: &Chain, count: u64, keychain: &K)
where
K: Keychain,
{
for _ in 0..count {
add_block(chain, vec![], keychain);
}
}
pub fn update_db_for_block(&self, block: &Block) {
let header = &block.header;
let tip = Tip::from_header(header);
let s = self.store.write();
let batch = s.batch().unwrap();
pub fn add_block<K>(chain: &Chain, txs: Vec<Transaction>, keychain: &K)
where
K: Keychain,
{
let prev = chain.head_header().unwrap();
let height = prev.height + 1;
let next_header_info = consensus::next_difficulty(height, chain.difficulty_iter().unwrap());
let fee = txs.iter().map(|x| x.fee()).sum();
let key_id = ExtKeychainPath::new(1, height as u32, 0, 0, 0).to_identifier();
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), &key_id, fee, false).unwrap();
batch.save_block_header(header).unwrap();
batch.save_body_head(&tip).unwrap();
let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward).unwrap();
// Retrieve previous block_sums from the db.
let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
prev_sums
} else {
BlockSums::default()
};
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.
let overage = header.overage();
chain.set_txhashset_roots(&mut block).unwrap();
// Offset on the other hand is the total kernel offset from the new block.
let offset = header.total_kernel_offset();
let edge_bits = global::min_edge_bits();
block.header.pow.proof.edge_bits = edge_bits;
pow::pow_size(
&mut block.header,
next_header_info.difficulty,
global::proofsize(),
edge_bits,
)
.unwrap();
// Verify the kernel sums for the block_sums with the new block applied.
let (utxo_sum, kernel_sum) = (prev_sums, block as &dyn Committed)
.verify_kernel_sums(overage, offset)
.unwrap();
chain.process_block(block, Options::NONE).unwrap();
}
let block_sums = BlockSums {
utxo_sum,
kernel_sum,
};
batch.save_block_sums(&header.hash(), block_sums).unwrap();
batch.commit().unwrap();
{
let mut utxo = self.utxo.write();
for x in block.inputs() {
utxo.remove(&x.commitment());
}
for x in block.outputs() {
utxo.insert(x.commitment());
}
}
}
#[derive(Clone)]
pub struct ChainAdapter {
pub chain: Arc<Chain>,
}
impl BlockChain for ChainAdapter {
fn chain_head(&self) -> Result<BlockHeader, PoolError> {
let s = self.store.read();
s.head_header()
.map_err(|_| PoolError::Other(format!("failed to get chain head")))
self.chain
.head_header()
.map_err(|_| PoolError::Other("failed to get chain head".into()))
}
fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, PoolError> {
let s = self.store.read();
s.get_block_header(hash)
.map_err(|_| PoolError::Other(format!("failed to get block header")))
self.chain
.get_block_header(hash)
.map_err(|_| PoolError::Other("failed to get block header".into()))
}
fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, PoolError> {
let s = self.store.read();
s.get_block_sums(hash)
.map_err(|_| PoolError::Other(format!("failed to get block sums")))
self.chain
.get_block_sums(hash)
.map_err(|_| PoolError::Other("failed to get block sums".into()))
}
fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
let utxo = self.utxo.read();
for x in tx.outputs() {
if utxo.contains(&x.commitment()) {
return Err(PoolError::Other(format!("output commitment not unique")));
}
}
for x in tx.inputs() {
if !utxo.contains(&x.commitment()) {
return Err(PoolError::Other(format!("not in utxo set")));
}
}
Ok(())
self.chain
.validate_tx(tx)
.map_err(|_| PoolError::Other("failed to validate tx".into()))
}
// Mocking this check out for these tests.
// We will test the Merkle proof verification logic elsewhere.
fn verify_coinbase_maturity(&self, _tx: &Transaction) -> Result<(), PoolError> {
Ok(())
fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> {
self.chain
.verify_coinbase_maturity(tx)
.map_err(|_| PoolError::ImmatureCoinbase)
}
// Mocking this out for these tests.
fn verify_tx_lock_height(&self, _tx: &Transaction) -> Result<(), PoolError> {
Ok(())
fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), PoolError> {
self.chain
.verify_tx_lock_height(tx)
.map_err(|_| PoolError::ImmatureTransaction)
}
}
pub fn test_setup<B, V>(
pub fn init_transaction_pool<B, V>(
chain: Arc<B>,
verifier_cache: Arc<RwLock<V>>,
) -> TransactionPool<B, NoopAdapter, V>
) -> TransactionPool<B, NoopPoolAdapter, V>
where
B: BlockChain,
V: VerifierCache + 'static,
@@ -165,7 +162,7 @@ where
},
chain.clone(),
verifier_cache.clone(),
Arc::new(NoopAdapter {}),
Arc::new(NoopPoolAdapter {}),
)
}
@@ -189,19 +186,19 @@ where
// single input spending a single coinbase (deterministic key_id aka height)
{
let key_id = ExtKeychain::derive_key_id(1, header.height as u32, 0, 0, 0);
tx_elements.push(libtx::build::coinbase_input(coinbase_reward, key_id));
tx_elements.push(build::coinbase_input(coinbase_reward, key_id));
}
for output_value in output_values {
let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::output(output_value, key_id));
tx_elements.push(build::output(output_value, key_id));
}
libtx::build::transaction(
build::transaction(
KernelFeatures::Plain { fee: fees as u64 },
tx_elements,
keychain,
&libtx::ProofBuilder::new(keychain),
&ProofBuilder::new(keychain),
)
.unwrap()
}
@@ -240,19 +237,19 @@ where
for input_value in input_values {
let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::input(input_value, key_id));
tx_elements.push(build::input(input_value, key_id));
}
for output_value in output_values {
let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
tx_elements.push(libtx::build::output(output_value, key_id));
tx_elements.push(build::output(output_value, key_id));
}
libtx::build::transaction(
build::transaction(
kernel_features,
tx_elements,
keychain,
&libtx::ProofBuilder::new(keychain),
&ProofBuilder::new(keychain),
)
.unwrap()
}
@@ -262,7 +259,7 @@ pub fn test_source() -> TxSource {
}
pub fn clean_output_dir(db_root: String) {
if let Err(e) = fs::remove_dir_all(format!("target/{}", db_root)) {
if let Err(e) = fs::remove_dir_all(db_root) {
println!("cleaning output dir failed - {:?}", e)
}
}


@@ -1,216 +0,0 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{
Block, BlockHeader, HeaderVersion, KernelFeatures, NRDRelativeHeight, Transaction,
};
use self::core::global;
use self::core::pow::Difficulty;
use self::core::{consensus, libtx};
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernel_verification_block_version() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_nrd_kernels";
clean_output_dir(db_root.into());
let mut chain = ChainAdapter::init(db_root.into()).unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let mut block = add_block(header, vec![initial_tx], &mut chain);
let mut header = block.header;
// Initialize a new pool with our chain adapter.
let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelPreHF3)
);
// Now mine several more blocks out to HF3
for _ in 0..7 {
block = add_block(header, vec![], &mut chain);
header = block.header;
}
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// Now confirm we can successfully add transaction with NRD kernel to txpool.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Ok(()),
);
assert_eq!(pool.total_size(), 1);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 1);
// Cleanup db directory
clean_output_dir(db_root.into());
}
#[test]
fn test_nrd_kernel_verification_nrd_disabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_nrd_kernel_disabled";
clean_output_dir(db_root.into());
let mut chain = ChainAdapter::init(db_root.into()).unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
fee,
false,
)
.unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let mut block = add_block(header, vec![initial_tx], &mut chain);
let mut header = block.header;
// Initialize a new pool with our chain adapter.
let mut pool = test_setup(Arc::new(chain.clone()), verifier_cache);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
// Now mine several more blocks out to HF3
for _ in 0..7 {
block = add_block(header, vec![], &mut chain);
header = block.header;
}
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support not enabled via feature flag, so not valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
assert_eq!(pool.total_size(), 0);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 0);
// Cleanup db directory
clean_output_dir(db_root.into());
}


@@ -0,0 +1,98 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernels_disabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(false);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.nrd_kernels_disabled";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
let header = chain.head_header().unwrap();
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
// Now mine several more blocks out to HF3
add_some_blocks(&chain, 5, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support not enabled via feature flag, so not valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelNotEnabled)
);
assert_eq!(pool.total_size(), 0);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 0);
// Cleanup db directory
clean_output_dir(db_root.into());
}


@@ -0,0 +1,98 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_nrd_kernels_enabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.nrd_kernels_enabled";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
KernelFeatures::NoRecentDuplicate {
fee: 6,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
let header = chain.head_header().unwrap();
assert!(header.version < HeaderVersion(4));
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Err(PoolError::NRDKernelPreHF3)
);
// Now mine several more blocks out to HF3
add_some_blocks(&chain, 5, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support enabled via feature flag and we are now post-HF3, so valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Ok(())
);
assert_eq!(pool.total_size(), 1);
let txs = pool.prepare_mineable_transactions().unwrap();
assert_eq!(txs.len(), 1);
// Cleanup db directory
clean_output_dir(db_root.into());
}
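Taken together with the `nrd_kernels_disabled` test above, the gating rule these two tests pin down is: an NRD kernel is accepted only when the local feature flag is on and the header version is at least 4 (post-HF3). An illustrative restatement (not the actual pool code):

```rust
// Illustrative gating implied by the two NRD tests.
#[derive(Debug, PartialEq)]
enum NrdError {
    NotEnabled, // local NRD feature flag is off
    PreHF3,     // flag is on but header version is still pre-HF3
}

fn nrd_kernel_allowed(nrd_enabled: bool, header_version: u16) -> Result<(), NrdError> {
    if !nrd_enabled {
        return Err(NrdError::NotEnabled);
    }
    if header_version < 4 {
        return Err(NrdError::PreHF3);
    }
    Ok(())
}

fn main() {
    assert_eq!(nrd_kernel_allowed(false, 3), Err(NrdError::NotEnabled));
    assert_eq!(nrd_kernel_allowed(false, 4), Err(NrdError::NotEnabled));
    assert_eq!(nrd_kernel_allowed(true, 3), Err(NrdError::PreHF3));
    assert_eq!(nrd_kernel_allowed(true, 4), Ok(()));
}
```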


@@ -15,9 +15,8 @@
pub mod common;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{transaction, Block, BlockHeader, Weighting};
use self::core::pow::Difficulty;
use self::core::{global, libtx};
use self::core::core::{transaction, Weighting};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::TxSource;
use self::util::RwLock;
@@ -31,65 +30,47 @@ use std::sync::Arc;
/// Test we can add some txs to the pool (both stempool and txpool).
#[test]
fn test_the_transaction_pool() {
// Use mainnet config to allow for reasonably large block weights.
global::set_local_chain_type(global::ChainTypes::Mainnet);
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_transaction_pool".to_string();
clean_output_dir(db_root.clone());
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
let db_root = "target/.transaction_pool";
clean_output_dir(db_root.into());
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache.clone(),
);
let header = {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(
&keychain,
&libtx::ProofBuilder::new(&keychain),
&key_id,
0,
false,
)
.unwrap();
let block = Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
add_some_blocks(&chain, 3, &keychain);
let header = chain.head_header().unwrap();
chain.update_db_for_block(&block);
block.header
};
// Now create tx to spend a coinbase, giving us some useful outputs for testing
// with.
let initial_tx = {
test_transaction_spending_coinbase(
&keychain,
&header,
vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
)
};
let header_1 = chain.get_header_by_height(1).unwrap();
let initial_tx = test_transaction_spending_coinbase(
&keychain,
&header_1,
vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
);
// Add this tx to the pool (stem=false, direct to txpool).
{
let mut write_pool = pool.write();
write_pool
.add_to_pool(test_source(), initial_tx, false, &header)
pool.add_to_pool(test_source(), initial_tx, false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 1);
assert_eq!(pool.total_size(), 1);
}
// Test adding a tx that "double spends" an output currently spent by a tx
// already in the txpool. In this case we attempt to spend the original coinbase twice.
{
let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx, false, &header)
.is_err());
assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err());
}
// tx1 spends some outputs from the initial test tx.
@@ -97,32 +78,26 @@ fn test_the_transaction_pool() {
// tx2 spends some outputs from both tx1 and the initial test tx.
let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
// Add a couple of tx entries to the pool.
{
let mut write_pool = pool.write();
// Check we have a single initial tx in the pool.
assert_eq!(write_pool.total_size(), 1);
assert_eq!(pool.total_size(), 1);
// First, add a simple tx directly to the txpool (stem = false).
write_pool
.add_to_pool(test_source(), tx1.clone(), false, &header)
pool.add_to_pool(test_source(), tx1.clone(), false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 2);
assert_eq!(pool.total_size(), 2);
// Add another tx spending outputs from the previous tx.
write_pool
.add_to_pool(test_source(), tx2.clone(), false, &header)
pool.add_to_pool(test_source(), tx2.clone(), false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 3);
assert_eq!(pool.total_size(), 3);
}
// Test adding the exact same tx multiple times (same kernel signature).
// This will fail for stem=false during tx aggregation due to duplicate
// outputs and duplicate kernels.
{
let mut write_pool = pool.write();
assert!(write_pool
assert!(pool
.add_to_pool(test_source(), tx1.clone(), false, &header)
.is_err());
}
@@ -131,8 +106,7 @@ fn test_the_transaction_pool() {
// Note: not the *same* tx, just same underlying inputs/outputs.
{
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let mut write_pool = pool.write();
assert!(write_pool
assert!(pool
.add_to_pool(test_source(), tx1a, false, &header)
.is_err());
}
@@ -140,8 +114,7 @@ fn test_the_transaction_pool() {
// Test adding a tx attempting to spend a non-existent output.
{
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
let mut write_pool = pool.write();
assert!(write_pool
assert!(pool
.add_to_pool(test_source(), bad_tx, false, &header)
.is_err());
}
@@ -152,71 +125,53 @@ fn test_the_transaction_pool() {
// to be immediately stolen via a "replay" tx.
{
let tx = test_transaction(&keychain, vec![900], vec![498]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx, false, &header)
.is_err());
assert!(pool.add_to_pool(test_source(), tx, false, &header).is_err());
}
// Confirm the tx pool correctly identifies an invalid tx (already spent).
{
let mut write_pool = pool.write();
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
assert!(write_pool
assert!(pool
.add_to_pool(test_source(), tx3, false, &header)
.is_err());
assert_eq!(write_pool.total_size(), 3);
assert_eq!(pool.total_size(), 3);
}
// Now add a couple of txs to the stempool (stem = true).
{
let mut write_pool = pool.write();
let tx = test_transaction(&keychain, vec![599], vec![598]);
write_pool
.add_to_pool(test_source(), tx, true, &header)
.unwrap();
pool.add_to_pool(test_source(), tx, true, &header).unwrap();
let tx2 = test_transaction(&keychain, vec![598], vec![597]);
pool.add_to_pool(test_source(), tx2, true, &header).unwrap();
assert_eq!(pool.total_size(), 3);
assert_eq!(pool.stempool.size(), 2);
}
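// Note: total_size() reflects the txpool only; the two stem txs are tracked
// separately, which is why total_size() stays at 3 while stempool.size() is 2.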
// Check we can take some entries from the stempool and "fluff" them into the
// txpool. This also exercises multi-kernel txs.
{
let agg_tx = pool.stempool.all_transactions_aggregate().unwrap().unwrap();
assert_eq!(agg_tx.kernels().len(), 2);
pool.add_to_pool(test_source(), agg_tx, false, &header)
.unwrap();
assert_eq!(pool.total_size(), 4);
assert!(pool.stempool.is_empty());
}
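// What fluffing does, roughly: the stempool contents are aggregated into one
// multi-kernel tx which is then added to the txpool. A sketch with core's
// transaction::aggregate (stem_a and stem_b are hypothetical stand-ins for
// the two stem txs above):
//   let agg = transaction::aggregate(vec![stem_a, stem_b]).unwrap();
//   assert_eq!(agg.kernels().len(), 2);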
// Adding a duplicate tx to the stempool will result in it being fluffed.
// This handles the case of the stem path having a cycle in it.
{
let tx = test_transaction(&keychain, vec![597], vec![596]);
pool.add_to_pool(test_source(), tx.clone(), true, &header)
.unwrap();
assert_eq!(pool.total_size(), 4);
assert_eq!(pool.stempool.size(), 1);
// Duplicate stem tx, so it is fluffed: added to the txpool and removed from the stempool.
pool.add_to_pool(test_source(), tx.clone(), true, &header)
.unwrap();
assert_eq!(pool.total_size(), 5);
assert!(pool.stempool.is_empty());
}
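// In Dandelion terms this is loop protection: seeing the same stem tx twice
// suggests the stem path has cycled back, so the node fluffs instead of
// re-stemming it.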
// Now check we can correctly deaggregate a multi-kernel tx based on current
// contents of the txpool.
// We will do this by adding a new tx to the pool
// that is a superset of a tx already in the pool.
{
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
// tx1 and tx2 are already in the txpool (in aggregated form)
// tx4 is the "new" part of this aggregated tx that we care about
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
agg_tx
.validate(Weighting::AsTransaction, verifier_cache.clone())
.unwrap();
pool.add_to_pool(test_source(), agg_tx, false, &header)
.unwrap();
assert_eq!(pool.total_size(), 6);
let entry = pool.txpool.entries.last().unwrap();
assert_eq!(entry.tx.kernels().len(), 1);
assert_eq!(entry.src, TxSource::Deaggregate);
}
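// Deaggregation subtracts what the pool already knows (tx1, tx2) from the
// incoming aggregate, leaving only tx4 to store. A sketch of the same step via
// core's transaction::deaggregate helper (signature assumed here):
//   let tx4_only = transaction::deaggregate(agg_tx, vec![tx1.clone(), tx2.clone()]).unwrap();
//   assert_eq!(tx4_only.kernels().len(), 1);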
// Check we cannot "double spend" an output spent in a previous block.
// We use the initial coinbase output here for convenience.
{
let double_spend_tx = test_transaction_spending_coinbase(&keychain, &header, vec![1000]);
// check we cannot add a double spend to the stempool
assert!(pool
.add_to_pool(test_source(), double_spend_tx.clone(), true, &header)
.is_err());
// check we cannot add a double spend to the txpool
assert!(pool
.add_to_pool(test_source(), double_spend_tx.clone(), false, &header)
.is_err());
}
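// Unlike the earlier double-spend case, the conflict here is with an output
// already spent on-chain, so rejection comes from validating against chain
// state rather than against other pool entries.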
// Cleanup db directory
clean_output_dir(db_root.into());
}