Cleanup db directory after tests (#2752)

* Cleanup db directory after tests

* Fix clean output dir windows

* Remove behind chain tests
Quentin Le Sceller 2019-04-15 18:00:24 -04:00 committed by Ignotus Peverell
parent e8c50359e4
commit 606b4652f8
9 changed files with 1041 additions and 973 deletions
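
Every hunk below applies the same pattern: the body of each test is wrapped in an inner scope so the chain/store handles are dropped first, and a `clean_output_dir` call is appended as the final statement of the test. Releasing the handles before deleting matters on Windows (the "Fix clean output dir windows" item above), where a directory containing open files cannot be removed. A minimal sketch of the pattern, assuming clean_output_dir is the thin fs::remove_dir_all wrapper each of these test files already defines; the error-ignoring body and the ".grin_example" directory name are illustrative assumptions, not the exact upstream code:

use std::fs;

// Sketch of the per-test-file helper: best-effort removal of the test's
// db directory. Ignoring the result keeps a missing directory from
// failing the test (assumed behavior, not the verbatim upstream helper).
fn clean_output_dir(dir_name: &str) {
    let _ = fs::remove_dir_all(dir_name);
}

fn main() {
    let chain_dir = ".grin_example"; // hypothetical test directory
    {
        // ... open db handles and run the test body here ...
        // Handles are dropped at the end of this scope, releasing file
        // locks so the directory can be deleted even on Windows.
    }
    // Cleanup chain directory
    clean_output_dir(chain_dir);
}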


@@ -112,6 +112,8 @@ fn data_files() {
        let chain = reload_chain(chain_dir);
        chain.validate(false).unwrap();
    }
    // Cleanup chain directory
    clean_output_dir(chain_dir);
}

fn _prepare_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block


@@ -59,7 +59,11 @@ fn setup(dir_name: &str, genesis: Block) -> Chain {
fn mine_empty_chain() {
    global::set_mining_mode(ChainTypes::AutomatedTesting);
    let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
    {
        mine_some_on_top(".grin", pow::mine_genesis_block().unwrap(), &keychain);
    }
    // Cleanup chain directory
    clean_output_dir(".grin");
}
#[test]
@@ -73,9 +77,10 @@ fn mine_genesis_reward_chain() {
    let reward = reward::output(&keychain, &key_id, 0).unwrap();
    genesis = genesis.with_reward(reward.0, reward.1);

    let tmp_chain_dir = ".grin.tmp";
    {
        // setup a tmp chain to handle tx hashsets
        let tmp_chain = setup(tmp_chain_dir, pow::mine_genesis_block().unwrap());
        tmp_chain.set_txhashset_roots(&mut genesis).unwrap();
        genesis.header.output_mmr_size = 1;
        genesis.header.kernel_mmr_size = 1;
@@ -91,6 +96,9 @@ fn mine_genesis_reward_chain() {
    .unwrap();

    mine_some_on_top(".grin.genesis", genesis, &keychain);
    // Cleanup chain directories
    clean_output_dir(tmp_chain_dir);
    clean_output_dir(".grin.genesis");
}
fn mine_some_on_top<K>(dir: &str, genesis: Block, keychain: &K)
@@ -157,76 +165,84 @@ where
#[test]
fn mine_forks() {
    global::set_mining_mode(ChainTypes::AutomatedTesting);
    {
        let chain = setup(".grin2", pow::mine_genesis_block().unwrap());
        let kc = ExtKeychain::from_random_seed(false).unwrap();

        // add a first block to not fork genesis
        let prev = chain.head_header().unwrap();
        let b = prepare_block(&kc, &prev, &chain, 2);
        chain.process_block(b, chain::Options::SKIP_POW).unwrap();

        // mine and add a few blocks
        for n in 1..4 {
            // first block for one branch
            let prev = chain.head_header().unwrap();
            let b1 = prepare_block(&kc, &prev, &chain, 3 * n);

            // 2nd block with higher difficulty for other branch
            let b2 = prepare_block(&kc, &prev, &chain, 3 * n + 1);

            // process the first block to extend the chain
            let bhash = b1.hash();
            chain.process_block(b1, chain::Options::SKIP_POW).unwrap();

            // checking our new head
            let head = chain.head().unwrap();
            assert_eq!(head.height, (n + 1) as u64);
            assert_eq!(head.last_block_h, bhash);
            assert_eq!(head.prev_block_h, prev.hash());

            // process the 2nd block to build a fork with more work
            let bhash = b2.hash();
            chain.process_block(b2, chain::Options::SKIP_POW).unwrap();

            // checking head switch
            let head = chain.head().unwrap();
            assert_eq!(head.height, (n + 1) as u64);
            assert_eq!(head.last_block_h, bhash);
            assert_eq!(head.prev_block_h, prev.hash());
        }
    }
    // Cleanup chain directory
    clean_output_dir(".grin2");
}
#[test]
fn mine_losing_fork() {
    global::set_mining_mode(ChainTypes::AutomatedTesting);
    let kc = ExtKeychain::from_random_seed(false).unwrap();
    {
        let chain = setup(".grin3", pow::mine_genesis_block().unwrap());

        // add a first block we'll be forking from
        let prev = chain.head_header().unwrap();
        let b1 = prepare_block(&kc, &prev, &chain, 2);
        let b1head = b1.header.clone();
        chain.process_block(b1, chain::Options::SKIP_POW).unwrap();

        // prepare the 2 successor, sibling blocks, one with lower diff
        let b2 = prepare_block(&kc, &b1head, &chain, 4);
        let b2head = b2.header.clone();
        let bfork = prepare_block(&kc, &b1head, &chain, 3);

        // add higher difficulty first, prepare its successor, then fork
        // with lower diff
        chain.process_block(b2, chain::Options::SKIP_POW).unwrap();
        assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
        let b3 = prepare_block(&kc, &b2head, &chain, 5);
        chain
            .process_block(bfork, chain::Options::SKIP_POW)
            .unwrap();

        // adding the successor
        let b3head = b3.header.clone();
        chain.process_block(b3, chain::Options::SKIP_POW).unwrap();
        assert_eq!(chain.head_header().unwrap().hash(), b3head.hash());
    }
    // Cleanup chain directory
    clean_output_dir(".grin3");
}

#[test]
@@ -237,222 +253,234 @@ fn longer_fork() {
    // prepare 2 chains, the 2nd will have the forked blocks we can
    // then send back on the 1st
    let genesis = pow::mine_genesis_block().unwrap();
    {
        let chain = setup(".grin4", genesis.clone());

        // add blocks to both chains, 20 on the main one, only the first 5
        // for the forked chain
        let mut prev = chain.head_header().unwrap();
        for n in 0..10 {
            let b = prepare_block(&kc, &prev, &chain, 2 * n + 2);
            prev = b.header.clone();
            chain.process_block(b, chain::Options::SKIP_POW).unwrap();
        }

        let forked_block = chain.get_header_by_height(5).unwrap();

        let head = chain.head_header().unwrap();
        assert_eq!(head.height, 10);
        assert_eq!(head.hash(), prev.hash());

        let mut prev = forked_block;
        for n in 0..7 {
            let b = prepare_fork_block(&kc, &prev, &chain, 2 * n + 11);
            prev = b.header.clone();
            chain.process_block(b, chain::Options::SKIP_POW).unwrap();
        }

        let new_head = prev;

        // After all this the chain should have switched to the fork.
        let head = chain.head_header().unwrap();
        assert_eq!(head.height, 12);
        assert_eq!(head.hash(), new_head.hash());
    }
    // Cleanup chain directory
    clean_output_dir(".grin4");
}
#[test]
fn spend_in_fork_and_compact() {
    global::set_mining_mode(ChainTypes::AutomatedTesting);
    util::init_test_logger();
    {
        let chain = setup(".grin6", pow::mine_genesis_block().unwrap());
        let prev = chain.head_header().unwrap();
        let kc = ExtKeychain::from_random_seed(false).unwrap();

        let mut fork_head = prev;

        // mine the first block and keep track of the block_hash
        // so we can spend the coinbase later
        let b = prepare_block(&kc, &fork_head, &chain, 2);
        let out_id = OutputIdentifier::from_output(&b.outputs()[0]);
        assert!(out_id.features.is_coinbase());
        fork_head = b.header.clone();
        chain
            .process_block(b.clone(), chain::Options::SKIP_POW)
            .unwrap();

        // now mine three further blocks
        for n in 3..6 {
            let b = prepare_block(&kc, &fork_head, &chain, n);
            fork_head = b.header.clone();
            chain.process_block(b, chain::Options::SKIP_POW).unwrap();
        }

        // Check the height of the "fork block".
        assert_eq!(fork_head.height, 4);
        let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
        let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier();
        let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier();

        let tx1 = build::transaction(
            vec![
                build::coinbase_input(consensus::REWARD, key_id2.clone()),
                build::output(consensus::REWARD - 20000, key_id30.clone()),
                build::with_fee(20000),
            ],
            &kc,
        )
        .unwrap();

        let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
        let prev_main = next.header.clone();
        chain
            .process_block(next.clone(), chain::Options::SKIP_POW)
            .unwrap();
        chain.validate(false).unwrap();

        let tx2 = build::transaction(
            vec![
                build::input(consensus::REWARD - 20000, key_id30.clone()),
                build::output(consensus::REWARD - 40000, key_id31.clone()),
                build::with_fee(20000),
            ],
            &kc,
        )
        .unwrap();

        let next = prepare_block_tx(&kc, &prev_main, &chain, 9, vec![&tx2]);
        let prev_main = next.header.clone();
        chain.process_block(next, chain::Options::SKIP_POW).unwrap();

        // Full chain validation for completeness.
        chain.validate(false).unwrap();

        // mine 2 forked blocks from the first
        let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
        let prev_fork = fork.header.clone();
        chain.process_block(fork, chain::Options::SKIP_POW).unwrap();

        let fork_next = prepare_fork_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]);
        let prev_fork = fork_next.header.clone();
        chain
            .process_block(fork_next, chain::Options::SKIP_POW)
            .unwrap();

        chain.validate(false).unwrap();

        // check state
        let head = chain.head_header().unwrap();
        assert_eq!(head.height, 6);
        assert_eq!(head.hash(), prev_main.hash());
        assert!(chain
            .is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
            .is_ok());
        assert!(chain
            .is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
            .is_err());

        // make the fork win
        let fork_next = prepare_fork_block(&kc, &prev_fork, &chain, 10);
        let prev_fork = fork_next.header.clone();
        chain
            .process_block(fork_next, chain::Options::SKIP_POW)
            .unwrap();
        chain.validate(false).unwrap();

        // check state
        let head = chain.head_header().unwrap();
        assert_eq!(head.height, 7);
        assert_eq!(head.hash(), prev_fork.hash());
        assert!(chain
            .is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
            .is_ok());
        assert!(chain
            .is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
            .is_err());

        // add 20 blocks to go past the test horizon
        let mut prev = prev_fork;
        for n in 0..20 {
            let next = prepare_block(&kc, &prev, &chain, 11 + n);
            prev = next.header.clone();
            chain.process_block(next, chain::Options::SKIP_POW).unwrap();
        }

        chain.validate(false).unwrap();
        if let Err(e) = chain.compact() {
            panic!("Error compacting chain: {:?}", e);
        }
        if let Err(e) = chain.validate(false) {
            panic!("Validation error after compacting chain: {:?}", e);
        }
    }
    // Cleanup chain directory
    clean_output_dir(".grin6");
}
/// Test ability to retrieve block headers for a given output
#[test]
fn output_header_mappings() {
    global::set_mining_mode(ChainTypes::AutomatedTesting);
    {
        let chain = setup(
            ".grin_header_for_output",
            pow::mine_genesis_block().unwrap(),
        );
        let keychain = ExtKeychain::from_random_seed(false).unwrap();
        let mut reward_outputs = vec![];

        for n in 1..15 {
            let prev = chain.head_header().unwrap();
            let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
            let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
            let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
            reward_outputs.push(reward.0.clone());
            let mut b =
                core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
                    .unwrap();
            b.header.timestamp = prev.timestamp + Duration::seconds(60);
            b.header.pow.secondary_scaling = next_header_info.secondary_scaling;

            chain.set_txhashset_roots(&mut b).unwrap();

            let edge_bits = if n == 2 {
                global::min_edge_bits() + 1
            } else {
                global::min_edge_bits()
            };
            b.header.pow.proof.edge_bits = edge_bits;
            pow::pow_size(
                &mut b.header,
                next_header_info.difficulty,
                global::proofsize(),
                edge_bits,
            )
            .unwrap();
            b.header.pow.proof.edge_bits = edge_bits;

            chain.process_block(b, chain::Options::MINE).unwrap();

            let header_for_output = chain
                .get_header_for_output(&OutputIdentifier::from_output(&reward_outputs[n - 1]))
                .unwrap();
            assert_eq!(header_for_output.height, n as u64);

            chain.validate(false).unwrap();
        }

        // Check all output positions are as expected
        for n in 1..15 {
            let header_for_output = chain
                .get_header_for_output(&OutputIdentifier::from_output(&reward_outputs[n - 1]))
                .unwrap();
            assert_eq!(header_for_output.height, n as u64);
        }
    }
    // Cleanup chain directory
    clean_output_dir(".grin_header_for_output");
}
fn prepare_block<K>(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block


@@ -53,48 +53,53 @@ fn test_various_store_indices() {
    let keychain = ExtKeychain::from_random_seed(false).unwrap();
    let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();

    {
        let db_env = Arc::new(store::new_env(chain_dir.to_string()));

        let chain_store = Arc::new(chain::store::ChainStore::new(db_env).unwrap());

        global::set_mining_mode(ChainTypes::AutomatedTesting);
        let genesis = pow::mine_genesis_block().unwrap();

        setup_chain(&genesis, chain_store.clone()).unwrap();

        let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
        let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
        let block_hash = block.hash();

        {
            let batch = chain_store.batch().unwrap();
            batch.save_block_header(&block.header).unwrap();
            batch.save_block(&block).unwrap();
            batch.commit().unwrap();
        }

        let block_header = chain_store.get_block_header(&block_hash).unwrap();
        assert_eq!(block_header.hash(), block_hash);

        // Test we can retrieve the block from the db and that we can safely delete the
        // block from the db even though the block_sums are missing.
        {
            // Block exists in the db.
            assert!(chain_store.get_block(&block_hash).is_ok());

            // Block sums do not exist (we never set them up).
            assert!(chain_store.get_block_sums(&block_hash).is_err());

            {
                // Start a new batch and delete the block.
                let batch = chain_store.batch().unwrap();
                assert!(batch.delete_block(&block_hash).is_ok());

                // Block is deleted within this batch.
                assert!(batch.get_block(&block_hash).is_err());
            }

            // Check the batch did not commit any changes to the store.
            assert!(chain_store.get_block(&block_hash).is_ok());
        }
    }
    // Cleanup chain directory
    clean_output_dir(chain_dir);
}


@@ -38,116 +38,39 @@ fn clean_output_dir(dir_name: &str) {
#[test]
fn test_coinbase_maturity() {
    let _ = env_logger::init();
    let chain_dir = ".grin_coinbase";
    clean_output_dir(chain_dir);
    global::set_mining_mode(ChainTypes::AutomatedTesting);

    let genesis_block = pow::mine_genesis_block().unwrap();

    let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

    {
        let db_env = Arc::new(store::new_env(chain_dir.to_string()));
        let chain = chain::Chain::init(
            chain_dir.to_string(),
            db_env,
            Arc::new(NoopAdapter {}),
            genesis_block,
            pow::verify_size,
            verifier_cache,
            false,
            Arc::new(Mutex::new(StopState::new())),
        )
        .unwrap();

        let prev = chain.head_header().unwrap();

        let keychain = ExtKeychain::from_random_seed(false).unwrap();
        let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
        let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
        let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
        let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier();

        let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
        let reward = libtx::reward::output(&keychain, &key_id1, 0).unwrap();
        let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
        block.header.timestamp = prev.timestamp + Duration::seconds(60);
        block.header.pow.secondary_scaling = next_header_info.secondary_scaling;

        chain.set_txhashset_roots(&mut block).unwrap();

        pow::pow_size(
            &mut block.header,
            next_header_info.difficulty,
            global::proofsize(),
            global::min_edge_bits(),
        )
        .unwrap();

        assert_eq!(block.outputs().len(), 1);
        let coinbase_output = block.outputs()[0];
        assert!(coinbase_output.is_coinbase());

        chain
            .process_block(block.clone(), chain::Options::MINE)
            .unwrap();

        let prev = chain.head_header().unwrap();

        let amount = consensus::REWARD;

        let lock_height = 1 + global::coinbase_maturity();
        assert_eq!(lock_height, 4);

        // here we build a tx that attempts to spend the earlier coinbase output
        // this is not a valid tx as the coinbase output cannot be spent yet
        let coinbase_txn = build::transaction(
            vec![
                build::coinbase_input(amount, key_id1.clone()),
                build::output(amount - 2, key_id2.clone()),
                build::with_fee(2),
            ],
            &keychain,
        )
        .unwrap();

        let txs = vec![coinbase_txn.clone()];
        let fees = txs.iter().map(|tx| tx.fee()).sum();
        let reward = libtx::reward::output(&keychain, &key_id3, fees).unwrap();
        let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
        let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
        block.header.timestamp = prev.timestamp + Duration::seconds(60);
        block.header.pow.secondary_scaling = next_header_info.secondary_scaling;

        chain.set_txhashset_roots(&mut block).unwrap();

        // Confirm the tx attempting to spend the coinbase output
        // is not valid at the current block height given the current chain state.
        match chain.verify_coinbase_maturity(&coinbase_txn) {
            Ok(_) => {}
            Err(e) => match e.kind() {
                ErrorKind::ImmatureCoinbase => {}
                _ => panic!("Expected transaction error with immature coinbase."),
            },
        }

        pow::pow_size(
            &mut block.header,
            next_header_info.difficulty,
            global::proofsize(),
            global::min_edge_bits(),
        )
        .unwrap();

        // mine enough blocks to increase the height sufficiently for
        // coinbase to reach maturity and be spendable in the next block
        for _ in 0..3 {
            let prev = chain.head_header().unwrap();

            let keychain = ExtKeychain::from_random_seed(false).unwrap();
            let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();

            let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
            let mut block =
                core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
            let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
            block.header.timestamp = prev.timestamp + Duration::seconds(60);
            block.header.pow.secondary_scaling = next_header_info.secondary_scaling;

            chain.set_txhashset_roots(&mut block).unwrap();

            pow::pow_size(
                &mut block.header,
                next_header_info.difficulty,
                global::proofsize(),
                global::min_edge_bits(),
            )
            .unwrap();

            chain.process_block(block, chain::Options::MINE).unwrap();
        }

        let prev = chain.head_header().unwrap();

        // Confirm the tx spending the coinbase output is now valid.
        // The coinbase output has matured sufficiently based on current chain state.
        chain.verify_coinbase_maturity(&coinbase_txn).unwrap();

        let txs = vec![coinbase_txn];
        let fees = txs.iter().map(|tx| tx.fee()).sum();
        let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
        let reward = libtx::reward::output(&keychain, &key_id4, fees).unwrap();
        let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();

        block.header.timestamp = prev.timestamp + Duration::seconds(60);
        block.header.pow.secondary_scaling = next_header_info.secondary_scaling;

        chain.set_txhashset_roots(&mut block).unwrap();

        pow::pow_size(
            &mut block.header,
            next_header_info.difficulty,
            global::proofsize(),
            global::min_edge_bits(),
        )
        .unwrap();

        let result = chain.process_block(block, chain::Options::MINE);
        match result {
            Ok(_) => (),
            Err(_) => panic!("we did not expect an error here"),
        };
    }
    // Cleanup chain directory
    clean_output_dir(chain_dir);
}


@@ -41,47 +41,51 @@ fn test_unexpected_zip() {
    let db_root = format!(".grin_txhashset_zip");
    clean_output_dir(&db_root);
    {
        let db_env = Arc::new(store::new_env(db_root.clone()));
        let chain_store = ChainStore::new(db_env).unwrap();
        let store = Arc::new(chain_store);
        txhashset::TxHashSet::open(db_root.clone(), store.clone(), None).unwrap();
        // First check if everything works out of the box
        assert!(txhashset::zip_read(db_root.clone(), &BlockHeader::default(), Some(rand)).is_ok());
        let zip_path = Path::new(&db_root).join(format!("txhashset_snapshot_{}.zip", rand));
        let zip_file = File::open(&zip_path).unwrap();
        assert!(txhashset::zip_write(
            PathBuf::from(db_root.clone()),
            zip_file,
            &BlockHeader::default()
        )
        .is_ok());
        // Remove temp txhashset dir
        fs::remove_dir_all(Path::new(&db_root).join(format!("txhashset_zip_{}", rand))).unwrap();
        // Then add strange files in the original txhashset folder
        write_file(db_root.clone());
        assert!(txhashset::zip_read(db_root.clone(), &BlockHeader::default(), Some(rand)).is_ok());
        // Check that the temp dir does not contain the strange files
        let txhashset_zip_path = Path::new(&db_root).join(format!("txhashset_zip_{}", rand));
        assert!(txhashset_contains_expected_files(
            format!("txhashset_zip_{}", rand),
            txhashset_zip_path.clone()
        ));
        fs::remove_dir_all(Path::new(&db_root).join(format!("txhashset_zip_{}", rand))).unwrap();

        let zip_file = File::open(zip_path).unwrap();
        assert!(txhashset::zip_write(
            PathBuf::from(db_root.clone()),
            zip_file,
            &BlockHeader::default()
        )
        .is_ok());
        // Check that the txhashset dir does not contain the strange files
        let txhashset_path = Path::new(&db_root).join("txhashset");
        assert!(txhashset_contains_expected_files(
            "txhashset".to_string(),
            txhashset_path.clone()
        ));
        fs::remove_dir_all(Path::new(&db_root).join("txhashset")).unwrap();
    }
    // Cleanup chain directory
    clean_output_dir(&db_root);
}
fn write_file(db_root: String) {


@@ -34,87 +34,93 @@ fn test_transaction_pool_block_building() {
    let db_root = ".grin_block_building".to_string();
    clean_output_dir(db_root.clone());
    {
        let mut chain = ChainAdapter::init(db_root.clone()).unwrap();

        let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

        // Initialize the chain/txhashset with an initial block
        // so we have a non-empty UTXO set.
        let add_block =
            |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
                let height = prev_header.height + 1;
                let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
                let fee = txs.iter().map(|x| x.fee()).sum();
                let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
                let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();

                // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
                block.header.prev_root = prev_header.hash();

                chain.update_db_for_block(&block);
                block
            };

        let block = add_block(BlockHeader::default(), vec![], &mut chain);
        let header = block.header;

        // Now create tx to spend that first coinbase (now matured).
        // Provides us with some useful outputs to test with.
        let initial_tx =
            test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);

        // Mine that initial tx so we can spend it with multiple txs
        let block = add_block(header, vec![initial_tx], &mut chain);
        let header = block.header;

        // Initialize a new pool with our chain adapter.
        let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));

        let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
        let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
        let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]);

        let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]);
        let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);

        {
            let mut write_pool = pool.write();

            // Add the three root txs to the pool.
            write_pool
                .add_to_pool(test_source(), root_tx_1, false, &header)
                .unwrap();
            write_pool
                .add_to_pool(test_source(), root_tx_2, false, &header)
                .unwrap();
            write_pool
                .add_to_pool(test_source(), root_tx_3, false, &header)
                .unwrap();

            // Now add the two child txs to the pool.
            write_pool
                .add_to_pool(test_source(), child_tx_1.clone(), false, &header)
                .unwrap();
            write_pool
                .add_to_pool(test_source(), child_tx_2.clone(), false, &header)
                .unwrap();

            assert_eq!(write_pool.total_size(), 5);
        }

        let txs = {
            let read_pool = pool.read();
            read_pool.prepare_mineable_transactions().unwrap()
        };

        // children should have been aggregated into parents
        assert_eq!(txs.len(), 3);

        let block = add_block(header, txs, &mut chain);

        // Now reconcile the transaction pool with the new block
        // and check the resulting contents of the pool are what we expect.
        {
            let mut write_pool = pool.write();
            write_pool.reconcile_block(&block).unwrap();

            assert_eq!(write_pool.total_size(), 0);
        }
    }
    // Cleanup db directory
    clean_output_dir(db_root.clone());
}


@@ -40,104 +40,110 @@ fn test_block_building_max_weight() {
    let db_root = ".grin_block_building_max_weight".to_string();
    clean_output_dir(db_root.clone());
    {
        let mut chain = ChainAdapter::init(db_root.clone()).unwrap();

        let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

        // Convenient way to add a new block to the chain.
        let add_block =
            |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
                let height = prev_header.height + 1;
                let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
                let fee = txs.iter().map(|x| x.fee()).sum();
                let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
                let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();

                // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
                block.header.prev_root = prev_header.hash();

                chain.update_db_for_block(&block);
                block
            };

        // Initialize the chain/txhashset with an initial block
        // so we have a non-empty UTXO set.
        let block = add_block(BlockHeader::default(), vec![], &mut chain);
        let header = block.header;

        // Now create tx to spend that first coinbase (now matured).
        // Provides us with some useful outputs to test with.
        let initial_tx =
            test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]);

        // Mine that initial tx so we can spend it with multiple txs
        let block = add_block(header, vec![initial_tx], &mut chain);
        let header = block.header;

        // Initialize a new pool with our chain adapter.
        let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));

        // Build some dependent txs to add to the txpool.
        // We will build a block from a subset of these.
        let txs = vec![
            test_transaction(&keychain, vec![100], vec![90, 1]),
            test_transaction(&keychain, vec![90], vec![80, 2]),
            test_transaction(&keychain, vec![200], vec![199]),
            test_transaction(&keychain, vec![300], vec![290, 3]),
            test_transaction(&keychain, vec![290], vec![280, 4]),
        ];

        // Populate our txpool with the txs.
        {
            let mut write_pool = pool.write();
            for tx in txs {
                write_pool
                    .add_to_pool(test_source(), tx, false, &header)
                    .unwrap();
            }
        }

        // Check we added them all to the txpool successfully.
        assert_eq!(pool.read().total_size(), 5);

        // Prepare some "mineable txs" from the txpool.
        // Note: We cannot fit all the txs from the txpool into a block.
        let txs = pool.read().prepare_mineable_transactions().unwrap();

        // Check resulting tx aggregation is what we expect.
        // We expect to produce 2 aggregated txs based on txpool contents.
        assert_eq!(txs.len(), 2);

        // Check the tx we built is the aggregation of the correct set of underlying txs.
        // We included 4 out of the 5 txs here.
        assert_eq!(txs[0].kernels().len(), 1);
        assert_eq!(txs[1].kernels().len(), 2);

        // Check our weights after aggregation.
        assert_eq!(txs[0].inputs().len(), 1);
        assert_eq!(txs[0].outputs().len(), 1);
        assert_eq!(txs[0].kernels().len(), 1);
        assert_eq!(txs[0].tx_weight_as_block(), 25);

        assert_eq!(txs[1].inputs().len(), 1);
        assert_eq!(txs[1].outputs().len(), 3);
        assert_eq!(txs[1].kernels().len(), 2);
        assert_eq!(txs[1].tx_weight_as_block(), 70);

        let block = add_block(header, txs, &mut chain);

        // Check contents of the block itself (including coinbase reward).
        assert_eq!(block.inputs().len(), 2);
        assert_eq!(block.outputs().len(), 5);
        assert_eq!(block.kernels().len(), 4);

        // Now reconcile the transaction pool with the new block
        // and check the resulting contents of the pool are what we expect.
        {
            let mut write_pool = pool.write();
            write_pool.reconcile_block(&block).unwrap();

            // We should still have 2 tx in the pool after accepting the new block.
            // This one exceeded the max block weight when building the block so
            // remained in the txpool.
            assert_eq!(write_pool.total_size(), 2);
        }
    }
    // Cleanup db directory
    clean_output_dir(db_root.clone());
}


@ -34,153 +34,159 @@ fn test_transaction_pool_block_reconciliation() {
let db_root = ".grin_block_reconciliation".to_string();
clean_output_dir(db_root.clone());
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
let header = {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
let genesis = BlockHeader::default();
let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = genesis.hash();
chain.update_db_for_block(&block);
block.header
};
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
let block = {
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = initial_tx.fee();
let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
let mut block = Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block
};
let header = block.header;
// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);
// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(&keychain, vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);
let txs_to_add = vec![
block_transaction,
conflict_transaction,
valid_transaction.clone(),
block_child,
pool_child.clone(),
conflict_child,
conflict_valid_child.clone(),
valid_child_conflict.clone(),
valid_child_valid.clone(),
mixed_child,
];
// First we add the above transactions to the pool.
// All should be accepted.
{
let mut write_pool = pool.write();
assert_eq!(write_pool.total_size(), 0);
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
for tx in &txs_to_add {
write_pool
.add_to_pool(test_source(), tx.clone(), false, &header)
.unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
let header = {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
let genesis = BlockHeader::default();
let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = genesis.hash();
chain.update_db_for_block(&block);
block.header
};
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
let block = {
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = initial_tx.fee();
let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
let mut block =
Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block
};
let header = block.header;
// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);
// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(&keychain, vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);
let txs_to_add = vec![
block_transaction,
conflict_transaction,
valid_transaction.clone(),
block_child,
pool_child.clone(),
conflict_child,
conflict_valid_child.clone(),
valid_child_conflict.clone(),
valid_child_valid.clone(),
mixed_child,
];
// First we add the above transactions to the pool.
// All should be accepted.
{
let mut write_pool = pool.write();
assert_eq!(write_pool.total_size(), 0);
for tx in &txs_to_add {
write_pool
.add_to_pool(test_source(), tx.clone(), false, &header)
.unwrap();
}
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
// Now we prepare the block that will cause the above conditions to be met.
// First, the transactions we want in the block:
// - Copy of 1
let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]);
// - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]);
// - Copy of 4
let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
// - Output conflict w/ 8
let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);
let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];
// Now apply this block.
let block = {
let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let fees = block_txs.iter().map(|tx| tx.fee()).sum();
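// The coinbase reward output must commit to the block subsidy plus the
// fees collected from the block's txs.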
let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block
};
// Check the pool still contains everything we expect at this point.
{
let write_pool = pool.write();
assert_eq!(write_pool.total_size(), txs_to_add.len());
}
// And reconcile the pool with this latest block.
{
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
assert_eq!(write_pool.total_size(), 4);
assert_eq!(write_pool.txpool.entries[0].tx, valid_transaction);
assert_eq!(write_pool.txpool.entries[1].tx, pool_child);
assert_eq!(write_pool.txpool.entries[2].tx, conflict_valid_child);
assert_eq!(write_pool.txpool.entries[3].tx, valid_child_valid);
}
}
// Cleanup db directory
clean_output_dir(db_root.clone());
}

View file

@ -33,221 +33,226 @@ fn test_the_transaction_pool() {
let db_root = ".grin_transaction_pool".to_string();
clean_output_dir(db_root.clone());
{
let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));
let header = {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
let block =
Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
chain.update_db_for_block(&block);
block.header
};
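// The pool validates incoming txs against this header; update_db_for_block
// above records the reward output so the chain adapter can see it when we
// spend the coinbase below.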
// Now create a tx spending a coinbase, giving us some useful outputs to
// test with.
let initial_tx = {
test_transaction_spending_coinbase(
&keychain,
&header,
vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
)
};
// Add this tx to the pool (stem=false, direct to txpool).
{
let mut write_pool = pool.write();
write_pool
.add_to_pool(test_source(), initial_tx, false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 1);
}
// Test adding a tx that "double spends" an output currently spent by a tx
// already in the txpool. In this case we attempt to spend the original coinbase twice.
{
let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx, false, &header)
.is_err());
}
// tx1 spends some outputs from the initial test tx.
let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
// tx2 spends some outputs from both tx1 and the initial test tx.
let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
// Take a write lock and add a couple of tx entries to the pool.
{
let mut write_pool = pool.write();
// Check we have a single initial tx in the pool.
assert_eq!(write_pool.total_size(), 1);
// First, add a simple tx directly to the txpool (stem = false).
write_pool
.add_to_pool(test_source(), tx1.clone(), false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 2);
// Add another tx spending outputs from the previous tx.
write_pool
.add_to_pool(test_source(), tx2.clone(), false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 3);
}
// Test adding the exact same tx multiple times (same kernel signature).
// This will fail for stem=false during tx aggregation due to duplicate
// outputs and duplicate kernels.
{
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx1.clone(), false, &header)
.is_err());
}
// Test adding a duplicate tx with the same inputs and outputs.
// Note: not the *same* tx, just same underlying inputs/outputs.
{
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx1a, false, &header)
.is_err());
}
// Test adding a tx attempting to spend a non-existent output.
{
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), bad_tx, false, &header)
.is_err());
}
// Test adding a tx that would result in a duplicate output (conflicts with
// output from tx2). For reasons of security all outputs in the UTXO set must
// be unique. Otherwise spending one will almost certainly cause the other
// to be immediately stolen via a "replay" tx.
{
let tx = test_transaction(&keychain, vec![900], vec![498]);
let mut write_pool = pool.write();
assert!(write_pool
.add_to_pool(test_source(), tx, false, &header)
.is_err());
}
// Confirm the tx pool correctly identifies an invalid tx (already spent).
{
let mut write_pool = pool.write();
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
assert!(write_pool
.add_to_pool(test_source(), tx3, false, &header)
.is_err());
assert_eq!(write_pool.total_size(), 3);
}
// Now add a couple of txs to the stempool (stem = true).
{
let mut write_pool = pool.write();
let tx = test_transaction(&keychain, vec![599], vec![598]);
write_pool
.add_to_pool(test_source(), tx, true, &header)
.unwrap();
let tx2 = test_transaction(&keychain, vec![598], vec![597]);
write_pool
.add_to_pool(test_source(), tx2, true, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 3);
assert_eq!(write_pool.stempool.size(), 2);
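// Note how total_size() only counts txpool entries; stempool entries are
// tracked separately via stempool.size().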
}
// Check we can take some entries from the stempool and "fluff" them into the
// txpool. This also exercises multi-kernel txs.
{
let mut write_pool = pool.write();
let agg_tx = write_pool
.stempool
.all_transactions_aggregate()
.unwrap()
.unwrap();
assert_eq!(agg_tx.kernels().len(), 2);
write_pool
.add_to_pool(test_source(), agg_tx, false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 4);
assert!(write_pool.stempool.is_empty());
}
// Adding a duplicate tx to the stempool will result in it being fluffed.
// This handles the case of the stem path having a cycle in it.
{
let mut write_pool = pool.write();
let tx = test_transaction(&keychain, vec![597], vec![596]);
write_pool
.add_to_pool(test_source(), tx.clone(), true, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 4);
assert_eq!(write_pool.stempool.size(), 1);
// Duplicate stem tx so fluff, adding it to txpool and removing it from stempool.
write_pool
.add_to_pool(test_source(), tx.clone(), true, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 5);
assert!(write_pool.stempool.is_empty());
}
// Now check we can correctly deaggregate a multi-kernel tx based on current
// contents of the txpool.
// We will do this by adding a new tx to the pool
// that is a superset of a tx already in the pool.
{
let mut write_pool = pool.write();
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
// tx1 and tx2 are already in the txpool (in aggregated form)
// tx4 is the "new" part of this aggregated tx that we care about
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
agg_tx
.validate(Weighting::AsTransaction, verifier_cache.clone())
.unwrap();
write_pool
.add_to_pool(test_source(), agg_tx, false, &header)
.unwrap();
assert_eq!(write_pool.total_size(), 6);
let entry = write_pool.txpool.entries.last().unwrap();
assert_eq!(entry.tx.kernels().len(), 1);
assert_eq!(entry.src.debug_name, "deagg");
}
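// Deaggregation subtracts the txs already in the txpool (tx1 and tx2) from
// the aggregate, leaving only tx4's single kernel; hence the "deagg" entry.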
// Check we cannot "double spend" an output spent in a previous block.
// We use the initial coinbase output here for convenience.
{
let mut write_pool = pool.write();
let double_spend_tx =
{ test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };
// check we cannot add a double spend to the stempool
assert!(write_pool
.add_to_pool(test_source(), double_spend_tx.clone(), true, &header)
.is_err());
// check we cannot add a double spend to the txpool
assert!(write_pool
.add_to_pool(test_source(), double_spend_tx.clone(), false, &header)
.is_err());
}
}
// Cleanup db directory
clean_output_dir(db_root.clone());
}
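// For reference: clean_output_dir is the tests' shared helper for removing
// the per-test db directory. A minimal sketch, assuming plain std::fs
// semantics (the actual helper lives in the tests' common module):
//
// fn clean_output_dir(db_root: String) {
//     if let Err(e) = std::fs::remove_dir_all(db_root) {
//         println!("cleaning output dir failed - {:?}", e);
//     }
// }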