mirror of https://github.com/mimblewimble/grin.git
synced 2025-01-21 03:21:08 +03:00

Cleanup db directory after tests (#2752)

* Cleanup db directory after tests
* Fix clean output dir windows
* Remove behind chain tests

This commit is contained in:
parent e8c50359e4
commit 606b4652f8

9 changed files with 1041 additions and 973 deletions
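The pattern is the same in every file touched here: each test creates its chain and store inside an inner { ... } scope, and removes its data directory only after that scope closes. Dropping the chain first releases the open LMDB and file handles, which is what the "Fix clean output dir windows" item is about: Windows will not delete files that still have open handles. A minimal sketch of the pattern, assuming a clean_output_dir helper along these lines (the helper body and the test/directory names below are illustrative, not taken from this diff):

    use std::fs;

    // Remove a test's data directory, ignoring the error if the
    // directory does not exist (e.g. on a first run).
    fn clean_output_dir(dir_name: &str) {
        let _ = fs::remove_dir_all(dir_name);
    }

    #[test]
    fn example_chain_test() {
        let chain_dir = ".grin_example"; // hypothetical per-test directory
        clean_output_dir(chain_dir); // start from a clean slate
        {
            // Open the chain/store rooted at `chain_dir` and run the
            // test body here; everything opened in this scope (LMDB
            // environment, file handles) is dropped at the closing brace.
        }
        // Cleanup chain directory: safe even on Windows now, because
        // no handles into `chain_dir` remain open.
        clean_output_dir(chain_dir);
    }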
@@ -112,6 +112,8 @@ fn data_files() {
         let chain = reload_chain(chain_dir);
         chain.validate(false).unwrap();
     }
+    // Cleanup chain directory
+    clean_output_dir(chain_dir);
 }

 fn _prepare_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
@@ -59,7 +59,11 @@ fn setup(dir_name: &str, genesis: Block) -> Chain {
 fn mine_empty_chain() {
     global::set_mining_mode(ChainTypes::AutomatedTesting);
     let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
-    mine_some_on_top(".grin", pow::mine_genesis_block().unwrap(), &keychain);
+    {
+        mine_some_on_top(".grin", pow::mine_genesis_block().unwrap(), &keychain);
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin");
 }

 #[test]
@@ -73,9 +77,10 @@ fn mine_genesis_reward_chain() {
     let reward = reward::output(&keychain, &key_id, 0).unwrap();
     genesis = genesis.with_reward(reward.0, reward.1);

+    let tmp_chain_dir = ".grin.tmp";
     {
         // setup a tmp chain to hande tx hashsets
-        let tmp_chain = setup(".grin.tmp", pow::mine_genesis_block().unwrap());
+        let tmp_chain = setup(tmp_chain_dir, pow::mine_genesis_block().unwrap());
         tmp_chain.set_txhashset_roots(&mut genesis).unwrap();
         genesis.header.output_mmr_size = 1;
         genesis.header.kernel_mmr_size = 1;
@@ -91,6 +96,9 @@ fn mine_genesis_reward_chain() {
         .unwrap();

     mine_some_on_top(".grin.genesis", genesis, &keychain);
+    // Cleanup chain directories
+    clean_output_dir(tmp_chain_dir);
+    clean_output_dir(".grin.genesis");
 }

 fn mine_some_on_top<K>(dir: &str, genesis: Block, keychain: &K)
@@ -157,76 +165,84 @@ where
 #[test]
 fn mine_forks() {
     global::set_mining_mode(ChainTypes::AutomatedTesting);
+    {
         let chain = setup(".grin2", pow::mine_genesis_block().unwrap());
         let kc = ExtKeychain::from_random_seed(false).unwrap();

         // add a first block to not fork genesis
         let prev = chain.head_header().unwrap();
         let b = prepare_block(&kc, &prev, &chain, 2);
         chain.process_block(b, chain::Options::SKIP_POW).unwrap();

         // mine and add a few blocks

         for n in 1..4 {
             // first block for one branch
             let prev = chain.head_header().unwrap();
             let b1 = prepare_block(&kc, &prev, &chain, 3 * n);

             // 2nd block with higher difficulty for other branch
             let b2 = prepare_block(&kc, &prev, &chain, 3 * n + 1);

             // process the first block to extend the chain
             let bhash = b1.hash();
             chain.process_block(b1, chain::Options::SKIP_POW).unwrap();

             // checking our new head
             let head = chain.head().unwrap();
             assert_eq!(head.height, (n + 1) as u64);
             assert_eq!(head.last_block_h, bhash);
             assert_eq!(head.prev_block_h, prev.hash());

             // process the 2nd block to build a fork with more work
             let bhash = b2.hash();
             chain.process_block(b2, chain::Options::SKIP_POW).unwrap();

             // checking head switch
             let head = chain.head().unwrap();
             assert_eq!(head.height, (n + 1) as u64);
             assert_eq!(head.last_block_h, bhash);
             assert_eq!(head.prev_block_h, prev.hash());
         }
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin2");
 }

 #[test]
 fn mine_losing_fork() {
     global::set_mining_mode(ChainTypes::AutomatedTesting);
     let kc = ExtKeychain::from_random_seed(false).unwrap();
+    {
         let chain = setup(".grin3", pow::mine_genesis_block().unwrap());

         // add a first block we'll be forking from
         let prev = chain.head_header().unwrap();
         let b1 = prepare_block(&kc, &prev, &chain, 2);
         let b1head = b1.header.clone();
         chain.process_block(b1, chain::Options::SKIP_POW).unwrap();

         // prepare the 2 successor, sibling blocks, one with lower diff
         let b2 = prepare_block(&kc, &b1head, &chain, 4);
         let b2head = b2.header.clone();
         let bfork = prepare_block(&kc, &b1head, &chain, 3);

         // add higher difficulty first, prepare its successor, then fork
         // with lower diff
         chain.process_block(b2, chain::Options::SKIP_POW).unwrap();
         assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
         let b3 = prepare_block(&kc, &b2head, &chain, 5);
         chain
             .process_block(bfork, chain::Options::SKIP_POW)
             .unwrap();

         // adding the successor
         let b3head = b3.header.clone();
         chain.process_block(b3, chain::Options::SKIP_POW).unwrap();
         assert_eq!(chain.head_header().unwrap().hash(), b3head.hash());
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin3");
 }

 #[test]
@@ -237,222 +253,234 @@ fn longer_fork() {
     // prepare 2 chains, the 2nd will be have the forked blocks we can
     // then send back on the 1st
     let genesis = pow::mine_genesis_block().unwrap();
+    {
         let chain = setup(".grin4", genesis.clone());

         // add blocks to both chains, 20 on the main one, only the first 5
         // for the forked chain
         let mut prev = chain.head_header().unwrap();
         for n in 0..10 {
             let b = prepare_block(&kc, &prev, &chain, 2 * n + 2);
             prev = b.header.clone();
             chain.process_block(b, chain::Options::SKIP_POW).unwrap();
         }

         let forked_block = chain.get_header_by_height(5).unwrap();

         let head = chain.head_header().unwrap();
         assert_eq!(head.height, 10);
         assert_eq!(head.hash(), prev.hash());

         let mut prev = forked_block;
         for n in 0..7 {
             let b = prepare_fork_block(&kc, &prev, &chain, 2 * n + 11);
             prev = b.header.clone();
             chain.process_block(b, chain::Options::SKIP_POW).unwrap();
         }

         let new_head = prev;

         // After all this the chain should have switched to the fork.
         let head = chain.head_header().unwrap();
         assert_eq!(head.height, 12);
         assert_eq!(head.hash(), new_head.hash());
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin4");
 }

 #[test]
 fn spend_in_fork_and_compact() {
     global::set_mining_mode(ChainTypes::AutomatedTesting);
     util::init_test_logger();
+    {
         let chain = setup(".grin6", pow::mine_genesis_block().unwrap());
         let prev = chain.head_header().unwrap();
         let kc = ExtKeychain::from_random_seed(false).unwrap();

         let mut fork_head = prev;

         // mine the first block and keep track of the block_hash
         // so we can spend the coinbase later
         let b = prepare_block(&kc, &fork_head, &chain, 2);
         let out_id = OutputIdentifier::from_output(&b.outputs()[0]);
         assert!(out_id.features.is_coinbase());
         fork_head = b.header.clone();
         chain
             .process_block(b.clone(), chain::Options::SKIP_POW)
             .unwrap();

         // now mine three further blocks
         for n in 3..6 {
             let b = prepare_block(&kc, &fork_head, &chain, n);
             fork_head = b.header.clone();
             chain.process_block(b, chain::Options::SKIP_POW).unwrap();
         }

         // Check the height of the "fork block".
         assert_eq!(fork_head.height, 4);
         let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
         let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier();
         let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier();

         let tx1 = build::transaction(
             vec![
                 build::coinbase_input(consensus::REWARD, key_id2.clone()),
                 build::output(consensus::REWARD - 20000, key_id30.clone()),
                 build::with_fee(20000),
             ],
             &kc,
         )
         .unwrap();

         let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
         let prev_main = next.header.clone();
         chain
             .process_block(next.clone(), chain::Options::SKIP_POW)
             .unwrap();
         chain.validate(false).unwrap();

         let tx2 = build::transaction(
             vec![
                 build::input(consensus::REWARD - 20000, key_id30.clone()),
                 build::output(consensus::REWARD - 40000, key_id31.clone()),
                 build::with_fee(20000),
             ],
             &kc,
         )
         .unwrap();

         let next = prepare_block_tx(&kc, &prev_main, &chain, 9, vec![&tx2]);
         let prev_main = next.header.clone();
         chain.process_block(next, chain::Options::SKIP_POW).unwrap();

         // Full chain validation for completeness.
         chain.validate(false).unwrap();

         // mine 2 forked blocks from the first
         let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
         let prev_fork = fork.header.clone();
         chain.process_block(fork, chain::Options::SKIP_POW).unwrap();

         let fork_next = prepare_fork_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]);
         let prev_fork = fork_next.header.clone();
         chain
             .process_block(fork_next, chain::Options::SKIP_POW)
             .unwrap();

         chain.validate(false).unwrap();

         // check state
         let head = chain.head_header().unwrap();
         assert_eq!(head.height, 6);
         assert_eq!(head.hash(), prev_main.hash());
         assert!(chain
             .is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
             .is_ok());
         assert!(chain
             .is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
             .is_err());

         // make the fork win
         let fork_next = prepare_fork_block(&kc, &prev_fork, &chain, 10);
         let prev_fork = fork_next.header.clone();
         chain
             .process_block(fork_next, chain::Options::SKIP_POW)
             .unwrap();
         chain.validate(false).unwrap();

         // check state
         let head = chain.head_header().unwrap();
         assert_eq!(head.height, 7);
         assert_eq!(head.hash(), prev_fork.hash());
         assert!(chain
             .is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
             .is_ok());
         assert!(chain
             .is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
             .is_err());

         // add 20 blocks to go past the test horizon
         let mut prev = prev_fork;
         for n in 0..20 {
             let next = prepare_block(&kc, &prev, &chain, 11 + n);
             prev = next.header.clone();
             chain.process_block(next, chain::Options::SKIP_POW).unwrap();
         }

         chain.validate(false).unwrap();
         if let Err(e) = chain.compact() {
             panic!("Error compacting chain: {:?}", e);
         }
         if let Err(e) = chain.validate(false) {
             panic!("Validation error after compacting chain: {:?}", e);
         }
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin6");
 }

 /// Test ability to retrieve block headers for a given output
 #[test]
 fn output_header_mappings() {
     global::set_mining_mode(ChainTypes::AutomatedTesting);
+    {
         let chain = setup(
             ".grin_header_for_output",
             pow::mine_genesis_block().unwrap(),
         );
         let keychain = ExtKeychain::from_random_seed(false).unwrap();
         let mut reward_outputs = vec![];

         for n in 1..15 {
             let prev = chain.head_header().unwrap();
             let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
             let pk = ExtKeychainPath::new(1, n as u32, 0, 0, 0).to_identifier();
             let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
             reward_outputs.push(reward.0.clone());
             let mut b =
                 core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
                     .unwrap();
             b.header.timestamp = prev.timestamp + Duration::seconds(60);
             b.header.pow.secondary_scaling = next_header_info.secondary_scaling;

             chain.set_txhashset_roots(&mut b).unwrap();

             let edge_bits = if n == 2 {
                 global::min_edge_bits() + 1
             } else {
                 global::min_edge_bits()
             };
             b.header.pow.proof.edge_bits = edge_bits;
             pow::pow_size(
                 &mut b.header,
                 next_header_info.difficulty,
                 global::proofsize(),
                 edge_bits,
             )
             .unwrap();
             b.header.pow.proof.edge_bits = edge_bits;

             chain.process_block(b, chain::Options::MINE).unwrap();

             let header_for_output = chain
                 .get_header_for_output(&OutputIdentifier::from_output(&reward_outputs[n - 1]))
                 .unwrap();
             assert_eq!(header_for_output.height, n as u64);

             chain.validate(false).unwrap();
         }

         // Check all output positions are as expected
         for n in 1..15 {
             let header_for_output = chain
                 .get_header_for_output(&OutputIdentifier::from_output(&reward_outputs[n - 1]))
                 .unwrap();
             assert_eq!(header_for_output.height, n as u64);
         }
+    }
+    // Cleanup chain directory
+    clean_output_dir(".grin_header_for_output");
 }

 fn prepare_block<K>(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block
@@ -53,48 +53,53 @@ fn test_various_store_indices() {
     let keychain = ExtKeychain::from_random_seed(false).unwrap();
     let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+    {
         let db_env = Arc::new(store::new_env(chain_dir.to_string()));
         let chain_store = Arc::new(chain::store::ChainStore::new(db_env).unwrap());

         global::set_mining_mode(ChainTypes::AutomatedTesting);
         let genesis = pow::mine_genesis_block().unwrap();

         setup_chain(&genesis, chain_store.clone()).unwrap();

         let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
         let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
         let block_hash = block.hash();

         {
             let batch = chain_store.batch().unwrap();
             batch.save_block_header(&block.header).unwrap();
             batch.save_block(&block).unwrap();
             batch.commit().unwrap();
         }

         let block_header = chain_store.get_block_header(&block_hash).unwrap();
         assert_eq!(block_header.hash(), block_hash);

         // Test we can retrive the block from the db and that we can safely delete the
         // block from the db even though the block_sums are missing.
         {
             // Block exists in the db.
             assert!(chain_store.get_block(&block_hash).is_ok());

             // Block sums do not exist (we never set them up).
             assert!(chain_store.get_block_sums(&block_hash).is_err());

             {
                 // Start a new batch and delete the block.
                 let batch = chain_store.batch().unwrap();
                 assert!(batch.delete_block(&block_hash).is_ok());

                 // Block is deleted within this batch.
                 assert!(batch.get_block(&block_hash).is_err());
             }

             // Check the batch did not commit any changes to the store .
             assert!(chain_store.get_block(&block_hash).is_ok());
         }
+    }
+    // Cleanup chain directory
+    clean_output_dir(chain_dir);
 }
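The store test above also leans on batch semantics worth spelling out: a batch from the chain store acts as an uncommitted transaction, so a delete performed inside a batch that is dropped without commit() never reaches the store. A fragment restating that interaction with the same calls the diff uses (it assumes the surrounding test's chain_store and block_hash):

    // `block` was previously saved and committed to `chain_store`.
    {
        // Start a new batch and delete the block within it.
        let batch = chain_store.batch().unwrap();
        assert!(batch.delete_block(&block_hash).is_ok());
        // The delete is visible through the batch itself...
        assert!(batch.get_block(&block_hash).is_err());
        // ...but the batch is dropped here without commit().
    }
    // The store is untouched: the block is still present.
    assert!(chain_store.get_block(&block_hash).is_ok());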
@@ -38,116 +38,39 @@ fn clean_output_dir(dir_name: &str) {
 #[test]
 fn test_coinbase_maturity() {
     let _ = env_logger::init();
-    clean_output_dir(".grin");
+    let chain_dir = ".grin_coinbase";
+    clean_output_dir(chain_dir);
     global::set_mining_mode(ChainTypes::AutomatedTesting);

     let genesis_block = pow::mine_genesis_block().unwrap();

     let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

-    let db_env = Arc::new(store::new_env(".grin".to_string()));
-    let chain = chain::Chain::init(
-        ".grin".to_string(),
-        db_env,
-        Arc::new(NoopAdapter {}),
-        genesis_block,
-        pow::verify_size,
-        verifier_cache,
-        false,
-        Arc::new(Mutex::new(StopState::new())),
-    )
-    .unwrap();
-
-    let prev = chain.head_header().unwrap();
-
-    let keychain = ExtKeychain::from_random_seed(false).unwrap();
-    let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
-    let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
-    let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
-    let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier();
-
-    let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
-    let reward = libtx::reward::output(&keychain, &key_id1, 0).unwrap();
-    let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
-    block.header.timestamp = prev.timestamp + Duration::seconds(60);
-    block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
-
-    chain.set_txhashset_roots(&mut block).unwrap();
-
-    pow::pow_size(
-        &mut block.header,
-        next_header_info.difficulty,
-        global::proofsize(),
-        global::min_edge_bits(),
-    )
-    .unwrap();
-
-    assert_eq!(block.outputs().len(), 1);
-    let coinbase_output = block.outputs()[0];
-    assert!(coinbase_output.is_coinbase());
-
-    chain
-        .process_block(block.clone(), chain::Options::MINE)
-        .unwrap();
-
-    let prev = chain.head_header().unwrap();
-
-    let amount = consensus::REWARD;
-
-    let lock_height = 1 + global::coinbase_maturity();
-    assert_eq!(lock_height, 4);
-
-    // here we build a tx that attempts to spend the earlier coinbase output
-    // this is not a valid tx as the coinbase output cannot be spent yet
-    let coinbase_txn = build::transaction(
-        vec![
-            build::coinbase_input(amount, key_id1.clone()),
-            build::output(amount - 2, key_id2.clone()),
-            build::with_fee(2),
-        ],
-        &keychain,
-    )
-    .unwrap();
-
-    let txs = vec![coinbase_txn.clone()];
-    let fees = txs.iter().map(|tx| tx.fee()).sum();
-    let reward = libtx::reward::output(&keychain, &key_id3, fees).unwrap();
-    let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
-    let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
-    block.header.timestamp = prev.timestamp + Duration::seconds(60);
-    block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
-
-    chain.set_txhashset_roots(&mut block).unwrap();
-
-    // Confirm the tx attempting to spend the coinbase output
-    // is not valid at the current block height given the current chain state.
-    match chain.verify_coinbase_maturity(&coinbase_txn) {
-        Ok(_) => {}
-        Err(e) => match e.kind() {
-            ErrorKind::ImmatureCoinbase => {}
-            _ => panic!("Expected transaction error with immature coinbase."),
-        },
-    }
-
-    pow::pow_size(
-        &mut block.header,
-        next_header_info.difficulty,
-        global::proofsize(),
-        global::min_edge_bits(),
-    )
-    .unwrap();
-
-    // mine enough blocks to increase the height sufficiently for
-    // coinbase to reach maturity and be spendable in the next block
-    for _ in 0..3 {
+    {
+        let db_env = Arc::new(store::new_env(chain_dir.to_string()));
+        let chain = chain::Chain::init(
+            chain_dir.to_string(),
+            db_env,
+            Arc::new(NoopAdapter {}),
+            genesis_block,
+            pow::verify_size,
+            verifier_cache,
+            false,
+            Arc::new(Mutex::new(StopState::new())),
+        )
+        .unwrap();

         let prev = chain.head_header().unwrap();

         let keychain = ExtKeychain::from_random_seed(false).unwrap();
-        let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+        let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+        let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
+        let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
+        let key_id4 = ExtKeychainPath::new(1, 4, 0, 0, 0).to_identifier();

-        let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
-        let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
         let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
+        let reward = libtx::reward::output(&keychain, &key_id1, 0).unwrap();
+        let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
         block.header.timestamp = prev.timestamp + Duration::seconds(60);
         block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -161,37 +84,120 @@ fn test_coinbase_maturity() {
         )
         .unwrap();

-        chain.process_block(block, chain::Options::MINE).unwrap();
+        assert_eq!(block.outputs().len(), 1);
+        let coinbase_output = block.outputs()[0];
+        assert!(coinbase_output.is_coinbase());
+
+        chain
+            .process_block(block.clone(), chain::Options::MINE)
+            .unwrap();
+
+        let prev = chain.head_header().unwrap();
+
+        let amount = consensus::REWARD;
+
+        let lock_height = 1 + global::coinbase_maturity();
+        assert_eq!(lock_height, 4);
+
+        // here we build a tx that attempts to spend the earlier coinbase output
+        // this is not a valid tx as the coinbase output cannot be spent yet
+        let coinbase_txn = build::transaction(
+            vec![
+                build::coinbase_input(amount, key_id1.clone()),
+                build::output(amount - 2, key_id2.clone()),
+                build::with_fee(2),
+            ],
+            &keychain,
+        )
+        .unwrap();
+
+        let txs = vec![coinbase_txn.clone()];
+        let fees = txs.iter().map(|tx| tx.fee()).sum();
+        let reward = libtx::reward::output(&keychain, &key_id3, fees).unwrap();
+        let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
+        let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
+        block.header.timestamp = prev.timestamp + Duration::seconds(60);
+        block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
+
+        chain.set_txhashset_roots(&mut block).unwrap();
+
+        // Confirm the tx attempting to spend the coinbase output
+        // is not valid at the current block height given the current chain state.
+        match chain.verify_coinbase_maturity(&coinbase_txn) {
+            Ok(_) => {}
+            Err(e) => match e.kind() {
+                ErrorKind::ImmatureCoinbase => {}
+                _ => panic!("Expected transaction error with immature coinbase."),
+            },
+        }
+
+        pow::pow_size(
+            &mut block.header,
+            next_header_info.difficulty,
+            global::proofsize(),
+            global::min_edge_bits(),
+        )
+        .unwrap();
+
+        // mine enough blocks to increase the height sufficiently for
+        // coinbase to reach maturity and be spendable in the next block
+        for _ in 0..3 {
+            let prev = chain.head_header().unwrap();
+
+            let keychain = ExtKeychain::from_random_seed(false).unwrap();
+            let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
+
+            let reward = libtx::reward::output(&keychain, &pk, 0).unwrap();
+            let mut block =
+                core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
+            let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
+            block.header.timestamp = prev.timestamp + Duration::seconds(60);
+            block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
+
+            chain.set_txhashset_roots(&mut block).unwrap();
+
+            pow::pow_size(
+                &mut block.header,
+                next_header_info.difficulty,
+                global::proofsize(),
+                global::min_edge_bits(),
+            )
+            .unwrap();
+
+            chain.process_block(block, chain::Options::MINE).unwrap();
+        }
+
+        let prev = chain.head_header().unwrap();
+
+        // Confirm the tx spending the coinbase output is now valid.
+        // The coinbase output has matured sufficiently based on current chain state.
+        chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
+
+        let txs = vec![coinbase_txn];
+        let fees = txs.iter().map(|tx| tx.fee()).sum();
+        let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
+        let reward = libtx::reward::output(&keychain, &key_id4, fees).unwrap();
+        let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
+
+        block.header.timestamp = prev.timestamp + Duration::seconds(60);
+        block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
+
+        chain.set_txhashset_roots(&mut block).unwrap();
+
+        pow::pow_size(
+            &mut block.header,
+            next_header_info.difficulty,
+            global::proofsize(),
+            global::min_edge_bits(),
+        )
+        .unwrap();
+
+        let result = chain.process_block(block, chain::Options::MINE);
+        match result {
+            Ok(_) => (),
+            Err(_) => panic!("we did not expect an error here"),
+        };
     }
-
-    let prev = chain.head_header().unwrap();
-
-    // Confirm the tx spending the coinbase output is now valid.
-    // The coinbase output has matured sufficiently based on current chain state.
-    chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
-
-    let txs = vec![coinbase_txn];
-    let fees = txs.iter().map(|tx| tx.fee()).sum();
-    let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
-    let reward = libtx::reward::output(&keychain, &key_id4, fees).unwrap();
-    let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
-
-    block.header.timestamp = prev.timestamp + Duration::seconds(60);
-    block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
-
-    chain.set_txhashset_roots(&mut block).unwrap();
-
-    pow::pow_size(
-        &mut block.header,
-        next_header_info.difficulty,
-        global::proofsize(),
-        global::min_edge_bits(),
-    )
-    .unwrap();
-
-    let result = chain.process_block(block, chain::Options::MINE);
-    match result {
-        Ok(_) => (),
-        Err(_) => panic!("we did not expect an error here"),
-    };
+    // Cleanup chain directory
+    clean_output_dir(chain_dir);
 }
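Note the rename in the two hunks above: this test previously reused the generic .grin directory and now owns .grin_coinbase. Every test in the commit gets a distinct directory (.grin2, .grin3, .grin4, .grin6, .grin_header_for_output, ...), so cargo's parallel test threads never open or delete each other's databases. A sketch of the convention (test and directory names here are illustrative):

    // Each #[test] owns a unique directory, so tests running on
    // separate threads never race on the same LMDB environment.
    #[test]
    fn test_one() {
        let chain_dir = ".grin_test_one";
        let _ = std::fs::remove_dir_all(chain_dir); // clean slate
        {
            // open a chain rooted at `chain_dir` and exercise it
        }
        let _ = std::fs::remove_dir_all(chain_dir); // cleanup
    }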
@@ -41,47 +41,51 @@ fn test_unexpected_zip() {
     let db_root = format!(".grin_txhashset_zip");
     clean_output_dir(&db_root);
+    {
         let db_env = Arc::new(store::new_env(db_root.clone()));
         let chain_store = ChainStore::new(db_env).unwrap();
         let store = Arc::new(chain_store);
         txhashset::TxHashSet::open(db_root.clone(), store.clone(), None).unwrap();
         // First check if everything works out of the box
         assert!(txhashset::zip_read(db_root.clone(), &BlockHeader::default(), Some(rand)).is_ok());
         let zip_path = Path::new(&db_root).join(format!("txhashset_snapshot_{}.zip", rand));
         let zip_file = File::open(&zip_path).unwrap();
         assert!(txhashset::zip_write(
             PathBuf::from(db_root.clone()),
             zip_file,
             &BlockHeader::default()
         )
         .is_ok());
         // Remove temp txhashset dir
         fs::remove_dir_all(Path::new(&db_root).join(format!("txhashset_zip_{}", rand))).unwrap();
         // Then add strange files in the original txhashset folder
         write_file(db_root.clone());
         assert!(txhashset::zip_read(db_root.clone(), &BlockHeader::default(), Some(rand)).is_ok());
         // Check that the temp dir dos not contains the strange files
         let txhashset_zip_path = Path::new(&db_root).join(format!("txhashset_zip_{}", rand));
         assert!(txhashset_contains_expected_files(
             format!("txhashset_zip_{}", rand),
             txhashset_zip_path.clone()
         ));
         fs::remove_dir_all(Path::new(&db_root).join(format!("txhashset_zip_{}", rand))).unwrap();

         let zip_file = File::open(zip_path).unwrap();
         assert!(txhashset::zip_write(
             PathBuf::from(db_root.clone()),
             zip_file,
             &BlockHeader::default()
         )
         .is_ok());
         // Check that the txhashset dir dos not contains the strange files
         let txhashset_path = Path::new(&db_root).join("txhashset");
         assert!(txhashset_contains_expected_files(
             "txhashset".to_string(),
             txhashset_path.clone()
         ));
         fs::remove_dir_all(Path::new(&db_root).join("txhashset")).unwrap();
+    }
+    // Cleanup chain directory
+    clean_output_dir(&db_root);
 }

 fn write_file(db_root: String) {
@@ -34,87 +34,93 @@ fn test_transaction_pool_block_building() {

     let db_root = ".grin_block_building".to_string();
     clean_output_dir(db_root.clone());
+    {
         let mut chain = ChainAdapter::init(db_root.clone()).unwrap();

         let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

         // Initialize the chain/txhashset with an initial block
         // so we have a non-empty UTXO set.
-    let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
-        let height = prev_header.height + 1;
-        let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
-        let fee = txs.iter().map(|x| x.fee()).sum();
-        let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
-        let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
+        let add_block =
+            |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
+                let height = prev_header.height + 1;
+                let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
+                let fee = txs.iter().map(|x| x.fee()).sum();
+                let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
+                let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();

-        // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
-        block.header.prev_root = prev_header.hash();
+                // Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
+                block.header.prev_root = prev_header.hash();

-        chain.update_db_for_block(&block);
-        block
-    };
+                chain.update_db_for_block(&block);
+                block
+            };

         let block = add_block(BlockHeader::default(), vec![], &mut chain);
         let header = block.header;

         // Now create tx to spend that first coinbase (now matured).
         // Provides us with some useful outputs to test with.
-    let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
+        let initial_tx =
+            test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);

         // Mine that initial tx so we can spend it with multiple txs
         let block = add_block(header, vec![initial_tx], &mut chain);
         let header = block.header;

         // Initialize a new pool with our chain adapter.
         let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));

         let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
         let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
         let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]);

         let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]);
         let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);

         {
             let mut write_pool = pool.write();

             // Add the three root txs to the pool.
             write_pool
                 .add_to_pool(test_source(), root_tx_1, false, &header)
                 .unwrap();
             write_pool
                 .add_to_pool(test_source(), root_tx_2, false, &header)
                 .unwrap();
             write_pool
                 .add_to_pool(test_source(), root_tx_3, false, &header)
                 .unwrap();

             // Now add the two child txs to the pool.
             write_pool
                 .add_to_pool(test_source(), child_tx_1.clone(), false, &header)
                 .unwrap();
             write_pool
                 .add_to_pool(test_source(), child_tx_2.clone(), false, &header)
                 .unwrap();

             assert_eq!(write_pool.total_size(), 5);
         }

         let txs = {
             let read_pool = pool.read();
             read_pool.prepare_mineable_transactions().unwrap()
         };
         // children should have been aggregated into parents
         assert_eq!(txs.len(), 3);

         let block = add_block(header, txs, &mut chain);

         // Now reconcile the transaction pool with the new block
         // and check the resulting contents of the pool are what we expect.
         {
             let mut write_pool = pool.write();
             write_pool.reconcile_block(&block).unwrap();

             assert_eq!(write_pool.total_size(), 0);
         }
+    }
+    // Cleanup db directory
+    clean_output_dir(db_root.clone());
 }
@ -40,104 +40,110 @@ fn test_block_building_max_weight() {
	let db_root = ".grin_block_building_max_weight".to_string();
	clean_output_dir(db_root.clone());

	{
		let mut chain = ChainAdapter::init(db_root.clone()).unwrap();

		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

		// Convenient way to add a new block to the chain.
		let add_block =
			|prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
				let height = prev_header.height + 1;
				let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
				let fee = txs.iter().map(|x| x.fee()).sum();
				let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
				let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();

				// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
				block.header.prev_root = prev_header.hash();

				chain.update_db_for_block(&block);
				block
			};

		// Initialize the chain/txhashset with an initial block
		// so we have a non-empty UTXO set.
		let block = add_block(BlockHeader::default(), vec![], &mut chain);
		let header = block.header;

		// Now create tx to spend that first coinbase (now matured).
		// Provides us with some useful outputs to test with.
		let initial_tx =
			test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]);

		// Mine that initial tx so we can spend it with multiple txs
		let block = add_block(header, vec![initial_tx], &mut chain);
		let header = block.header;

		// Initialize a new pool with our chain adapter.
		let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));

		// Build some dependent txs to add to the txpool.
		// We will build a block from a subset of these.
		let txs = vec![
			test_transaction(&keychain, vec![100], vec![90, 1]),
			test_transaction(&keychain, vec![90], vec![80, 2]),
			test_transaction(&keychain, vec![200], vec![199]),
			test_transaction(&keychain, vec![300], vec![290, 3]),
			test_transaction(&keychain, vec![290], vec![280, 4]),
		];

		// Populate our txpool with the txs.
		{
			let mut write_pool = pool.write();
			for tx in txs {
				write_pool
					.add_to_pool(test_source(), tx, false, &header)
					.unwrap();
			}
		}

		// Check we added them all to the txpool successfully.
		assert_eq!(pool.read().total_size(), 5);

		// Prepare some "mineable txs" from the txpool.
		// Note: We cannot fit all the txs from the txpool into a block.
		let txs = pool.read().prepare_mineable_transactions().unwrap();

		// Check resulting tx aggregation is what we expect.
		// We expect to produce 2 aggregated txs based on txpool contents.
		assert_eq!(txs.len(), 2);

		// Check the tx we built is the aggregation of the correct set of underlying txs.
		// We included 4 out of the 5 txs here.
		assert_eq!(txs[0].kernels().len(), 1);
		assert_eq!(txs[1].kernels().len(), 2);

		// Check our weights after aggregation.
		assert_eq!(txs[0].inputs().len(), 1);
		assert_eq!(txs[0].outputs().len(), 1);
		assert_eq!(txs[0].kernels().len(), 1);
		assert_eq!(txs[0].tx_weight_as_block(), 25);

		assert_eq!(txs[1].inputs().len(), 1);
		assert_eq!(txs[1].outputs().len(), 3);
		assert_eq!(txs[1].kernels().len(), 2);
		assert_eq!(txs[1].tx_weight_as_block(), 70);

		let block = add_block(header, txs, &mut chain);

		// Check contents of the block itself (including coinbase reward).
		assert_eq!(block.inputs().len(), 2);
		assert_eq!(block.outputs().len(), 5);
		assert_eq!(block.kernels().len(), 4);

		// Now reconcile the transaction pool with the new block
		// and check the resulting contents of the pool are what we expect.
		{
			let mut write_pool = pool.write();
			write_pool.reconcile_block(&block).unwrap();

			// We should still have 2 txs in the pool after accepting the new block.
			// This one exceeded the max block weight when building the block so
			// remained in the txpool.
			assert_eq!(write_pool.total_size(), 2);
		}
	}

	// Cleanup db directory
	clean_output_dir(db_root.clone());
}
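The tx_weight_as_block values asserted above (25 and 70) follow from Grin's block weight accounting, assuming the usual consensus constants BLOCK_INPUT_WEIGHT = 1, BLOCK_OUTPUT_WEIGHT = 21 and BLOCK_KERNEL_WEIGHT = 3 (worth double-checking against core::consensus for the exact version). A worked check:

	// Worked check of the asserted weights, assuming the consensus
	// constants named in the note above.
	fn tx_weight_as_block(inputs: u64, outputs: u64, kernels: u64) -> u64 {
		inputs * 1 + outputs * 21 + kernels * 3
	}

	fn main() {
		assert_eq!(tx_weight_as_block(1, 1, 1), 25); // txs[0]: 1 + 21 + 3
		assert_eq!(tx_weight_as_block(1, 3, 2), 70); // txs[1]: 1 + 63 + 6
	}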
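The test body above is wrapped in an inner scope so the chain and pool are dropped, releasing their file handles, before clean_output_dir runs; a directory cannot be removed on Windows while files inside it are still open. An alternative sketch using a Drop guard that also cleans up if the test panics; CleanupGuard is a hypothetical helper, not part of the Grin test framework:

	use std::fs;

	// Remove the directory when the guard goes out of scope,
	// including during unwinding after a panic.
	struct CleanupGuard(String);

	impl Drop for CleanupGuard {
		fn drop(&mut self) {
			let _ = fs::remove_dir_all(&self.0);
		}
	}

	fn main() {
		let _guard = CleanupGuard(".grin_example_dir".to_string());
		// ... test body would run here; the directory (if created)
		// is removed when _guard is dropped.
	}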
@@ -34,153 +34,159 @@ fn test_transaction_pool_block_reconciliation() {
	let db_root = ".grin_block_reconciliation".to_string();
	clean_output_dir(db_root.clone());

	{
		let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());

		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

		// Initialize a new pool with our chain adapter.
		let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));

		let header = {
			let height = 1;
			let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
			let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
			let genesis = BlockHeader::default();
			let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();

			// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
			block.header.prev_root = genesis.hash();

			chain.update_db_for_block(&block);

			block.header
		};

		// Now create tx to spend that first coinbase (now matured).
		// Provides us with some useful outputs to test with.
		let initial_tx =
			test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);

		let block = {
			let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
			let fees = initial_tx.fee();
			let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
			let mut block =
				Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();

			// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
			block.header.prev_root = header.hash();

			chain.update_db_for_block(&block);

			block
		};

		let header = block.header;

		// Preparation: We will introduce three root pool transactions.
		// 1. A transaction that should be invalidated because it is exactly
		//    contained in the block.
		// 2. A transaction that should be invalidated because the input is
		//    consumed in the block, although it is not exactly consumed.
		// 3. A transaction that should remain after block reconciliation.
		let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
		let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
		let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);

		// We will also introduce a few children:
		// 4. A transaction that descends from transaction 1, that is in
		//    turn exactly contained in the block.
		let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
		// 5. A transaction that descends from transaction 4, that is not
		//    contained in the block at all and should be valid after
		//    reconciliation.
		let pool_child = test_transaction(&keychain, vec![5], vec![3]);
		// 6. A transaction that descends from transaction 2 that does not
		//    conflict with anything in the block in any way, but should be
		//    invalidated (orphaned).
		let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
		// 7. A transaction that descends from transaction 2 that should be
		//    valid due to its inputs being satisfied by the block.
		let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
		// 8. A transaction that descends from transaction 3 that should be
		//    invalidated due to an output conflict.
		let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
		// 9. A transaction that descends from transaction 3 that should remain
		//    valid after reconciliation.
		let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
		// 10. A transaction that descends from both transaction 6 and
		//     transaction 9.
		let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);

		let txs_to_add = vec![
			block_transaction,
			conflict_transaction,
			valid_transaction.clone(),
			block_child,
			pool_child.clone(),
			conflict_child,
			conflict_valid_child.clone(),
			valid_child_conflict.clone(),
			valid_child_valid.clone(),
			mixed_child,
		];

		// First we add the above transactions to the pool.
		// All should be accepted.
		{
			let mut write_pool = pool.write();
			assert_eq!(write_pool.total_size(), 0);

			for tx in &txs_to_add {
				write_pool
					.add_to_pool(test_source(), tx.clone(), false, &header)
					.unwrap();
			}

			assert_eq!(write_pool.total_size(), txs_to_add.len());
		}

		// Now we prepare the block that will cause the above conditions to be met.
		// First, the transactions we want in the block:
		// - Copy of 1
		let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]);
		// - Conflict w/ 2, satisfies 7
		let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]);
		// - Copy of 4
		let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
		// - Output conflict w/ 8
		let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);

		let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];

		// Now apply this block.
		let block = {
			let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
			let fees = block_txs.iter().map(|tx| tx.fee()).sum();
			let reward = libtx::reward::output(&keychain, &key_id, fees).unwrap();
			let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();

			// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
			block.header.prev_root = header.hash();

			chain.update_db_for_block(&block);
			block
		};

		// Check the pool still contains everything we expect at this point.
		{
			let write_pool = pool.write();
			assert_eq!(write_pool.total_size(), txs_to_add.len());
		}

		// And reconcile the pool with this latest block.
		{
			let mut write_pool = pool.write();
			write_pool.reconcile_block(&block).unwrap();

			assert_eq!(write_pool.total_size(), 4);
			assert_eq!(write_pool.txpool.entries[0].tx, valid_transaction);
			assert_eq!(write_pool.txpool.entries[1].tx, pool_child);
			assert_eq!(write_pool.txpool.entries[2].tx, conflict_valid_child);
			assert_eq!(write_pool.txpool.entries[3].tx, valid_child_valid);
		}
	}

	// Cleanup db directory
	clean_output_dir(db_root.clone());
}
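The surviving set asserted above (valid_transaction, pool_child, conflict_valid_child, valid_child_valid) follows from a simple rule: after a block is accepted, a pooled tx is evicted if an input it needs is no longer spendable or an output it would create already exists. An illustrative sketch over plain u64 "outputs" as used by these tests; this is a simplification for intuition, not the pool's actual reconciliation code (which also tracks dependencies between pooled txs):

	use std::collections::HashSet;

	// A pooled tx survives reconciliation only if every input it spends
	// is still available and none of its outputs duplicates an existing one.
	fn survives(inputs: &[u64], outputs: &[u64], utxo: &HashSet<u64>) -> bool {
		inputs.iter().all(|i| utxo.contains(i)) && outputs.iter().all(|o| !utxo.contains(o))
	}

	fn main() {
		let utxo: HashSet<u64> = [6, 9].iter().copied().collect();
		assert!(survives(&[6], &[4], &utxo)); // inputs satisfied by the block (like case 7)
		assert!(!survives(&[12], &[2], &utxo)); // input gone: orphaned (like case 6)
		assert!(!survives(&[6], &[9], &utxo)); // output conflict (like case 8)
	}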
@@ -33,221 +33,226 @@ fn test_the_transaction_pool() {
	let db_root = ".grin_transaction_pool".to_string();
	clean_output_dir(db_root.clone());

	{
		let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());

		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

		// Initialize a new pool with our chain adapter.
		let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));

		let header = {
			let height = 1;
			let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
			let reward = libtx::reward::output(&keychain, &key_id, 0).unwrap();
			let block =
				Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();

			chain.update_db_for_block(&block);

			block.header
		};

		// Now create tx to spend a coinbase, giving us some useful outputs for testing
		// with.
		let initial_tx = {
			test_transaction_spending_coinbase(
				&keychain,
				&header,
				vec![500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400],
			)
		};

		// Add this tx to the pool (stem=false, direct to txpool).
		{
			let mut write_pool = pool.write();
			write_pool
				.add_to_pool(test_source(), initial_tx, false, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 1);
		}

		// Test adding a tx that "double spends" an output currently spent by a tx
		// already in the txpool. In this case we attempt to spend the original coinbase twice.
		{
			let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
			let mut write_pool = pool.write();
			assert!(write_pool
				.add_to_pool(test_source(), tx, false, &header)
				.is_err());
		}

		// tx1 spends some outputs from the initial test tx.
		let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
		// tx2 spends some outputs from both tx1 and the initial test tx.
		let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);

		// Take a write lock and add a couple of tx entries to the pool.
		{
			let mut write_pool = pool.write();

			// Check we have a single initial tx in the pool.
			assert_eq!(write_pool.total_size(), 1);

			// First, add a simple tx directly to the txpool (stem = false).
			write_pool
				.add_to_pool(test_source(), tx1.clone(), false, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 2);

			// Add another tx spending outputs from the previous tx.
			write_pool
				.add_to_pool(test_source(), tx2.clone(), false, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 3);
		}

		// Test adding the exact same tx multiple times (same kernel signature).
		// This will fail for stem=false during tx aggregation due to duplicate
		// outputs and duplicate kernels.
		{
			let mut write_pool = pool.write();
			assert!(write_pool
				.add_to_pool(test_source(), tx1.clone(), false, &header)
				.is_err());
		}

		// Test adding a duplicate tx with the same input and outputs.
		// Note: not the *same* tx, just same underlying inputs/outputs.
		{
			let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
			let mut write_pool = pool.write();
			assert!(write_pool
				.add_to_pool(test_source(), tx1a, false, &header)
				.is_err());
		}

		// Test adding a tx attempting to spend a non-existent output.
		{
			let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
			let mut write_pool = pool.write();
			assert!(write_pool
				.add_to_pool(test_source(), bad_tx, false, &header)
				.is_err());
		}

		// Test adding a tx that would result in a duplicate output (conflicts with
		// output from tx2). For reasons of security all outputs in the UTXO set must
		// be unique. Otherwise spending one will almost certainly cause the other
		// to be immediately stolen via a "replay" tx.
		{
			let tx = test_transaction(&keychain, vec![900], vec![498]);
			let mut write_pool = pool.write();
			assert!(write_pool
				.add_to_pool(test_source(), tx, false, &header)
				.is_err());
		}

		// Confirm the tx pool correctly identifies an invalid tx (already spent).
		{
			let mut write_pool = pool.write();
			let tx3 = test_transaction(&keychain, vec![500], vec![497]);
			assert!(write_pool
				.add_to_pool(test_source(), tx3, false, &header)
				.is_err());
			assert_eq!(write_pool.total_size(), 3);
		}

		// Now add a couple of txs to the stempool (stem = true).
		{
			let mut write_pool = pool.write();
			let tx = test_transaction(&keychain, vec![599], vec![598]);
			write_pool
				.add_to_pool(test_source(), tx, true, &header)
				.unwrap();
			let tx2 = test_transaction(&keychain, vec![598], vec![597]);
			write_pool
				.add_to_pool(test_source(), tx2, true, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 3);
			assert_eq!(write_pool.stempool.size(), 2);
		}

		// Check we can take some entries from the stempool and "fluff" them into the
		// txpool. This also exercises multi-kernel txs.
		{
			let mut write_pool = pool.write();
			let agg_tx = write_pool
				.stempool
				.all_transactions_aggregate()
				.unwrap()
				.unwrap();
			assert_eq!(agg_tx.kernels().len(), 2);
			write_pool
				.add_to_pool(test_source(), agg_tx, false, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 4);
			assert!(write_pool.stempool.is_empty());
		}

		// Adding a duplicate tx to the stempool will result in it being fluffed.
		// This handles the case of the stem path having a cycle in it.
		{
			let mut write_pool = pool.write();
			let tx = test_transaction(&keychain, vec![597], vec![596]);
			write_pool
				.add_to_pool(test_source(), tx.clone(), true, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 4);
			assert_eq!(write_pool.stempool.size(), 1);

			// Duplicate stem tx so fluff, adding it to txpool and removing it from stempool.
			write_pool
				.add_to_pool(test_source(), tx.clone(), true, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 5);
			assert!(write_pool.stempool.is_empty());
		}

		// Now check we can correctly deaggregate a multi-kernel tx based on current
		// contents of the txpool.
		// We will do this by adding a new tx to the pool
		// that is a superset of a tx already in the pool.
		{
			let mut write_pool = pool.write();

			let tx4 = test_transaction(&keychain, vec![800], vec![799]);
			// tx1 and tx2 are already in the txpool (in aggregated form)
			// tx4 is the "new" part of this aggregated tx that we care about
			let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();

			agg_tx
				.validate(Weighting::AsTransaction, verifier_cache.clone())
				.unwrap();

			write_pool
				.add_to_pool(test_source(), agg_tx, false, &header)
				.unwrap();
			assert_eq!(write_pool.total_size(), 6);
			let entry = write_pool.txpool.entries.last().unwrap();
			assert_eq!(entry.tx.kernels().len(), 1);
			assert_eq!(entry.src.debug_name, "deagg");
		}

		// Check we cannot "double spend" an output spent in a previous block.
		// We use the initial coinbase output here for convenience.
		{
			let mut write_pool = pool.write();

			let double_spend_tx =
				{ test_transaction_spending_coinbase(&keychain, &header, vec![1000]) };

			// check we cannot add a double spend to the stempool
			assert!(write_pool
				.add_to_pool(test_source(), double_spend_tx.clone(), true, &header)
				.is_err());

			// check we cannot add a double spend to the txpool
			assert!(write_pool
				.add_to_pool(test_source(), double_spend_tx.clone(), false, &header)
				.is_err());
		}
	}

	// Cleanup db directory
	clean_output_dir(db_root.clone());
}
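The "deagg" entry asserted above comes from deaggregation: the pool subtracts the parts of the incoming aggregate it already holds (tx1 and tx2) and keeps only the new remainder (tx4), which is why the stored entry has a single kernel. A sketch of the kernel-set-difference idea, using u64 ids as stand-ins for kernel excesses; the real code works on actual kernels and also partitions the inputs and outputs:

	use std::collections::HashSet;

	// The "new" part of an aggregated tx is identified by the kernels
	// not already present in the txpool.
	fn deaggregate_kernels(agg: &[u64], pool: &HashSet<u64>) -> Vec<u64> {
		agg.iter().copied().filter(|k| !pool.contains(k)).collect()
	}

	fn main() {
		let pool: HashSet<u64> = [1, 2].iter().copied().collect(); // kernels of tx1, tx2
		let agg = [1, 2, 3]; // aggregate of tx1, tx2 and the new tx4
		assert_eq!(deaggregate_kernels(&agg, &pool), vec![3]); // only tx4's kernel remains
	}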