From 4121ea124092081148273ec5846397cc001d7fa2 Mon Sep 17 00:00:00 2001 From: Yeastplume Date: Wed, 9 May 2018 10:15:58 +0100 Subject: [PATCH] Wallet+Keychain refactoring (#1035) * beginning to refactor keychain into wallet lib * rustfmt * more refactor of aggsig lib, simplify aggsig context manager, hold instance statically for now * clean some warnings * clean some warnings * fix wallet send test a bit * fix core tests, move wallet dependent tests into integration tests * repair chain tests * refactor/fix pool tests * fix wallet tests, moved from keychain * add wallet tests --- Cargo.lock | 4 + chain/Cargo.toml | 1 + chain/tests/data_file_integrity.rs | 30 +- chain/tests/mine_simple_chain.rs | 12 +- chain/tests/store_indices.rs | 13 +- chain/tests/test_coinbase_maturity.rs | 35 +- core/Cargo.toml | 3 + core/benches/sumtree.rs | 75 -- core/src/core/block.rs | 454 +------ core/src/core/mod.rs | 556 -------- core/src/core/transaction.rs | 37 +- core/src/pow/mod.rs | 1 - core/tests/block.rs | 393 ++++++ core/tests/common/mod.rs | 104 ++ core/tests/core.rs | 504 ++++++++ core/tests/transaction.rs | 50 + keychain/src/keychain.rs | 753 +---------- keychain/src/lib.rs | 6 +- pool/Cargo.toml | 3 + pool/src/blockchain.rs | 41 +- pool/src/graph.rs | 88 +- pool/src/lib.rs | 6 +- pool/src/pool.rs | 1127 +--------------- pool/src/types.rs | 38 +- pool/tests/graph.rs | 96 ++ pool/tests/pool.rs | 1150 +++++++++++++++++ servers/src/mining/mine_block.rs | 3 +- servers/tests/framework/mod.rs | 8 +- servers/tests/wallet.rs | 3 +- store/src/lib.rs | 2 - wallet/Cargo.toml | 1 + wallet/src/lib.rs | 4 + wallet/src/libwallet/aggsig.rs | 269 ++++ wallet/src/libwallet/blind.rs | 15 + .../core => wallet/src/libwallet}/build.rs | 23 +- wallet/src/libwallet/error.rs | 60 + wallet/src/libwallet/mod.rs | 29 + wallet/src/libwallet/proof.rs | 113 ++ wallet/src/libwallet/reward.rs | 80 ++ wallet/src/receiver.rs | 72 +- wallet/src/restore.rs | 21 +- wallet/src/sender.rs | 39 +- wallet/src/types.rs | 
7 +- wallet/tests/libwallet.rs | 453 +++++++ 44 files changed, 3599 insertions(+), 3183 deletions(-) delete mode 100644 core/benches/sumtree.rs create mode 100644 core/tests/block.rs create mode 100644 core/tests/common/mod.rs create mode 100644 core/tests/core.rs create mode 100644 core/tests/transaction.rs create mode 100644 pool/tests/graph.rs create mode 100644 pool/tests/pool.rs create mode 100644 wallet/src/libwallet/aggsig.rs create mode 100644 wallet/src/libwallet/blind.rs rename {core/src/core => wallet/src/libwallet}/build.rs (94%) create mode 100644 wallet/src/libwallet/error.rs create mode 100644 wallet/src/libwallet/mod.rs create mode 100644 wallet/src/libwallet/proof.rs create mode 100644 wallet/src/libwallet/reward.rs create mode 100644 wallet/tests/libwallet.rs diff --git a/Cargo.lock b/Cargo.lock index 3272243bd..8c618f022 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -633,6 +633,7 @@ dependencies = [ "grin_keychain 0.2.0", "grin_store 0.2.0", "grin_util 0.2.0", + "grin_wallet 0.2.0", "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", @@ -663,6 +664,7 @@ dependencies = [ "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "grin_keychain 0.2.0", "grin_util 0.2.0", + "grin_wallet 0.2.0", "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "num-bigint 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -715,6 +717,7 @@ dependencies = [ "grin_core 0.2.0", "grin_keychain 0.2.0", "grin_util 0.2.0", + "grin_wallet 0.2.0", "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", @@ -801,6 +804,7 @@ 
dependencies = [ "grin_util 0.2.0", "hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)", "iron 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "prettytable-rs 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)", "router 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/chain/Cargo.toml b/chain/Cargo.toml index d9ec2d432..4f6c338ad 100644 --- a/chain/Cargo.toml +++ b/chain/Cargo.toml @@ -20,5 +20,6 @@ grin_store = { path = "../store" } grin_util = { path = "../util" } [dev-dependencies] +grin_wallet = { path = "../wallet" } env_logger = "0.3" rand = "0.3" diff --git a/chain/tests/data_file_integrity.rs b/chain/tests/data_file_integrity.rs index 2e6f71355..37bb677c1 100644 --- a/chain/tests/data_file_integrity.rs +++ b/chain/tests/data_file_integrity.rs @@ -17,6 +17,7 @@ extern crate grin_chain as chain; extern crate grin_core as core; extern crate grin_keychain as keychain; extern crate grin_util as util; +extern crate grin_wallet as wallet; extern crate rand; extern crate time; @@ -32,6 +33,7 @@ use core::global; use core::global::ChainTypes; use keychain::Keychain; +use wallet::libwallet; use core::pow; @@ -73,8 +75,8 @@ fn data_files() { let prev = chain.head_header().unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let pk = keychain.derive_key_id(n as u32).unwrap(); - let mut b = - core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap(); + let reward = libwallet::reward::output(&keychain, &pk, 0, prev.height).unwrap(); + let mut b = core::core::Block::new(&prev, vec![], difficulty.clone(), reward).unwrap(); b.header.timestamp = prev.timestamp + time::Duration::seconds(60); chain.set_txhashset_roots(&mut b, false).unwrap(); @@ -86,7 +88,7 @@ fn data_files() { 
global::sizeshift(), ).unwrap(); - let bhash = b.hash(); + let _bhash = b.hash(); chain .process_block(b.clone(), chain::Options::MINE) .unwrap(); @@ -112,43 +114,43 @@ fn data_files() { } } -fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block { - let mut b = prepare_block_nosum(kc, prev, diff, vec![]); +fn _prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block { + let mut b = _prepare_block_nosum(kc, prev, diff, vec![]); chain.set_txhashset_roots(&mut b, false).unwrap(); b } -fn prepare_block_tx( +fn _prepare_block_tx( kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>, ) -> Block { - let mut b = prepare_block_nosum(kc, prev, diff, txs); + let mut b = _prepare_block_nosum(kc, prev, diff, txs); chain.set_txhashset_roots(&mut b, false).unwrap(); b } -fn prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block { - let mut b = prepare_block_nosum(kc, prev, diff, vec![]); +fn _prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block { + let mut b = _prepare_block_nosum(kc, prev, diff, vec![]); chain.set_txhashset_roots(&mut b, true).unwrap(); b } -fn prepare_fork_block_tx( +fn _prepare_fork_block_tx( kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>, ) -> Block { - let mut b = prepare_block_nosum(kc, prev, diff, txs); + let mut b = _prepare_block_nosum(kc, prev, diff, txs); chain.set_txhashset_roots(&mut b, true).unwrap(); b } -fn prepare_block_nosum( +fn _prepare_block_nosum( kc: &Keychain, prev: &BlockHeader, diff: u64, @@ -156,7 +158,9 @@ fn prepare_block_nosum( ) -> Block { let key_id = kc.derive_key_id(diff as u32).unwrap(); - let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) { + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = libwallet::reward::output(&kc, &key_id, fees, prev.height).unwrap(); + let mut b = match 
core::core::Block::new(prev, txs, Difficulty::from_num(diff), reward) { Err(e) => panic!("{:?}", e), Ok(b) => b, }; diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index cdf2502fa..a73eb68b2 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -17,6 +17,7 @@ extern crate grin_chain as chain; extern crate grin_core as core; extern crate grin_keychain as keychain; extern crate grin_util as util; +extern crate grin_wallet as wallet; extern crate rand; extern crate time; @@ -25,12 +26,13 @@ use std::sync::Arc; use chain::Chain; use chain::types::*; -use core::core::{build, Block, BlockHeader, OutputFeatures, OutputIdentifier, Transaction}; +use core::core::{Block, BlockHeader, OutputFeatures, OutputIdentifier, Transaction}; use core::core::hash::Hashed; use core::core::target::Difficulty; use core::consensus; use core::global; use core::global::ChainTypes; +use wallet::libwallet::{self, build}; use keychain::Keychain; @@ -62,8 +64,8 @@ fn mine_empty_chain() { let prev = chain.head_header().unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let pk = keychain.derive_key_id(n as u32).unwrap(); - let mut b = - core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap(); + let reward = libwallet::reward::output(&keychain, &pk, 0, prev.height).unwrap(); + let mut b = core::core::Block::new(&prev, vec![], difficulty.clone(), reward).unwrap(); b.header.timestamp = prev.timestamp + time::Duration::seconds(60); chain.set_txhashset_roots(&mut b, false).unwrap(); @@ -410,7 +412,9 @@ fn prepare_block_nosum( let proof_size = global::proofsize(); let key_id = kc.derive_key_id(diff as u32).unwrap(); - let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) { + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = libwallet::reward::output(&kc, &key_id, fees, prev.height).unwrap(); + let mut b = match 
core::core::Block::new(prev, txs, Difficulty::from_num(diff), reward) { Err(e) => panic!("{:?}", e), Ok(b) => b, }; diff --git a/chain/tests/store_indices.rs b/chain/tests/store_indices.rs index 205970622..4e9d39a5c 100644 --- a/chain/tests/store_indices.rs +++ b/chain/tests/store_indices.rs @@ -16,6 +16,7 @@ extern crate env_logger; extern crate grin_chain as chain; extern crate grin_core as core; extern crate grin_keychain as keychain; +extern crate grin_wallet as wallet; extern crate rand; use std::fs; @@ -30,6 +31,8 @@ use core::global; use core::global::ChainTypes; use core::pow; +use wallet::libwallet; + fn clean_output_dir(dir_name: &str) { let _ = fs::remove_dir_all(dir_name); } @@ -53,13 +56,9 @@ fn test_various_store_indices() { .setup_height(&genesis.header, &Tip::new(genesis.hash())) .unwrap(); - let block = Block::new( - &genesis.header, - vec![], - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); + let reward = libwallet::reward::output(&keychain, &key_id, 0, 1).unwrap(); + + let block = Block::new(&genesis.header, vec![], Difficulty::one(), reward).unwrap(); let block_hash = block.hash(); chain_store.save_block(&block).unwrap(); diff --git a/chain/tests/test_coinbase_maturity.rs b/chain/tests/test_coinbase_maturity.rs index fc45cec94..a4ae32564 100644 --- a/chain/tests/test_coinbase_maturity.rs +++ b/chain/tests/test_coinbase_maturity.rs @@ -16,6 +16,7 @@ extern crate env_logger; extern crate grin_chain as chain; extern crate grin_core as core; extern crate grin_keychain as keychain; +extern crate grin_wallet as wallet; extern crate rand; extern crate time; @@ -23,7 +24,7 @@ use std::fs; use std::sync::Arc; use chain::types::*; -use core::core::build; +use wallet::libwallet::build; use core::core::target::Difficulty; use core::core::transaction; use core::core::OutputIdentifier; @@ -32,6 +33,7 @@ use core::global; use core::global::ChainTypes; use keychain::Keychain; +use wallet::libwallet; use core::pow; @@ -62,8 +64,8 @@ fn 
test_coinbase_maturity() { let key_id3 = keychain.derive_key_id(3).unwrap(); let key_id4 = keychain.derive_key_id(4).unwrap(); - let mut block = - core::core::Block::new(&prev, vec![], &keychain, &key_id1, Difficulty::one()).unwrap(); + let reward = libwallet::reward::output(&keychain, &key_id1, 0, prev.height).unwrap(); + let mut block = core::core::Block::new(&prev, vec![], Difficulty::one(), reward).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); @@ -114,13 +116,10 @@ fn test_coinbase_maturity() { &keychain, ).unwrap(); - let mut block = core::core::Block::new( - &prev, - vec![&coinbase_txn], - &keychain, - &key_id3, - Difficulty::one(), - ).unwrap(); + let txs = vec![&coinbase_txn]; + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = libwallet::reward::output(&keychain, &key_id3, fees, prev.height).unwrap(); + let mut block = core::core::Block::new(&prev, txs, Difficulty::one(), reward).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); @@ -145,8 +144,8 @@ fn test_coinbase_maturity() { let keychain = Keychain::from_random_seed().unwrap(); let pk = keychain.derive_key_id(1).unwrap(); - let mut block = - core::core::Block::new(&prev, vec![], &keychain, &pk, Difficulty::one()).unwrap(); + let reward = libwallet::reward::output(&keychain, &pk, 0, prev.height).unwrap(); + let mut block = core::core::Block::new(&prev, vec![], Difficulty::one(), reward).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); @@ -174,13 +173,11 @@ fn test_coinbase_maturity() { &keychain, ).unwrap(); - let mut block = core::core::Block::new( - &prev, - vec![&coinbase_txn], - &keychain, - &key_id4, - Difficulty::one(), - ).unwrap(); + let txs = 
vec![&coinbase_txn]; + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = libwallet::reward::output(&keychain, &key_id4, fees, prev.height).unwrap(); + let mut block = + core::core::Block::new(&prev, vec![&coinbase_txn], Difficulty::one(), reward).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); diff --git a/core/Cargo.toml b/core/Cargo.toml index 3a46797b5..983308f33 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -20,3 +20,6 @@ time = "0.1" grin_keychain = { path = "../keychain" } grin_util = { path = "../util" } + +[dev-dependencies] +grin_wallet = { path = "../wallet" } diff --git a/core/benches/sumtree.rs b/core/benches/sumtree.rs deleted file mode 100644 index a043d02f8..000000000 --- a/core/benches/sumtree.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The Grin Developers -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#![feature(test)] - -extern crate grin_core as core; -extern crate rand; -extern crate test; - -use rand::Rng; -use test::Bencher; - -use core::core::txhashset::{self, Summable, TxHashSet}; -use core::ser::{Error, Writeable, Writer}; - -#[derive(Copy, Clone, Debug)] -struct TestElem([u32; 4]); -impl Summable for TestElem { - type Sum = u64; - fn sum(&self) -> u64 { - // sums are not allowed to overflow, so we use this simple - // non-injective "sum" function that will still be homomorphic - self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 - + self.0[3] as u64 - } -} - -impl Writeable for TestElem { - fn write(&self, writer: &mut W) -> Result<(), Error> { - try!(writer.write_u32(self.0[0])); - try!(writer.write_u32(self.0[1])); - try!(writer.write_u32(self.0[2])); - writer.write_u32(self.0[3]) - } -} - -#[bench] -fn bench_small_tree(b: &mut Bencher) { - let mut rng = rand::thread_rng(); - b.iter(|| { - let mut big_tree = TxHashSet::new(); - for i in 0..1000 { - // To avoid RNG overflow we generate random elements that are small. - // Though to avoid repeat elements they have to be reasonably big. 
- let new_elem; - let word1 = rng.gen::() as u32; - let word2 = rng.gen::() as u32; - if rng.gen() { - if rng.gen() { - new_elem = TestElem([word1, word2, 0, 0]); - } else { - new_elem = TestElem([word1, 0, word2, 0]); - } - } else { - if rng.gen() { - new_elem = TestElem([0, word1, 0, word2]); - } else { - new_elem = TestElem([0, 0, word1, word2]); - } - } - - big_tree.push(new_elem); - } - }); -} diff --git a/core/src/core/block.rs b/core/src/core/block.rs index ce1898f55..c0a7dc742 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -18,8 +18,8 @@ use time; use rand::{thread_rng, Rng}; use std::collections::HashSet; -use core::{Commitment, Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, - ProofMessageElements, ShortId, Transaction, TxKernel}; +use core::{Commitment, Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, ShortId, + Transaction, TxKernel}; use consensus; use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD}; use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH}; @@ -30,7 +30,6 @@ use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSo use global; use keychain; use keychain::BlindingFactor; -use util::kernel_sig_msg; use util::LOGGER; use util::{secp, static_secp_instance}; @@ -402,14 +401,10 @@ impl Block { pub fn new( prev: &BlockHeader, txs: Vec<&Transaction>, - keychain: &keychain::Keychain, - key_id: &keychain::Identifier, difficulty: Difficulty, + reward_output: (Output, TxKernel), ) -> Result { - let fees = txs.iter().map(|tx| tx.fee()).sum(); - let (reward_out, reward_proof) = - Block::reward_output(keychain, key_id, fees, prev.height + 1)?; - let block = Block::with_reward(prev, txs, reward_out, reward_proof, difficulty)?; + let block = Block::with_reward(prev, txs, reward_output.0, reward_output.1, difficulty)?; Ok(block) } @@ -705,7 +700,8 @@ impl Block { Ok(()) } - fn verify_sums( + /// Verify sums + pub fn verify_sums( &self, prev_output_sum: &Commitment, 
prev_kernel_sum: &Commitment, @@ -744,12 +740,12 @@ impl Block { Ok((io_sum, kernel_sum)) } - // Validate the coinbase outputs generated by miners. Entails 2 main checks: - // - // * That the sum of all coinbase-marked outputs equal the supply. - // * That the sum of blinding factors for all coinbase-marked outputs match - // the coinbase-marked kernels. - fn verify_coinbase(&self) -> Result<(), Error> { + /// Validate the coinbase outputs generated by miners. Entails 2 main checks: + /// + /// * That the sum of all coinbase-marked outputs equal the supply. + /// * That the sum of blinding factors for all coinbase-marked outputs match + /// the coinbase-marked kernels. + pub fn verify_coinbase(&self) -> Result<(), Error> { let cb_outs = self.outputs .iter() .filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT)) @@ -781,430 +777,4 @@ impl Block { } Ok(()) } - - /// Builds the blinded output and related signature proof for the block - /// reward. - pub fn reward_output( - keychain: &keychain::Keychain, - key_id: &keychain::Identifier, - fees: u64, - height: u64, - ) -> Result<(Output, TxKernel), keychain::Error> { - let value = reward(fees); - let commit = keychain.commit(value, key_id)?; - let msg = ProofMessageElements::new(value, key_id); - - trace!(LOGGER, "Block reward - Pedersen Commit is: {:?}", commit,); - - let rproof = keychain.range_proof(value, key_id, commit, None, msg.to_proof_message())?; - - let output = Output { - features: OutputFeatures::COINBASE_OUTPUT, - commit: commit, - proof: rproof, - }; - - let secp = static_secp_instance(); - let secp = secp.lock().unwrap(); - let over_commit = secp.commit_value(reward(fees))?; - let out_commit = output.commitment(); - let excess = secp.commit_sum(vec![out_commit], vec![over_commit])?; - - // NOTE: Remember we sign the fee *and* the lock_height. 
- // For a coinbase output the fee is 0 and the lock_height is - // the lock_height of the coinbase output itself, - // not the lock_height of the tx (there is no tx for a coinbase output). - // This output will not be spendable earlier than lock_height (and we sign this - // here). - let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?; - let sig = keychain.aggsig_sign_from_key_id(&msg, &key_id)?; - - let proof = TxKernel { - features: KernelFeatures::COINBASE_KERNEL, - excess: excess, - excess_sig: sig, - fee: 0, - // lock_height here is the height of the block (tx should be valid immediately) - // *not* the lock_height of the coinbase output (only spendable 1,000 blocks later) - lock_height: height, - }; - Ok((output, proof)) - } -} - -#[cfg(test)] -mod test { - use std::time::Instant; - - use super::*; - use core::Transaction; - use core::build::{self, input, output, with_fee}; - use core::test::{tx1i2o, tx2i1o}; - use keychain::{Identifier, Keychain}; - use consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT}; - use util::{secp, secp_static}; - - // utility to create a block without worrying about the key or previous - // header - fn new_block( - txs: Vec<&Transaction>, - keychain: &Keychain, - previous_header: &BlockHeader, - ) -> Block { - let key_id = keychain.derive_key_id(1).unwrap(); - Block::new(&previous_header, txs, keychain, &key_id, Difficulty::one()).unwrap() - } - - // utility producing a transaction that spends an output with the provided - // value and blinding key - fn txspend1i1o( - v: u64, - keychain: &Keychain, - key_id1: Identifier, - key_id2: Identifier, - ) -> Transaction { - build::transaction( - vec![input(v, key_id1), output(3, key_id2), with_fee(2)], - &keychain, - ).unwrap() - } - - // Too slow for now #[test] - // TODO: make this fast enough or add similar but faster test? 
- #[allow(dead_code)] - fn too_large_block() { - let keychain = Keychain::from_random_seed().unwrap(); - let max_out = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT; - - let zero_commit = secp_static::commit_to_zero_value(); - - let mut pks = vec![]; - for n in 0..(max_out + 1) { - pks.push(keychain.derive_key_id(n as u32).unwrap()); - } - - let mut parts = vec![]; - for _ in 0..max_out { - parts.push(output(5, pks.pop().unwrap())); - } - - let now = Instant::now(); - parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]); - let mut tx = build::transaction(parts, &keychain).unwrap(); - println!("Build tx: {}", now.elapsed().as_secs()); - - let prev = BlockHeader::default(); - let b = new_block(vec![&mut tx], &keychain, &prev); - assert!(b.validate(&zero_commit, &zero_commit).is_err()); - } - - #[test] - // block with no inputs/outputs/kernels - // no fees, no reward, no coinbase - fn very_empty_block() { - let b = Block { - header: BlockHeader::default(), - inputs: vec![], - outputs: vec![], - kernels: vec![], - }; - - assert_eq!( - b.verify_coinbase(), - Err(Error::Secp(secp::Error::IncorrectCommitSum)) - ); - } - - #[test] - // builds a block with a tx spending another and check that cut_through occurred - fn block_with_cut_through() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - let zero_commit = secp_static::commit_to_zero_value(); - - let mut btx1 = tx2i1o(); - let mut btx2 = build::transaction( - vec![input(7, key_id1), output(5, key_id2.clone()), with_fee(2)], - &keychain, - ).unwrap(); - - // spending tx2 - reuse key_id2 - - let mut btx3 = txspend1i1o(5, &keychain, key_id2.clone(), key_id3); - let prev = BlockHeader::default(); - let b = new_block(vec![&mut btx1, &mut btx2, &mut btx3], &keychain, &prev); - - // block should have been automatically compacted (including reward - // 
output) and should still be valid - b.validate(&zero_commit, &zero_commit).unwrap(); - assert_eq!(b.inputs.len(), 3); - assert_eq!(b.outputs.len(), 3); - } - - #[test] - fn empty_block_with_coinbase_is_valid() { - let keychain = Keychain::from_random_seed().unwrap(); - let zero_commit = secp_static::commit_to_zero_value(); - let prev = BlockHeader::default(); - let b = new_block(vec![], &keychain, &prev); - - assert_eq!(b.inputs.len(), 0); - assert_eq!(b.outputs.len(), 1); - assert_eq!(b.kernels.len(), 1); - - let coinbase_outputs = b.outputs - .iter() - .filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT)) - .map(|o| o.clone()) - .collect::>(); - assert_eq!(coinbase_outputs.len(), 1); - - let coinbase_kernels = b.kernels - .iter() - .filter(|out| out.features.contains(KernelFeatures::COINBASE_KERNEL)) - .map(|o| o.clone()) - .collect::>(); - assert_eq!(coinbase_kernels.len(), 1); - - // the block should be valid here (single coinbase output with corresponding - // txn kernel) - assert!(b.validate(&zero_commit, &zero_commit).is_ok()); - } - - #[test] - // test that flipping the COINBASE_OUTPUT flag on the output features - // invalidates the block and specifically it causes verify_coinbase to fail - // additionally verifying the merkle_inputs_outputs also fails - fn remove_coinbase_output_flag() { - let keychain = Keychain::from_random_seed().unwrap(); - let zero_commit = secp_static::commit_to_zero_value(); - let prev = BlockHeader::default(); - let mut b = new_block(vec![], &keychain, &prev); - - assert!( - b.outputs[0] - .features - .contains(OutputFeatures::COINBASE_OUTPUT) - ); - b.outputs[0] - .features - .remove(OutputFeatures::COINBASE_OUTPUT); - - assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch)); - assert!(b.verify_sums(&zero_commit, &zero_commit).is_ok()); - assert_eq!( - b.validate(&zero_commit, &zero_commit), - Err(Error::CoinbaseSumMismatch) - ); - } - - #[test] - // test that flipping the COINBASE_KERNEL flag on the 
kernel features - // invalidates the block and specifically it causes verify_coinbase to fail - fn remove_coinbase_kernel_flag() { - let keychain = Keychain::from_random_seed().unwrap(); - let zero_commit = secp_static::commit_to_zero_value(); - let prev = BlockHeader::default(); - let mut b = new_block(vec![], &keychain, &prev); - - assert!( - b.kernels[0] - .features - .contains(KernelFeatures::COINBASE_KERNEL) - ); - b.kernels[0] - .features - .remove(KernelFeatures::COINBASE_KERNEL); - - assert_eq!( - b.verify_coinbase(), - Err(Error::Secp(secp::Error::IncorrectCommitSum)) - ); - - assert_eq!( - b.validate(&zero_commit, &zero_commit), - Err(Error::Secp(secp::Error::IncorrectCommitSum)) - ); - } - - #[test] - fn serialize_deserialize_block() { - let keychain = Keychain::from_random_seed().unwrap(); - let prev = BlockHeader::default(); - let b = new_block(vec![], &keychain, &prev); - - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let b2: Block = ser::deserialize(&mut &vec[..]).unwrap(); - - assert_eq!(b.header, b2.header); - assert_eq!(b.inputs, b2.inputs); - assert_eq!(b.outputs, b2.outputs); - assert_eq!(b.kernels, b2.kernels); - } - - #[test] - fn empty_block_serialized_size() { - let keychain = Keychain::from_random_seed().unwrap(); - let prev = BlockHeader::default(); - let b = new_block(vec![], &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let target_len = 1_216; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn block_single_tx_serialized_size() { - let keychain = Keychain::from_random_seed().unwrap(); - let tx1 = tx1i2o(); - let prev = BlockHeader::default(); - let b = new_block(vec![&tx1], &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let target_len = 2_796; - assert_eq!(vec.len(), target_len); - } - - #[test] - fn empty_compact_block_serialized_size() { - let keychain = 
Keychain::from_random_seed().unwrap(); - let prev = BlockHeader::default(); - let b = new_block(vec![], &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); - let target_len = 1_224; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn compact_block_single_tx_serialized_size() { - let keychain = Keychain::from_random_seed().unwrap(); - let tx1 = tx1i2o(); - let prev = BlockHeader::default(); - let b = new_block(vec![&tx1], &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); - let target_len = 1_230; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn block_10_tx_serialized_size() { - let keychain = Keychain::from_random_seed().unwrap(); - global::set_mining_mode(global::ChainTypes::Mainnet); - - let mut txs = vec![]; - for _ in 0..10 { - let tx = tx1i2o(); - txs.push(tx); - } - let prev = BlockHeader::default(); - let b = new_block(txs.iter().collect(), &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let target_len = 17_016; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn compact_block_10_tx_serialized_size() { - let keychain = Keychain::from_random_seed().unwrap(); - - let mut txs = vec![]; - for _ in 0..10 { - let tx = tx1i2o(); - txs.push(tx); - } - let prev = BlockHeader::default(); - let b = new_block(txs.iter().collect(), &keychain, &prev); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); - let target_len = 1_284; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn compact_block_hash_with_nonce() { - let keychain = Keychain::from_random_seed().unwrap(); - let tx = tx1i2o(); - let prev = BlockHeader::default(); - let b = new_block(vec![&tx], &keychain, &prev); - let cb1 = b.as_compact_block(); - let cb2 = b.as_compact_block(); - - // random nonce will not 
affect the hash of the compact block itself - // hash is based on header POW only - assert!(cb1.nonce != cb2.nonce); - assert_eq!(b.hash(), cb1.hash()); - assert_eq!(cb1.hash(), cb2.hash()); - - assert!(cb1.kern_ids[0] != cb2.kern_ids[0]); - - // check we can identify the specified kernel from the short_id - // correctly in both of the compact_blocks - assert_eq!( - cb1.kern_ids[0], - tx.kernels[0].short_id(&cb1.hash(), cb1.nonce) - ); - assert_eq!( - cb2.kern_ids[0], - tx.kernels[0].short_id(&cb2.hash(), cb2.nonce) - ); - } - - #[test] - fn convert_block_to_compact_block() { - let keychain = Keychain::from_random_seed().unwrap(); - let tx1 = tx1i2o(); - let prev = BlockHeader::default(); - let b = new_block(vec![&tx1], &keychain, &prev); - let cb = b.as_compact_block(); - - assert_eq!(cb.out_full.len(), 1); - assert_eq!(cb.kern_full.len(), 1); - assert_eq!(cb.kern_ids.len(), 1); - - assert_eq!( - cb.kern_ids[0], - b.kernels - .iter() - .find(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL)) - .unwrap() - .short_id(&cb.hash(), cb.nonce) - ); - } - - #[test] - fn hydrate_empty_compact_block() { - let keychain = Keychain::from_random_seed().unwrap(); - let prev = BlockHeader::default(); - let b = new_block(vec![], &keychain, &prev); - let cb = b.as_compact_block(); - let hb = Block::hydrate_from(cb, vec![]); - assert_eq!(hb.header, b.header); - assert_eq!(hb.outputs, b.outputs); - assert_eq!(hb.kernels, b.kernels); - } - - #[test] - fn serialize_deserialize_compact_block() { - let b = CompactBlock { - header: BlockHeader::default(), - nonce: 0, - out_full: vec![], - kern_full: vec![], - kern_ids: vec![ShortId::zero()], - }; - - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let b2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap(); - - assert_eq!(b.header, b2.header); - assert_eq!(b.kern_ids, b2.kern_ids); - } } diff --git a/core/src/core/mod.rs b/core/src/core/mod.rs index d663c694e..5467b82e7 100644 --- 
a/core/src/core/mod.rs +++ b/core/src/core/mod.rs @@ -15,13 +15,11 @@ //! Core types pub mod block; -pub mod build; pub mod hash; pub mod id; pub mod pmmr; pub mod target; pub mod transaction; -// pub mod txoset; #[allow(dead_code)] use rand::{thread_rng, Rng}; @@ -274,14 +272,6 @@ pub fn amount_to_hr_string(amount: u64) -> String { #[cfg(test)] mod test { use super::*; - use core::target::Difficulty; - use core::hash::ZERO_HASH; - use core::build::{initial_tx, input, output, with_excess, with_fee, with_lock_height}; - use core::block::Error::KernelLockHeight; - use ser; - use keychain; - use keychain::Keychain; - use util::secp_static; #[test] pub fn test_amount_to_hr() { @@ -304,550 +294,4 @@ mod test { assert!("5000000000.000000000" == amount_to_hr_string(5_000_000_000_000_000_000)); } - #[test] - #[should_panic(expected = "InvalidSecretKey")] - fn test_zero_commit_fails() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - - // blinding should fail as signing with a zero r*G shouldn't work - build::transaction( - vec![ - input(10, key_id1.clone()), - output(9, key_id1.clone()), - with_fee(1), - ], - &keychain, - ).unwrap(); - } - - #[test] - fn simple_tx_ser() { - let tx = tx2i1o(); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &tx).expect("serialization failed"); - let target_len = 954; - assert_eq!(vec.len(), target_len,); - } - - #[test] - fn simple_tx_ser_deser() { - let tx = tx2i1o(); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &tx).expect("serialization failed"); - let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); - assert_eq!(dtx.fee(), 2); - assert_eq!(dtx.inputs.len(), 2); - assert_eq!(dtx.outputs.len(), 1); - assert_eq!(tx.hash(), dtx.hash()); - } - - #[test] - fn tx_double_ser_deser() { - // checks serializing doesn't mess up the tx and produces consistent results - let btx = tx2i1o(); - - let mut vec = Vec::new(); - assert!(ser::serialize(&mut vec, 
&btx).is_ok()); - let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); - - let mut vec2 = Vec::new(); - assert!(ser::serialize(&mut vec2, &btx).is_ok()); - let dtx2: Transaction = ser::deserialize(&mut &vec2[..]).unwrap(); - - assert_eq!(btx.hash(), dtx.hash()); - assert_eq!(dtx.hash(), dtx2.hash()); - } - - #[test] - fn build_tx_kernel() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - // first build a valid tx with corresponding blinding factor - let tx = build::transaction( - vec![ - input(10, key_id1), - output(5, key_id2), - output(3, key_id3), - with_fee(2), - ], - &keychain, - ).unwrap(); - - // check the tx is valid - tx.validate().unwrap(); - - // check the kernel is also itself valid - assert_eq!(tx.kernels.len(), 1); - let kern = &tx.kernels[0]; - kern.verify().unwrap(); - - assert_eq!(kern.features, KernelFeatures::DEFAULT_KERNEL); - assert_eq!(kern.fee, tx.fee()); - } - - // Combine two transactions into one big transaction (with multiple kernels) - // and check it still validates. 
- #[test] - fn transaction_cut_through() { - let tx1 = tx1i2o(); - let tx2 = tx2i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - - // now build a "cut_through" tx from tx1 and tx2 - let tx3 = aggregate_with_cut_through(vec![tx1, tx2]).unwrap(); - - assert!(tx3.validate().is_ok()); - } - - // Attempt to deaggregate a multi-kernel transaction in a different way - #[test] - fn multi_kernel_transaction_deaggregation() { - let tx1 = tx1i1o(); - let tx2 = tx1i1o(); - let tx3 = tx1i1o(); - let tx4 = tx1i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - assert!(tx3.validate().is_ok()); - assert!(tx4.validate().is_ok()); - - let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap(); - let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); - let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap(); - - assert!(tx1234.validate().is_ok()); - assert!(tx12.validate().is_ok()); - assert!(tx34.validate().is_ok()); - - let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap(); - assert!(deaggregated_tx34.validate().is_ok()); - assert_eq!(tx34, deaggregated_tx34); - - let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap(); - - assert!(deaggregated_tx12.validate().is_ok()); - assert_eq!(tx12, deaggregated_tx12); - } - - #[test] - fn multi_kernel_transaction_deaggregation_2() { - let tx1 = tx1i1o(); - let tx2 = tx1i1o(); - let tx3 = tx1i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - assert!(tx3.validate().is_ok()); - - let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); - let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); - - assert!(tx123.validate().is_ok()); - assert!(tx12.validate().is_ok()); - - let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap(); - assert!(deaggregated_tx3.validate().is_ok()); - assert_eq!(tx3, deaggregated_tx3); - 
} - - #[test] - fn multi_kernel_transaction_deaggregation_3() { - let tx1 = tx1i1o(); - let tx2 = tx1i1o(); - let tx3 = tx1i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - assert!(tx3.validate().is_ok()); - - let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); - let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap(); - let tx2 = aggregate(vec![tx2.clone()]).unwrap(); - - assert!(tx123.validate().is_ok()); - assert!(tx2.validate().is_ok()); - - let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap(); - assert!(deaggregated_tx13.validate().is_ok()); - assert_eq!(tx13, deaggregated_tx13); - } - - #[test] - fn multi_kernel_transaction_deaggregation_4() { - let tx1 = tx1i1o(); - let tx2 = tx1i1o(); - let tx3 = tx1i1o(); - let tx4 = tx1i1o(); - let tx5 = tx1i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - assert!(tx3.validate().is_ok()); - assert!(tx4.validate().is_ok()); - assert!(tx5.validate().is_ok()); - - let tx12345 = aggregate(vec![ - tx1.clone(), - tx2.clone(), - tx3.clone(), - tx4.clone(), - tx5.clone(), - ]).unwrap(); - assert!(tx12345.validate().is_ok()); - - let deaggregated_tx5 = deaggregate( - tx12345.clone(), - vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()], - ).unwrap(); - assert!(deaggregated_tx5.validate().is_ok()); - assert_eq!(tx5, deaggregated_tx5); - } - - #[test] - fn multi_kernel_transaction_deaggregation_5() { - let tx1 = tx1i1o(); - let tx2 = tx1i1o(); - let tx3 = tx1i1o(); - let tx4 = tx1i1o(); - let tx5 = tx1i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - assert!(tx3.validate().is_ok()); - assert!(tx4.validate().is_ok()); - assert!(tx5.validate().is_ok()); - - let tx12345 = aggregate(vec![ - tx1.clone(), - tx2.clone(), - tx3.clone(), - tx4.clone(), - tx5.clone(), - ]).unwrap(); - let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); - let tx34 = aggregate(vec![tx3.clone(), 
tx4.clone()]).unwrap(); - - assert!(tx12345.validate().is_ok()); - - let deaggregated_tx5 = - deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap(); - assert!(deaggregated_tx5.validate().is_ok()); - assert_eq!(tx5, deaggregated_tx5); - } - - // Attempt to deaggregate a multi-kernel transaction - #[test] - fn basic_transaction_deaggregation() { - let tx1 = tx1i2o(); - let tx2 = tx2i1o(); - - assert!(tx1.validate().is_ok()); - assert!(tx2.validate().is_ok()); - - // now build a "cut_through" tx from tx1 and tx2 - let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); - - assert!(tx3.validate().is_ok()); - - let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap(); - - assert!(deaggregated_tx1.validate().is_ok()); - assert_eq!(tx1, deaggregated_tx1); - - let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap(); - - assert!(deaggregated_tx2.validate().is_ok()); - assert_eq!(tx2, deaggregated_tx2); - } - - #[test] - fn hash_output() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - let tx = build::transaction( - vec![ - input(75, key_id1), - output(42, key_id2), - output(32, key_id3), - with_fee(1), - ], - &keychain, - ).unwrap(); - let h = tx.outputs[0].hash(); - assert!(h != ZERO_HASH); - let h2 = tx.outputs[1].hash(); - assert!(h != h2); - } - - #[ignore] - #[test] - fn blind_tx() { - let btx = tx2i1o(); - assert!(btx.validate().is_ok()); - - // Ignored for bullet proofs, because calling range_proof_info - // with a bullet proof causes painful errors - - // checks that the range proof on our blind output is sufficiently hiding - let Output { proof, .. 
} = btx.outputs[0]; - - let secp = static_secp_instance(); - let secp = secp.lock().unwrap(); - let info = secp.range_proof_info(proof); - - assert!(info.min == 0); - assert!(info.max == u64::max_value()); - } - - #[test] - fn tx_hash_diff() { - let btx1 = tx2i1o(); - let btx2 = tx1i1o(); - - if btx1.hash() == btx2.hash() { - panic!("diff txs have same hash") - } - } - - /// Simulate the standard exchange between 2 parties when creating a basic - /// 2 inputs, 2 outputs transaction. - #[test] - fn tx_build_exchange() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - let key_id4 = keychain.derive_key_id(4).unwrap(); - - let (tx_alice, blind_sum) = { - // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they - // become inputs in the new transaction - let (in1, in2) = (input(4, key_id1), input(3, key_id2)); - - // Alice builds her transaction, with change, which also produces the sum - // of blinding factors before they're obscured. - let (tx, sum) = build::partial_transaction( - vec![in1, in2, output(1, key_id3), with_fee(2)], - &keychain, - ).unwrap(); - - (tx, sum) - }; - - // From now on, Bob only has the obscured transaction and the sum of - // blinding factors. He adds his output, finalizes the transaction so it's - // ready for broadcast. 
- let tx_final = build::transaction( - vec![ - initial_tx(tx_alice), - with_excess(blind_sum), - output(4, key_id4), - ], - &keychain, - ).unwrap(); - - tx_final.validate().unwrap(); - } - - #[test] - fn reward_empty_block() { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - - let zero_commit = secp_static::commit_to_zero_value(); - - let previous_header = BlockHeader::default(); - - let b = Block::new( - &previous_header, - vec![], - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - b.cut_through() - .validate(&zero_commit, &zero_commit) - .unwrap(); - } - - #[test] - fn reward_with_tx_block() { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - - let zero_commit = secp_static::commit_to_zero_value(); - - let mut tx1 = tx2i1o(); - tx1.validate().unwrap(); - - let previous_header = BlockHeader::default(); - - let block = Block::new( - &previous_header, - vec![&mut tx1], - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - block - .cut_through() - .validate(&zero_commit, &zero_commit) - .unwrap(); - } - - #[test] - fn simple_block() { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - - let zero_commit = secp_static::commit_to_zero_value(); - - let mut tx1 = tx2i1o(); - let mut tx2 = tx1i1o(); - - let previous_header = BlockHeader::default(); - - let b = Block::new( - &previous_header, - vec![&mut tx1, &mut tx2], - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - b.validate(&zero_commit, &zero_commit).unwrap(); - } - - #[test] - fn test_block_with_timelocked_tx() { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - let zero_commit = 
secp_static::commit_to_zero_value(); - - // first check we can add a timelocked tx where lock height matches current - // block height and that the resulting block is valid - let tx1 = build::transaction( - vec![ - input(5, key_id1.clone()), - output(3, key_id2.clone()), - with_fee(2), - with_lock_height(1), - ], - &keychain, - ).unwrap(); - - let previous_header = BlockHeader::default(); - - let b = Block::new( - &previous_header, - vec![&tx1], - &keychain, - &key_id3.clone(), - Difficulty::one(), - ).unwrap(); - b.validate(&zero_commit, &zero_commit).unwrap(); - - // now try adding a timelocked tx where lock height is greater than current - // block height - let tx1 = build::transaction( - vec![ - input(5, key_id1.clone()), - output(3, key_id2.clone()), - with_fee(2), - with_lock_height(2), - ], - &keychain, - ).unwrap(); - - let previous_header = BlockHeader::default(); - - let b = Block::new( - &previous_header, - vec![&tx1], - &keychain, - &key_id3.clone(), - Difficulty::one(), - ).unwrap(); - match b.validate(&zero_commit, &zero_commit) { - Err(KernelLockHeight(height)) => { - assert_eq!(height, 2); - } - _ => panic!("expecting KernelLockHeight error here"), - } - } - - #[test] - pub fn test_verify_1i1o_sig() { - let tx = tx1i1o(); - tx.validate().unwrap(); - } - - #[test] - pub fn test_verify_2i1o_sig() { - let tx = tx2i1o(); - tx.validate().unwrap(); - } - - // utility producing a transaction with 2 inputs and a single outputs - pub fn tx2i1o() -> Transaction { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - build::transaction_with_offset( - vec![ - input(10, key_id1), - input(11, key_id2), - output(19, key_id3), - with_fee(2), - ], - &keychain, - ).unwrap() - } - - // utility producing a transaction with a single input and output - pub fn tx1i1o() -> Transaction { - let keychain = 
keychain::Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - - build::transaction_with_offset( - vec![input(5, key_id1), output(3, key_id2), with_fee(2)], - &keychain, - ).unwrap() - } - - // utility producing a transaction with a single input - // and two outputs (one change output) - // Note: this tx has an "offset" kernel - pub fn tx1i2o() -> Transaction { - let keychain = keychain::Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - build::transaction_with_offset( - vec![ - input(6, key_id1), - output(3, key_id2), - output(1, key_id3), - with_fee(2), - ], - &keychain, - ).unwrap() - } } diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index 2bdbb9b2d..6c575bd74 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -30,7 +30,7 @@ use core::BlockHeader; use core::hash::{Hash, Hashed, ZERO_HASH}; use core::pmmr::MerkleProof; use keychain; -use keychain::{BlindingFactor, Keychain}; +use keychain::BlindingFactor; use ser::{self, read_and_verify_sorted, ser_vec, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer}; use util; @@ -186,7 +186,15 @@ impl TxKernel { let secp = static_secp_instance(); let secp = secp.lock().unwrap(); let sig = &self.excess_sig; - let valid = Keychain::aggsig_verify_single_from_commit(&secp, &sig, &msg, &self.excess); + // Verify aggsig directly in libsecp + let pubkeys = &self.excess.to_two_pubkeys(&secp); + let mut valid = false; + for i in 0..pubkeys.len() { + valid = secp::aggsig::verify_single(&secp, &sig, &msg, None, &pubkeys[i], false); + if valid { + break; + } + } if !valid { return Err(secp::Error::IncorrectSignature); } @@ -947,7 +955,7 @@ impl Output { pub fn verify_proof(&self) -> Result<(), secp::Error> { let secp = 
static_secp_instance(); let secp = secp.lock().unwrap(); - match Keychain::verify_range_proof(&secp, self.commit, self.proof, None) { + match secp.verify_bullet_proof(self.commit, self.proof, None) { Ok(_) => Ok(()), Err(e) => Err(e), } @@ -1196,29 +1204,6 @@ mod test { assert_eq!(kernel2.fee, 10); } - #[test] - fn test_output_ser_deser() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - let commit = keychain.commit(5, &key_id).unwrap(); - let msg = secp::pedersen::ProofMessage::empty(); - let proof = keychain.range_proof(5, &key_id, commit, None, msg).unwrap(); - - let out = Output { - features: OutputFeatures::DEFAULT_OUTPUT, - commit: commit, - proof: proof, - }; - - let mut vec = vec![]; - ser::serialize(&mut vec, &out).expect("serialized failed"); - let dout: Output = ser::deserialize(&mut &vec[..]).unwrap(); - - assert_eq!(dout.features, OutputFeatures::DEFAULT_OUTPUT); - assert_eq!(dout.commit, out.commit); - assert_eq!(dout.proof, out.proof); - } - #[test] fn commit_consistency() { let keychain = Keychain::from_seed(&[0; 32]).unwrap(); diff --git a/core/src/pow/mod.rs b/core/src/pow/mod.rs index f80cbd544..c8650ff88 100644 --- a/core/src/pow/mod.rs +++ b/core/src/pow/mod.rs @@ -120,7 +120,6 @@ mod test { use global; use core::target::Difficulty; use genesis; - use global::ChainTypes; /// We'll be generating genesis blocks differently #[ignore] diff --git a/core/tests/block.rs b/core/tests/block.rs new file mode 100644 index 000000000..bbdfeef5b --- /dev/null +++ b/core/tests/block.rs @@ -0,0 +1,393 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +extern crate grin_core; +extern crate grin_keychain as keychain; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +pub mod common; + +use grin_core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures}; +use grin_core::core::hash::Hashed; +use grin_core::core::block::Error; +use grin_core::core::id::{ShortId, ShortIdentifiable}; +use wallet::libwallet::build::{self, input, output, with_fee}; +use common::{new_block, tx1i2o, tx2i1o, txspend1i1o}; +use keychain::Keychain; +use grin_core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT}; +use grin_core::ser; +use grin_core::global; +use std::time::Instant; + +use util::{secp, secp_static}; + +// Too slow for now #[test] +// TODO: make this fast enough or add similar but faster test? 
+#[allow(dead_code)] +fn too_large_block() { + let keychain = Keychain::from_random_seed().unwrap(); + let max_out = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT; + + let zero_commit = secp_static::commit_to_zero_value(); + + let mut pks = vec![]; + for n in 0..(max_out + 1) { + pks.push(keychain.derive_key_id(n as u32).unwrap()); + } + + let mut parts = vec![]; + for _ in 0..max_out { + parts.push(output(5, pks.pop().unwrap())); + } + + let now = Instant::now(); + parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]); + let mut tx = build::transaction(parts, &keychain).unwrap(); + println!("Build tx: {}", now.elapsed().as_secs()); + + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![&mut tx], &keychain, &prev, &key_id); + assert!(b.validate(&zero_commit, &zero_commit).is_err()); +} + +#[test] +// block with no inputs/outputs/kernels +// no fees, no reward, no coinbase +fn very_empty_block() { + let b = Block { + header: BlockHeader::default(), + inputs: vec![], + outputs: vec![], + kernels: vec![], + }; + + assert_eq!( + b.verify_coinbase(), + Err(Error::Secp(secp::Error::IncorrectCommitSum)) + ); +} + +#[test] +// builds a block with a tx spending another and check that cut_through occurred +fn block_with_cut_through() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + let zero_commit = secp_static::commit_to_zero_value(); + + let mut btx1 = tx2i1o(); + let mut btx2 = build::transaction( + vec![input(7, key_id1), output(5, key_id2.clone()), with_fee(2)], + &keychain, + ).unwrap(); + + // spending tx2 - reuse key_id2 + + let mut btx3 = txspend1i1o(5, &keychain, key_id2.clone(), key_id3); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block( + vec![&mut btx1, &mut btx2, &mut 
btx3], + &keychain, + &prev, + &key_id, + ); + + // block should have been automatically compacted (including reward + // output) and should still be valid + b.validate(&zero_commit, &zero_commit).unwrap(); + assert_eq!(b.inputs.len(), 3); + assert_eq!(b.outputs.len(), 3); +} + +#[test] +fn empty_block_with_coinbase_is_valid() { + let keychain = Keychain::from_random_seed().unwrap(); + let zero_commit = secp_static::commit_to_zero_value(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![], &keychain, &prev, &key_id); + + assert_eq!(b.inputs.len(), 0); + assert_eq!(b.outputs.len(), 1); + assert_eq!(b.kernels.len(), 1); + + let coinbase_outputs = b.outputs + .iter() + .filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT)) + .map(|o| o.clone()) + .collect::>(); + assert_eq!(coinbase_outputs.len(), 1); + + let coinbase_kernels = b.kernels + .iter() + .filter(|out| out.features.contains(KernelFeatures::COINBASE_KERNEL)) + .map(|o| o.clone()) + .collect::>(); + assert_eq!(coinbase_kernels.len(), 1); + + // the block should be valid here (single coinbase output with corresponding + // txn kernel) + assert!(b.validate(&zero_commit, &zero_commit).is_ok()); +} + +#[test] +// test that flipping the COINBASE_OUTPUT flag on the output features +// invalidates the block and specifically it causes verify_coinbase to fail +// additionally verifying the merkle_inputs_outputs also fails +fn remove_coinbase_output_flag() { + let keychain = Keychain::from_random_seed().unwrap(); + let zero_commit = secp_static::commit_to_zero_value(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let mut b = new_block(vec![], &keychain, &prev, &key_id); + + assert!( + b.outputs[0] + .features + .contains(OutputFeatures::COINBASE_OUTPUT) + ); + b.outputs[0] + .features + .remove(OutputFeatures::COINBASE_OUTPUT); + + assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch)); 
+ assert!(b.verify_sums(&zero_commit, &zero_commit).is_ok()); + assert_eq!( + b.validate(&zero_commit, &zero_commit), + Err(Error::CoinbaseSumMismatch) + ); +} + +#[test] +// test that flipping the COINBASE_KERNEL flag on the kernel features +// invalidates the block and specifically it causes verify_coinbase to fail +fn remove_coinbase_kernel_flag() { + let keychain = Keychain::from_random_seed().unwrap(); + let zero_commit = secp_static::commit_to_zero_value(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let mut b = new_block(vec![], &keychain, &prev, &key_id); + + assert!( + b.kernels[0] + .features + .contains(KernelFeatures::COINBASE_KERNEL) + ); + b.kernels[0] + .features + .remove(KernelFeatures::COINBASE_KERNEL); + + assert_eq!( + b.verify_coinbase(), + Err(Error::Secp(secp::Error::IncorrectCommitSum)) + ); + + assert_eq!( + b.validate(&zero_commit, &zero_commit), + Err(Error::Secp(secp::Error::IncorrectCommitSum)) + ); +} + +#[test] +fn serialize_deserialize_block() { + let keychain = Keychain::from_random_seed().unwrap(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![], &keychain, &prev, &key_id); + + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let b2: Block = ser::deserialize(&mut &vec[..]).unwrap(); + + assert_eq!(b.header, b2.header); + assert_eq!(b.inputs, b2.inputs); + assert_eq!(b.outputs, b2.outputs); + assert_eq!(b.kernels, b2.kernels); +} + +#[test] +fn empty_block_serialized_size() { + let keychain = Keychain::from_random_seed().unwrap(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![], &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let target_len = 1_216; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn block_single_tx_serialized_size() { + 
let keychain = Keychain::from_random_seed().unwrap(); + let tx1 = tx1i2o(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![&tx1], &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let target_len = 2_796; + assert_eq!(vec.len(), target_len); +} + +#[test] +fn empty_compact_block_serialized_size() { + let keychain = Keychain::from_random_seed().unwrap(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![], &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); + let target_len = 1_224; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn compact_block_single_tx_serialized_size() { + let keychain = Keychain::from_random_seed().unwrap(); + let tx1 = tx1i2o(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![&tx1], &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); + let target_len = 1_230; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn block_10_tx_serialized_size() { + let keychain = Keychain::from_random_seed().unwrap(); + global::set_mining_mode(global::ChainTypes::Mainnet); + + let mut txs = vec![]; + for _ in 0..10 { + let tx = tx1i2o(); + txs.push(tx); + } + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let target_len = 17_016; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn compact_block_10_tx_serialized_size() { + let keychain = Keychain::from_random_seed().unwrap(); + + let mut txs = vec![]; + for _ in 0..10 { + let 
tx = tx1i2o(); + txs.push(tx); + } + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); + let target_len = 1_284; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn compact_block_hash_with_nonce() { + let keychain = Keychain::from_random_seed().unwrap(); + let tx = tx1i2o(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![&tx], &keychain, &prev, &key_id); + let cb1 = b.as_compact_block(); + let cb2 = b.as_compact_block(); + + // random nonce will not affect the hash of the compact block itself + // hash is based on header POW only + assert!(cb1.nonce != cb2.nonce); + assert_eq!(b.hash(), cb1.hash()); + assert_eq!(cb1.hash(), cb2.hash()); + + assert!(cb1.kern_ids[0] != cb2.kern_ids[0]); + + // check we can identify the specified kernel from the short_id + // correctly in both of the compact_blocks + assert_eq!( + cb1.kern_ids[0], + tx.kernels[0].short_id(&cb1.hash(), cb1.nonce) + ); + assert_eq!( + cb2.kern_ids[0], + tx.kernels[0].short_id(&cb2.hash(), cb2.nonce) + ); +} + +#[test] +fn convert_block_to_compact_block() { + let keychain = Keychain::from_random_seed().unwrap(); + let tx1 = tx1i2o(); + let prev = BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![&tx1], &keychain, &prev, &key_id); + let cb = b.as_compact_block(); + + assert_eq!(cb.out_full.len(), 1); + assert_eq!(cb.kern_full.len(), 1); + assert_eq!(cb.kern_ids.len(), 1); + + assert_eq!( + cb.kern_ids[0], + b.kernels + .iter() + .find(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL)) + .unwrap() + .short_id(&cb.hash(), cb.nonce) + ); +} + +#[test] +fn hydrate_empty_compact_block() { + let keychain = Keychain::from_random_seed().unwrap(); + let prev = 
BlockHeader::default(); + let key_id = keychain.derive_key_id(1).unwrap(); + let b = new_block(vec![], &keychain, &prev, &key_id); + let cb = b.as_compact_block(); + let hb = Block::hydrate_from(cb, vec![]); + assert_eq!(hb.header, b.header); + assert_eq!(hb.outputs, b.outputs); + assert_eq!(hb.kernels, b.kernels); +} + +#[test] +fn serialize_deserialize_compact_block() { + let b = CompactBlock { + header: BlockHeader::default(), + nonce: 0, + out_full: vec![], + kern_full: vec![], + kern_ids: vec![ShortId::zero()], + }; + + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let b2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap(); + + assert_eq!(b.header, b2.header); + assert_eq!(b.kern_ids, b2.kern_ids); +} diff --git a/core/tests/common/mod.rs b/core/tests/common/mod.rs new file mode 100644 index 000000000..87b958097 --- /dev/null +++ b/core/tests/common/mod.rs @@ -0,0 +1,104 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Common test functions + +extern crate grin_core; +extern crate grin_keychain as keychain; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +use grin_core::core::block::{Block, BlockHeader}; +use grin_core::core::target::Difficulty; +use grin_core::core::Transaction; +use keychain::{Identifier, Keychain}; +use wallet::libwallet::build::{self, input, output, with_fee}; +use wallet::libwallet::reward; + +// utility producing a transaction with 2 inputs and a single outputs +pub fn tx2i1o() -> Transaction { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + build::transaction_with_offset( + vec![ + input(10, key_id1), + input(11, key_id2), + output(19, key_id3), + with_fee(2), + ], + &keychain, + ).unwrap() +} + +// utility producing a transaction with a single input and output +pub fn tx1i1o() -> Transaction { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + + build::transaction_with_offset( + vec![input(5, key_id1), output(3, key_id2), with_fee(2)], + &keychain, + ).unwrap() +} + +// utility producing a transaction with a single input +// and two outputs (one change output) +// Note: this tx has an "offset" kernel +pub fn tx1i2o() -> Transaction { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + build::transaction_with_offset( + vec![ + input(6, key_id1), + output(3, key_id2), + output(1, key_id3), + with_fee(2), + ], + &keychain, + ).unwrap() +} + +// utility to create a block without worrying about the key or previous +// header +pub fn new_block( + txs: Vec<&Transaction>, + 
keychain: &Keychain, + previous_header: &BlockHeader, + key_id: &Identifier, +) -> Block { + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward_output = reward::output(keychain, &key_id, fees, previous_header.height).unwrap(); + Block::new(&previous_header, txs, Difficulty::one(), reward_output).unwrap() +} + +// utility producing a transaction that spends an output with the provided +// value and blinding key +pub fn txspend1i1o( + v: u64, + keychain: &Keychain, + key_id1: Identifier, + key_id2: Identifier, +) -> Transaction { + build::transaction( + vec![input(v, key_id1), output(3, key_id2), with_fee(2)], + &keychain, + ).unwrap() +} diff --git a/core/tests/core.rs b/core/tests/core.rs new file mode 100644 index 000000000..055a212b9 --- /dev/null +++ b/core/tests/core.rs @@ -0,0 +1,504 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Core tests +extern crate grin_core; +extern crate grin_keychain as keychain; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +pub mod common; + +use grin_core::core::hash::{Hashed, ZERO_HASH}; +use grin_core::core::block::BlockHeader; +use grin_core::core::{aggregate, aggregate_with_cut_through, deaggregate, KernelFeatures, Output, + Transaction}; +use wallet::libwallet::build::{self, initial_tx, input, output, with_excess, with_fee, + with_lock_height}; +use grin_core::core::block::Error::KernelLockHeight; +use grin_core::ser; +use keychain::Keychain; +use util::{secp_static, static_secp_instance}; +use common::{new_block, tx1i1o, tx1i2o, tx2i1o}; + +#[test] +fn simple_tx_ser() { + let tx = tx2i1o(); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &tx).expect("serialization failed"); + let target_len = 954; + assert_eq!(vec.len(), target_len,); +} + +#[test] +fn simple_tx_ser_deser() { + let tx = tx2i1o(); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &tx).expect("serialization failed"); + let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); + assert_eq!(dtx.fee(), 2); + assert_eq!(dtx.inputs.len(), 2); + assert_eq!(dtx.outputs.len(), 1); + assert_eq!(tx.hash(), dtx.hash()); +} + +#[test] +fn tx_double_ser_deser() { + // checks serializing doesn't mess up the tx and produces consistent results + let btx = tx2i1o(); + + let mut vec = Vec::new(); + assert!(ser::serialize(&mut vec, &btx).is_ok()); + let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); + + let mut vec2 = Vec::new(); + assert!(ser::serialize(&mut vec2, &btx).is_ok()); + let dtx2: Transaction = ser::deserialize(&mut &vec2[..]).unwrap(); + + assert_eq!(btx.hash(), dtx.hash()); + assert_eq!(dtx.hash(), dtx2.hash()); +} + +#[test] +#[should_panic(expected = "InvalidSecretKey")] +fn test_zero_commit_fails() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + + // blinding should fail 
as signing with a zero r*G shouldn't work + build::transaction( + vec![ + input(10, key_id1.clone()), + output(9, key_id1.clone()), + with_fee(1), + ], + &keychain, + ).unwrap(); +} + +#[test] +fn build_tx_kernel() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + // first build a valid tx with corresponding blinding factor + let tx = build::transaction( + vec![ + input(10, key_id1), + output(5, key_id2), + output(3, key_id3), + with_fee(2), + ], + &keychain, + ).unwrap(); + + // check the tx is valid + tx.validate().unwrap(); + + // check the kernel is also itself valid + assert_eq!(tx.kernels.len(), 1); + let kern = &tx.kernels[0]; + kern.verify().unwrap(); + + assert_eq!(kern.features, KernelFeatures::DEFAULT_KERNEL); + assert_eq!(kern.fee, tx.fee()); +} + +// Combine two transactions into one big transaction (with multiple kernels) +// and check it still validates. 
+#[test] +fn transaction_cut_through() { + let tx1 = tx1i2o(); + let tx2 = tx2i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + + // now build a "cut_through" tx from tx1 and tx2 + let tx3 = aggregate_with_cut_through(vec![tx1, tx2]).unwrap(); + + assert!(tx3.validate().is_ok()); +} + +// Attempt to deaggregate a multi-kernel transaction in a different way +#[test] +fn multi_kernel_transaction_deaggregation() { + let tx1 = tx1i1o(); + let tx2 = tx1i1o(); + let tx3 = tx1i1o(); + let tx4 = tx1i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + assert!(tx3.validate().is_ok()); + assert!(tx4.validate().is_ok()); + + let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap(); + let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); + let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap(); + + assert!(tx1234.validate().is_ok()); + assert!(tx12.validate().is_ok()); + assert!(tx34.validate().is_ok()); + + let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap(); + assert!(deaggregated_tx34.validate().is_ok()); + assert_eq!(tx34, deaggregated_tx34); + + let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap(); + + assert!(deaggregated_tx12.validate().is_ok()); + assert_eq!(tx12, deaggregated_tx12); +} + +#[test] +fn multi_kernel_transaction_deaggregation_2() { + let tx1 = tx1i1o(); + let tx2 = tx1i1o(); + let tx3 = tx1i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + assert!(tx3.validate().is_ok()); + + let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); + let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); + + assert!(tx123.validate().is_ok()); + assert!(tx12.validate().is_ok()); + + let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap(); + assert!(deaggregated_tx3.validate().is_ok()); + assert_eq!(tx3, deaggregated_tx3); +} + 
+#[test] +fn multi_kernel_transaction_deaggregation_3() { + let tx1 = tx1i1o(); + let tx2 = tx1i1o(); + let tx3 = tx1i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + assert!(tx3.validate().is_ok()); + + let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap(); + let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap(); + let tx2 = aggregate(vec![tx2.clone()]).unwrap(); + + assert!(tx123.validate().is_ok()); + assert!(tx2.validate().is_ok()); + + let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap(); + assert!(deaggregated_tx13.validate().is_ok()); + assert_eq!(tx13, deaggregated_tx13); +} + +#[test] +fn multi_kernel_transaction_deaggregation_4() { + let tx1 = tx1i1o(); + let tx2 = tx1i1o(); + let tx3 = tx1i1o(); + let tx4 = tx1i1o(); + let tx5 = tx1i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + assert!(tx3.validate().is_ok()); + assert!(tx4.validate().is_ok()); + assert!(tx5.validate().is_ok()); + + let tx12345 = aggregate(vec![ + tx1.clone(), + tx2.clone(), + tx3.clone(), + tx4.clone(), + tx5.clone(), + ]).unwrap(); + assert!(tx12345.validate().is_ok()); + + let deaggregated_tx5 = deaggregate( + tx12345.clone(), + vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()], + ).unwrap(); + assert!(deaggregated_tx5.validate().is_ok()); + assert_eq!(tx5, deaggregated_tx5); +} + +#[test] +fn multi_kernel_transaction_deaggregation_5() { + let tx1 = tx1i1o(); + let tx2 = tx1i1o(); + let tx3 = tx1i1o(); + let tx4 = tx1i1o(); + let tx5 = tx1i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + assert!(tx3.validate().is_ok()); + assert!(tx4.validate().is_ok()); + assert!(tx5.validate().is_ok()); + + let tx12345 = aggregate(vec![ + tx1.clone(), + tx2.clone(), + tx3.clone(), + tx4.clone(), + tx5.clone(), + ]).unwrap(); + let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); + let tx34 = aggregate(vec![tx3.clone(), 
tx4.clone()]).unwrap(); + + assert!(tx12345.validate().is_ok()); + + let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap(); + assert!(deaggregated_tx5.validate().is_ok()); + assert_eq!(tx5, deaggregated_tx5); +} + +// Attempt to deaggregate a multi-kernel transaction +#[test] +fn basic_transaction_deaggregation() { + let tx1 = tx1i2o(); + let tx2 = tx2i1o(); + + assert!(tx1.validate().is_ok()); + assert!(tx2.validate().is_ok()); + + // now build a "cut_through" tx from tx1 and tx2 + let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap(); + + assert!(tx3.validate().is_ok()); + + let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap(); + + assert!(deaggregated_tx1.validate().is_ok()); + assert_eq!(tx1, deaggregated_tx1); + + let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap(); + + assert!(deaggregated_tx2.validate().is_ok()); + assert_eq!(tx2, deaggregated_tx2); +} + +#[test] +fn hash_output() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + let tx = build::transaction( + vec![ + input(75, key_id1), + output(42, key_id2), + output(32, key_id3), + with_fee(1), + ], + &keychain, + ).unwrap(); + let h = tx.outputs[0].hash(); + assert!(h != ZERO_HASH); + let h2 = tx.outputs[1].hash(); + assert!(h != h2); +} + +#[ignore] +#[test] +fn blind_tx() { + let btx = tx2i1o(); + assert!(btx.validate().is_ok()); + + // Ignored for bullet proofs, because calling range_proof_info + // with a bullet proof causes painful errors + + // checks that the range proof on our blind output is sufficiently hiding + let Output { proof, .. 
} = btx.outputs[0]; + + let secp = static_secp_instance(); + let secp = secp.lock().unwrap(); + let info = secp.range_proof_info(proof); + + assert!(info.min == 0); + assert!(info.max == u64::max_value()); +} + +#[test] +fn tx_hash_diff() { + let btx1 = tx2i1o(); + let btx2 = tx1i1o(); + + if btx1.hash() == btx2.hash() { + panic!("diff txs have same hash") + } +} + +/// Simulate the standard exchange between 2 parties when creating a basic +/// 2 inputs, 2 outputs transaction. +#[test] +fn tx_build_exchange() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + let key_id4 = keychain.derive_key_id(4).unwrap(); + + let (tx_alice, blind_sum) = { + // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they + // become inputs in the new transaction + let (in1, in2) = (input(4, key_id1), input(3, key_id2)); + + // Alice builds her transaction, with change, which also produces the sum + // of blinding factors before they're obscured. + let (tx, sum) = + build::partial_transaction(vec![in1, in2, output(1, key_id3), with_fee(2)], &keychain) + .unwrap(); + + (tx, sum) + }; + + // From now on, Bob only has the obscured transaction and the sum of + // blinding factors. He adds his output, finalizes the transaction so it's + // ready for broadcast. 
+ let tx_final = build::transaction( + vec![ + initial_tx(tx_alice), + with_excess(blind_sum), + output(4, key_id4), + ], + &keychain, + ).unwrap(); + + tx_final.validate().unwrap(); +} + +#[test] +fn reward_empty_block() { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let zero_commit = secp_static::commit_to_zero_value(); + + let previous_header = BlockHeader::default(); + + let b = new_block(vec![], &keychain, &previous_header, &key_id); + + b.cut_through() + .validate(&zero_commit, &zero_commit) + .unwrap(); +} + +#[test] +fn reward_with_tx_block() { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let zero_commit = secp_static::commit_to_zero_value(); + + let mut tx1 = tx2i1o(); + tx1.validate().unwrap(); + + let previous_header = BlockHeader::default(); + + let block = new_block(vec![&mut tx1], &keychain, &previous_header, &key_id); + block + .cut_through() + .validate(&zero_commit, &zero_commit) + .unwrap(); +} + +#[test] +fn simple_block() { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let zero_commit = secp_static::commit_to_zero_value(); + + let mut tx1 = tx2i1o(); + let mut tx2 = tx1i1o(); + + let previous_header = BlockHeader::default(); + let b = new_block( + vec![&mut tx1, &mut tx2], + &keychain, + &previous_header, + &key_id, + ); + + b.validate(&zero_commit, &zero_commit).unwrap(); +} + +#[test] +fn test_block_with_timelocked_tx() { + let keychain = keychain::Keychain::from_random_seed().unwrap(); + + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + let zero_commit = secp_static::commit_to_zero_value(); + + // first check we can add a timelocked tx where lock height matches current + // block height and that the resulting 
block is valid + let tx1 = build::transaction( + vec![ + input(5, key_id1.clone()), + output(3, key_id2.clone()), + with_fee(2), + with_lock_height(1), + ], + &keychain, + ).unwrap(); + + let previous_header = BlockHeader::default(); + + let b = new_block(vec![&tx1], &keychain, &previous_header, &key_id3.clone()); + b.validate(&zero_commit, &zero_commit).unwrap(); + + // now try adding a timelocked tx where lock height is greater than current + // block height + let tx1 = build::transaction( + vec![ + input(5, key_id1.clone()), + output(3, key_id2.clone()), + with_fee(2), + with_lock_height(2), + ], + &keychain, + ).unwrap(); + + let previous_header = BlockHeader::default(); + let b = new_block(vec![&tx1], &keychain, &previous_header, &key_id3.clone()); + + match b.validate(&zero_commit, &zero_commit) { + Err(KernelLockHeight(height)) => { + assert_eq!(height, 2); + } + _ => panic!("expecting KernelLockHeight error here"), + } +} + +#[test] +pub fn test_verify_1i1o_sig() { + let tx = tx1i1o(); + tx.validate().unwrap(); +} + +#[test] +pub fn test_verify_2i1o_sig() { + let tx = tx2i1o(); + tx.validate().unwrap(); +} diff --git a/core/tests/transaction.rs b/core/tests/transaction.rs new file mode 100644 index 000000000..e9cc172e6 --- /dev/null +++ b/core/tests/transaction.rs @@ -0,0 +1,50 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Transaction integration tests +extern crate grin_core; +extern crate grin_keychain as keychain; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +pub mod common; + +use grin_core::core::{Output, OutputFeatures}; +use grin_core::ser; +use keychain::Keychain; +use util::secp; +use wallet::libwallet::proof; + +#[test] +fn test_output_ser_deser() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + let commit = keychain.commit(5, &key_id).unwrap(); + let msg = secp::pedersen::ProofMessage::empty(); + let proof = proof::create(&keychain, 5, &key_id, commit, None, msg).unwrap(); + + let out = Output { + features: OutputFeatures::DEFAULT_OUTPUT, + commit: commit, + proof: proof, + }; + + let mut vec = vec![]; + ser::serialize(&mut vec, &out).expect("serialized failed"); + let dout: Output = ser::deserialize(&mut &vec[..]).unwrap(); + + assert_eq!(dout.features, OutputFeatures::DEFAULT_OUTPUT); + assert_eq!(dout.commit, out.commit); + assert_eq!(dout.proof, out.proof); +} diff --git a/keychain/src/keychain.rs b/keychain/src/keychain.rs index eaaa4bbb9..5bf4567a3 100644 --- a/keychain/src/keychain.rs +++ b/keychain/src/keychain.rs @@ -19,13 +19,10 @@ use std::{error, fmt}; use util::secp; use util::secp::{Message, Secp256k1, Signature}; -use util::secp::key::{PublicKey, SecretKey}; -use util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof}; -use util::secp::aggsig; +use util::secp::key::SecretKey; +use util::secp::pedersen::Commitment; use util::logger::LOGGER; -use util::kernel_sig_msg; use blake2; -use uuid::Uuid; use blind::{BlindSum, BlindingFactor}; use extkey::{self, Identifier}; @@ -66,23 +63,10 @@ impl fmt::Display for Error { } } -/// Holds internal information about an aggsig operation -#[derive(Clone, Debug)] -pub struct AggSigTxContext { - // Secret key (of which public is shared) - pub sec_key: SecretKey, - // Secret nonce (of which public is shared) - // 
(basically a SecretKey) - pub sec_nonce: SecretKey, - // If I'm the recipient, store my outputs between invocations (that I need to sum) - pub output_ids: Vec, -} - #[derive(Clone, Debug)] pub struct Keychain { secp: Secp256k1, extkey: extkey::ExtendedKey, - pub aggsig_contexts: Arc>>>, key_overrides: HashMap, key_derivation_cache: Arc>>, } @@ -111,7 +95,6 @@ impl Keychain { let keychain = Keychain { secp: secp, extkey: extkey, - aggsig_contexts: Arc::new(RwLock::new(None)), key_overrides: HashMap::new(), key_derivation_cache: Arc::new(RwLock::new(HashMap::new())), }; @@ -130,7 +113,7 @@ impl Keychain { Ok(child_key.key_id) } - fn derived_key(&self, key_id: &Identifier) -> Result { + pub fn derived_key(&self, key_id: &Identifier) -> Result { // first check our overrides and just return the key if we have one in there if let Some(key) = self.key_overrides.get(key_id) { trace!( @@ -214,93 +197,6 @@ impl Keychain { Ok(commit) } - pub fn rangeproof_create_nonce(&self, commit: &Commitment) -> SecretKey { - // hash(commit|masterkey) as nonce - let root_key = self.root_key_id().to_bytes(); - let res = blake2::blake2b::blake2b(32, &commit.0, &root_key); - let res = res.as_bytes(); - let mut ret_val = [0; 32]; - for i in 0..res.len() { - ret_val[i] = res[i]; - } - SecretKey::from_slice(&self.secp, &ret_val).unwrap() - } - - pub fn range_proof( - &self, - amount: u64, - key_id: &Identifier, - _commit: Commitment, - extra_data: Option>, - msg: ProofMessage, - ) -> Result { - let commit = self.commit(amount, key_id)?; - let skey = self.derived_key(key_id)?; - let nonce = self.rangeproof_create_nonce(&commit); - if msg.len() == 0 { - return Ok(self.secp - .bullet_proof(amount, skey, nonce, extra_data, None)); - } else { - if msg.len() != 64 { - error!(LOGGER, "Bullet proof message must be 64 bytes."); - return Err(Error::RangeProof( - "Bullet proof message must be 64 bytes".to_string(), - )); - } - } - return Ok(self.secp - .bullet_proof(amount, skey, nonce, extra_data, 
Some(msg))); - } - - pub fn verify_range_proof( - secp: &Secp256k1, - commit: Commitment, - proof: RangeProof, - extra_data: Option>, - ) -> Result<(), secp::Error> { - let result = secp.verify_bullet_proof(commit, proof, extra_data); - match result { - Ok(_) => Ok(()), - Err(e) => Err(e), - } - } - - pub fn rewind_range_proof( - &self, - key_id: &Identifier, - commit: Commitment, - extra_data: Option>, - proof: RangeProof, - ) -> Result { - let skey = self.derived_key(key_id)?; - let nonce = self.rangeproof_create_nonce(&commit); - let proof_message = self.secp - .unwind_bullet_proof(commit, skey, nonce, extra_data, proof); - let proof_info = match proof_message { - Ok(p) => ProofInfo { - success: true, - value: 0, - message: p, - mlen: 0, - min: 0, - max: 0, - exp: 0, - mantissa: 0, - }, - Err(_) => ProofInfo { - success: false, - value: 0, - message: ProofMessage::empty(), - mlen: 0, - min: 0, - max: 0, - exp: 0, - mantissa: 0, - }, - }; - return Ok(proof_info); - } - pub fn blind_sum(&self, blind_sum: &BlindSum) -> Result { let mut pos_keys: Vec = blind_sum .positive_key_ids @@ -330,236 +226,6 @@ impl Keychain { Ok(BlindingFactor::from_secret_key(sum)) } - pub fn aggsig_create_context( - &self, - transaction_id: &Uuid, - sec_key: SecretKey, - ) -> Result<(), Error> { - let mut contexts = self.aggsig_contexts.write().unwrap(); - if contexts.is_none() { - *contexts = Some(HashMap::new()) - } - if contexts.as_mut().unwrap().contains_key(transaction_id) { - return Err(Error::Transaction(String::from( - "Duplication transaction id", - ))); - } - contexts.as_mut().unwrap().insert( - transaction_id.clone(), - AggSigTxContext { - sec_key: sec_key, - sec_nonce: aggsig::export_secnonce_single(&self.secp).unwrap(), - output_ids: vec![], - }, - ); - Ok(()) - } - - /// Tracks an output contributing to my excess value (if it needs to - /// be kept between invocations - pub fn aggsig_add_output(&self, transaction_id: &Uuid, output_id: &Identifier) { - let mut agg_contexts = 
self.aggsig_contexts.write().unwrap(); - let mut agg_contexts_local = agg_contexts.as_mut().unwrap().clone(); - let mut agg_context = agg_contexts_local.get(transaction_id).unwrap().clone(); - agg_context.output_ids.push(output_id.clone()); - agg_contexts_local.insert(transaction_id.clone(), agg_context); - *agg_contexts = Some(agg_contexts_local); - } - - /// Returns all stored outputs - pub fn aggsig_get_outputs(&self, transaction_id: &Uuid) -> Vec { - let contexts = self.aggsig_contexts.clone(); - let contexts_read = contexts.read().unwrap(); - let agg_context = contexts_read.as_ref().unwrap(); - let agg_context_return = agg_context.get(transaction_id); - agg_context_return.unwrap().output_ids.clone() - } - - /// Returns private key, private nonce - pub fn aggsig_get_private_keys(&self, transaction_id: &Uuid) -> (SecretKey, SecretKey) { - let contexts = self.aggsig_contexts.clone(); - let contexts_read = contexts.read().unwrap(); - let agg_context = contexts_read.as_ref().unwrap(); - let agg_context_return = agg_context.get(transaction_id); - ( - agg_context_return.unwrap().sec_key.clone(), - agg_context_return.unwrap().sec_nonce.clone(), - ) - } - - /// Returns public key, public nonce - pub fn aggsig_get_public_keys(&self, transaction_id: &Uuid) -> (PublicKey, PublicKey) { - let contexts = self.aggsig_contexts.clone(); - let contexts_read = contexts.read().unwrap(); - let agg_context = contexts_read.as_ref().unwrap(); - let agg_context_return = agg_context.get(transaction_id); - ( - PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_key).unwrap(), - PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_nonce).unwrap(), - ) - } - - /// Note 'secnonce' here is used to perform the signature, while 'pubnonce' just allows you to - /// provide a custom public nonce to include while calculating e - /// nonce_sum is the sum used to decide whether secnonce should be inverted during sig time - pub fn aggsig_sign_single( - &self, - 
transaction_id: &Uuid, - msg: &Message, - secnonce: Option<&SecretKey>, - pubnonce: Option<&PublicKey>, - nonce_sum: Option<&PublicKey>, - ) -> Result { - let contexts = self.aggsig_contexts.clone(); - let contexts_read = contexts.read().unwrap(); - let agg_context = contexts_read.as_ref().unwrap(); - let agg_context_return = agg_context.get(transaction_id); - let sig = aggsig::sign_single( - &self.secp, - msg, - &agg_context_return.unwrap().sec_key, - secnonce, - pubnonce, - nonce_sum, - )?; - Ok(sig) - } - - //Verifies an aggsig signature - pub fn aggsig_verify_single( - &self, - sig: &Signature, - msg: &Message, - pubnonce: Option<&PublicKey>, - pubkey: &PublicKey, - is_partial: bool, - ) -> bool { - aggsig::verify_single(&self.secp, sig, msg, pubnonce, pubkey, is_partial) - } - - //Verifies other final sig corresponds with what we're expecting - pub fn aggsig_verify_final_sig_build_msg( - &self, - sig: &Signature, - pubkey: &PublicKey, - fee: u64, - lock_height: u64, - ) -> bool { - let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap(); - self.aggsig_verify_single(sig, &msg, None, pubkey, true) - } - - //Verifies other party's sig corresponds with what we're expecting - pub fn aggsig_verify_partial_sig( - &self, - transaction_id: &Uuid, - sig: &Signature, - other_pub_nonce: &PublicKey, - pubkey: &PublicKey, - fee: u64, - lock_height: u64, - ) -> bool { - let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); - let mut nonce_sum = other_pub_nonce.clone(); - let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce); - let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap(); - - self.aggsig_verify_single(sig, &msg, Some(&nonce_sum), pubkey, true) - } - - pub fn aggsig_calculate_partial_sig( - &self, - transaction_id: &Uuid, - other_pub_nonce: &PublicKey, - fee: u64, - lock_height: u64, - ) -> Result { - // Add public nonces kR*G + kS*G - let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); 
- let mut nonce_sum = other_pub_nonce.clone(); - let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce); - let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?; - - //Now calculate signature using message M=fee, nonce in e=nonce_sum - self.aggsig_sign_single( - transaction_id, - &msg, - Some(&sec_nonce), - Some(&nonce_sum), - Some(&nonce_sum), - ) - } - - /// Helper function to calculate final signature - pub fn aggsig_calculate_final_sig( - &self, - transaction_id: &Uuid, - their_sig: &Signature, - our_sig: &Signature, - their_pub_nonce: &PublicKey, - ) -> Result { - // Add public nonces kR*G + kS*G - let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); - let mut nonce_sum = their_pub_nonce.clone(); - let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce); - let sig = aggsig::add_signatures_single(&self.secp, their_sig, our_sig, &nonce_sum)?; - Ok(sig) - } - - /// Helper function to calculate final public key - pub fn aggsig_calculate_final_pubkey( - &self, - transaction_id: &Uuid, - their_public_key: &PublicKey, - ) -> Result { - let (our_sec_key, _) = self.aggsig_get_private_keys(transaction_id); - let mut pk_sum = their_public_key.clone(); - let _ = pk_sum.add_exp_assign(&self.secp, &our_sec_key); - Ok(pk_sum) - } - - /// Just a simple sig, creates its own nonce, etc - pub fn aggsig_sign_from_key_id( - &self, - msg: &Message, - key_id: &Identifier, - ) -> Result { - let skey = self.derived_key(key_id)?; - let sig = aggsig::sign_single(&self.secp, &msg, &skey, None, None, None)?; - Ok(sig) - } - - /// Verifies a sig given a commitment - pub fn aggsig_verify_single_from_commit( - secp: &Secp256k1, - sig: &Signature, - msg: &Message, - commit: &Commitment, - ) -> bool { - // Extract the pubkey, unfortunately we need this hack for now, (we just hope - // one is valid) TODO: Create better secp256k1 API to do this - let pubkeys = commit.to_two_pubkeys(secp); - let mut valid = false; - for i in 0..pubkeys.len() { - valid = 
aggsig::verify_single(secp, &sig, &msg, None, &pubkeys[i], false); - if valid { - break; - } - } - valid - } - - /// Just a simple sig, creates its own nonce, etc - pub fn aggsig_sign_with_blinding( - secp: &Secp256k1, - msg: &Message, - blinding: &BlindingFactor, - ) -> Result { - let skey = &blinding.secret_key(&secp)?; - let sig = aggsig::sign_single(secp, &msg, skey, None, None, None)?; - Ok(sig) - } - pub fn sign(&self, msg: &Message, key_id: &Identifier) -> Result { let skey = self.derived_key(key_id)?; let sig = self.secp.sign(msg, &skey)?; @@ -583,14 +249,8 @@ impl Keychain { #[cfg(test)] mod test { - use rand::thread_rng; - - use uuid::Uuid; - use keychain::{BlindSum, BlindingFactor, Keychain}; - use util::kernel_sig_msg; use util::secp; - use util::secp::pedersen::ProofMessage; use util::secp::key::SecretKey; #[test] @@ -667,411 +327,4 @@ mod test { BlindingFactor::from_secret_key(skey3), ); } - - #[test] - fn aggsig_sender_receiver_interaction() { - let sender_keychain = Keychain::from_random_seed().unwrap(); - let receiver_keychain = Keychain::from_random_seed().unwrap(); - - // tx identifier for wallet interaction - let tx_id = Uuid::new_v4(); - - // Calculate the kernel excess here for convenience. - // Normally this would happen during transaction building. 
- let kernel_excess = { - let skey1 = sender_keychain - .derived_key(&sender_keychain.derive_key_id(1).unwrap()) - .unwrap(); - - let skey2 = receiver_keychain - .derived_key(&receiver_keychain.derive_key_id(1).unwrap()) - .unwrap(); - - let keychain = Keychain::from_random_seed().unwrap(); - let blinding_factor = keychain - .blind_sum(&BlindSum::new() - .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) - .add_blinding_factor(BlindingFactor::from_secret_key(skey2))) - .unwrap(); - - keychain - .secp - .commit(0, blinding_factor.secret_key(&keychain.secp).unwrap()) - .unwrap() - }; - - // sender starts the tx interaction - let (sender_pub_excess, sender_pub_nonce) = { - let keychain = sender_keychain.clone(); - - let skey = keychain - .derived_key(&keychain.derive_key_id(1).unwrap()) - .unwrap(); - - // dealing with an input here so we need to negate the blinding_factor - // rather than use it as is - let blinding_factor = keychain - .blind_sum(&BlindSum::new() - .sub_blinding_factor(BlindingFactor::from_secret_key(skey))) - .unwrap(); - - let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); - - keychain.aggsig_create_context(&tx_id, blind).unwrap(); - keychain.aggsig_get_public_keys(&tx_id) - }; - - // receiver receives partial tx - let (receiver_pub_excess, receiver_pub_nonce, sig_part) = { - let keychain = receiver_keychain.clone(); - let key_id = keychain.derive_key_id(1).unwrap(); - - // let blind = blind_sum.secret_key(&keychain.secp())?; - let blind = keychain.derived_key(&key_id).unwrap(); - - keychain.aggsig_create_context(&tx_id, blind).unwrap(); - let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id); - keychain.aggsig_add_output(&tx_id, &key_id); - - let sig_part = keychain - .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0) - .unwrap(); - (pub_excess, pub_nonce, sig_part) - }; - - // check the sender can verify the partial signature - // received in the response back from the receiver - { - let 
keychain = sender_keychain.clone(); - let sig_verifies = keychain.aggsig_verify_partial_sig( - &tx_id, - &sig_part, - &receiver_pub_nonce, - &receiver_pub_excess, - 0, - 0, - ); - assert!(sig_verifies); - } - - // now sender signs with their key - let sender_sig_part = { - let keychain = sender_keychain.clone(); - keychain - .aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0) - .unwrap() - }; - - // check the receiver can verify the partial signature - // received by the sender - { - let keychain = receiver_keychain.clone(); - let sig_verifies = keychain.aggsig_verify_partial_sig( - &tx_id, - &sender_sig_part, - &sender_pub_nonce, - &sender_pub_excess, - 0, - 0, - ); - assert!(sig_verifies); - } - - // Receiver now builds final signature from sender and receiver parts - let (final_sig, final_pubkey) = { - let keychain = receiver_keychain.clone(); - - // Receiver recreates their partial sig (we do not maintain state from earlier) - let our_sig_part = keychain - .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0) - .unwrap(); - - // Receiver now generates final signature from the two parts - let final_sig = keychain - .aggsig_calculate_final_sig( - &tx_id, - &sender_sig_part, - &our_sig_part, - &sender_pub_nonce, - ) - .unwrap(); - - // Receiver calculates the final public key (to verify sig later) - let final_pubkey = keychain - .aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess) - .unwrap(); - - (final_sig, final_pubkey) - }; - - // Receiver checks the final signature verifies - { - let keychain = receiver_keychain.clone(); - - // Receiver check the final signature verifies - let sig_verifies = - keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0); - assert!(sig_verifies); - } - - // Check we can verify the sig using the kernel excess - { - let keychain = Keychain::from_random_seed().unwrap(); - - let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap(); - - let sig_verifies = 
Keychain::aggsig_verify_single_from_commit( - &keychain.secp, - &final_sig, - &msg, - &kernel_excess, - ); - - assert!(sig_verifies); - } - } - - #[test] - fn aggsig_sender_receiver_interaction_offset() { - let sender_keychain = Keychain::from_random_seed().unwrap(); - let receiver_keychain = Keychain::from_random_seed().unwrap(); - - // tx identifier for wallet interaction - let tx_id = Uuid::new_v4(); - - // This is the kernel offset that we use to split the key - // Summing these at the block level prevents the - // kernels from being used to reconstruct (or identify) individual transactions - let kernel_offset = SecretKey::new(&sender_keychain.secp(), &mut thread_rng()); - - // Calculate the kernel excess here for convenience. - // Normally this would happen during transaction building. - let kernel_excess = { - let skey1 = sender_keychain - .derived_key(&sender_keychain.derive_key_id(1).unwrap()) - .unwrap(); - - let skey2 = receiver_keychain - .derived_key(&receiver_keychain.derive_key_id(1).unwrap()) - .unwrap(); - - let keychain = Keychain::from_random_seed().unwrap(); - let blinding_factor = keychain - .blind_sum(&BlindSum::new() - .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) - .add_blinding_factor(BlindingFactor::from_secret_key(skey2)) - // subtract the kernel offset here like as would when - // verifying a kernel signature - .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))) - .unwrap(); - - keychain - .secp - .commit(0, blinding_factor.secret_key(&keychain.secp).unwrap()) - .unwrap() - }; - - // sender starts the tx interaction - let (sender_pub_excess, sender_pub_nonce) = { - let keychain = sender_keychain.clone(); - - let skey = keychain - .derived_key(&keychain.derive_key_id(1).unwrap()) - .unwrap(); - - // dealing with an input here so we need to negate the blinding_factor - // rather than use it as is - let blinding_factor = keychain - .blind_sum(&BlindSum::new() - 
.sub_blinding_factor(BlindingFactor::from_secret_key(skey)) - // subtract the kernel offset to create an aggsig context - // with our "split" key - .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))) - .unwrap(); - - let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); - - keychain.aggsig_create_context(&tx_id, blind).unwrap(); - keychain.aggsig_get_public_keys(&tx_id) - }; - - // receiver receives partial tx - let (receiver_pub_excess, receiver_pub_nonce, sig_part) = { - let keychain = receiver_keychain.clone(); - let key_id = keychain.derive_key_id(1).unwrap(); - - let blind = keychain.derived_key(&key_id).unwrap(); - - keychain.aggsig_create_context(&tx_id, blind).unwrap(); - let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id); - keychain.aggsig_add_output(&tx_id, &key_id); - - let sig_part = keychain - .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0) - .unwrap(); - (pub_excess, pub_nonce, sig_part) - }; - - // check the sender can verify the partial signature - // received in the response back from the receiver - { - let keychain = sender_keychain.clone(); - let sig_verifies = keychain.aggsig_verify_partial_sig( - &tx_id, - &sig_part, - &receiver_pub_nonce, - &receiver_pub_excess, - 0, - 0, - ); - assert!(sig_verifies); - } - - // now sender signs with their key - let sender_sig_part = { - let keychain = sender_keychain.clone(); - keychain - .aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0) - .unwrap() - }; - - // check the receiver can verify the partial signature - // received by the sender - { - let keychain = receiver_keychain.clone(); - let sig_verifies = keychain.aggsig_verify_partial_sig( - &tx_id, - &sender_sig_part, - &sender_pub_nonce, - &sender_pub_excess, - 0, - 0, - ); - assert!(sig_verifies); - } - - // Receiver now builds final signature from sender and receiver parts - let (final_sig, final_pubkey) = { - let keychain = receiver_keychain.clone(); - - // Receiver 
recreates their partial sig (we do not maintain state from earlier) - let our_sig_part = keychain - .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0) - .unwrap(); - - // Receiver now generates final signature from the two parts - let final_sig = keychain - .aggsig_calculate_final_sig( - &tx_id, - &sender_sig_part, - &our_sig_part, - &sender_pub_nonce, - ) - .unwrap(); - - // Receiver calculates the final public key (to verify sig later) - let final_pubkey = keychain - .aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess) - .unwrap(); - - (final_sig, final_pubkey) - }; - - // Receiver checks the final signature verifies - { - let keychain = receiver_keychain.clone(); - - // Receiver check the final signature verifies - let sig_verifies = - keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0); - assert!(sig_verifies); - } - - // Check we can verify the sig using the kernel excess - { - let keychain = Keychain::from_random_seed().unwrap(); - - let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap(); - - let sig_verifies = Keychain::aggsig_verify_single_from_commit( - &keychain.secp, - &final_sig, - &msg, - &kernel_excess, - ); - - assert!(sig_verifies); - } - } - - #[test] - fn test_rewind_range_proof() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - let commit = keychain.commit(5, &key_id).unwrap(); - let msg = ProofMessage::from_bytes(&[0u8; 64]); - let extra_data = [99u8; 64]; - - let proof = keychain - .range_proof(5, &key_id, commit, Some(extra_data.to_vec().clone()), msg) - .unwrap(); - let proof_info = keychain - .rewind_range_proof(&key_id, commit, Some(extra_data.to_vec().clone()), proof) - .unwrap(); - - assert_eq!(proof_info.success, true); - - // now check the recovered message is "empty" (but not truncated) i.e. 
all - // zeroes - //Value is in the message in this case - assert_eq!( - proof_info.message, - secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE]) - ); - - let key_id2 = keychain.derive_key_id(2).unwrap(); - - // cannot rewind with a different nonce - let proof_info = keychain - .rewind_range_proof(&key_id2, commit, Some(extra_data.to_vec().clone()), proof) - .unwrap(); - // With bullet proofs, if you provide the wrong nonce you'll get gibberish back - // as opposed to a failure to recover the message - assert_ne!( - proof_info.message, - secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE]) - ); - assert_eq!(proof_info.value, 0); - - // cannot rewind with a commitment to the same value using a different key - let commit2 = keychain.commit(5, &key_id2).unwrap(); - let proof_info = keychain - .rewind_range_proof(&key_id, commit2, Some(extra_data.to_vec().clone()), proof) - .unwrap(); - assert_eq!(proof_info.success, false); - assert_eq!(proof_info.value, 0); - - // cannot rewind with a commitment to a different value - let commit3 = keychain.commit(4, &key_id).unwrap(); - let proof_info = keychain - .rewind_range_proof(&key_id, commit3, Some(extra_data.to_vec().clone()), proof) - .unwrap(); - assert_eq!(proof_info.success, false); - assert_eq!(proof_info.value, 0); - - // cannot rewind with wrong extra committed data - let commit3 = keychain.commit(4, &key_id).unwrap(); - let wrong_extra_data = [98u8; 64]; - let _should_err = keychain - .rewind_range_proof( - &key_id, - commit3, - Some(wrong_extra_data.to_vec().clone()), - proof, - ) - .unwrap(); - - assert_eq!(proof_info.success, false); - assert_eq!(proof_info.value, 0); - } } diff --git a/keychain/src/lib.rs b/keychain/src/lib.rs index 12a347d4e..9024bd036 100644 --- a/keychain/src/lib.rs +++ b/keychain/src/lib.rs @@ -26,10 +26,10 @@ extern crate serde_json; extern crate slog; extern crate uuid; -mod blind; -mod extkey; +pub mod blind; +pub mod 
extkey; pub use blind::{BlindSum, BlindingFactor}; pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE}; pub mod keychain; -pub use keychain::{AggSigTxContext, Error, Keychain}; +pub use keychain::{Error, Keychain}; diff --git a/pool/Cargo.toml b/pool/Cargo.toml index 6a68884ac..4ffec69a4 100644 --- a/pool/Cargo.toml +++ b/pool/Cargo.toml @@ -16,3 +16,6 @@ time = "0.1" grin_core = { path = "../core" } grin_keychain = { path = "../keychain" } grin_util = { path = "../util" } + +[dev-dependencies] +grin_wallet = { path = "../wallet" } diff --git a/pool/src/blockchain.rs b/pool/src/blockchain.rs index eba48f602..772ac5c0f 100644 --- a/pool/src/blockchain.rs +++ b/pool/src/blockchain.rs @@ -1,11 +1,25 @@ -// This file is (hopefully) temporary. +// Copyright 2018 The Grin Developers // -// It contains a trait based on (but not exactly equal to) the trait defined -// for the blockchain Output set, discussed at -// https://github.com/ignopeverell/grin/issues/29, and a dummy implementation -// of said trait. -// Notably, OutputDiff has been left off, and the question of how to handle -// abstract return types has been deferred. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This file is (hopefully) temporary. +//! +//! It contains a trait based on (but not exactly equal to) the trait defined +//! for the blockchain Output set, discussed at +//! https://github.com/ignopeverell/grin/issues/29, and a dummy implementation +//! of said trait. +//! 
Notably, OutputDiff has been left off, and the question of how to handle +//! abstract return types has been deferred. use std::collections::HashMap; use std::clone::Clone; @@ -25,16 +39,19 @@ pub struct DummyOutputSet { #[allow(dead_code)] impl DummyOutputSet { + /// Empty output set pub fn empty() -> DummyOutputSet { DummyOutputSet { outputs: HashMap::new(), } } + /// roots pub fn root(&self) -> hash::Hash { hash::ZERO_HASH } + /// apply a block pub fn apply(&self, b: &block::Block) -> DummyOutputSet { let mut new_outputs = self.outputs.clone(); @@ -49,6 +66,7 @@ impl DummyOutputSet { } } + /// create with block pub fn with_block(&mut self, b: &block::Block) { for input in &b.inputs { self.outputs.remove(&input.commitment()); @@ -58,12 +76,14 @@ impl DummyOutputSet { } } + /// rewind pub fn rewind(&self, _: &block::Block) -> DummyOutputSet { DummyOutputSet { outputs: HashMap::new(), } } + /// get an output pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { self.outputs.get(output_ref) } @@ -74,7 +94,7 @@ impl DummyOutputSet { } } - // only for testing: add an output to the map + /// only for testing: add an output to the map pub fn with_output(&self, output: transaction::Output) -> DummyOutputSet { let mut new_outputs = self.outputs.clone(); new_outputs.insert(output.commitment(), output); @@ -94,6 +114,7 @@ pub struct DummyChainImpl { #[allow(dead_code)] impl DummyChainImpl { + /// new dummy chain pub fn new() -> DummyChainImpl { DummyChainImpl { output: RwLock::new(DummyOutputSet { @@ -152,8 +173,12 @@ impl DummyChain for DummyChainImpl { } } +/// Dummy chain trait pub trait DummyChain: BlockChain { + /// update output set fn update_output_set(&mut self, new_output: DummyOutputSet); + /// apply a block fn apply_block(&self, b: &block::Block); + /// store header fn store_head_header(&self, block_header: &block::BlockHeader); } diff --git a/pool/src/graph.rs b/pool/src/graph.rs index 1bf13ba47..746b5880a 100644 --- 
a/pool/src/graph.rs +++ b/pool/src/graph.rs @@ -139,13 +139,14 @@ impl fmt::Debug for Edge { /// The generic graph container. Both graphs, the pool and orphans, embed this /// structure and add additional capability on top of it. pub struct DirectedGraph { - edges: HashMap, - vertices: Vec, - - // A small optimization: keeping roots (vertices with in-degree 0) in a - // separate list makes topological sort a bit faster. (This is true for - // Kahn's, not sure about other implementations) - roots: Vec, + /// Edges + pub edges: HashMap, + /// Vertices + pub vertices: Vec, + /// A small optimization: keeping roots (vertices with in-degree 0) in a + /// separate list makes topological sort a bit faster. (This is true for + /// Kahn's, not sure about other implementations) + pub roots: Vec, } impl DirectedGraph { @@ -290,76 +291,3 @@ pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash // core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs) tx.hash() } - -#[cfg(test)] -mod tests { - use super::*; - use keychain::Keychain; - use rand; - use core::core::OutputFeatures; - use core::core::transaction::ProofMessageElements; - - #[test] - fn test_add_entry() { - let keychain = Keychain::from_random_seed().unwrap(); - let key_id1 = keychain.derive_key_id(1).unwrap(); - let key_id2 = keychain.derive_key_id(2).unwrap(); - let key_id3 = keychain.derive_key_id(3).unwrap(); - - let output_commit = keychain.commit(70, &key_id1).unwrap(); - - let inputs = vec![ - core::transaction::Input::new( - OutputFeatures::DEFAULT_OUTPUT, - keychain.commit(50, &key_id2).unwrap(), - None, - None, - ), - core::transaction::Input::new( - OutputFeatures::DEFAULT_OUTPUT, - keychain.commit(25, &key_id3).unwrap(), - None, - None, - ), - ]; - - let msg = ProofMessageElements::new(100, &key_id1); - - let output = core::transaction::Output { - features: OutputFeatures::DEFAULT_OUTPUT, - commit: output_commit, - proof: keychain - .range_proof(100, &key_id1, 
output_commit, None, msg.to_proof_message()) - .unwrap(), - }; - - let kernel = core::transaction::TxKernel::empty() - .with_fee(5) - .with_lock_height(0); - - let test_transaction = - core::transaction::Transaction::new(inputs, vec![output], vec![kernel]); - - let test_pool_entry = PoolEntry::new(&test_transaction); - - let incoming_edge_1 = Edge::new( - Some(random_hash()), - Some(core::hash::ZERO_HASH), - OutputIdentifier::from_output(&output), - ); - - let mut test_graph = DirectedGraph::empty(); - - test_graph.add_entry(test_pool_entry, vec![incoming_edge_1]); - - assert_eq!(test_graph.vertices.len(), 1); - assert_eq!(test_graph.roots.len(), 0); - assert_eq!(test_graph.edges.len(), 1); - } - - /// For testing/debugging: a random tx hash - fn random_hash() -> core::hash::Hash { - let hash_bytes: [u8; 32] = rand::random(); - core::hash::Hash(hash_bytes) - } -} diff --git a/pool/src/lib.rs b/pool/src/lib.rs index c18478a77..a2305004f 100644 --- a/pool/src/lib.rs +++ b/pool/src/lib.rs @@ -22,9 +22,9 @@ #![warn(missing_docs)] pub mod graph; -mod types; -mod blockchain; -mod pool; +pub mod types; +pub mod blockchain; +pub mod pool; extern crate blake2_rfc as blake2; extern crate grin_core as core; diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 9d5a2ca32..70bcd32f5 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -36,7 +36,8 @@ pub use graph; /// The transactions HashMap holds ownership of all transactions in the pool, /// keyed by their transaction hash. 
pub struct TransactionPool { - config: PoolConfig, + /// configuration + pub config: PoolConfig, /// All transactions hash in the stempool with a time attached to ensure /// propagation pub time_stem_transactions: HashMap, @@ -50,11 +51,11 @@ pub struct TransactionPool { pub pool: Pool, /// Orphans in the pool pub orphans: Orphans, - - // blockchain is a DummyChain, for now, which mimics what the future - // chain will offer to the pool - blockchain: Arc, - adapter: Arc, + /// blockchain is a DummyChain, for now, which mimics what the future + /// chain will offer to the pool + pub blockchain: Arc, + /// Adapter + pub adapter: Arc, } impl TransactionPool @@ -923,1117 +924,3 @@ where Ok(()) } } - -#[cfg(test)] -mod tests { - use super::*; - use core::core::build; - use core::global; - use blockchain::{DummyChain, DummyChainImpl, DummyOutputSet}; - use keychain::Keychain; - use std::sync::{Arc, RwLock}; - use blake2; - use core::global::ChainTypes; - use core::core::Proof; - use core::core::hash::{Hash, Hashed}; - use core::core::pmmr::MerkleProof; - use core::core::target::Difficulty; - use core::core::transaction::{self, ProofMessageElements}; - use types::PoolError::InvalidTx; - - macro_rules! expect_output_parent { - ($pool:expr, $expected:pat, $( $output:expr ),+ ) => { - $( - match $pool - .search_for_best_output( - &OutputIdentifier::from_output(&test_output($output)) - ) { - $expected => {}, - x => panic!( - "Unexpected result from output search for {:?}, got {:?}", - $output, - x, - ), - }; - )* - } - } - - #[test] - /// A basic test; add a pair of transactions to the pool. - fn test_basic_pool_add() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); - // We want this transaction to be rooted in the blockchain. 
- let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)) - .with_output(test_output(8)); - - // Prepare a second transaction, connected to the first. - let child_transaction = test_transaction(vec![11, 3], vec![12]); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the transaction rooted in the blockchain - let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, false); - if result.is_err() { - panic!("got an error adding parent tx: {:?}", result.err().unwrap()); - } - - // Now, add the transaction connected as a child to the first - let child_result = - write_pool.add_to_memory_pool(test_source(), child_transaction, false); - - if child_result.is_err() { - panic!( - "got an error adding child tx: {:?}", - child_result.err().unwrap() - ); - } - } - - // Now take the read lock and use a few exposed methods to check consistency - { - let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 2); - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); - expect_output_parent!(read_pool, Parent::BlockTransaction, 8); - expect_output_parent!(read_pool, Parent::Unknown, 20); - } - } - - #[test] - /// Attempt to add a multi kernel transaction to the mempool - fn test_multikernel_pool_add() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 
3]); - // We want this transaction to be rooted in the blockchain. - let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)) - .with_output(test_output(8)); - - // Prepare a second transaction, connected to the first. - let child_transaction = test_transaction(vec![11, 3], vec![12]); - - let txs = vec![parent_transaction, child_transaction]; - let multi_kernel_transaction = transaction::aggregate_with_cut_through(txs).unwrap(); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the transaction rooted in the blockchain - let result = - write_pool.add_to_memory_pool(test_source(), multi_kernel_transaction, false); - if result.is_err() { - panic!( - "got an error adding multi-kernel tx: {:?}", - result.err().unwrap() - ); - } - } - - // Now take the read lock and use a few exposed methods to check consistency - { - let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 1); - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5); - expect_output_parent!(read_pool, Parent::BlockTransaction, 8); - expect_output_parent!(read_pool, Parent::Unknown, 11, 3, 20); - } - } - - #[test] - /// Attempt to deaggregate a multi_kernel transaction - /// Push the parent transaction in the mempool then send a multikernel tx containing it and a - /// child transaction In the end, the pool should contain both transactions. 
- fn test_multikernel_deaggregate() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let transaction1 = test_transaction_with_offset(vec![5], vec![1]); - println!("{:?}", transaction1.validate()); - let transaction2 = test_transaction_with_offset(vec![8], vec![2]); - - // We want these transactions to be rooted in the blockchain. - let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(8)); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the first transaction - let result = write_pool.add_to_memory_pool(test_source(), transaction1.clone(), false); - if result.is_err() { - panic!("got an error adding tx 1: {:?}", result.err().unwrap()); - } - } - - let txs = vec![transaction1.clone(), transaction2.clone()]; - let multi_kernel_transaction = transaction::aggregate(txs).unwrap(); - - let found_tx: Transaction; - // Now take the read lock and attempt to deaggregate the transaction - { - let read_pool = pool.read().unwrap(); - found_tx = read_pool - .deaggregate_transaction(multi_kernel_transaction) - .unwrap(); - - // Test the retrived transactions - assert_eq!(transaction2, found_tx); - } - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 1); - - // First, add the transaction rooted in the blockchain - let result = write_pool.add_to_memory_pool(test_source(), found_tx.clone(), false); - if result.is_err() { - panic!("got an error adding child tx: {:?}", result.err().unwrap()); - } - } 
- - // Now take the read lock and use a few exposed methods to check consistency - { - let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 2); - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 1, 2); - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5, 8); - expect_output_parent!(read_pool, Parent::Unknown, 11, 3, 20); - } - } - - #[test] - /// Attempt to add a bad multi kernel transaction to the mempool should get - /// rejected - fn test_bad_multikernel_pool_add() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); - // We want this transaction to be rooted in the blockchain. - let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)) - .with_output(test_output(8)); - - // Prepare a second transaction, connected to the first. - let child_transaction1 = test_transaction(vec![11, 3], vec![12]); - let child_transaction2 = test_transaction(vec![11, 3], vec![10]); - - let txs = vec![parent_transaction, child_transaction1, child_transaction2]; - let bad_multi_kernel_transaction = transaction::aggregate(txs).unwrap(); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. 
- let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the transaction rooted in the blockchain - let result = - write_pool.add_to_memory_pool(test_source(), bad_multi_kernel_transaction, false); - assert!(result.is_err()); - } - } - - #[test] - /// A basic test; add a transaction to the pool and add the child to the - /// stempool - fn test_pool_stempool_add() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); - // We want this transaction to be rooted in the blockchain. - let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)) - .with_output(test_output(8)); - - // Prepare a second transaction, connected to the first. - let child_transaction = test_transaction(vec![11, 3], vec![12]); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. 
- let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the transaction rooted in the blockchain - let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, false); - if result.is_err() { - panic!("got an error adding parent tx: {:?}", result.err().unwrap()); - } - - // Now, add the transaction connected as a child to the first - let child_result = - write_pool.add_to_memory_pool(test_source(), child_transaction, true); - - if child_result.is_err() { - panic!( - "got an error adding child tx: {:?}", - child_result.err().unwrap() - ); - } - } - - // Now take the read lock and use a few exposed methods to check consistency - { - let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 2); - if read_pool.stempool.num_transactions() == 0 { - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); - } else { - expect_output_parent!(read_pool, Parent::StemPoolTransaction{tx_ref: _}, 12); - } - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); - expect_output_parent!(read_pool, Parent::BlockTransaction, 8); - expect_output_parent!(read_pool, Parent::Unknown, 20); - } - } - - #[test] - /// A basic test; add a transaction to the stempool and one the regular transaction pool - /// Child transaction should be added to the stempool. - fn test_stempool_pool_add() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); - // We want this transaction to be rooted in the blockchain. 
- let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)) - .with_output(test_output(8)); - - // Prepare a second transaction, connected to the first. - let child_transaction = test_transaction(vec![11, 3], vec![12]); - - dummy_chain.update_output_set(new_output); - - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First, add the transaction rooted in the blockchain - let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, true); - if result.is_err() { - panic!("got an error adding parent tx: {:?}", result.err().unwrap()); - } - - // Now, add the transaction connected as a child to the first - let child_result = - write_pool.add_to_memory_pool(test_source(), child_transaction, false); - if child_result.is_err() { - panic!( - "got an error adding child tx: {:?}", - child_result.err().unwrap() - ); - } - } - - // Now take the read lock and use a few exposed methods to check consistency - { - let read_pool = pool.read().unwrap(); - // First transaction is a stem transaction. 
In that case the child transaction - // should be force stem - assert_eq!(read_pool.total_size(), 2); - // Parent has been directly fluffed - if read_pool.stempool.num_transactions() == 0 { - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); - } else { - expect_output_parent!(read_pool, Parent::StemPoolTransaction{tx_ref: _}, 12); - } - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); - expect_output_parent!(read_pool, Parent::BlockTransaction, 8); - expect_output_parent!(read_pool, Parent::Unknown, 20); - } - } - - #[test] - /// Testing various expected error conditions - pub fn test_pool_add_error() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let new_output = DummyOutputSet::empty() - .with_output(test_output(5)) - .with_output(test_output(6)) - .with_output(test_output(7)); - - dummy_chain.update_output_set(new_output); - - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // First expected failure: duplicate output - let duplicate_tx = test_transaction(vec![5, 6], vec![7]); - - match write_pool.add_to_memory_pool(test_source(), duplicate_tx, false) { - Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"), - Err(x) => { - match x { - PoolError::DuplicateOutput { - other_tx, - in_chain, - output, - } => if other_tx.is_some() || !in_chain - || output != test_output(7).commitment() - { - panic!("Unexpected parameter in DuplicateOutput: {:?}", x); - }, - _ => panic!( - "Unexpected error when adding duplicate output transaction: {:?}", - x - ), - }; - } - }; - - // To test DoubleSpend and AlreadyInPool conditions, we need to add - // a valid transaction. 
- let valid_transaction = test_transaction(vec![5, 6], vec![9]); - - match write_pool.add_to_memory_pool(test_source(), valid_transaction.clone(), false) { - Ok(_) => {} - Err(x) => panic!("Unexpected error while adding a valid transaction: {:?}", x), - }; - - // Now, test a DoubleSpend by consuming the same blockchain unspent - // as valid_transaction: - let double_spend_transaction = test_transaction(vec![6], vec![2]); - - match write_pool.add_to_memory_pool(test_source(), double_spend_transaction, false) { - Ok(_) => panic!("Expected error when adding double spend, got Ok"), - Err(x) => { - match x { - PoolError::DoubleSpend { - other_tx: _, - spent_output, - } => if spent_output != test_output(6).commitment() { - panic!("Unexpected parameter in DoubleSpend: {:?}", x); - }, - _ => panic!( - "Unexpected error when adding double spend transaction: {:?}", - x - ), - }; - } - }; - - // Note, this used to work as expected, but after aggsig implementation - // creating another transaction with the same inputs/outputs doesn't create - // the same hash ID due to the random nonces in an aggsig. This - // will instead throw a (correct as well) Already spent error. 
An AlreadyInPool - // error can only come up in the case of the exact same transaction being - // added - //let already_in_pool = test_transaction(vec![5, 6], vec![9]); - - match write_pool.add_to_memory_pool(test_source(), valid_transaction, false) { - Ok(_) => panic!("Expected error when adding already in pool, got Ok"), - Err(x) => { - match x { - PoolError::AlreadyInPool => {} - _ => panic!("Unexpected error when adding already in pool tx: {:?}", x), - }; - } - }; - - assert_eq!(write_pool.total_size(), 1); - - // now attempt to add a timelocked tx to the pool - // should fail as invalid based on current height - let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10); - match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1, false) { - Err(PoolError::ImmatureTransaction { - lock_height: height, - }) => { - assert_eq!(height, 10); - } - Err(e) => panic!("expected ImmatureTransaction error here - {:?}", e), - Ok(_) => panic!("expected ImmatureTransaction error here"), - }; - } - } - - #[test] - fn test_immature_coinbase() { - global::set_mining_mode(ChainTypes::AutomatedTesting); - let mut dummy_chain = DummyChainImpl::new(); - let proof_size = global::proofsize(); - - let lock_height = 1 + global::coinbase_maturity(); - assert_eq!(lock_height, 4); - - let coinbase_output = test_coinbase_output(15); - dummy_chain.update_output_set(DummyOutputSet::empty().with_output(coinbase_output)); - - let chain_ref = Arc::new(dummy_chain); - let pool = RwLock::new(test_setup(&chain_ref)); - - { - let mut write_pool = pool.write().unwrap(); - - let coinbase_header = block::BlockHeader { - height: 1, - pow: Proof::random(proof_size), - ..block::BlockHeader::default() - }; - chain_ref.store_head_header(&coinbase_header); - - let head_header = block::BlockHeader { - height: 2, - pow: Proof::random(proof_size), - ..block::BlockHeader::default() - }; - chain_ref.store_head_header(&head_header); - - let txn = test_transaction_with_coinbase_input(15, 
coinbase_header.hash(), vec![10, 3]); - let result = write_pool.add_to_memory_pool(test_source(), txn, false); - match result { - Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {} - _ => panic!("expected ImmatureCoinbase error here"), - }; - - let head_header = block::BlockHeader { - height: 4, - ..block::BlockHeader::default() - }; - chain_ref.store_head_header(&head_header); - - let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]); - let result = write_pool.add_to_memory_pool(test_source(), txn, false); - match result { - Ok(_) => {} - Err(_) => panic!("this should not return an error here"), - }; - } - } - - #[test] - /// Testing an expected orphan - fn test_add_orphan() { - // TODO we need a test here - } - - #[test] - fn test_zero_confirmation_reconciliation() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - // single Output - let new_output = DummyOutputSet::empty().with_output(test_output(100)); - - dummy_chain.update_output_set(new_output); - let chain_ref = Arc::new(dummy_chain); - let pool = RwLock::new(test_setup(&chain_ref)); - - // now create two txs - // tx1 spends the Output - // tx2 spends output from tx1 - let tx1 = test_transaction(vec![100], vec![90]); - let tx2 = test_transaction(vec![90], vec![80]); - - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - // now add both txs to the pool (tx2 spends tx1 with zero confirmations) - // both should be accepted if tx1 added before tx2 - write_pool - .add_to_memory_pool(test_source(), tx1, false) - .unwrap(); - write_pool - .add_to_memory_pool(test_source(), tx2, false) - .unwrap(); - - assert_eq!(write_pool.pool_size(), 2); - } - - let txs: Vec; - { - let read_pool = pool.read().unwrap(); - let mut mineable_txs = read_pool.prepare_mineable_transactions(3); - txs = 
mineable_txs.drain(..).map(|x| *x).collect(); - - // confirm we can preparing both txs for mining here - // one root tx in the pool, and one non-root vertex in the pool - assert_eq!(txs.len(), 2); - } - - let keychain = Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - - // now "mine" the block passing in the mineable txs from earlier - let block = block::Block::new( - &block::BlockHeader::default(), - txs.iter().collect(), - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - - // now apply the block to ensure the chainstate is updated before we reconcile - chain_ref.apply_block(&block); - - // now reconcile the block - // we should evict both txs here - { - let mut write_pool = pool.write().unwrap(); - let evicted_transactions = write_pool.reconcile_block(&block).unwrap(); - assert_eq!(evicted_transactions.len(), 2); - } - - // check the pool is consistent after reconciling the block - // we should have zero txs in the pool (neither roots nor non-roots) - { - let read_pool = pool.write().unwrap(); - assert_eq!(read_pool.pool.len_vertices(), 0); - assert_eq!(read_pool.pool.len_roots(), 0); - } - } - - #[test] - /// Testing block reconciliation - fn test_block_reconciliation() { - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let new_output = DummyOutputSet::empty() - .with_output(test_output(10)) - .with_output(test_output(20)) - .with_output(test_output(30)) - .with_output(test_output(40)); - - dummy_chain.update_output_set(new_output); - - let chain_ref = Arc::new(dummy_chain); - - let pool = RwLock::new(test_setup(&chain_ref)); - - // Preparation: We will introduce a three root pool transactions. - // 1. A transaction that should be invalidated because it is exactly - // contained in the block. - // 2. 
A transaction that should be invalidated because the input is - // consumed in the block, although it is not exactly consumed. - // 3. A transaction that should remain after block reconciliation. - let block_transaction = test_transaction(vec![10], vec![8]); - let conflict_transaction = test_transaction(vec![20], vec![12, 6]); - let valid_transaction = test_transaction(vec![30], vec![13, 15]); - - // We will also introduce a few children: - // 4. A transaction that descends from transaction 1, that is in - // turn exactly contained in the block. - let block_child = test_transaction(vec![8], vec![5, 1]); - // 5. A transaction that descends from transaction 4, that is not - // contained in the block at all and should be valid after - // reconciliation. - let pool_child = test_transaction(vec![5], vec![3]); - // 6. A transaction that descends from transaction 2 that does not - // conflict with anything in the block in any way, but should be - // invalidated (orphaned). - let conflict_child = test_transaction(vec![12], vec![2]); - // 7. A transaction that descends from transaction 2 that should be - // valid due to its inputs being satisfied by the block. - let conflict_valid_child = test_transaction(vec![6], vec![4]); - // 8. A transaction that descends from transaction 3 that should be - // invalidated due to an output conflict. - let valid_child_conflict = test_transaction(vec![13], vec![9]); - // 9. A transaction that descends from transaction 3 that should remain - // valid after reconciliation. - let valid_child_valid = test_transaction(vec![15], vec![11]); - // 10. A transaction that descends from both transaction 6 and - // transaction 9 - let mixed_child = test_transaction(vec![2, 11], vec![7]); - - // Add transactions. - // Note: There are some ordering constraints that must be followed here - // until orphans is 100% implemented. Once the orphans process has - // stabilized, we can mix these up to exercise that path a bit. 
- let mut txs_to_add = vec![ - block_transaction, - conflict_transaction, - valid_transaction, - block_child, - pool_child, - conflict_child, - conflict_valid_child, - valid_child_conflict, - valid_child_valid, - mixed_child, - ]; - - let expected_pool_size = txs_to_add.len(); - - // First we add the above transactions to the pool; all should be - // accepted. - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - for tx in txs_to_add.drain(..) { - write_pool - .add_to_memory_pool(test_source(), tx, false) - .unwrap(); - } - - assert_eq!(write_pool.total_size(), expected_pool_size); - } - // Now we prepare the block that will cause the above condition. - // First, the transactions we want in the block: - // - Copy of 1 - let block_tx_1 = test_transaction(vec![10], vec![8]); - // - Conflict w/ 2, satisfies 7 - let block_tx_2 = test_transaction(vec![20], vec![6]); - // - Copy of 4 - let block_tx_3 = test_transaction(vec![8], vec![5, 1]); - // - Output conflict w/ 8 - let block_tx_4 = test_transaction(vec![40], vec![9, 1]); - let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4]; - - let keychain = Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - - let block = block::Block::new( - &block::BlockHeader::default(), - block_transactions, - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - - chain_ref.apply_block(&block); - - // Block reconciliation - { - let mut write_pool = pool.write().unwrap(); - - let evicted_transactions = write_pool.reconcile_block(&block); - - assert!(evicted_transactions.is_ok()); - - assert_eq!(evicted_transactions.unwrap().len(), 6); - - // TODO: Txids are not yet deterministic. When they are, we should - // check the specific transactions that were evicted. - } - - // Using the pool's methods to validate a few end conditions. 
- { - let read_pool = pool.read().unwrap(); - - assert_eq!(read_pool.total_size(), 4); - - // We should have available blockchain outputs - expect_output_parent!(read_pool, Parent::BlockTransaction, 9, 1); - - // We should have spent blockchain outputs - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5, 6); - - // We should have spent pool references - expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 15); - - // We should have unspent pool references - expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 3, 11, 13); - - // References internal to the block should be unknown - expect_output_parent!(read_pool, Parent::Unknown, 8); - - // Evicted transactions should have unknown outputs - expect_output_parent!(read_pool, Parent::Unknown, 2, 7); - } - } - - #[test] - /// Test transaction selection and block building. - fn test_block_building() { - // Add a handful of transactions - let mut dummy_chain = DummyChainImpl::new(); - let head_header = block::BlockHeader { - height: 1, - ..block::BlockHeader::default() - }; - dummy_chain.store_head_header(&head_header); - - let new_output = DummyOutputSet::empty() - .with_output(test_output(10)) - .with_output(test_output(20)) - .with_output(test_output(30)) - .with_output(test_output(40)); - - dummy_chain.update_output_set(new_output); - - let chain_ref = Arc::new(dummy_chain); - - let pool = RwLock::new(test_setup(&chain_ref)); - - let root_tx_1 = test_transaction(vec![10, 20], vec![24]); - let root_tx_2 = test_transaction(vec![30], vec![28]); - let root_tx_3 = test_transaction(vec![40], vec![38]); - - let child_tx_1 = test_transaction(vec![24], vec![22]); - let child_tx_2 = test_transaction(vec![38], vec![32]); - - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); - - assert!( - write_pool - .add_to_memory_pool(test_source(), root_tx_1, false) - .is_ok() - ); - assert!( - write_pool - .add_to_memory_pool(test_source(), root_tx_2, 
false) - .is_ok() - ); - assert!( - write_pool - .add_to_memory_pool(test_source(), root_tx_3, false) - .is_ok() - ); - assert!( - write_pool - .add_to_memory_pool(test_source(), child_tx_1, false) - .is_ok() - ); - assert!( - write_pool - .add_to_memory_pool(test_source(), child_tx_2, false) - .is_ok() - ); - - assert_eq!(write_pool.total_size(), 5); - } - - // Request blocks - let block: block::Block; - let mut txs: Vec>; - { - let read_pool = pool.read().unwrap(); - txs = read_pool.prepare_mineable_transactions(3); - assert_eq!(txs.len(), 3); - // TODO: This is ugly, either make block::new take owned - // txs instead of mut refs, or change - // prepare_mineable_transactions to return mut refs - let block_txs: Vec = txs.drain(..).map(|x| *x).collect(); - let tx_refs = block_txs.iter().collect(); - - let keychain = Keychain::from_random_seed().unwrap(); - let key_id = keychain.derive_key_id(1).unwrap(); - block = block::Block::new( - &block::BlockHeader::default(), - tx_refs, - &keychain, - &key_id, - Difficulty::one(), - ).unwrap(); - } - - chain_ref.apply_block(&block); - // Reconcile block - { - let mut write_pool = pool.write().unwrap(); - - let evicted_transactions = write_pool.reconcile_block(&block); - - assert!(evicted_transactions.is_ok()); - - assert_eq!(evicted_transactions.unwrap().len(), 3); - assert_eq!(write_pool.total_size(), 2); - } - } - - fn test_setup(dummy_chain: &Arc) -> TransactionPool { - TransactionPool { - config: PoolConfig { - accept_fee_base: 0, - max_pool_size: 10_000, - dandelion_probability: 90, - dandelion_embargo: 30, - }, - time_stem_transactions: HashMap::new(), - stem_transactions: HashMap::new(), - transactions: HashMap::new(), - stempool: Pool::empty(), - pool: Pool::empty(), - orphans: Orphans::empty(), - blockchain: dummy_chain.clone(), - adapter: Arc::new(NoopAdapter {}), - } - } - - /// Cobble together a test transaction for testing the transaction pool. - /// - /// Connectivity here is the most important element. 
- /// Every output is given a blinding key equal to its value, so that the - /// entire commitment can be derived deterministically from just the value. - /// - /// Fees are the remainder between input and output values, - /// so the numbers should make sense. - fn test_transaction( - input_values: Vec, - output_values: Vec, - ) -> transaction::Transaction { - let keychain = keychain_for_tests(); - - let input_sum = input_values.iter().sum::() as i64; - let output_sum = output_values.iter().sum::() as i64; - - let fees: i64 = input_sum - output_sum; - assert!(fees >= 0); - - let mut tx_elements = Vec::new(); - - for input_value in input_values { - let key_id = keychain.derive_key_id(input_value as u32).unwrap(); - tx_elements.push(build::input(input_value, key_id)); - } - - for output_value in output_values { - let key_id = keychain.derive_key_id(output_value as u32).unwrap(); - tx_elements.push(build::output(output_value, key_id)); - } - tx_elements.push(build::with_fee(fees as u64)); - - build::transaction(tx_elements, &keychain).unwrap() - } - - fn test_transaction_with_offset( - input_values: Vec, - output_values: Vec, - ) -> transaction::Transaction { - let keychain = keychain_for_tests(); - - let input_sum = input_values.iter().sum::() as i64; - let output_sum = output_values.iter().sum::() as i64; - - let fees: i64 = input_sum - output_sum; - assert!(fees >= 0); - - let mut tx_elements = Vec::new(); - - for input_value in input_values { - let key_id = keychain.derive_key_id(input_value as u32).unwrap(); - tx_elements.push(build::input(input_value, key_id)); - } - - for output_value in output_values { - let key_id = keychain.derive_key_id(output_value as u32).unwrap(); - tx_elements.push(build::output(output_value, key_id)); - } - tx_elements.push(build::with_fee(fees as u64)); - - build::transaction_with_offset(tx_elements, &keychain).unwrap() - } - - fn test_transaction_with_coinbase_input( - input_value: u64, - input_block_hash: Hash, - output_values: Vec, 
- ) -> transaction::Transaction { - let keychain = keychain_for_tests(); - - let output_sum = output_values.iter().sum::() as i64; - - let fees: i64 = input_value as i64 - output_sum; - assert!(fees >= 0); - - let mut tx_elements = Vec::new(); - - let merkle_proof = MerkleProof { - node: Hash::default(), - root: Hash::default(), - peaks: vec![Hash::default()], - ..MerkleProof::default() - }; - - let key_id = keychain.derive_key_id(input_value as u32).unwrap(); - tx_elements.push(build::coinbase_input( - input_value, - input_block_hash, - merkle_proof, - key_id, - )); - - for output_value in output_values { - let key_id = keychain.derive_key_id(output_value as u32).unwrap(); - tx_elements.push(build::output(output_value, key_id)); - } - tx_elements.push(build::with_fee(fees as u64)); - - build::transaction(tx_elements, &keychain).unwrap() - } - - /// Very un-dry way of building a vanilla tx and adding a lock_height to it. - /// TODO - rethink this. - fn timelocked_transaction( - input_values: Vec, - output_values: Vec, - lock_height: u64, - ) -> transaction::Transaction { - let keychain = keychain_for_tests(); - - let fees: i64 = - input_values.iter().sum::() as i64 - output_values.iter().sum::() as i64; - assert!(fees >= 0); - - let mut tx_elements = Vec::new(); - - for input_value in input_values { - let key_id = keychain.derive_key_id(input_value as u32).unwrap(); - tx_elements.push(build::input(input_value, key_id)); - } - - for output_value in output_values { - let key_id = keychain.derive_key_id(output_value as u32).unwrap(); - tx_elements.push(build::output(output_value, key_id)); - } - tx_elements.push(build::with_fee(fees as u64)); - - tx_elements.push(build::with_lock_height(lock_height)); - build::transaction(tx_elements, &keychain).unwrap() - } - - /// Deterministically generate an output defined by our test scheme - fn test_output(value: u64) -> transaction::Output { - let keychain = keychain_for_tests(); - let key_id = keychain.derive_key_id(value as 
u32).unwrap(); - let msg = ProofMessageElements::new(value, &key_id); - let commit = keychain.commit(value, &key_id).unwrap(); - let proof = keychain - .range_proof(value, &key_id, commit, None, msg.to_proof_message()) - .unwrap(); - transaction::Output { - features: transaction::OutputFeatures::DEFAULT_OUTPUT, - commit: commit, - proof: proof, - } - } - - /// Deterministically generate a coinbase output defined by our test scheme - fn test_coinbase_output(value: u64) -> transaction::Output { - let keychain = keychain_for_tests(); - let key_id = keychain.derive_key_id(value as u32).unwrap(); - let msg = ProofMessageElements::new(value, &key_id); - let commit = keychain.commit(value, &key_id).unwrap(); - let proof = keychain - .range_proof(value, &key_id, commit, None, msg.to_proof_message()) - .unwrap(); - transaction::Output { - features: transaction::OutputFeatures::COINBASE_OUTPUT, - commit: commit, - proof: proof, - } - } - - fn keychain_for_tests() -> Keychain { - let seed = "pool_tests"; - let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes()); - Keychain::from_seed(seed.as_bytes()).unwrap() - } - - /// A generic TxSource representing a test - fn test_source() -> TxSource { - TxSource { - debug_name: "test".to_string(), - identifier: "127.0.0.1".to_string(), - } - } -} diff --git a/pool/src/types.rs b/pool/src/types.rs index a95b29db8..ad76460d3 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -92,11 +92,25 @@ pub struct TxSource { /// This enum describes the parent for a given input of a transaction. 
#[derive(Clone)] pub enum Parent { + /// Unknown Unknown, + /// Block Transaction BlockTransaction, - PoolTransaction { tx_ref: hash::Hash }, - StemPoolTransaction { tx_ref: hash::Hash }, - AlreadySpent { other_tx: hash::Hash }, + /// Pool Transaction + PoolTransaction { + /// Transaction reference + tx_ref: hash::Hash, + }, + /// StemPool Transaction + StemPoolTransaction { + /// Transaction reference + tx_ref: hash::Hash, + }, + /// AlreadySpent + AlreadySpent { + /// Other transaction reference + other_tx: hash::Hash, + }, } impl fmt::Debug for Parent { @@ -244,6 +258,7 @@ pub struct Pool { } impl Pool { + /// Return an empty pool pub fn empty() -> Pool { Pool { graph: graph::DirectedGraph::empty(), @@ -263,18 +278,22 @@ impl Pool { .map(|x| x.destination_hash().unwrap()) } + /// Length of roots pub fn len_roots(&self) -> usize { self.graph.len_roots() } + /// Length of vertices pub fn len_vertices(&self) -> usize { self.graph.len_vertices() } + /// Consumed outputs pub fn get_blockchain_spent(&self, c: &Commitment) -> Option<&graph::Edge> { self.consumed_blockchain_outputs.get(c) } + /// Add transaction pub fn add_pool_transaction( &mut self, pool_entry: graph::PoolEntry, @@ -309,9 +328,9 @@ impl Pool { } } - // More relax way for stempool transaction in order to accept scenario such as: - // Parent is in mempool, child is allowed in stempool - // + /// More relax way for stempool transaction in order to accept scenario such as: + /// Parent is in mempool, child is allowed in stempool + /// pub fn add_stempool_transaction( &mut self, pool_entry: graph::PoolEntry, @@ -342,10 +361,12 @@ impl Pool { } } + /// Update roots pub fn update_roots(&mut self) { self.graph.update_roots() } + /// Remove transaction pub fn remove_pool_transaction( &mut self, tx: &transaction::Transaction, @@ -429,6 +450,7 @@ pub struct Orphans { } impl Orphans { + /// empty set pub fn empty() -> Orphans { Orphans { graph: graph::DirectedGraph::empty(), @@ -450,6 +472,7 @@ impl Orphans { 
.map(|x| x.destination_hash().unwrap()) } + /// unknown output pub fn get_unknown_output(&self, output: &Commitment) -> Option<&graph::Edge> { self.missing_outputs.get(output) } @@ -571,14 +594,17 @@ pub trait TransactionGraphContainer { self.get_internal_spent_output(c) } + /// number of root transactions fn num_root_transactions(&self) -> usize { self.get_graph().len_roots() } + /// number of transactions fn num_transactions(&self) -> usize { self.get_graph().len_vertices() } + /// number of output edges fn num_output_edges(&self) -> usize { self.get_graph().len_edges() } diff --git a/pool/tests/graph.rs b/pool/tests/graph.rs new file mode 100644 index 000000000..0dcf878aa --- /dev/null +++ b/pool/tests/graph.rs @@ -0,0 +1,96 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Top-level Graph tests + +extern crate grin_core as core; +extern crate grin_keychain as keychain; +extern crate grin_pool as pool; +extern crate grin_wallet as wallet; + +extern crate rand; + +use keychain::Keychain; +use core::core::OutputFeatures; +use core::core::transaction::ProofMessageElements; +use wallet::libwallet::proof; + +#[test] +fn test_add_entry() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id1 = keychain.derive_key_id(1).unwrap(); + let key_id2 = keychain.derive_key_id(2).unwrap(); + let key_id3 = keychain.derive_key_id(3).unwrap(); + + let output_commit = keychain.commit(70, &key_id1).unwrap(); + + let inputs = vec![ + core::core::transaction::Input::new( + OutputFeatures::DEFAULT_OUTPUT, + keychain.commit(50, &key_id2).unwrap(), + None, + None, + ), + core::core::transaction::Input::new( + OutputFeatures::DEFAULT_OUTPUT, + keychain.commit(25, &key_id3).unwrap(), + None, + None, + ), + ]; + + let msg = ProofMessageElements::new(100, &key_id1); + + let output = core::core::transaction::Output { + features: OutputFeatures::DEFAULT_OUTPUT, + commit: output_commit, + proof: proof::create( + &keychain, + 100, + &key_id1, + output_commit, + None, + msg.to_proof_message(), + ).unwrap(), + }; + + let kernel = core::core::transaction::TxKernel::empty() + .with_fee(5) + .with_lock_height(0); + + let test_transaction = + core::core::transaction::Transaction::new(inputs, vec![output], vec![kernel]); + + let test_pool_entry = pool::graph::PoolEntry::new(&test_transaction); + + let incoming_edge_1 = pool::graph::Edge::new( + Some(random_hash()), + Some(core::core::hash::ZERO_HASH), + core::core::OutputIdentifier::from_output(&output), + ); + + let mut test_graph = pool::graph::DirectedGraph::empty(); + + test_graph.add_entry(test_pool_entry, vec![incoming_edge_1]); + + assert_eq!(test_graph.vertices.len(), 1); + assert_eq!(test_graph.roots.len(), 0); + assert_eq!(test_graph.edges.len(), 1); +} + +/// For testing/debugging: a random tx 
hash +fn random_hash() -> core::core::hash::Hash { + let hash_bytes: [u8; 32] = rand::random(); + core::core::hash::Hash(hash_bytes) +} diff --git a/pool/tests/pool.rs b/pool/tests/pool.rs new file mode 100644 index 000000000..f8de28d9a --- /dev/null +++ b/pool/tests/pool.rs @@ -0,0 +1,1150 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Top-level Pool tests + +extern crate blake2_rfc as blake2; +extern crate grin_core as core; +extern crate grin_keychain as keychain; +extern crate grin_pool as pool; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +extern crate rand; +extern crate time; + +use std::collections::HashMap; + +use core::core::transaction::{self, ProofMessageElements}; +use core::core::{OutputIdentifier, Transaction}; +use core::core::block; + +use pool::*; +use core::global; +use blockchain::{DummyChain, DummyChainImpl, DummyOutputSet}; +use std::sync::{Arc, RwLock}; +use core::global::ChainTypes; +use core::core::Proof; +use core::core::hash::{Hash, Hashed}; +use core::core::pmmr::MerkleProof; +use core::core::target::Difficulty; +use types::PoolError::InvalidTx; + +use keychain::Keychain; +use wallet::libwallet::{build, proof, reward}; + +use pool::types::*; + +macro_rules! 
expect_output_parent { + ($pool:expr, $expected:pat, $( $output:expr ),+ ) => { + $( + match $pool + .search_for_best_output( + &OutputIdentifier::from_output(&test_output($output)) + ) { + $expected => {}, + x => panic!( + "Unexpected result from output search for {:?}, got {:?}", + $output, + x, + ), + }; + )* + } +} + +#[test] +/// A basic test; add a pair of transactions to the pool. +fn test_basic_pool_add() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); + // We want this transaction to be rooted in the blockchain. + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); + + // Prepare a second transaction, connected to the first. + let child_transaction = test_transaction(vec![11, 3], vec![12]); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. 
+ let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, false); + if result.is_err() { + panic!("got an error adding parent tx: {:?}", result.err().unwrap()); + } + + // Now, add the transaction connected as a child to the first + let child_result = write_pool.add_to_memory_pool(test_source(), child_transaction, false); + + if child_result.is_err() { + panic!( + "got an error adding child tx: {:?}", + child_result.err().unwrap() + ); + } + } + + // Now take the read lock and use a few exposed methods to check consistency + { + let read_pool = pool.read().unwrap(); + assert_eq!(read_pool.total_size(), 2); + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); + expect_output_parent!(read_pool, Parent::BlockTransaction, 8); + expect_output_parent!(read_pool, Parent::Unknown, 20); + } +} + +#[test] +/// Attempt to add a multi kernel transaction to the mempool +fn test_multikernel_pool_add() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); + // We want this transaction to be rooted in the blockchain. + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); + + // Prepare a second transaction, connected to the first. 
+ let child_transaction = test_transaction(vec![11, 3], vec![12]); + + let txs = vec![parent_transaction, child_transaction]; + let multi_kernel_transaction = transaction::aggregate_with_cut_through(txs).unwrap(); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), multi_kernel_transaction, false); + if result.is_err() { + panic!( + "got an error adding multi-kernel tx: {:?}", + result.err().unwrap() + ); + } + } + + // Now take the read lock and use a few exposed methods to check consistency + { + let read_pool = pool.read().unwrap(); + assert_eq!(read_pool.total_size(), 1); + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5); + expect_output_parent!(read_pool, Parent::BlockTransaction, 8); + expect_output_parent!(read_pool, Parent::Unknown, 11, 3, 20); + } +} + +#[test] +/// Attempt to deaggregate a multi_kernel transaction +/// Push the parent transaction in the mempool then send a multikernel tx containing it and a +/// child transaction In the end, the pool should contain both transactions. 
+fn test_multikernel_deaggregate() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let transaction1 = test_transaction_with_offset(vec![5], vec![1]); + println!("{:?}", transaction1.validate()); + let transaction2 = test_transaction_with_offset(vec![8], vec![2]); + + // We want these transactions to be rooted in the blockchain. + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(8)); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the first transaction + let result = write_pool.add_to_memory_pool(test_source(), transaction1.clone(), false); + if result.is_err() { + panic!("got an error adding tx 1: {:?}", result.err().unwrap()); + } + } + + let txs = vec![transaction1.clone(), transaction2.clone()]; + let multi_kernel_transaction = transaction::aggregate(txs).unwrap(); + + let found_tx: Transaction; + // Now take the read lock and attempt to deaggregate the transaction + { + let read_pool = pool.read().unwrap(); + found_tx = read_pool + .deaggregate_transaction(multi_kernel_transaction) + .unwrap(); + + // Test the retrived transactions + assert_eq!(transaction2, found_tx); + } + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 1); + + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), found_tx.clone(), false); + if result.is_err() { + panic!("got an error adding child tx: {:?}", result.err().unwrap()); + } + } 
+ + // Now take the read lock and use a few exposed methods to check consistency + { + let read_pool = pool.read().unwrap(); + assert_eq!(read_pool.total_size(), 2); + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 1, 2); + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5, 8); + expect_output_parent!(read_pool, Parent::Unknown, 11, 3, 20); + } +} + +#[test] +/// Attempt to add a bad multi kernel transaction to the mempool should get +/// rejected +fn test_bad_multikernel_pool_add() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); + // We want this transaction to be rooted in the blockchain. + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); + + // Prepare a second transaction, connected to the first. + let child_transaction1 = test_transaction(vec![11, 3], vec![12]); + let child_transaction2 = test_transaction(vec![11, 3], vec![10]); + + let txs = vec![parent_transaction, child_transaction1, child_transaction2]; + let bad_multi_kernel_transaction = transaction::aggregate(txs).unwrap(); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. 
+ let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the transaction rooted in the blockchain + let result = + write_pool.add_to_memory_pool(test_source(), bad_multi_kernel_transaction, false); + assert!(result.is_err()); + } +} + +#[test] +/// A basic test; add a transaction to the pool and add the child to the +/// stempool +fn test_pool_stempool_add() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); + // We want this transaction to be rooted in the blockchain. + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); + + // Prepare a second transaction, connected to the first. + let child_transaction = test_transaction(vec![11, 3], vec![12]); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. 
+ let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, false); + if result.is_err() { + panic!("got an error adding parent tx: {:?}", result.err().unwrap()); + } + + // Now, add the transaction connected as a child to the first + let child_result = write_pool.add_to_memory_pool(test_source(), child_transaction, true); + + if child_result.is_err() { + panic!( + "got an error adding child tx: {:?}", + child_result.err().unwrap() + ); + } + } + + // Now take the read lock and use a few exposed methods to check consistency + { + let read_pool = pool.read().unwrap(); + assert_eq!(read_pool.total_size(), 2); + if read_pool.stempool.num_transactions() == 0 { + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); + } else { + expect_output_parent!(read_pool, Parent::StemPoolTransaction{tx_ref: _}, 12); + } + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); + expect_output_parent!(read_pool, Parent::BlockTransaction, 8); + expect_output_parent!(read_pool, Parent::Unknown, 20); + } +} + +#[test] +/// A basic test; add a transaction to the stempool and one the regular transaction pool +/// Child transaction should be added to the stempool. +fn test_stempool_pool_add() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 3]); + // We want this transaction to be rooted in the blockchain. 
+ let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); + + // Prepare a second transaction, connected to the first. + let child_transaction = test_transaction(vec![11, 3], vec![12]); + + dummy_chain.update_output_set(new_output); + + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), parent_transaction, true); + if result.is_err() { + panic!("got an error adding parent tx: {:?}", result.err().unwrap()); + } + + // Now, add the transaction connected as a child to the first + let child_result = write_pool.add_to_memory_pool(test_source(), child_transaction, false); + if child_result.is_err() { + panic!( + "got an error adding child tx: {:?}", + child_result.err().unwrap() + ); + } + } + + // Now take the read lock and use a few exposed methods to check consistency + { + let read_pool = pool.read().unwrap(); + // First transaction is a stem transaction. 
In that case the child transaction + // should be force stem + assert_eq!(read_pool.total_size(), 2); + // Parent has been directly fluffed + if read_pool.stempool.num_transactions() == 0 { + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); + } else { + expect_output_parent!(read_pool, Parent::StemPoolTransaction{tx_ref: _}, 12); + } + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); + expect_output_parent!(read_pool, Parent::BlockTransaction, 8); + expect_output_parent!(read_pool, Parent::Unknown, 20); + } +} + +#[test] +/// Testing various expected error conditions +pub fn test_pool_add_error() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let new_output = DummyOutputSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)); + + dummy_chain.update_output_set(new_output); + + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // First expected failure: duplicate output + let duplicate_tx = test_transaction(vec![5, 6], vec![7]); + + match write_pool.add_to_memory_pool(test_source(), duplicate_tx, false) { + Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"), + Err(x) => { + match x { + PoolError::DuplicateOutput { + other_tx, + in_chain, + output, + } => if other_tx.is_some() || !in_chain || output != test_output(7).commitment() + { + panic!("Unexpected parameter in DuplicateOutput: {:?}", x); + }, + _ => panic!( + "Unexpected error when adding duplicate output transaction: {:?}", + x + ), + }; + } + }; + + // To test DoubleSpend and AlreadyInPool conditions, we need to add + // a valid transaction. 
+ let valid_transaction = test_transaction(vec![5, 6], vec![9]); + + match write_pool.add_to_memory_pool(test_source(), valid_transaction.clone(), false) { + Ok(_) => {} + Err(x) => panic!("Unexpected error while adding a valid transaction: {:?}", x), + }; + + // Now, test a DoubleSpend by consuming the same blockchain unspent + // as valid_transaction: + let double_spend_transaction = test_transaction(vec![6], vec![2]); + + match write_pool.add_to_memory_pool(test_source(), double_spend_transaction, false) { + Ok(_) => panic!("Expected error when adding double spend, got Ok"), + Err(x) => { + match x { + PoolError::DoubleSpend { + other_tx: _, + spent_output, + } => if spent_output != test_output(6).commitment() { + panic!("Unexpected parameter in DoubleSpend: {:?}", x); + }, + _ => panic!( + "Unexpected error when adding double spend transaction: {:?}", + x + ), + }; + } + }; + + // Note, this used to work as expected, but after aggsig implementation + // creating another transaction with the same inputs/outputs doesn't create + // the same hash ID due to the random nonces in an aggsig. This + // will instead throw a (correct as well) Already spent error. 
An AlreadyInPool + // error can only come up in the case of the exact same transaction being + // added + //let already_in_pool = test_transaction(vec![5, 6], vec![9]); + + match write_pool.add_to_memory_pool(test_source(), valid_transaction, false) { + Ok(_) => panic!("Expected error when adding already in pool, got Ok"), + Err(x) => { + match x { + PoolError::AlreadyInPool => {} + _ => panic!("Unexpected error when adding already in pool tx: {:?}", x), + }; + } + }; + + assert_eq!(write_pool.total_size(), 1); + + // now attempt to add a timelocked tx to the pool + // should fail as invalid based on current height + let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10); + match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1, false) { + Err(PoolError::ImmatureTransaction { + lock_height: height, + }) => { + assert_eq!(height, 10); + } + Err(e) => panic!("expected ImmatureTransaction error here - {:?}", e), + Ok(_) => panic!("expected ImmatureTransaction error here"), + }; + } +} + +#[test] +fn test_immature_coinbase() { + global::set_mining_mode(ChainTypes::AutomatedTesting); + let mut dummy_chain = DummyChainImpl::new(); + let proof_size = global::proofsize(); + + let lock_height = 1 + global::coinbase_maturity(); + assert_eq!(lock_height, 4); + + let coinbase_output = test_coinbase_output(15); + dummy_chain.update_output_set(DummyOutputSet::empty().with_output(coinbase_output)); + + let chain_ref = Arc::new(dummy_chain); + let pool = RwLock::new(test_setup(&chain_ref)); + + { + let mut write_pool = pool.write().unwrap(); + + let coinbase_header = block::BlockHeader { + height: 1, + pow: Proof::random(proof_size), + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&coinbase_header); + + let head_header = block::BlockHeader { + height: 2, + pow: Proof::random(proof_size), + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&head_header); + + let txn = test_transaction_with_coinbase_input(15, 
coinbase_header.hash(), vec![10, 3]); + let result = write_pool.add_to_memory_pool(test_source(), txn, false); + match result { + Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {} + _ => panic!("expected ImmatureCoinbase error here"), + }; + + let head_header = block::BlockHeader { + height: 4, + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&head_header); + + let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]); + let result = write_pool.add_to_memory_pool(test_source(), txn, false); + match result { + Ok(_) => {} + Err(_) => panic!("this should not return an error here"), + }; + } +} + +#[test] +/// Testing an expected orphan +fn test_add_orphan() { + // TODO we need a test here +} + +#[test] +fn test_zero_confirmation_reconciliation() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + // single Output + let new_output = DummyOutputSet::empty().with_output(test_output(100)); + + dummy_chain.update_output_set(new_output); + let chain_ref = Arc::new(dummy_chain); + let pool = RwLock::new(test_setup(&chain_ref)); + + // now create two txs + // tx1 spends the Output + // tx2 spends output from tx1 + let tx1 = test_transaction(vec![100], vec![90]); + let tx2 = test_transaction(vec![90], vec![80]); + + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + // now add both txs to the pool (tx2 spends tx1 with zero confirmations) + // both should be accepted if tx1 added before tx2 + write_pool + .add_to_memory_pool(test_source(), tx1, false) + .unwrap(); + write_pool + .add_to_memory_pool(test_source(), tx2, false) + .unwrap(); + + assert_eq!(write_pool.pool_size(), 2); + } + + let txs: Vec; + { + let read_pool = pool.read().unwrap(); + let mut mineable_txs = read_pool.prepare_mineable_transactions(3); + txs = 
mineable_txs.drain(..).map(|x| *x).collect(); + + // confirm we can preparing both txs for mining here + // one root tx in the pool, and one non-root vertex in the pool + assert_eq!(txs.len(), 2); + } + + let keychain = Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let fees = txs.iter().map(|tx| tx.fee()).sum(); + let reward = reward::output(&keychain, &key_id, fees, 0).unwrap(); + + // now "mine" the block passing in the mineable txs from earlier + let block = block::Block::new( + &block::BlockHeader::default(), + txs.iter().collect(), + Difficulty::one(), + reward, + ).unwrap(); + + // now apply the block to ensure the chainstate is updated before we reconcile + chain_ref.apply_block(&block); + + // now reconcile the block + // we should evict both txs here + { + let mut write_pool = pool.write().unwrap(); + let evicted_transactions = write_pool.reconcile_block(&block).unwrap(); + assert_eq!(evicted_transactions.len(), 2); + } + + // check the pool is consistent after reconciling the block + // we should have zero txs in the pool (neither roots nor non-roots) + { + let read_pool = pool.write().unwrap(); + assert_eq!(read_pool.pool.len_vertices(), 0); + assert_eq!(read_pool.pool.len_roots(), 0); + } +} + +#[test] +/// Testing block reconciliation +fn test_block_reconciliation() { + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let new_output = DummyOutputSet::empty() + .with_output(test_output(10)) + .with_output(test_output(20)) + .with_output(test_output(30)) + .with_output(test_output(40)); + + dummy_chain.update_output_set(new_output); + + let chain_ref = Arc::new(dummy_chain); + + let pool = RwLock::new(test_setup(&chain_ref)); + + // Preparation: We will introduce a three root pool transactions. + // 1. 
A transaction that should be invalidated because it is exactly + // contained in the block. + // 2. A transaction that should be invalidated because the input is + // consumed in the block, although it is not exactly consumed. + // 3. A transaction that should remain after block reconciliation. + let block_transaction = test_transaction(vec![10], vec![8]); + let conflict_transaction = test_transaction(vec![20], vec![12, 6]); + let valid_transaction = test_transaction(vec![30], vec![13, 15]); + + // We will also introduce a few children: + // 4. A transaction that descends from transaction 1, that is in + // turn exactly contained in the block. + let block_child = test_transaction(vec![8], vec![5, 1]); + // 5. A transaction that descends from transaction 4, that is not + // contained in the block at all and should be valid after + // reconciliation. + let pool_child = test_transaction(vec![5], vec![3]); + // 6. A transaction that descends from transaction 2 that does not + // conflict with anything in the block in any way, but should be + // invalidated (orphaned). + let conflict_child = test_transaction(vec![12], vec![2]); + // 7. A transaction that descends from transaction 2 that should be + // valid due to its inputs being satisfied by the block. + let conflict_valid_child = test_transaction(vec![6], vec![4]); + // 8. A transaction that descends from transaction 3 that should be + // invalidated due to an output conflict. + let valid_child_conflict = test_transaction(vec![13], vec![9]); + // 9. A transaction that descends from transaction 3 that should remain + // valid after reconciliation. + let valid_child_valid = test_transaction(vec![15], vec![11]); + // 10. A transaction that descends from both transaction 6 and + // transaction 9 + let mixed_child = test_transaction(vec![2, 11], vec![7]); + + // Add transactions. + // Note: There are some ordering constraints that must be followed here + // until orphans is 100% implemented. 
Once the orphans process has + // stabilized, we can mix these up to exercise that path a bit. + let mut txs_to_add = vec![ + block_transaction, + conflict_transaction, + valid_transaction, + block_child, + pool_child, + conflict_child, + conflict_valid_child, + valid_child_conflict, + valid_child_valid, + mixed_child, + ]; + + let expected_pool_size = txs_to_add.len(); + + // First we add the above transactions to the pool; all should be + // accepted. + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + for tx in txs_to_add.drain(..) { + write_pool + .add_to_memory_pool(test_source(), tx, false) + .unwrap(); + } + + assert_eq!(write_pool.total_size(), expected_pool_size); + } + // Now we prepare the block that will cause the above condition. + // First, the transactions we want in the block: + // - Copy of 1 + let block_tx_1 = test_transaction(vec![10], vec![8]); + // - Conflict w/ 2, satisfies 7 + let block_tx_2 = test_transaction(vec![20], vec![6]); + // - Copy of 4 + let block_tx_3 = test_transaction(vec![8], vec![5, 1]); + // - Output conflict w/ 8 + let block_tx_4 = test_transaction(vec![40], vec![9, 1]); + let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4]; + + let keychain = Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let fees = block_transactions.iter().map(|tx| tx.fee()).sum(); + let reward = reward::output(&keychain, &key_id, fees, 0).unwrap(); + + let block = block::Block::new( + &block::BlockHeader::default(), + block_transactions, + Difficulty::one(), + reward, + ).unwrap(); + + chain_ref.apply_block(&block); + + // Block reconciliation + { + let mut write_pool = pool.write().unwrap(); + + let evicted_transactions = write_pool.reconcile_block(&block); + + assert!(evicted_transactions.is_ok()); + + assert_eq!(evicted_transactions.unwrap().len(), 6); + + // TODO: Txids are not yet deterministic. 
When they are, we should + // check the specific transactions that were evicted. + } + + // Using the pool's methods to validate a few end conditions. + { + let read_pool = pool.read().unwrap(); + + assert_eq!(read_pool.total_size(), 4); + + // We should have available blockchain outputs + expect_output_parent!(read_pool, Parent::BlockTransaction, 9, 1); + + // We should have spent blockchain outputs + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5, 6); + + // We should have spent pool references + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 15); + + // We should have unspent pool references + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 3, 11, 13); + + // References internal to the block should be unknown + expect_output_parent!(read_pool, Parent::Unknown, 8); + + // Evicted transactions should have unknown outputs + expect_output_parent!(read_pool, Parent::Unknown, 2, 7); + } +} + +#[test] +/// Test transaction selection and block building. 
+fn test_block_building() { + // Add a handful of transactions + let mut dummy_chain = DummyChainImpl::new(); + let head_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + dummy_chain.store_head_header(&head_header); + + let new_output = DummyOutputSet::empty() + .with_output(test_output(10)) + .with_output(test_output(20)) + .with_output(test_output(30)) + .with_output(test_output(40)); + + dummy_chain.update_output_set(new_output); + + let chain_ref = Arc::new(dummy_chain); + + let pool = RwLock::new(test_setup(&chain_ref)); + + let root_tx_1 = test_transaction(vec![10, 20], vec![24]); + let root_tx_2 = test_transaction(vec![30], vec![28]); + let root_tx_3 = test_transaction(vec![40], vec![38]); + + let child_tx_1 = test_transaction(vec![24], vec![22]); + let child_tx_2 = test_transaction(vec![38], vec![32]); + + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); + + assert!( + write_pool + .add_to_memory_pool(test_source(), root_tx_1, false) + .is_ok() + ); + assert!( + write_pool + .add_to_memory_pool(test_source(), root_tx_2, false) + .is_ok() + ); + assert!( + write_pool + .add_to_memory_pool(test_source(), root_tx_3, false) + .is_ok() + ); + assert!( + write_pool + .add_to_memory_pool(test_source(), child_tx_1, false) + .is_ok() + ); + assert!( + write_pool + .add_to_memory_pool(test_source(), child_tx_2, false) + .is_ok() + ); + + assert_eq!(write_pool.total_size(), 5); + } + + // Request blocks + let block: block::Block; + let mut txs: Vec>; + { + let read_pool = pool.read().unwrap(); + txs = read_pool.prepare_mineable_transactions(3); + assert_eq!(txs.len(), 3); + // TODO: This is ugly, either make block::new take owned + // txs instead of mut refs, or change + // prepare_mineable_transactions to return mut refs + let block_txs: Vec = txs.drain(..).map(|x| *x).collect(); + let tx_refs: Vec<&transaction::Transaction> = block_txs.iter().collect(); + + let keychain = 
Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + let fees = tx_refs.iter().map(|tx| tx.fee()).sum(); + let reward = reward::output(&keychain, &key_id, fees, 0).unwrap(); + block = block::Block::new( + &block::BlockHeader::default(), + tx_refs, + Difficulty::one(), + reward, + ).unwrap(); + } + + chain_ref.apply_block(&block); + // Reconcile block + { + let mut write_pool = pool.write().unwrap(); + + let evicted_transactions = write_pool.reconcile_block(&block); + + assert!(evicted_transactions.is_ok()); + + assert_eq!(evicted_transactions.unwrap().len(), 3); + assert_eq!(write_pool.total_size(), 2); + } +} + +fn test_setup(dummy_chain: &Arc) -> TransactionPool { + TransactionPool { + config: PoolConfig { + accept_fee_base: 0, + max_pool_size: 10_000, + dandelion_probability: 90, + dandelion_embargo: 30, + }, + time_stem_transactions: HashMap::new(), + stem_transactions: HashMap::new(), + transactions: HashMap::new(), + stempool: Pool::empty(), + pool: Pool::empty(), + orphans: Orphans::empty(), + blockchain: dummy_chain.clone(), + adapter: Arc::new(NoopAdapter {}), + } +} + +/// Cobble together a test transaction for testing the transaction pool. +/// +/// Connectivity here is the most important element. +/// Every output is given a blinding key equal to its value, so that the +/// entire commitment can be derived deterministically from just the value. +/// +/// Fees are the remainder between input and output values, +/// so the numbers should make sense. 
+fn test_transaction(input_values: Vec, output_values: Vec) -> transaction::Transaction { + let keychain = keychain_for_tests(); + + let input_sum = input_values.iter().sum::() as i64; + let output_sum = output_values.iter().sum::() as i64; + + let fees: i64 = input_sum - output_sum; + assert!(fees >= 0); + + let mut tx_elements = Vec::new(); + + for input_value in input_values { + let key_id = keychain.derive_key_id(input_value as u32).unwrap(); + tx_elements.push(build::input(input_value, key_id)); + } + + for output_value in output_values { + let key_id = keychain.derive_key_id(output_value as u32).unwrap(); + tx_elements.push(build::output(output_value, key_id)); + } + tx_elements.push(build::with_fee(fees as u64)); + + build::transaction(tx_elements, &keychain).unwrap() +} + +fn test_transaction_with_offset( + input_values: Vec, + output_values: Vec, +) -> transaction::Transaction { + let keychain = keychain_for_tests(); + + let input_sum = input_values.iter().sum::() as i64; + let output_sum = output_values.iter().sum::() as i64; + + let fees: i64 = input_sum - output_sum; + assert!(fees >= 0); + + let mut tx_elements = Vec::new(); + + for input_value in input_values { + let key_id = keychain.derive_key_id(input_value as u32).unwrap(); + tx_elements.push(build::input(input_value, key_id)); + } + + for output_value in output_values { + let key_id = keychain.derive_key_id(output_value as u32).unwrap(); + tx_elements.push(build::output(output_value, key_id)); + } + tx_elements.push(build::with_fee(fees as u64)); + + build::transaction_with_offset(tx_elements, &keychain).unwrap() +} + +fn test_transaction_with_coinbase_input( + input_value: u64, + input_block_hash: Hash, + output_values: Vec, +) -> transaction::Transaction { + let keychain = keychain_for_tests(); + + let output_sum = output_values.iter().sum::() as i64; + + let fees: i64 = input_value as i64 - output_sum; + assert!(fees >= 0); + + let mut tx_elements = Vec::new(); + + let merkle_proof = 
MerkleProof { + node: Hash::default(), + root: Hash::default(), + peaks: vec![Hash::default()], + ..MerkleProof::default() + }; + + let key_id = keychain.derive_key_id(input_value as u32).unwrap(); + tx_elements.push(build::coinbase_input( + input_value, + input_block_hash, + merkle_proof, + key_id, + )); + + for output_value in output_values { + let key_id = keychain.derive_key_id(output_value as u32).unwrap(); + tx_elements.push(build::output(output_value, key_id)); + } + tx_elements.push(build::with_fee(fees as u64)); + + build::transaction(tx_elements, &keychain).unwrap() +} + +/// Very un-dry way of building a vanilla tx and adding a lock_height to it. +/// TODO - rethink this. +fn timelocked_transaction( + input_values: Vec, + output_values: Vec, + lock_height: u64, +) -> transaction::Transaction { + let keychain = keychain_for_tests(); + + let fees: i64 = + input_values.iter().sum::() as i64 - output_values.iter().sum::() as i64; + assert!(fees >= 0); + + let mut tx_elements = Vec::new(); + + for input_value in input_values { + let key_id = keychain.derive_key_id(input_value as u32).unwrap(); + tx_elements.push(build::input(input_value, key_id)); + } + + for output_value in output_values { + let key_id = keychain.derive_key_id(output_value as u32).unwrap(); + tx_elements.push(build::output(output_value, key_id)); + } + tx_elements.push(build::with_fee(fees as u64)); + + tx_elements.push(build::with_lock_height(lock_height)); + build::transaction(tx_elements, &keychain).unwrap() +} + +/// Deterministically generate an output defined by our test scheme +fn test_output(value: u64) -> transaction::Output { + let keychain = keychain_for_tests(); + let key_id = keychain.derive_key_id(value as u32).unwrap(); + let msg = ProofMessageElements::new(value, &key_id); + let commit = keychain.commit(value, &key_id).unwrap(); + let proof = proof::create( + &keychain, + value, + &key_id, + commit, + None, + msg.to_proof_message(), + ).unwrap(); + transaction::Output { + 
features: transaction::OutputFeatures::DEFAULT_OUTPUT, + commit: commit, + proof: proof, + } +} + +/// Deterministically generate a coinbase output defined by our test scheme +fn test_coinbase_output(value: u64) -> transaction::Output { + let keychain = keychain_for_tests(); + let key_id = keychain.derive_key_id(value as u32).unwrap(); + let msg = ProofMessageElements::new(value, &key_id); + let commit = keychain.commit(value, &key_id).unwrap(); + let proof = proof::create( + &keychain, + value, + &key_id, + commit, + None, + msg.to_proof_message(), + ).unwrap(); + transaction::Output { + features: transaction::OutputFeatures::COINBASE_OUTPUT, + commit: commit, + proof: proof, + } +} + +fn keychain_for_tests() -> Keychain { + let seed = "pool_tests"; + let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes()); + Keychain::from_seed(seed.as_bytes()).unwrap() +} + +/// A generic TxSource representing a test +fn test_source() -> TxSource { + TxSource { + debug_name: "test".to_string(), + identifier: "127.0.0.1".to_string(), + } +} diff --git a/servers/src/mining/mine_block.rs b/servers/src/mining/mine_block.rs index 3750c696b..988652fef 100644 --- a/servers/src/mining/mine_block.rs +++ b/servers/src/mining/mine_block.rs @@ -225,7 +225,8 @@ fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, B let keychain = Keychain::from_random_seed().unwrap(); let key_id = keychain.derive_key_id(1).unwrap(); let (out, kernel) = - core::Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height).unwrap(); + wallet::libwallet::reward::output(&keychain, &key_id, block_fees.fees, block_fees.height) + .unwrap(); Ok((out, kernel, block_fees)) } diff --git a/servers/tests/framework/mod.rs b/servers/tests/framework/mod.rs index b5d908e81..8efd8a2da 100644 --- a/servers/tests/framework/mod.rs +++ b/servers/tests/framework/mod.rs @@ -222,7 +222,7 @@ impl LocalServerContainer { "starting test Miner on port {}", self.config.p2p_server_port ); - 
s.start_test_miner(wallet_url); + s.start_test_miner(Some(self.config.coinbase_wallet_address.clone())); } for p in &mut self.peer_list { @@ -262,7 +262,11 @@ impl LocalServerContainer { self.wallet_config.data_file_dir = self.working_dir.clone(); let _ = fs::create_dir_all(self.wallet_config.clone().data_file_dir); - wallet::WalletSeed::init_file(&self.wallet_config).unwrap(); + let r = wallet::WalletSeed::init_file(&self.wallet_config); + + if let Err(e) = r { + //panic!("Error initting wallet seed: {}", e); + } let wallet_seed = wallet::WalletSeed::from_file(&self.wallet_config) .expect("Failed to read wallet seed file."); diff --git a/servers/tests/wallet.rs b/servers/tests/wallet.rs index ac1f14c1c..561d77ece 100644 --- a/servers/tests/wallet.rs +++ b/servers/tests/wallet.rs @@ -35,7 +35,7 @@ use util::LOGGER; /// Start 1 node mining and two wallets, then send a few /// transactions from one to the other -// #[test] +//#[test] fn basic_wallet_transactions() { let test_name_dir = "test_servers"; core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting); @@ -50,6 +50,7 @@ fn basic_wallet_transactions() { let mut coinbase_config = LocalServerContainerConfig::default(); coinbase_config.name = String::from("coinbase_wallet"); coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001"); + coinbase_config.coinbase_wallet_address = String::from("http://127.0.0.1:13415"); coinbase_config.wallet_port = 10002; let coinbase_wallet = Arc::new(Mutex::new( LocalServerContainer::new(coinbase_config).unwrap(), diff --git a/store/src/lib.rs b/store/src/lib.rs index 59303d09e..1f4f74339 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -30,8 +30,6 @@ extern crate memmap; extern crate rocksdb; extern crate serde; #[macro_use] -extern crate serde_derive; -#[macro_use] extern crate slog; pub mod pmmr; diff --git a/wallet/Cargo.toml b/wallet/Cargo.toml index 1a2acb768..e2a44cb6d 100644 --- a/wallet/Cargo.toml +++ b/wallet/Cargo.toml @@ 
-14,6 +14,7 @@ failure_derive = "0.1" futures = "0.1" hyper = "0.11" iron = "0.5" +lazy_static = "0.2" prettytable-rs = "0.6" rand = "0.3" router = "0.5" diff --git a/wallet/src/lib.rs b/wallet/src/lib.rs index de7556f51..15b0bba3a 100644 --- a/wallet/src/lib.rs +++ b/wallet/src/lib.rs @@ -41,6 +41,9 @@ extern crate router; extern crate tokio_core; extern crate tokio_retry; +#[macro_use] +extern crate lazy_static; + extern crate grin_api as api; extern crate grin_core as core; extern crate grin_keychain as keychain; @@ -56,6 +59,7 @@ mod types; mod restore; pub mod client; pub mod server; +pub mod libwallet; pub use outputs::show_outputs; pub use info::{retrieve_info, show_info}; diff --git a/wallet/src/libwallet/aggsig.rs b/wallet/src/libwallet/aggsig.rs new file mode 100644 index 000000000..972e863eb --- /dev/null +++ b/wallet/src/libwallet/aggsig.rs @@ -0,0 +1,269 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+/// Aggsig library definitions + +use std::collections::HashMap; + +use util::secp::key::{PublicKey, SecretKey}; +use util::secp::{self, aggsig, Message, Secp256k1, Signature}; +use util::secp::pedersen::Commitment; +use util::kernel_sig_msg; +use uuid::Uuid; +use keychain::Keychain; +use keychain::extkey::Identifier; +use keychain::blind::BlindingFactor; +use libwallet::error::Error; + +#[derive(Clone, Debug)] +/// Holds the context for a single aggsig transaction +pub struct Context { + /// Transaction ID + pub transaction_id: Uuid, + /// Secret key (of which public is shared) + pub sec_key: SecretKey, + /// Secret nonce (of which public is shared) + /// (basically a SecretKey) + pub sec_nonce: SecretKey, + /// If I'm the recipient, store my outputs between invocations (that I need + /// to sum) + pub output_ids: Vec, +} + +#[derive(Clone, Debug)] +/// Holds many contexts, to support multiple transactions hitting a wallet receiver +/// at once +pub struct ContextManager { + contexts: HashMap, +} + +impl ContextManager { + /// Create + pub fn new() -> ContextManager { + ContextManager { + contexts: HashMap::new(), + } + } + + /// Creates a context for a transaction id if required + /// otherwise does nothing + pub fn create_context( + &mut self, + secp: &secp::Secp256k1, + transaction_id: &Uuid, + sec_key: SecretKey, + ) -> Context { + if !self.contexts.contains_key(transaction_id) { + self.contexts.insert( + transaction_id.clone(), + Context { + sec_key: sec_key, + transaction_id: transaction_id.clone(), + sec_nonce: aggsig::export_secnonce_single(secp).unwrap(), + output_ids: vec![], + }, + ); + } + self.get_context(transaction_id) + } + + /// Retrieve a context by transaction id + pub fn get_context(&self, transaction_id: &Uuid) -> Context { + self.contexts.get(&transaction_id).unwrap().clone() + } + + /// Save context + pub fn save_context(&mut self, c: Context) { + self.contexts.insert(c.transaction_id.clone(), c); + } +} + +impl Context { + /// Tracks an 
output contributing to my excess value (if it needs to + /// be kept between invocations) + pub fn add_output(&mut self, output_id: &Identifier) { + self.output_ids.push(output_id.clone()); + } + + /// Returns all stored outputs + pub fn get_outputs(&self) -> Vec { + self.output_ids.clone() + } + + /// Returns private key, private nonce + pub fn get_private_keys(&self) -> (SecretKey, SecretKey) { + (self.sec_key.clone(), self.sec_nonce.clone()) + } + + /// Returns public key, public nonce + pub fn get_public_keys(&self, secp: &Secp256k1) -> (PublicKey, PublicKey) { + ( + PublicKey::from_secret_key(secp, &self.sec_key).unwrap(), + PublicKey::from_secret_key(secp, &self.sec_nonce).unwrap(), + ) + } + + /// Note 'secnonce' here is used to perform the signature, while 'pubnonce' just allows you to + /// provide a custom public nonce to include while calculating e + /// nonce_sum is the sum used to decide whether secnonce should be inverted during sig time + pub fn sign_single( + &self, + secp: &Secp256k1, + msg: &Message, + secnonce: Option<&SecretKey>, + pubnonce: Option<&PublicKey>, + nonce_sum: Option<&PublicKey>, + ) -> Result { + let sig = aggsig::sign_single(secp, msg, &self.sec_key, secnonce, pubnonce, nonce_sum)?; + Ok(sig) + } + + //Verifies other final sig corresponds with what we're expecting + pub fn verify_final_sig_build_msg( + &self, + secp: &Secp256k1, + sig: &Signature, + pubkey: &PublicKey, + fee: u64, + lock_height: u64, + ) -> bool { + let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap(); + verify_single(secp, sig, &msg, None, pubkey, true) + } + + //Verifies other party's sig corresponds with what we're expecting + pub fn verify_partial_sig( + &self, + secp: &Secp256k1, + sig: &Signature, + other_pub_nonce: &PublicKey, + pubkey: &PublicKey, + fee: u64, + lock_height: u64, + ) -> bool { + let (_, sec_nonce) = self.get_private_keys(); + let mut nonce_sum = other_pub_nonce.clone(); + let _ = nonce_sum.add_exp_assign(secp, 
&sec_nonce); + let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap(); + + verify_single(secp, sig, &msg, Some(&nonce_sum), pubkey, true) + } + + pub fn calculate_partial_sig( + &self, + secp: &Secp256k1, + other_pub_nonce: &PublicKey, + fee: u64, + lock_height: u64, + ) -> Result { + // Add public nonces kR*G + kS*G + let (_, sec_nonce) = self.get_private_keys(); + let mut nonce_sum = other_pub_nonce.clone(); + let _ = nonce_sum.add_exp_assign(secp, &sec_nonce); + let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?; + + //Now calculate signature using message M=fee, nonce in e=nonce_sum + self.sign_single( + secp, + &msg, + Some(&sec_nonce), + Some(&nonce_sum), + Some(&nonce_sum), + ) + } + + /// Helper function to calculate final signature + pub fn calculate_final_sig( + &self, + secp: &Secp256k1, + their_sig: &Signature, + our_sig: &Signature, + their_pub_nonce: &PublicKey, + ) -> Result { + // Add public nonces kR*G + kS*G + let (_, sec_nonce) = self.get_private_keys(); + let mut nonce_sum = their_pub_nonce.clone(); + let _ = nonce_sum.add_exp_assign(secp, &sec_nonce); + let sig = aggsig::add_signatures_single(&secp, their_sig, our_sig, &nonce_sum)?; + Ok(sig) + } + + /// Helper function to calculate final public key + pub fn calculate_final_pubkey( + &self, + secp: &Secp256k1, + their_public_key: &PublicKey, + ) -> Result { + let (our_sec_key, _) = self.get_private_keys(); + let mut pk_sum = their_public_key.clone(); + let _ = pk_sum.add_exp_assign(secp, &our_sec_key); + Ok(pk_sum) + } +} + +// Contextless functions + +/// Just a simple sig, creates its own nonce, etc +pub fn sign_from_key_id( + secp: &Secp256k1, + k: &Keychain, + msg: &Message, + key_id: &Identifier, +) -> Result { + let skey = k.derived_key(key_id)?; + let sig = aggsig::sign_single(secp, &msg, &skey, None, None, None)?; + Ok(sig) +} + +/// Verifies a sig given a commitment +pub fn verify_single_from_commit( + secp: &Secp256k1, + sig: &Signature, + 
msg: &Message, + commit: &Commitment, +) -> bool { + // Extract the pubkey, unfortunately we need this hack for now, (we just hope + // one is valid) TODO: Create better secp256k1 API to do this + let pubkeys = commit.to_two_pubkeys(secp); + let mut valid = false; + for i in 0..pubkeys.len() { + valid = aggsig::verify_single(secp, &sig, &msg, None, &pubkeys[i], false); + if valid { + break; + } + } + valid +} + +//Verifies an aggsig signature +pub fn verify_single( + secp: &Secp256k1, + sig: &Signature, + msg: &Message, + pubnonce: Option<&PublicKey>, + pubkey: &PublicKey, + is_partial: bool, +) -> bool { + aggsig::verify_single(secp, sig, msg, pubnonce, pubkey, is_partial) +} + +/// Just a simple sig, creates its own nonce, etc +pub fn sign_with_blinding( + secp: &Secp256k1, + msg: &Message, + blinding: &BlindingFactor, +) -> Result { + let skey = &blinding.secret_key(&secp)?; + let sig = aggsig::sign_single(secp, &msg, skey, None, None, None)?; + Ok(sig) +} diff --git a/wallet/src/libwallet/blind.rs b/wallet/src/libwallet/blind.rs new file mode 100644 index 000000000..bbf33b375 --- /dev/null +++ b/wallet/src/libwallet/blind.rs @@ -0,0 +1,15 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Stub while figuring out wallet redesign diff --git a/core/src/core/build.rs b/wallet/src/libwallet/build.rs similarity index 94% rename from core/src/core/build.rs rename to wallet/src/libwallet/build.rs index 737c3f4d0..71f91dab5 100644 --- a/core/src/core/build.rs +++ b/wallet/src/libwallet/build.rs @@ -27,9 +27,10 @@ use util::{kernel_sig_msg, secp}; -use core::{Input, Output, OutputFeatures, ProofMessageElements, Transaction, TxKernel}; -use core::hash::Hash; -use core::pmmr::MerkleProof; +use core::core::{Input, Output, OutputFeatures, ProofMessageElements, Transaction, TxKernel}; +use core::core::hash::Hash; +use core::core::pmmr::MerkleProof; +use libwallet::{aggsig, proof}; use keychain; use keychain::{BlindSum, BlindingFactor, Identifier, Keychain}; use util::LOGGER; @@ -105,10 +106,14 @@ pub fn output(value: u64, key_id: Identifier) -> Box { let msg = ProofMessageElements::new(value, &key_id); - let rproof = build - .keychain - .range_proof(value, &key_id, commit, None, msg.to_proof_message()) - .unwrap(); + let rproof = proof::create( + build.keychain, + value, + &key_id, + commit, + None, + msg.to_proof_message(), + ).unwrap(); ( tx.with_output(Output { @@ -214,7 +219,7 @@ pub fn transaction( let skey = blind_sum.secret_key(&keychain.secp())?; kern.excess = keychain.secp().commit(0, skey)?; - kern.excess_sig = Keychain::aggsig_sign_with_blinding(&keychain.secp(), &msg, &blind_sum)?; + kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &blind_sum).unwrap(); tx.kernels.push(kern); @@ -243,7 +248,7 @@ pub fn transaction_with_offset( // generate kernel excess and excess_sig using the split key k1 let skey = k1.secret_key(&keychain.secp())?; kern.excess = ctx.keychain.secp().commit(0, skey)?; - kern.excess_sig = Keychain::aggsig_sign_with_blinding(&keychain.secp(), &msg, &k1)?; + kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &k1).unwrap(); // store the kernel offset (k2) on the tx itself // commitments will sum 
correctly when including the offset diff --git a/wallet/src/libwallet/error.rs b/wallet/src/libwallet/error.rs new file mode 100644 index 000000000..0c4e6c871 --- /dev/null +++ b/wallet/src/libwallet/error.rs @@ -0,0 +1,60 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Wallet lib errors + +use util::secp; +use keychain::{self, extkey}; + +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + Secp(secp::Error), + Keychain(keychain::Error), + ExtendedKey(extkey::Error), + Transaction(String), + RangeProof(String), +} + +impl From for Error { + fn from(e: secp::Error) -> Error { + Error::Secp(e) + } +} + +impl From for Error { + fn from(e: extkey::Error) -> Error { + Error::ExtendedKey(e) + } +} + +impl From for Error { + fn from(e: keychain::Error) -> Error { + Error::Keychain(e) + } +} +/*impl error::Error for Error { + fn description(&self) -> &str { + match *self { + _ => "some kind of wallet lib error", + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + _ => write!(f, "some kind of wallet lib error"), + } + } +}*/ diff --git a/wallet/src/libwallet/mod.rs b/wallet/src/libwallet/mod.rs new file mode 100644 index 000000000..473db5b1d --- /dev/null +++ b/wallet/src/libwallet/mod.rs @@ -0,0 +1,29 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Wallet lib... should be used by clients to build wallets and +//! encapsulate all functions needed to build transactions and operate a wallet + +#![deny(non_upper_case_globals)] +#![deny(non_camel_case_types)] +#![deny(non_snake_case)] +#![deny(unused_mut)] +#![warn(missing_docs)] + +pub mod error; +pub mod aggsig; +pub mod blind; +pub mod proof; +pub mod reward; +pub mod build; diff --git a/wallet/src/libwallet/proof.rs b/wallet/src/libwallet/proof.rs new file mode 100644 index 000000000..73934db66 --- /dev/null +++ b/wallet/src/libwallet/proof.rs @@ -0,0 +1,113 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Rangeproof library functions + +use keychain::Keychain; +use util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof}; +use util::secp::key::SecretKey; +use util::secp::{self, Secp256k1}; +use util::logger::LOGGER; +use keychain::extkey::Identifier; +use libwallet::error::Error; +use blake2; + +pub fn create_nonce(k: &Keychain, commit: &Commitment) -> SecretKey { + // hash(commit|masterkey) as nonce + let root_key = k.root_key_id().to_bytes(); + let res = blake2::blake2b::blake2b(32, &commit.0, &root_key); + let res = res.as_bytes(); + let mut ret_val = [0; 32]; + for i in 0..res.len() { + ret_val[i] = res[i]; + } + SecretKey::from_slice(k.secp(), &ret_val).unwrap() +} + +/// So we want this to take an opaque structure that can be called +/// back to get the sensitive data + +pub fn create( + k: &Keychain, + amount: u64, + key_id: &Identifier, + _commit: Commitment, + extra_data: Option>, + msg: ProofMessage, +) -> Result { + let commit = k.commit(amount, key_id)?; + let skey = k.derived_key(key_id)?; + let nonce = create_nonce(k, &commit); + if msg.len() == 0 { + return Ok(k.secp().bullet_proof(amount, skey, nonce, extra_data, None)); + } else { + if msg.len() != 64 { + error!(LOGGER, "Bullet proof message must be 64 bytes."); + return Err(Error::RangeProof( + "Bullet proof message must be 64 bytes".to_string(), + )); + } + } + return Ok(k.secp() + .bullet_proof(amount, skey, nonce, extra_data, Some(msg))); +} + +pub fn verify( + secp: &Secp256k1, + commit: Commitment, + proof: RangeProof, + extra_data: Option>, +) -> Result<(), secp::Error> { + let result = secp.verify_bullet_proof(commit, proof, extra_data); + match result { + Ok(_) => Ok(()), + Err(e) => Err(e), + } +} + +pub fn rewind( + k: &Keychain, + key_id: &Identifier, + commit: Commitment, + extra_data: Option>, + proof: RangeProof, +) -> Result { + let skey = k.derived_key(key_id)?; + let nonce = create_nonce(k, &commit); + let proof_message = k.secp() + .unwind_bullet_proof(commit, skey, 
nonce, extra_data, proof); + let proof_info = match proof_message { + Ok(p) => ProofInfo { + success: true, + value: 0, + message: p, + mlen: 0, + min: 0, + max: 0, + exp: 0, + mantissa: 0, + }, + Err(_) => ProofInfo { + success: false, + value: 0, + message: ProofMessage::empty(), + mlen: 0, + min: 0, + max: 0, + exp: 0, + mantissa: 0, + }, + }; + return Ok(proof_info); +} diff --git a/wallet/src/libwallet/reward.rs b/wallet/src/libwallet/reward.rs new file mode 100644 index 000000000..33c7e88be --- /dev/null +++ b/wallet/src/libwallet/reward.rs @@ -0,0 +1,80 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Builds the blinded output and related signature proof for the block +/// reward. 
+ +use keychain; + +use core::core::{Output, OutputFeatures, ProofMessageElements, TxKernel}; +use core::consensus::reward; +use libwallet::{aggsig, proof}; +use libwallet::error::Error; +use core::core::KernelFeatures; +use util::{kernel_sig_msg, secp, static_secp_instance, LOGGER}; + +/// output a reward output +pub fn output( + keychain: &keychain::Keychain, + key_id: &keychain::Identifier, + fees: u64, + height: u64, +) -> Result<(Output, TxKernel), Error> { + let value = reward(fees); + let commit = keychain.commit(value, key_id)?; + let msg = ProofMessageElements::new(value, key_id); + + trace!(LOGGER, "Block reward - Pedersen Commit is: {:?}", commit,); + + let rproof = proof::create( + keychain, + value, + key_id, + commit, + None, + msg.to_proof_message(), + )?; + + let output = Output { + features: OutputFeatures::COINBASE_OUTPUT, + commit: commit, + proof: rproof, + }; + + let secp = static_secp_instance(); + let secp = secp.lock().unwrap(); + let over_commit = secp.commit_value(reward(fees))?; + let out_commit = output.commitment(); + let excess = secp.commit_sum(vec![out_commit], vec![over_commit])?; + + // NOTE: Remember we sign the fee *and* the lock_height. + // For a coinbase output the fee is 0 and the lock_height is + // the lock_height of the coinbase output itself, + // not the lock_height of the tx (there is no tx for a coinbase output). + // This output will not be spendable earlier than lock_height (and we sign this + // here). 
+ let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?; + let sig = aggsig::sign_from_key_id(&secp, keychain, &msg, &key_id)?; + + let proof = TxKernel { + features: KernelFeatures::COINBASE_KERNEL, + excess: excess, + excess_sig: sig, + fee: 0, + // lock_height here is the height of the block (tx should be valid immediately) + // *not* the lock_height of the coinbase output (only spendable 1,000 blocks later) + lock_height: height, + }; + Ok((output, proof)) +} diff --git a/wallet/src/receiver.rs b/wallet/src/receiver.rs index a92388d84..aaaad44cb 100644 --- a/wallet/src/receiver.rs +++ b/wallet/src/receiver.rs @@ -21,11 +21,12 @@ use iron::Handler; use iron::prelude::*; use iron::status; use serde_json; -use uuid::Uuid; +use std::sync::{Arc, RwLock}; use api; use core::consensus::reward; -use core::core::{amount_to_hr_string, build, Block, Committed, Output, Transaction, TxKernel}; +use core::core::{amount_to_hr_string, Committed, Output, Transaction, TxKernel}; +use libwallet::{aggsig, build, reward}; use core::{global, ser}; use failure::{Fail, ResultExt}; use keychain::{BlindingFactor, Identifier, Keychain}; @@ -39,6 +40,12 @@ pub struct TxWrapper { pub tx_hex: String, } +lazy_static! 
{ + /// Static reference to aggsig context (temporary while wallet is being refactored) + pub static ref AGGSIG_CONTEXT_MANAGER:Arc> + = Arc::new(RwLock::new(aggsig::ContextManager::new())); +} + /// Receive Part 1 of interactive transactions from sender, Sender Initiation /// Return result of part 2, Recipient Initation, to sender /// -Receiver receives inputs, outputs xS * G and kS * G @@ -52,6 +59,7 @@ pub struct TxWrapper { fn handle_sender_initiation( config: &WalletConfig, + context_manager: &mut aggsig::ContextManager, keychain: &Keychain, partial_tx: &PartialTx, ) -> Result { @@ -125,14 +133,13 @@ fn handle_sender_initiation( let blind = blind_sum .secret_key(&keychain.secp()) .context(ErrorKind::Keychain)?; - keychain - .aggsig_create_context(&partial_tx.id, blind) - .context(ErrorKind::Keychain)?; - keychain.aggsig_add_output(&partial_tx.id, &key_id); + let mut context = context_manager.create_context(keychain.secp(), &partial_tx.id, blind); - let sig_part = keychain - .aggsig_calculate_partial_sig( - &partial_tx.id, + context.add_output(&key_id); + + let sig_part = context + .calculate_partial_sig( + keychain.secp(), &sender_pub_nonce, tx.fee(), tx.lock_height(), @@ -142,7 +149,7 @@ fn handle_sender_initiation( // Build the response, which should contain sR, blinding excess xR * G, public // nonce kR * G let mut partial_tx = build_partial_tx( - &partial_tx.id, + &context, keychain, amount, kernel_offset, @@ -151,6 +158,8 @@ fn handle_sender_initiation( ); partial_tx.phase = PartialTxPhase::ReceiverInitiation; + context_manager.save_context(context); + Ok(partial_tx) } @@ -169,15 +178,17 @@ fn handle_sender_initiation( fn handle_sender_confirmation( config: &WalletConfig, + context_manager: &mut aggsig::ContextManager, keychain: &Keychain, partial_tx: &PartialTx, fluff: bool, ) -> Result { let (amount, sender_pub_blinding, sender_pub_nonce, kernel_offset, sender_sig_part, tx) = read_partial_tx(keychain, partial_tx)?; + let mut context = 
context_manager.get_context(&partial_tx.id); let sender_sig_part = sender_sig_part.unwrap(); - let res = keychain.aggsig_verify_partial_sig( - &partial_tx.id, + let res = context.verify_partial_sig( + &keychain.secp(), &sender_sig_part, &sender_pub_nonce, &sender_pub_blinding, @@ -191,9 +202,9 @@ fn handle_sender_confirmation( } // Just calculate our sig part again instead of storing - let our_sig_part = keychain - .aggsig_calculate_partial_sig( - &partial_tx.id, + let our_sig_part = context + .calculate_partial_sig( + &keychain.secp(), &sender_pub_nonce, tx.fee(), tx.lock_height(), @@ -201,9 +212,9 @@ fn handle_sender_confirmation( .unwrap(); // And the final signature - let final_sig = keychain - .aggsig_calculate_final_sig( - &partial_tx.id, + let final_sig = context + .calculate_final_sig( + &keychain.secp(), &sender_sig_part, &our_sig_part, &sender_pub_nonce, @@ -211,12 +222,13 @@ fn handle_sender_confirmation( .unwrap(); // Calculate the final public key (for our own sanity check) - let final_pubkey = keychain - .aggsig_calculate_final_pubkey(&partial_tx.id, &sender_pub_blinding) + let final_pubkey = context + .calculate_final_pubkey(&keychain.secp(), &sender_pub_blinding) .unwrap(); // Check our final sig verifies - let res = keychain.aggsig_verify_final_sig_build_msg( + let res = context.verify_final_sig_build_msg( + &keychain.secp(), &final_sig, &final_pubkey, tx.fee(), @@ -229,7 +241,7 @@ fn handle_sender_confirmation( } let final_tx = build_final_transaction( - &partial_tx.id, + &mut context, config, keychain, amount, @@ -254,13 +266,15 @@ fn handle_sender_confirmation( // Return what we've actually posted // TODO - why build_partial_tx here? Just a naming issue? 
let mut partial_tx = build_partial_tx( - &partial_tx.id, + &context, keychain, amount, kernel_offset, Some(final_sig), tx, ); + + context_manager.save_context(context); partial_tx.phase = PartialTxPhase::ReceiverConfirmation; Ok(partial_tx) } @@ -285,10 +299,12 @@ impl Handler for WalletReceiver { } if let Ok(Some(partial_tx)) = struct_body { + let mut acm = AGGSIG_CONTEXT_MANAGER.write().unwrap(); match partial_tx.phase { PartialTxPhase::SenderInitiation => { let resp_tx = handle_sender_initiation( &self.config, + &mut acm, &self.keychain, &partial_tx, ).map_err(|e| { @@ -304,6 +320,7 @@ impl Handler for WalletReceiver { PartialTxPhase::SenderConfirmation => { let resp_tx = handle_sender_confirmation( &self.config, + &mut acm, &self.keychain, &partial_tx, fluff, @@ -393,14 +410,15 @@ pub fn receive_coinbase( debug!(LOGGER, "receive_coinbase: {:?}", block_fees); - let (out, kern) = Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height) - .context(ErrorKind::Keychain)?; + let (out, kern) = + reward::output(&keychain, &key_id, block_fees.fees, block_fees.height).unwrap(); + /* .context(ErrorKind::Keychain)?; */ Ok((out, kern, block_fees)) } /// builds a final transaction after the aggregated sig exchange fn build_final_transaction( - tx_id: &Uuid, + context: &mut aggsig::Context, config: &WalletConfig, keychain: &Keychain, amount: u64, @@ -443,7 +461,7 @@ fn build_final_transaction( // Get output we created in earlier step // TODO: will just be one for now, support multiple later - let output_vec = keychain.aggsig_get_outputs(tx_id); + let output_vec = context.get_outputs(); // operate within a lock on wallet data let (key_id, derivation) = WalletData::with_wallet(&config.data_file_dir, |wallet_data| { diff --git a/wallet/src/restore.rs b/wallet/src/restore.rs index a44fb46c3..2e3efe1a6 100644 --- a/wallet/src/restore.rs +++ b/wallet/src/restore.rs @@ -22,6 +22,7 @@ use core::core::transaction::ProofMessageElements; use types::{Error, ErrorKind, 
MerkleProofWrapper, OutputData, OutputStatus, WalletConfig, WalletData}; use byteorder::{BigEndian, ByteOrder}; +use libwallet::proof; pub fn get_chain_height(config: &WalletConfig) -> Result { let url = format!("{}/v1/chain", config.check_node_api_http_addr); @@ -142,9 +143,13 @@ fn find_outputs_with_key( // message 3 times, indicating a strong match. Also, sec_key provided // to unwind in this case will be meaningless. With only the nonce known // only the first 32 bytes of the recovered message will be accurate - let info = keychain - .rewind_range_proof(&skey, output.commit, None, output.range_proof().unwrap()) - .unwrap(); + let info = proof::rewind( + keychain, + &skey, + output.commit, + None, + output.range_proof().unwrap(), + ).unwrap(); let message = ProofMessageElements::from_proof_message(info.message).unwrap(); let value = message.value(); if value.is_err() { @@ -176,9 +181,13 @@ fn find_outputs_with_key( } found = true; // we have a partial match, let's just confirm - let info = keychain - .rewind_range_proof(key_id, output.commit, None, output.range_proof().unwrap()) - .unwrap(); + let info = proof::rewind( + keychain, + key_id, + output.commit, + None, + output.range_proof().unwrap(), + ).unwrap(); let message = ProofMessageElements::from_proof_message(info.message).unwrap(); let value = message.value(); if value.is_err() || !message.zeroes_correct() { diff --git a/wallet/src/sender.rs b/wallet/src/sender.rs index c567d9046..32bfaa18b 100644 --- a/wallet/src/sender.rs +++ b/wallet/src/sender.rs @@ -18,7 +18,8 @@ use uuid::Uuid; use api; use client; use checker; -use core::core::{amount_to_hr_string, build, Transaction}; +use core::core::{amount_to_hr_string, Transaction}; +use libwallet::{aggsig, build}; use core::ser; use keychain::{BlindSum, BlindingFactor, Identifier, Keychain}; use receiver::TxWrapper; @@ -79,24 +80,16 @@ pub fn issue_send_tx( // computes total blinding excess xS -Sender picks random nonce kS // -Sender posts inputs, outputs, 
Message M=fee, xS * G and kS * G to Receiver // - // Create a new aggsig context let tx_id = Uuid::new_v4(); let skey = blind_offset .secret_key(&keychain.secp()) .context(ErrorKind::Keychain)?; - keychain - .aggsig_create_context(&tx_id, skey) - .context(ErrorKind::Keychain)?; - // let kernel_key = kernel_blind - // .secret_key(keychain.secp()) - // .context(ErrorKind::Keychain)?; - // let kernel_offset = keychain - // .secp() - // .commit(0, kernel_key) - // .context(ErrorKind::Keychain)?; + // Create a new aggsig context + let mut context_manager = aggsig::ContextManager::new(); + let context = context_manager.create_context(keychain.secp(), &tx_id, skey); - let partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, None, tx); + let partial_tx = build_partial_tx(&context, keychain, amount_with_fee, kernel_offset, None, tx); // Closure to acquire wallet lock and lock the coins being spent // so we avoid accidental double spend attempt. @@ -178,8 +171,8 @@ pub fn issue_send_tx( */ let (_amount, recp_pub_blinding, recp_pub_nonce, kernel_offset, sig, tx) = read_partial_tx(keychain, &res.unwrap())?; - let res = keychain.aggsig_verify_partial_sig( - &tx_id, + let res = context.verify_partial_sig( + &keychain.secp(), &sig.unwrap(), &recp_pub_nonce, &recp_pub_blinding, @@ -191,15 +184,20 @@ pub fn issue_send_tx( return Err(ErrorKind::Signature("Partial Sig from recipient invalid."))?; } - let sig_part = keychain - .aggsig_calculate_partial_sig(&tx_id, &recp_pub_nonce, tx.fee(), tx.lock_height()) + let sig_part = context + .calculate_partial_sig( + &keychain.secp(), + &recp_pub_nonce, + tx.fee(), + tx.lock_height(), + ) .unwrap(); // Build the next stage, containing sS (and our pubkeys again, for the // recipient's convenience) offset has not been modified during tx building, // so pass it back in let mut partial_tx = build_partial_tx( - &tx_id, + &context, keychain, amount_with_fee, kernel_offset, @@ -225,6 +223,9 @@ pub fn issue_send_tx( return 
Err(e); } + // Not really necessary here + context_manager.save_context(context); + // All good so update_wallet()?; Ok(()) @@ -457,7 +458,7 @@ fn inputs_and_change( #[cfg(test)] mod test { - use core::core::build; + use libwallet::build; use keychain::Keychain; #[test] diff --git a/wallet/src/types.rs b/wallet/src/types.rs index 30d2e0691..1377463a1 100644 --- a/wallet/src/types.rs +++ b/wallet/src/types.rs @@ -24,6 +24,7 @@ use std::path::Path; use std::path::MAIN_SEPARATOR; use std::collections::HashMap; use std::cmp::min; +use libwallet::aggsig; use serde; use serde_json; @@ -777,14 +778,14 @@ pub struct PartialTx { /// aggsig_tx_context should contain the private key/nonce pair /// the resulting partial tx will contain the corresponding public keys pub fn build_partial_tx( - transaction_id: &Uuid, + context: &aggsig::Context, keychain: &keychain::Keychain, receive_amount: u64, kernel_offset: BlindingFactor, part_sig: Option, tx: Transaction, ) -> PartialTx { - let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(transaction_id); + let (pub_excess, pub_nonce) = context.get_public_keys(keychain.secp()); let mut pub_excess = pub_excess.serialize_vec(keychain.secp(), true).clone(); let len = pub_excess.clone().len(); let pub_excess: Vec<_> = pub_excess.drain(0..len).collect(); @@ -795,7 +796,7 @@ pub fn build_partial_tx( PartialTx { phase: PartialTxPhase::SenderInitiation, - id: transaction_id.clone(), + id: context.transaction_id, amount: receive_amount, public_blind_excess: util::to_hex(pub_excess), public_nonce: util::to_hex(pub_nonce), diff --git a/wallet/tests/libwallet.rs b/wallet/tests/libwallet.rs new file mode 100644 index 000000000..92a5c6443 --- /dev/null +++ b/wallet/tests/libwallet.rs @@ -0,0 +1,453 @@ +// Copyright 2018 The Grin Developers +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! libwallet specific tests +extern crate grin_core as core; +extern crate grin_keychain as keychain; +extern crate grin_util as util; +extern crate grin_wallet as wallet; + +extern crate rand; +extern crate uuid; + +use uuid::Uuid; +use util::{kernel_sig_msg, secp}; +use util::secp::key::SecretKey; +use util::secp::pedersen::ProofMessage; +use keychain::{BlindSum, BlindingFactor, Keychain}; +use wallet::libwallet::{aggsig, proof}; + +use rand::thread_rng; + +#[test] +fn aggsig_sender_receiver_interaction() { + let sender_keychain = Keychain::from_random_seed().unwrap(); + let receiver_keychain = Keychain::from_random_seed().unwrap(); + let mut sender_aggsig_cm = aggsig::ContextManager::new(); + let mut receiver_aggsig_cm = aggsig::ContextManager::new(); + + // tx identifier for wallet interaction + let tx_id = Uuid::new_v4(); + + // Calculate the kernel excess here for convenience. + // Normally this would happen during transaction building. 
+ let kernel_excess = { + let skey1 = sender_keychain + .derived_key(&sender_keychain.derive_key_id(1).unwrap()) + .unwrap(); + + let skey2 = receiver_keychain + .derived_key(&receiver_keychain.derive_key_id(1).unwrap()) + .unwrap(); + + let keychain = Keychain::from_random_seed().unwrap(); + let blinding_factor = keychain + .blind_sum(&BlindSum::new() + .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) + .add_blinding_factor(BlindingFactor::from_secret_key(skey2))) + .unwrap(); + + keychain + .secp() + .commit(0, blinding_factor.secret_key(&keychain.secp()).unwrap()) + .unwrap() + }; + + // sender starts the tx interaction + let (sender_pub_excess, sender_pub_nonce) = { + let keychain = sender_keychain.clone(); + + let skey = keychain + .derived_key(&keychain.derive_key_id(1).unwrap()) + .unwrap(); + + // dealing with an input here so we need to negate the blinding_factor + // rather than use it as is + let bs = BlindSum::new(); + let blinding_factor = keychain + .blind_sum(&bs.sub_blinding_factor(BlindingFactor::from_secret_key(skey))) + .unwrap(); + + let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); + + let cx = sender_aggsig_cm.create_context(&keychain.secp(), &tx_id, blind); + cx.get_public_keys(&keychain.secp()) + }; + + // receiver receives partial tx + let (receiver_pub_excess, receiver_pub_nonce, sig_part) = { + let keychain = receiver_keychain.clone(); + let key_id = keychain.derive_key_id(1).unwrap(); + + // let blind = blind_sum.secret_key(&keychain.secp())?; + let blind = keychain.derived_key(&key_id).unwrap(); + + let mut cx = receiver_aggsig_cm.create_context(&keychain.secp(), &tx_id, blind); + let (pub_excess, pub_nonce) = cx.get_public_keys(&keychain.secp()); + cx.add_output(&key_id); + + let sig_part = cx.calculate_partial_sig(&keychain.secp(), &sender_pub_nonce, 0, 0) + .unwrap(); + receiver_aggsig_cm.save_context(cx); + (pub_excess, pub_nonce, sig_part) + }; + + // check the sender can verify the partial signature + 
// received in the response back from the receiver + { + let keychain = sender_keychain.clone(); + let cx = sender_aggsig_cm.get_context(&tx_id); + let sig_verifies = cx.verify_partial_sig( + &keychain.secp(), + &sig_part, + &receiver_pub_nonce, + &receiver_pub_excess, + 0, + 0, + ); + assert!(sig_verifies); + } + + // now sender signs with their key + let sender_sig_part = { + let keychain = sender_keychain.clone(); + let cx = sender_aggsig_cm.get_context(&tx_id); + cx.calculate_partial_sig(&keychain.secp(), &receiver_pub_nonce, 0, 0) + .unwrap() + }; + + // check the receiver can verify the partial signature + // received by the sender + { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + let sig_verifies = cx.verify_partial_sig( + &keychain.secp(), + &sender_sig_part, + &sender_pub_nonce, + &sender_pub_excess, + 0, + 0, + ); + assert!(sig_verifies); + } + + // Receiver now builds final signature from sender and receiver parts + let (final_sig, final_pubkey) = { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + + // Receiver recreates their partial sig (we do not maintain state from earlier) + let our_sig_part = cx.calculate_partial_sig(&keychain.secp(), &sender_pub_nonce, 0, 0) + .unwrap(); + + // Receiver now generates final signature from the two parts + let final_sig = cx.calculate_final_sig( + &keychain.secp(), + &sender_sig_part, + &our_sig_part, + &sender_pub_nonce, + ).unwrap(); + + // Receiver calculates the final public key (to verify sig later) + let final_pubkey = cx.calculate_final_pubkey(&keychain.secp(), &sender_pub_excess) + .unwrap(); + + (final_sig, final_pubkey) + }; + + // Receiver checks the final signature verifies + { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + + // Receiver check the final signature verifies + let sig_verifies = + cx.verify_final_sig_build_msg(&keychain.secp(), &final_sig, 
&final_pubkey, 0, 0); + assert!(sig_verifies); + } + + // Check we can verify the sig using the kernel excess + { + let keychain = Keychain::from_random_seed().unwrap(); + + let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap(); + + let sig_verifies = + aggsig::verify_single_from_commit(&keychain.secp(), &final_sig, &msg, &kernel_excess); + + assert!(sig_verifies); + } +} + +#[test] +fn aggsig_sender_receiver_interaction_offset() { + let sender_keychain = Keychain::from_random_seed().unwrap(); + let receiver_keychain = Keychain::from_random_seed().unwrap(); + let mut sender_aggsig_cm = aggsig::ContextManager::new(); + let mut receiver_aggsig_cm = aggsig::ContextManager::new(); + + // tx identifier for wallet interaction + let tx_id = Uuid::new_v4(); + + // This is the kernel offset that we use to split the key + // Summing these at the block level prevents the + // kernels from being used to reconstruct (or identify) individual transactions + let kernel_offset = SecretKey::new(&sender_keychain.secp(), &mut thread_rng()); + + // Calculate the kernel excess here for convenience. + // Normally this would happen during transaction building. 
+ let kernel_excess = { + let skey1 = sender_keychain + .derived_key(&sender_keychain.derive_key_id(1).unwrap()) + .unwrap(); + + let skey2 = receiver_keychain + .derived_key(&receiver_keychain.derive_key_id(1).unwrap()) + .unwrap(); + + let keychain = Keychain::from_random_seed().unwrap(); + let blinding_factor = keychain + .blind_sum(&BlindSum::new() + .sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) + .add_blinding_factor(BlindingFactor::from_secret_key(skey2)) + // subtract the kernel offset here like as would when + // verifying a kernel signature + .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))) + .unwrap(); + + keychain + .secp() + .commit(0, blinding_factor.secret_key(&keychain.secp()).unwrap()) + .unwrap() + }; + + // sender starts the tx interaction + let (sender_pub_excess, sender_pub_nonce) = { + let keychain = sender_keychain.clone(); + + let skey = keychain + .derived_key(&keychain.derive_key_id(1).unwrap()) + .unwrap(); + + // dealing with an input here so we need to negate the blinding_factor + // rather than use it as is + let blinding_factor = keychain + .blind_sum(&BlindSum::new() + .sub_blinding_factor(BlindingFactor::from_secret_key(skey)) + // subtract the kernel offset to create an aggsig context + // with our "split" key + .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))) + .unwrap(); + + let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); + + let cx = sender_aggsig_cm.create_context(&keychain.secp(), &tx_id, blind); + cx.get_public_keys(&keychain.secp()) + }; + + // receiver receives partial tx + let (receiver_pub_excess, receiver_pub_nonce, sig_part) = { + let keychain = receiver_keychain.clone(); + let key_id = keychain.derive_key_id(1).unwrap(); + + let blind = keychain.derived_key(&key_id).unwrap(); + + let mut cx = receiver_aggsig_cm.create_context(&keychain.secp(), &tx_id, blind); + let (pub_excess, pub_nonce) = cx.get_public_keys(&keychain.secp()); + 
cx.add_output(&key_id); + + let sig_part = cx.calculate_partial_sig(&keychain.secp(), &sender_pub_nonce, 0, 0) + .unwrap(); + receiver_aggsig_cm.save_context(cx); + (pub_excess, pub_nonce, sig_part) + }; + + // check the sender can verify the partial signature + // received in the response back from the receiver + { + let keychain = sender_keychain.clone(); + let cx = sender_aggsig_cm.get_context(&tx_id); + let sig_verifies = cx.verify_partial_sig( + &keychain.secp(), + &sig_part, + &receiver_pub_nonce, + &receiver_pub_excess, + 0, + 0, + ); + assert!(sig_verifies); + } + + // now sender signs with their key + let sender_sig_part = { + let keychain = sender_keychain.clone(); + let cx = sender_aggsig_cm.get_context(&tx_id); + cx.calculate_partial_sig(&keychain.secp(), &receiver_pub_nonce, 0, 0) + .unwrap() + }; + + // check the receiver can verify the partial signature + // received by the sender + { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + let sig_verifies = cx.verify_partial_sig( + &keychain.secp(), + &sender_sig_part, + &sender_pub_nonce, + &sender_pub_excess, + 0, + 0, + ); + assert!(sig_verifies); + } + + // Receiver now builds final signature from sender and receiver parts + let (final_sig, final_pubkey) = { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + + // Receiver recreates their partial sig (we do not maintain state from earlier) + let our_sig_part = cx.calculate_partial_sig(&keychain.secp(), &sender_pub_nonce, 0, 0) + .unwrap(); + + // Receiver now generates final signature from the two parts + let final_sig = cx.calculate_final_sig( + &keychain.secp(), + &sender_sig_part, + &our_sig_part, + &sender_pub_nonce, + ).unwrap(); + + // Receiver calculates the final public key (to verify sig later) + let final_pubkey = cx.calculate_final_pubkey(&keychain.secp(), &sender_pub_excess) + .unwrap(); + + (final_sig, final_pubkey) + }; + + // Receiver checks the 
final signature verifies + { + let keychain = receiver_keychain.clone(); + let cx = receiver_aggsig_cm.get_context(&tx_id); + + // Receiver check the final signature verifies + let sig_verifies = + cx.verify_final_sig_build_msg(&keychain.secp(), &final_sig, &final_pubkey, 0, 0); + assert!(sig_verifies); + } + + // Check we can verify the sig using the kernel excess + { + let keychain = Keychain::from_random_seed().unwrap(); + + let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap(); + + let sig_verifies = + aggsig::verify_single_from_commit(&keychain.secp(), &final_sig, &msg, &kernel_excess); + + assert!(sig_verifies); + } +} + +#[test] +fn test_rewind_range_proof() { + let keychain = Keychain::from_random_seed().unwrap(); + let key_id = keychain.derive_key_id(1).unwrap(); + let commit = keychain.commit(5, &key_id).unwrap(); + let msg = ProofMessage::from_bytes(&[0u8; 64]); + let extra_data = [99u8; 64]; + + let proof = proof::create( + &keychain, + 5, + &key_id, + commit, + Some(extra_data.to_vec().clone()), + msg, + ).unwrap(); + let proof_info = proof::rewind( + &keychain, + &key_id, + commit, + Some(extra_data.to_vec().clone()), + proof, + ).unwrap(); + + assert_eq!(proof_info.success, true); + + // now check the recovered message is "empty" (but not truncated) i.e. 
all + // zeroes + //Value is in the message in this case + assert_eq!( + proof_info.message, + secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE]) + ); + + let key_id2 = keychain.derive_key_id(2).unwrap(); + + // cannot rewind with a different nonce + let proof_info = proof::rewind( + &keychain, + &key_id2, + commit, + Some(extra_data.to_vec().clone()), + proof, + ).unwrap(); + // With bullet proofs, if you provide the wrong nonce you'll get gibberish back + // as opposed to a failure to recover the message + assert_ne!( + proof_info.message, + secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE]) + ); + assert_eq!(proof_info.value, 0); + + // cannot rewind with a commitment to the same value using a different key + let commit2 = keychain.commit(5, &key_id2).unwrap(); + let proof_info = proof::rewind( + &keychain, + &key_id, + commit2, + Some(extra_data.to_vec().clone()), + proof, + ).unwrap(); + assert_eq!(proof_info.success, false); + assert_eq!(proof_info.value, 0); + + // cannot rewind with a commitment to a different value + let commit3 = keychain.commit(4, &key_id).unwrap(); + let proof_info = proof::rewind( + &keychain, + &key_id, + commit3, + Some(extra_data.to_vec().clone()), + proof, + ).unwrap(); + assert_eq!(proof_info.success, false); + assert_eq!(proof_info.value, 0); + + // cannot rewind with wrong extra committed data + let commit3 = keychain.commit(4, &key_id).unwrap(); + let wrong_extra_data = [98u8; 64]; + let _should_err = proof::rewind( + &keychain, + &key_id, + commit3, + Some(wrong_extra_data.to_vec().clone()), + proof, + ).unwrap(); + + assert_eq!(proof_info.success, false); + assert_eq!(proof_info.value, 0); +}