mirror of https://github.com/mimblewimble/grin.git
[WIP] txpool (tx validation) using block sums for full validation (#1567)

tx validation (txpool) rework/simplify

parent 82b785282c
commit e72d8b58e4

22 changed files with 521 additions and 712 deletions
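For context: "block sums" are the running totals of output commitments (utxo_sum) and kernel excess commitments (kernel_sum) stored per block. Full validation of a tx (or an aggregate of txs) against chain state then reduces to checking that the Mimblewimble balance carries forward. In the usual notation (a sketch, not verbatim from the codebase; overage is the net coinbase/fee adjustment on the value generator H, offset the accumulated kernel offset on G):

    utxo_sum'   = utxo_sum + sum(tx outputs) - sum(tx inputs)
    kernel_sum' = kernel_sum + sum(tx kernel excesses)
    utxo_sum'  == kernel_sum' + offset*G + overage*H

If the final equality holds, the primed sums become the post-tx state; this is what the pool code below computes via verify_kernel_sums.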
@@ -148,8 +148,7 @@ impl OutputHandler {
 			.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
 			.map(|output| {
 				OutputPrintable::from_output(output, w(&self.chain), Some(&header), include_proof)
-			})
-			.collect();
+			}).collect();

 		Ok(BlockOutputs {
 			header: BlockHeaderInfo::from_header(&header),

@@ -737,12 +736,10 @@ impl PoolPushHandler {
 			.and_then(move |wrapper: TxWrapper| {
 				util::from_hex(wrapper.tx_hex)
 					.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
-			})
-			.and_then(move |tx_bin| {
+			}).and_then(move |tx_bin| {
 				ser::deserialize(&mut &tx_bin[..])
 					.map_err(|e| ErrorKind::RequestError(format!("Bad request: {}", e)).into())
-			})
-			.and_then(move |tx: Transaction| {
+			}).and_then(move |tx: Transaction| {
 				let source = pool::TxSource {
 					debug_name: "push-api".to_string(),
 					identifier: "?.?.?.?".to_string(),

@@ -760,7 +757,7 @@ impl PoolPushHandler {
 				let mut tx_pool = pool_arc.write().unwrap();
 				let header = tx_pool.blockchain.chain_head().unwrap();
 				tx_pool
-					.add_to_pool(source, tx, !fluff, &header.hash())
+					.add_to_pool(source, tx, !fluff, &header)
 					.map_err(|e| {
 						error!(LOGGER, "update_pool: failed with error: {:?}", e);
 						ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
@@ -431,6 +431,7 @@ impl Chain {
 		}
 	}

+	/// TODO - where do we call this from? And do we need a rewind first?
 	/// For the given commitment find the unspent output and return the
 	/// associated Return an error if the output does not exist or has been
 	/// spent. This querying is done in a way that is consistent with the

@@ -445,33 +446,20 @@ impl Chain {
 		}
 	}

+	pub fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), Error> {
+		let mut txhashset = self.txhashset.write().unwrap();
+		txhashset::extending_readonly(&mut txhashset, |extension| {
+			extension.rewind(header)?;
+			extension.validate_utxo_fast(tx.inputs(), tx.outputs())?;
+			Ok(())
+		})
+	}
+
 	fn next_block_height(&self) -> Result<u64, Error> {
 		let bh = self.head_header()?;
 		Ok(bh.height + 1)
 	}

-	/// Validate a vec of "raw" transactions against a known chain state
-	/// at the block with the specified block hash.
-	/// Specifying a "pre_tx" if we need to adjust the state, for example when
-	/// validating the txs in the stempool we adjust the state based on the
-	/// txpool.
-	pub fn validate_raw_txs(
-		&self,
-		txs: Vec<Transaction>,
-		pre_tx: Option<Transaction>,
-		block_hash: &Hash,
-	) -> Result<Vec<Transaction>, Error> {
-		// Get header so we can rewind chain state correctly.
-		let header = self.store.get_block_header(block_hash)?;
-
-		let mut txhashset = self.txhashset.write().unwrap();
-		txhashset::extending_readonly(&mut txhashset, |extension| {
-			extension.rewind(&header)?;
-			let valid_txs = extension.validate_raw_txs(txs, pre_tx)?;
-			Ok(valid_txs)
-		})
-	}
-
 	/// Verify we are not attempting to spend a coinbase output
 	/// that has not yet sufficiently matured.
 	pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
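The new Chain::validate_tx keeps the whole check inside a short-lived readonly extension. A minimal sketch of a call site, assuming the caller holds a Chain and validates against the current head (head_header is used elsewhere in this file):

    // Sketch only: validate a pool tx against the current chain head.
    fn validate_at_head(chain: &Chain, tx: &Transaction) -> Result<(), Error> {
        let header = chain.head_header()?;
        // Rewinds to `header` internally, then runs the fast UTXO checks.
        chain.validate_tx(tx, &header)
    }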
@@ -876,6 +864,13 @@ impl Chain {
 			.map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into())
 	}

+	/// Get block_sums by header hash.
+	pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
+		self.store
+			.get_block_sums(h)
+			.map_err(|e| ErrorKind::StoreErr(e, "chain get block_sums".to_owned()).into())
+	}
+
 	/// Gets the block header at the provided height
 	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
 		self.store

@@ -519,6 +519,10 @@ fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Re
 /// based on block_sums of previous block, accounting for the inputs|outputs|kernels
 /// of the new block.
 fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
+	// First check all our inputs exist in the current UTXO set.
+	// And that we are not introducing any duplicate outputs in the UTXO set.
+	ext.validate_utxo_fast(b.inputs(), b.outputs())?;
+
 	// Retrieve the block_sums for the previous block.
 	let block_sums = ext.batch.get_block_sums(&b.header.previous)?;
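A small usage sketch for the new accessor, reading the sums stored for the chain head (header.hash() and head_header both appear elsewhere in this diff):

    // Sketch: block_sums are keyed by header hash.
    fn sums_at_head(chain: &Chain) -> Result<BlockSums, Error> {
        let head = chain.head_header()?;
        chain.get_block_sums(&head.hash())
    }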
@@ -90,6 +90,14 @@ impl ChainStore {
 		self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec()))
 	}

+	pub fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, Error> {
+		option_to_not_found(
+			self.db
+				.get_ser(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())),
+			&format!("Block sums for block: {}", bh),
+		)
+	}
+
 	pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
 		{
 			let mut header_cache = self.header_cache.write().unwrap();
@@ -29,9 +29,7 @@ use core::core::committed::Committed;
 use core::core::hash::{Hash, Hashed};
 use core::core::merkle_proof::MerkleProof;
 use core::core::pmmr::{self, PMMR};
-use core::core::{
-	Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, Transaction, TxKernel,
-};
+use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, TxKernel};
 use core::global;
 use core::ser::{PMMRIndexHashable, PMMRable};
@@ -439,115 +437,6 @@ impl<'a> Extension<'a> {
 		}
 	}

-	// Rewind the MMR backend to undo applying a raw tx to the txhashset extension.
-	// This is used during txpool validation to undo an invalid tx.
-	fn rewind_raw_tx(
-		&mut self,
-		output_pos: u64,
-		kernel_pos: u64,
-		rewind_rm_pos: &Bitmap,
-	) -> Result<(), Error> {
-		self.rewind_to_pos(output_pos, kernel_pos, rewind_rm_pos)?;
-		Ok(())
-	}
-
-	/// Apply a "raw" transaction to the txhashset.
-	/// We will never commit a txhashset extension that includes raw txs.
-	/// But we can use this when validating txs in the tx pool.
-	/// If we can add a tx to the tx pool and then successfully add the
-	/// aggregated tx from the tx pool to the current chain state (via a
-	/// txhashset extension) then we know the tx pool is valid (including the
-	/// new tx).
-	pub fn apply_raw_tx(&mut self, tx: &Transaction) -> Result<(), Error> {
-		// This should *never* be called on a writeable extension...
-		assert!(
-			self.rollback,
-			"applied raw_tx to writeable txhashset extension"
-		);
-
-		// Checkpoint the MMR positions before we apply the new tx,
-		// anything goes wrong we will rewind to these positions.
-		let output_pos = self.output_pmmr.unpruned_size();
-		let kernel_pos = self.kernel_pmmr.unpruned_size();
-
-		// Build bitmap of output pos spent (as inputs) by this tx for rewind.
-		let rewind_rm_pos = tx
-			.inputs()
-			.iter()
-			.filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok())
-			.map(|x| x as u32)
-			.collect();
-
-		for ref output in tx.outputs() {
-			match self.apply_output(output) {
-				Ok(pos) => {
-					self.batch.save_output_pos(&output.commitment(), pos)?;
-				}
-				Err(e) => {
-					self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
-					return Err(e);
-				}
-			}
-		}
-
-		for ref input in tx.inputs() {
-			if let Err(e) = self.apply_input(input) {
-				self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
-				return Err(e);
-			}
-		}
-
-		for ref kernel in tx.kernels() {
-			if let Err(e) = self.apply_kernel(kernel) {
-				self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
-				return Err(e);
-			}
-		}
-
-		Ok(())
-	}
-
-	/// Validate a vector of "raw" transactions against the current chain state.
-	/// We support rewind on a "dirty" txhashset - so we can apply each tx in
-	/// turn, rewinding if any particular tx is not valid and continuing
-	/// through the vec of txs provided. This allows us to efficiently apply
-	/// all the txs, filtering out those that are not valid and returning the
-	/// final vec of txs that were successfully validated against the txhashset.
-	///
-	/// Note: We also pass in a "pre_tx". This tx is applied to and validated
-	/// before we start applying the vec of txs. We use this when validating
-	/// txs in the stempool as we need to account for txs in the txpool as
-	/// well (new_tx + stempool + txpool + txhashset). So we aggregate the
-	/// contents of the txpool into a single aggregated tx and pass it in here
-	/// as the "pre_tx" so we apply it to the txhashset before we start
-	/// validating the stempool txs.
-	/// This is optional and we pass in None when validating the txpool txs
-	/// themselves.
-	///
-	pub fn validate_raw_txs(
-		&mut self,
-		txs: Vec<Transaction>,
-		pre_tx: Option<Transaction>,
-	) -> Result<Vec<Transaction>, Error> {
-		let mut valid_txs = vec![];
-
-		// First apply the "pre_tx" to account for any state that need adding to
-		// the chain state before we can validate our vec of txs.
-		// This is the aggregate tx from the txpool if we are validating the stempool.
-		if let Some(tx) = pre_tx {
-			self.apply_raw_tx(&tx)?;
-		}
-
-		// Now validate each tx, rewinding any tx (and only that tx)
-		// if it fails to validate successfully.
-		for tx in txs {
-			if self.apply_raw_tx(&tx).is_ok() {
-				valid_txs.push(tx);
-			}
-		}
-		Ok(valid_txs)
-	}
-
 	/// Verify we are not attempting to spend any coinbase outputs
 	/// that have not sufficiently matured.
 	pub fn verify_coinbase_maturity(
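The deleted code validated pool txs by mutating the MMRs and rewinding on failure. A self-contained toy of that checkpoint-and-rewind pattern, for contrast with the pure block-sums arithmetic that replaces it (a Vec stands in for the MMR set):

    // Toy model of the removed approach: apply, check, rewind on failure.
    fn apply_or_rewind<T: Clone>(
        state: &mut Vec<T>,
        items: Vec<T>,
        valid: impl Fn(&[T]) -> bool,
    ) -> bool {
        let checkpoint = state.len(); // like the unpruned_size() checkpoints above
        state.extend(items);
        if valid(state) {
            true
        } else {
            state.truncate(checkpoint); // rewind_raw_tx in miniature
            false
        }
    }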
@@ -589,9 +478,25 @@ impl<'a> Extension<'a> {
 		Ok(())
 	}

-	/// Apply a new set of blocks on top the existing sum trees. Blocks are
-	/// applied in order of the provided Vec. If pruning is enabled, inputs also
-	/// prune MMR data.
+	// Inputs _must_ spend unspent outputs.
+	// Outputs _must not_ introduce duplicate commitments.
+	pub fn validate_utxo_fast(
+		&mut self,
+		inputs: &Vec<Input>,
+		outputs: &Vec<Output>,
+	) -> Result<(), Error> {
+		for out in outputs {
+			self.validate_utxo_output(out)?;
+		}
+
+		for input in inputs {
+			self.validate_utxo_input(input)?;
+		}
+
+		Ok(())
+	}
+
+	/// Apply a new block to the existing state.
 	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
 		for out in b.outputs() {
 			let pos = self.apply_output(out)?;
@@ -610,6 +515,18 @@ impl<'a> Extension<'a> {
 		Ok(())
 	}

+	// TODO - Is this sufficient?
+	fn validate_utxo_input(&mut self, input: &Input) -> Result<(), Error> {
+		let commit = input.commitment();
+		let pos_res = self.batch.get_output_pos(&commit);
+		if let Ok(pos) = pos_res {
+			if let Some(_) = self.output_pmmr.get_data(pos) {
+				return Ok(());
+			}
+		}
+		Err(ErrorKind::AlreadySpent(commit).into())
+	}
+
 	fn apply_input(&mut self, input: &Input) -> Result<(), Error> {
 		let commit = input.commitment();
 		let pos_res = self.batch.get_output_pos(&commit);
@@ -648,6 +565,19 @@ impl<'a> Extension<'a> {
 		Ok(())
 	}

+	/// TODO - Is this sufficient?
+	fn validate_utxo_output(&mut self, out: &Output) -> Result<(), Error> {
+		let commit = out.commitment();
+		if let Ok(pos) = self.batch.get_output_pos(&commit) {
+			if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
+				if out_mmr.commitment() == commit {
+					return Err(ErrorKind::DuplicateCommitment(commit).into());
+				}
+			}
+		}
+		Ok(())
+	}
+
 	fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
 		let commit = out.commitment();
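Together, the two helpers define the fast UTXO rules. A self-contained model of the semantics, using a HashMap in place of the output PMMR plus position index (illustrative only):

    use std::collections::HashMap;

    // commitment -> currently unspent?
    struct UtxoModel(HashMap<[u8; 33], bool>);

    impl UtxoModel {
        // Mirrors validate_utxo_input: an input must hit a live (unpruned) leaf.
        fn validate_input(&self, commit: &[u8; 33]) -> Result<(), &'static str> {
            match self.0.get(commit) {
                Some(true) => Ok(()),
                _ => Err("AlreadySpent"),
            }
        }

        // Mirrors validate_utxo_output: a new output must not duplicate a live leaf;
        // a spent (pruned) position may be reused.
        fn validate_output(&self, commit: &[u8; 33]) -> Result<(), &'static str> {
            match self.0.get(commit) {
                Some(true) => Err("DuplicateCommitment"),
                _ => Ok(()),
            }
        }
    }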
@@ -17,7 +17,6 @@ extern crate grin_core as core;
 extern crate grin_keychain as keychain;
 extern crate grin_store as store;
 extern crate grin_util as util;
-extern crate grin_wallet as wallet;

 use std::collections::HashSet;
 use std::fs::{self, File, OpenOptions};
@@ -27,113 +26,13 @@ use std::sync::Arc;

 use chain::store::ChainStore;
 use chain::txhashset;
-use chain::types::Tip;
-use core::core::{Block, BlockHeader};
-use core::pow::Difficulty;
-use keychain::{ExtKeychain, Keychain};
+use core::core::BlockHeader;
 use util::file;
-use wallet::libtx::{build, reward};

 fn clean_output_dir(dir_name: &str) {
 	let _ = fs::remove_dir_all(dir_name);
 }

-#[test]
-fn test_some_raw_txs() {
-	let db_root = format!(".grin_txhashset_raw_txs");
-	clean_output_dir(&db_root);
-
-	let db_env = Arc::new(store::new_env(db_root.clone()));
-
-	let chain_store = ChainStore::new(db_env).unwrap();
-	let store = Arc::new(chain_store);
-	// open the txhashset, creating a new one if necessary
-	let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None).unwrap();
-
-	let keychain = ExtKeychain::from_random_seed().unwrap();
-	let key_id1 = keychain.derive_key_id(1).unwrap();
-	let key_id2 = keychain.derive_key_id(2).unwrap();
-	let key_id3 = keychain.derive_key_id(3).unwrap();
-	let key_id4 = keychain.derive_key_id(4).unwrap();
-	let key_id5 = keychain.derive_key_id(5).unwrap();
-	let key_id6 = keychain.derive_key_id(6).unwrap();
-
-	// Create a simple block with a single coinbase output
-	// so we have something to spend.
-	let prev_header = BlockHeader::default();
-	let reward_out = reward::output(&keychain, &key_id1, 0, prev_header.height).unwrap();
-	let block = Block::new(&prev_header, vec![], Difficulty::one(), reward_out).unwrap();
-
-	// Now apply this initial block to the (currently empty) MMRs.
-	// Note: this results in an output MMR with a single leaf node.
-	// We need to be careful with pruning while processing the txs below
-	// as we cannot prune a tree with a single node in it (no sibling or parent).
-	let mut batch = store.batch().unwrap();
-	txhashset::extending(&mut txhashset, &mut batch, |extension| {
-		extension.apply_block(&block)
-	}).unwrap();
-
-	// Make sure we setup the head in the store based on block we just accepted.
-	let head = Tip::from_block(&block.header);
-	batch.save_head(&head).unwrap();
-	batch.commit().unwrap();
-
-	let coinbase_reward = 60_000_000_000;
-
-	// tx1 spends the original coinbase output from the block
-	let tx1 = build::transaction(
-		vec![
-			build::coinbase_input(coinbase_reward, key_id1.clone()),
-			build::output(100, key_id2.clone()),
-			build::output(150, key_id3.clone()),
-		],
-		&keychain,
-	).unwrap();
-
-	// tx2 attempts to "double spend" the coinbase output from the block (conflicts
-	// with tx1)
-	let tx2 = build::transaction(
-		vec![
-			build::coinbase_input(coinbase_reward, key_id1.clone()),
-			build::output(100, key_id4.clone()),
-		],
-		&keychain,
-	).unwrap();
-
-	// tx3 spends one output from tx1
-	let tx3 = build::transaction(
-		vec![
-			build::input(100, key_id2.clone()),
-			build::output(90, key_id5.clone()),
-		],
-		&keychain,
-	).unwrap();
-
-	// tx4 spends the other output from tx1 and the output from tx3
-	let tx4 = build::transaction(
-		vec![
-			build::input(150, key_id3.clone()),
-			build::input(90, key_id5.clone()),
-			build::output(220, key_id6.clone()),
-		],
-		&keychain,
-	).unwrap();
-
-	// Now validate the txs against the txhashset (via a readonly extension).
-	// Note: we use a single txhashset extension and we can continue to
-	// apply txs successfully after a failure.
-	let _ = txhashset::extending_readonly(&mut txhashset, |extension| {
-		// Note: we pass in an increasing "height" here so we can rollback
-		// each tx individually as necessary, while maintaining a long lived
-		// txhashset extension.
-		assert!(extension.apply_raw_tx(&tx1).is_ok());
-		assert!(extension.apply_raw_tx(&tx2).is_err());
-		assert!(extension.apply_raw_tx(&tx3).is_ok());
-		assert!(extension.apply_raw_tx(&tx4).is_ok());
-		Ok(())
-	});
-}
-
 #[test]
 fn test_unexpected_zip() {
 	let db_root = format!(".grin_txhashset_zip");
@@ -180,8 +79,7 @@ fn write_file(db_root: String) {
 			.join("txhashset")
 			.join("kernel")
 			.join("strange0"),
-	)
-	.unwrap();
+	).unwrap();
 	OpenOptions::new()
 		.create(true)
 		.write(true)

@@ -196,8 +94,7 @@ fn write_file(db_root: String) {
 			.join("txhashset")
 			.join("strange_dir")
 			.join("strange2"),
-	)
-	.unwrap();
+	).unwrap();
 	fs::create_dir(
 		Path::new(&db_root)
 			.join("txhashset")

@@ -213,8 +110,7 @@ fn write_file(db_root: String) {
 			.join("strange_dir")
 			.join("strange_subdir")
 			.join("strange3"),
-	)
-	.unwrap();
+	).unwrap();
 }

 fn txhashset_contains_expected_files(dirname: String, path_buf: PathBuf) -> bool {
@@ -26,7 +26,7 @@ use consensus::{self, reward, REWARD};
 use core::committed::{self, Committed};
 use core::compact_block::{CompactBlock, CompactBlockBody};
 use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
-use core::verifier_cache::{LruVerifierCache, VerifierCache};
+use core::verifier_cache::VerifierCache;
 use core::{
 	transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Transaction,
 	TransactionBody, TxKernel,

@@ -415,15 +415,8 @@ impl Block {
 		difficulty: Difficulty,
 		reward_output: (Output, TxKernel),
 	) -> Result<Block, Error> {
-		let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-		let mut block = Block::with_reward(
-			prev,
-			txs,
-			reward_output.0,
-			reward_output.1,
-			difficulty,
-			verifier_cache,
-		)?;
+		let mut block =
+			Block::with_reward(prev, txs, reward_output.0, reward_output.1, difficulty)?;

 		// Now set the pow on the header so block hashing works as expected.
 		{

@@ -497,12 +490,10 @@ impl Block {
 		reward_out: Output,
 		reward_kern: TxKernel,
 		difficulty: Difficulty,
-		verifier: Arc<RwLock<VerifierCache>>,
 	) -> Result<Block, Error> {
 		// A block is just a big transaction, aggregate as such.
-		// Note that aggregation also runs transaction validation
-		// and duplicate commitment checks.
-		let mut agg_tx = transaction::aggregate(txs, verifier)?;
+		let mut agg_tx = transaction::aggregate(txs)?;
 		// Now add the reward output and reward kernel to the aggregate tx.
 		// At this point the tx is technically invalid,
 		// but the tx body is valid if we account for the reward (i.e. as a block).
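With validation stripped out of aggregation, the call-site idiom becomes aggregate-then-validate, which the pool changes below adopt verbatim. A sketch (error conversions elided):

    // The new idiom: aggregation is pure assembly; validation is explicit.
    fn aggregate_and_validate(
        txs: Vec<Transaction>,
        cache: Arc<RwLock<VerifierCache>>,
    ) -> Result<Transaction, Error> {
        let tx = transaction::aggregate(txs)?;
        tx.validate(cache)?;
        Ok(tx)
    }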
@@ -59,7 +59,6 @@ impl Default for BlockSums {
 	}
 }

-/// WAT?
 /// It's a tuple but we can verify the "full" kernel sums on it.
 /// This means we can take a previous block_sums, apply a new block to it
 /// and verify the full kernel sums (full UTXO and kernel sets).
@@ -404,7 +404,7 @@ impl TransactionBody {
 	}

 	/// Total fee for a TransactionBody is the sum of fees of all kernels.
-	pub fn fee(&self) -> u64 {
+	fn fee(&self) -> u64 {
 		self.kernels
 			.iter()
 			.fold(0, |acc, ref x| acc.saturating_add(x.fee))

@@ -748,7 +748,8 @@ impl Transaction {
 		self.body.fee()
 	}

-	fn overage(&self) -> i64 {
+	/// Total overage across all kernels.
+	pub fn overage(&self) -> i64 {
 		self.body.overage()
 	}

@@ -822,10 +823,7 @@ pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result
 }

 /// Aggregate a vec of txs into a multi-kernel tx with cut_through.
-pub fn aggregate(
-	mut txs: Vec<Transaction>,
-	verifier: Arc<RwLock<VerifierCache>>,
-) -> Result<Transaction, Error> {
+pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
 	// convenience short-circuiting
 	if txs.is_empty() {
 		return Ok(Transaction::empty());
@@ -867,22 +865,12 @@ pub fn aggregate(
 	// * sum of all kernel offsets
 	let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);

-	// Now validate the aggregate tx to ensure we have not built something invalid.
-	// The resulting tx could be invalid for a variety of reasons -
-	// * tx too large (too many inputs|outputs|kernels)
-	// * cut-through may have invalidated the sums
-	tx.validate(verifier)?;
-
 	Ok(tx)
 }

 /// Attempt to deaggregate a multi-kernel transaction based on multiple
 /// transactions
-pub fn deaggregate(
-	mk_tx: Transaction,
-	txs: Vec<Transaction>,
-	verifier: Arc<RwLock<VerifierCache>>,
-) -> Result<Transaction, Error> {
+pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transaction, Error> {
 	let mut inputs: Vec<Input> = vec![];
 	let mut outputs: Vec<Output> = vec![];
 	let mut kernels: Vec<TxKernel> = vec![];

@@ -891,7 +879,7 @@ pub fn deaggregate(
 	// transaction
 	let mut kernel_offsets = vec![];

-	let tx = aggregate(txs, verifier.clone())?;
+	let tx = aggregate(txs)?;

 	for mk_input in mk_tx.body.inputs {
 		if !tx.body.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {

@@ -941,9 +929,6 @@ pub fn deaggregate(
 	// Build a new tx from the above data.
 	let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);

-	// Now validate the resulting tx to ensure we have not built something invalid.
-	tx.validate(verifier)?;
 	Ok(tx)
 }
@@ -137,7 +137,7 @@ fn transaction_cut_through() {
 	let vc = verifier_cache();

 	// now build a "cut_through" tx from tx1 and tx2
-	let tx3 = aggregate(vec![tx1, tx2], vc.clone()).unwrap();
+	let tx3 = aggregate(vec![tx1, tx2]).unwrap();

 	assert!(tx3.validate(vc.clone()).is_ok());
 }

@@ -157,22 +157,19 @@ fn multi_kernel_transaction_deaggregation() {
 	assert!(tx3.validate(vc.clone()).is_ok());
 	assert!(tx4.validate(vc.clone()).is_ok());

-	let tx1234 = aggregate(
-		vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
-		vc.clone(),
-	).unwrap();
-	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], vc.clone()).unwrap();
-	let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], vc.clone()).unwrap();
+	let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
+	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
+	let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();

 	assert!(tx1234.validate(vc.clone()).is_ok());
 	assert!(tx12.validate(vc.clone()).is_ok());
 	assert!(tx34.validate(vc.clone()).is_ok());

-	let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()], vc.clone()).unwrap();
+	let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
 	assert!(deaggregated_tx34.validate(vc.clone()).is_ok());
 	assert_eq!(tx34, deaggregated_tx34);

-	let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()], vc.clone()).unwrap();
+	let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();

 	assert!(deaggregated_tx12.validate(vc.clone()).is_ok());
 	assert_eq!(tx12, deaggregated_tx12);

@@ -190,13 +187,13 @@ fn multi_kernel_transaction_deaggregation_2() {
 	assert!(tx2.validate(vc.clone()).is_ok());
 	assert!(tx3.validate(vc.clone()).is_ok());

-	let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], vc.clone()).unwrap();
-	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], vc.clone()).unwrap();
+	let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
+	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();

 	assert!(tx123.validate(vc.clone()).is_ok());
 	assert!(tx12.validate(vc.clone()).is_ok());

-	let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()], vc.clone()).unwrap();
+	let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
 	assert!(deaggregated_tx3.validate(vc.clone()).is_ok());
 	assert_eq!(tx3, deaggregated_tx3);
 }

@@ -213,14 +210,14 @@ fn multi_kernel_transaction_deaggregation_3() {
 	assert!(tx2.validate(vc.clone()).is_ok());
 	assert!(tx3.validate(vc.clone()).is_ok());

-	let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], vc.clone()).unwrap();
-	let tx13 = aggregate(vec![tx1.clone(), tx3.clone()], vc.clone()).unwrap();
-	let tx2 = aggregate(vec![tx2.clone()], vc.clone()).unwrap();
+	let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
+	let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
+	let tx2 = aggregate(vec![tx2.clone()]).unwrap();

 	assert!(tx123.validate(vc.clone()).is_ok());
 	assert!(tx2.validate(vc.clone()).is_ok());

-	let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()], vc.clone()).unwrap();
+	let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
 	assert!(deaggregated_tx13.validate(vc.clone()).is_ok());
 	assert_eq!(tx13, deaggregated_tx13);
 }

@@ -241,22 +238,18 @@ fn multi_kernel_transaction_deaggregation_4() {
 	assert!(tx4.validate(vc.clone()).is_ok());
 	assert!(tx5.validate(vc.clone()).is_ok());

-	let tx12345 = aggregate(
-		vec![
-			tx1.clone(),
-			tx2.clone(),
-			tx3.clone(),
-			tx4.clone(),
-			tx5.clone(),
-		],
-		vc.clone(),
-	).unwrap();
+	let tx12345 = aggregate(vec![
+		tx1.clone(),
+		tx2.clone(),
+		tx3.clone(),
+		tx4.clone(),
+		tx5.clone(),
+	]).unwrap();
 	assert!(tx12345.validate(vc.clone()).is_ok());

 	let deaggregated_tx5 = deaggregate(
 		tx12345.clone(),
 		vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
-		vc.clone(),
 	).unwrap();
 	assert!(deaggregated_tx5.validate(vc.clone()).is_ok());
 	assert_eq!(tx5, deaggregated_tx5);

@@ -278,26 +271,19 @@ fn multi_kernel_transaction_deaggregation_5() {
 	assert!(tx4.validate(vc.clone()).is_ok());
 	assert!(tx5.validate(vc.clone()).is_ok());

-	let tx12345 = aggregate(
-		vec![
-			tx1.clone(),
-			tx2.clone(),
-			tx3.clone(),
-			tx4.clone(),
-			tx5.clone(),
-		],
-		vc.clone(),
-	).unwrap();
-	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], vc.clone()).unwrap();
-	let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], vc.clone()).unwrap();
+	let tx12345 = aggregate(vec![
+		tx1.clone(),
+		tx2.clone(),
+		tx3.clone(),
+		tx4.clone(),
+		tx5.clone(),
+	]).unwrap();
+	let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
+	let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();

 	assert!(tx12345.validate(vc.clone()).is_ok());

-	let deaggregated_tx5 = deaggregate(
-		tx12345.clone(),
-		vec![tx12.clone(), tx34.clone()],
-		vc.clone(),
-	).unwrap();
+	let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
 	assert!(deaggregated_tx5.validate(vc.clone()).is_ok());
 	assert_eq!(tx5, deaggregated_tx5);
 }

@@ -314,16 +300,16 @@ fn basic_transaction_deaggregation() {
 	assert!(tx2.validate(vc.clone()).is_ok());

 	// now build a "cut_through" tx from tx1 and tx2
-	let tx3 = aggregate(vec![tx1.clone(), tx2.clone()], vc.clone()).unwrap();
+	let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();

 	assert!(tx3.validate(vc.clone()).is_ok());

-	let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()], vc.clone()).unwrap();
+	let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();

 	assert!(deaggregated_tx1.validate(vc.clone()).is_ok());
 	assert_eq!(tx1, deaggregated_tx1);

-	let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()], vc.clone()).unwrap();
+	let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();

 	assert!(deaggregated_tx2.validate(vc.clone()).is_ok());
 	assert_eq!(tx2, deaggregated_tx2);
@@ -14,6 +14,7 @@

 use rand::thread_rng;
 use std::cmp::min;
+use std::ops::Add;

 /// Keychain trait and its main supporting types. The Identifier is a
 /// semi-opaque structure (just bytes) to track keys within the Keychain.
 /// BlindingFactor is a useful wrapper around a private key to help with

@@ -28,6 +29,7 @@ use util::secp::constants::SECRET_KEY_SIZE;
 use util::secp::key::{PublicKey, SecretKey};
 use util::secp::pedersen::Commitment;
 use util::secp::{self, Message, Secp256k1, Signature};
+use util::static_secp_instance;

 // Size of an identifier in bytes
 pub const IDENTIFIER_SIZE: usize = 10;
@@ -177,6 +179,32 @@ impl AsRef<[u8]> for BlindingFactor {
 	}
 }

+impl Add for BlindingFactor {
+	type Output = Result<BlindingFactor, Error>;
+
+	// Convenient (and robust) way to add two blinding_factors together.
+	// Handles "zero" blinding_factors correctly.
+	//
+	// let bf = (bf1 + bf2)?;
+	//
+	fn add(self, other: BlindingFactor) -> Self::Output {
+		let secp = static_secp_instance();
+		let secp = secp.lock().unwrap();
+		let keys = vec![self, other]
+			.into_iter()
+			.filter(|x| *x != BlindingFactor::zero())
+			.filter_map(|x| x.secret_key(&secp).ok())
+			.collect::<Vec<_>>();
+
+		if keys.is_empty() {
+			Ok(BlindingFactor::zero())
+		} else {
+			let sum = secp.blind_sum(keys, vec![])?;
+			Ok(BlindingFactor::from_secret_key(sum))
+		}
+	}
+}
+
 impl BlindingFactor {
 	pub fn from_secret_key(skey: secp::key::SecretKey) -> BlindingFactor {
 		BlindingFactor::from_slice(&skey.as_ref())
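A usage sketch for the new operator; the `?` on each add matches the doc comment above, and zero factors are filtered before touching secp:

    // Sketch: chained adds; adding zero is a no-op by construction.
    fn sum_blinds(bf1: BlindingFactor, bf2: BlindingFactor) -> Result<BlindingFactor, Error> {
        let partial = (bf1 + BlindingFactor::zero())?; // returns bf1 unchanged
        partial + bf2
    }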
pool/src/pool.rs (176 changed lines)
@@ -23,7 +23,7 @@ use core::core::hash::{Hash, Hashed};
 use core::core::id::{ShortId, ShortIdentifiable};
 use core::core::transaction;
 use core::core::verifier_cache::VerifierCache;
-use core::core::{Block, Transaction, TxKernel};
+use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
 use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
 use util::LOGGER;
@@ -111,9 +111,8 @@ impl Pool {
 	/// appropriate to put in a mined block. Aggregates chains of dependent
 	/// transactions, orders by fee over weight and ensures to total weight
 	/// doesn't exceed block limits.
-	pub fn prepare_mineable_transactions(&self) -> Vec<Transaction> {
-		let header = self.blockchain.chain_head().unwrap();
+	pub fn prepare_mineable_transactions(&self) -> Result<Vec<Transaction>, PoolError> {
+		let header = self.blockchain.chain_head()?;

 		let tx_buckets = self.bucket_transactions();

 		// flatten buckets using aggregate (with cut-through)

@@ -121,8 +120,9 @@ impl Pool {
 			.into_iter()
 			.filter_map(|mut bucket| {
 				bucket.truncate(MAX_TX_CHAIN);
-				transaction::aggregate(bucket, self.verifier_cache.clone()).ok()
-			}).collect();
+				transaction::aggregate(bucket).ok()
+			}).filter(|x| x.validate(self.verifier_cache.clone()).is_ok())
+			.collect();

 		// sort by fees over weight, multiplying by 1000 to keep some precision
 		// don't think we'll ever see a >max_u64/1000 fee transaction

@@ -135,11 +135,12 @@ impl Pool {
 			weight < MAX_MINEABLE_WEIGHT
 		});

-		// make sure those txs are all valid together, no Error is expected
-		// when passing None
-		self.blockchain
-			.validate_raw_txs(flat_txs, None, &header.hash())
-			.expect("should never happen")
+		// Iteratively apply the txs to the current chain state,
+		// rejecting any that do not result in a valid state.
+		// Return a vec of all the valid txs.
+		let block_sums = self.blockchain.get_block_sums(&header.hash())?;
+		let txs = self.validate_raw_txs(flat_txs, None, &header, &block_sums)?;
+		Ok(txs)
 	}

 	pub fn all_transactions(&self) -> Vec<Transaction> {
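prepare_mineable_transactions now surfaces errors instead of unwrapping internally; a sketch of a tolerant caller (a hypothetical miner-side helper, not part of this diff):

    // Sketch: a miner can fall back to an empty tx set on pool/chain errors.
    fn mineable_or_empty(pool: &TransactionPool) -> Vec<Transaction> {
        pool.prepare_mineable_transactions().unwrap_or_else(|_| vec![])
    }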
@@ -152,39 +153,37 @@ impl Pool {
 			return Ok(None);
 		}

-		let tx = transaction::aggregate(txs, self.verifier_cache.clone())?;
+		let tx = transaction::aggregate(txs)?;
+		tx.validate(self.verifier_cache.clone())?;
 		Ok(Some(tx))
 	}

 	pub fn select_valid_transactions(
-		&mut self,
-		from_state: PoolEntryState,
-		to_state: PoolEntryState,
+		&self,
+		txs: Vec<Transaction>,
 		extra_tx: Option<Transaction>,
-		block_hash: &Hash,
+		header: &BlockHeader,
 	) -> Result<Vec<Transaction>, PoolError> {
-		let entries = &mut self
-			.entries
-			.iter_mut()
-			.filter(|x| x.state == from_state)
-			.collect::<Vec<_>>();
-
-		let candidate_txs: Vec<Transaction> = entries.iter().map(|x| x.tx.clone()).collect();
-		if candidate_txs.is_empty() {
-			return Ok(vec![]);
-		}
-		let valid_txs = self
-			.blockchain
-			.validate_raw_txs(candidate_txs, extra_tx, block_hash)?;
-
-		// Update state on all entries included in final vec of valid txs.
-		for x in &mut entries.iter_mut() {
-			if valid_txs.contains(&x.tx) {
-				x.state = to_state.clone();
-			}
-		}
-
-		Ok(valid_txs)
+		let block_sums = self.blockchain.get_block_sums(&header.hash())?;
+		let valid_txs = self.validate_raw_txs(txs, extra_tx, header, &block_sums)?;
+		Ok(valid_txs)
+	}
+
+	pub fn get_transactions_in_state(&self, state: PoolEntryState) -> Vec<Transaction> {
+		self.entries
+			.iter()
+			.filter(|x| x.state == state)
+			.map(|x| x.tx.clone())
+			.collect::<Vec<_>>()
+	}
+
+	// Transition the specified pool entries to the new state.
+	pub fn transition_to_state(&mut self, txs: &Vec<Transaction>, state: PoolEntryState) {
+		for x in self.entries.iter_mut() {
+			if txs.contains(&x.tx) {
+				x.state = state.clone();
+			}
+		}
 	}

 	// Aggregate this new tx with all existing txs in the pool.
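The old select_valid_transactions both filtered entries and mutated their state; it is now split into fetch, validate, transition. A sketch of the three-step flow a driver such as the dandelion monitor might run (the PoolEntryState variant names here are assumptions):

    // Sketch: validate stem-bound txs, then transition only the survivors.
    fn stem_phase(
        pool: &mut Pool,
        txpool_tx: Option<Transaction>,
        header: &BlockHeader,
    ) -> Result<(), PoolError> {
        let stem_txs = pool.get_transactions_in_state(PoolEntryState::ToStem);
        let valid = pool.select_valid_transactions(stem_txs, txpool_tx, header)?;
        pool.transition_to_state(&valid, PoolEntryState::Stem);
        Ok(())
    }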
@@ -194,7 +193,7 @@ impl Pool {
 		&mut self,
 		entry: PoolEntry,
 		extra_txs: Vec<Transaction>,
-		block_hash: &Hash,
+		header: &BlockHeader,
 	) -> Result<(), PoolError> {
 		debug!(
 			LOGGER,

@@ -205,7 +204,7 @@ impl Pool {
 			entry.tx.inputs().len(),
 			entry.tx.outputs().len(),
 			entry.tx.kernels().len(),
-			block_hash,
+			header.hash(),
 		);

 		// Combine all the txs from the pool with any extra txs provided.

@@ -225,13 +224,14 @@ impl Pool {
 			// Create a single aggregated tx from the existing pool txs and the
 			// new entry
 			txs.push(entry.tx.clone());
-			transaction::aggregate(txs, self.verifier_cache.clone())?
+
+			let tx = transaction::aggregate(txs)?;
+			tx.validate(self.verifier_cache.clone())?;
+			tx
 		};

-		// Validate aggregated tx against a known chain state (via txhashset
-		// extension).
-		self.blockchain
-			.validate_raw_txs(vec![], Some(agg_tx), block_hash)?;
+		// Validate aggregated tx against a known chain state.
+		self.validate_raw_tx(&agg_tx, header)?;

 		// If we get here successfully then we can safely add the entry to the pool.
 		self.entries.push(entry);
@@ -239,32 +239,88 @@ impl Pool {
 		Ok(())
 	}

+	fn validate_raw_tx(
+		&self,
+		tx: &Transaction,
+		header: &BlockHeader,
+	) -> Result<BlockSums, PoolError> {
+		let block_sums = self.blockchain.get_block_sums(&header.hash())?;
+		let new_sums = self.apply_txs_to_block_sums(&block_sums, vec![tx.clone()], header)?;
+		Ok(new_sums)
+	}
+
+	fn validate_raw_txs(
+		&self,
+		txs: Vec<Transaction>,
+		extra_tx: Option<Transaction>,
+		header: &BlockHeader,
+		block_sums: &BlockSums,
+	) -> Result<Vec<Transaction>, PoolError> {
+		let mut valid_txs = vec![];
+
+		for tx in txs {
+			let mut candidate_txs = vec![];
+			if let Some(extra_tx) = extra_tx.clone() {
+				candidate_txs.push(extra_tx);
+			};
+			candidate_txs.extend(valid_txs.clone());
+			candidate_txs.push(tx.clone());
+			if self
+				.apply_txs_to_block_sums(&block_sums, candidate_txs, header)
+				.is_ok()
+			{
+				valid_txs.push(tx);
+			}
+		}
+
+		Ok(valid_txs)
+	}
+
+	fn apply_txs_to_block_sums(
+		&self,
+		block_sums: &BlockSums,
+		txs: Vec<Transaction>,
+		header: &BlockHeader,
+	) -> Result<BlockSums, PoolError> {
+		// Build a single aggregate tx and validate it.
+		let tx = transaction::aggregate(txs)?;
+		tx.validate(self.verifier_cache.clone())?;
+
+		// Validate the tx against current chain state.
+		// Check all inputs are in the current UTXO set.
+		// Check all outputs are unique in current UTXO set.
+		self.blockchain.validate_tx(&tx, header)?;
+
+		let overage = tx.overage();
+		let offset = (header.total_kernel_offset() + tx.offset)?;
+
+		// Verify the kernel sums for the block_sums with the new tx applied,
+		// accounting for overage and offset.
+		let (utxo_sum, kernel_sum) =
+			(block_sums.clone(), &tx as &Committed).verify_kernel_sums(overage, offset)?;
+
+		Ok(BlockSums {
+			utxo_sum,
+			kernel_sum,
+		})
+	}
+
 	pub fn reconcile(
 		&mut self,
 		extra_tx: Option<Transaction>,
-		block_hash: &Hash,
+		header: &BlockHeader,
 	) -> Result<(), PoolError> {
-		let candidate_txs = self.all_transactions();
-		let existing_len = candidate_txs.len();
-
-		if candidate_txs.is_empty() {
-			return Ok(());
-		}
-
-		// Go through the candidate txs and keep everything that validates incrementally
-		// against a known chain state, accounting for the "extra tx" as necessary.
-		let valid_txs = self
-			.blockchain
-			.validate_raw_txs(candidate_txs, extra_tx, block_hash)?;
-		self.entries.retain(|x| valid_txs.contains(&x.tx));
-
-		debug!(
-			LOGGER,
-			"pool [{}]: reconcile: existing txs {}, retained txs {}",
-			self.name,
-			existing_len,
-			self.entries.len(),
-		);
+		let existing_entries = self.entries.clone();
+		self.entries.clear();
+
+		let mut extra_txs = vec![];
+		if let Some(extra_tx) = extra_tx {
+			extra_txs.push(extra_tx);
+		}
+
+		for x in existing_entries {
+			let _ = self.add_to_pool(x.clone(), extra_txs.clone(), header);
+		}

 		Ok(())
 	}
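The loop in validate_raw_txs accepts each candidate only if the whole set so far (extra tx + already-accepted txs + candidate) still maps to valid block sums. A self-contained toy of that greedy scheme, with a closure standing in for apply_txs_to_block_sums:

    // Toy: keep a candidate only if everything accepted so far still
    // validates together with it.
    fn select_incrementally<T: Clone>(
        candidates: Vec<T>,
        extra: Option<T>,
        valid_together: impl Fn(&[T]) -> bool,
    ) -> Vec<T> {
        let mut kept: Vec<T> = vec![];
        for c in candidates {
            let mut trial: Vec<T> = extra.iter().cloned().collect();
            trial.extend(kept.iter().cloned());
            trial.push(c.clone());
            if valid_together(&trial) {
                kept.push(c);
            }
        }
        kept
    }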
@@ -24,7 +24,7 @@ use chrono::prelude::Utc;
 use core::core::hash::{Hash, Hashed};
 use core::core::id::ShortId;
 use core::core::verifier_cache::VerifierCache;
-use core::core::{transaction, Block, Transaction};
+use core::core::{transaction, Block, BlockHeader, Transaction};
 use pool::Pool;
 use types::{BlockChain, PoolAdapter, PoolConfig, PoolEntry, PoolEntryState, PoolError, TxSource};
@@ -61,33 +61,39 @@ impl TransactionPool {
         }
     }

-    fn add_to_stempool(&mut self, entry: PoolEntry, block_hash: &Hash) -> Result<(), PoolError> {
+    fn add_to_stempool(&mut self, entry: PoolEntry, header: &BlockHeader) -> Result<(), PoolError> {
         // Add tx to stempool (passing in all txs from txpool to validate against).
         self.stempool
-            .add_to_pool(entry.clone(), self.txpool.all_transactions(), block_hash)?;
+            .add_to_pool(entry.clone(), self.txpool.all_transactions(), header)?;

         // Note: we do not notify the adapter here,
         // we let the dandelion monitor handle this.
         Ok(())
     }

-    fn add_to_txpool(&mut self, mut entry: PoolEntry, block_hash: &Hash) -> Result<(), PoolError> {
+    fn add_to_txpool(
+        &mut self,
+        mut entry: PoolEntry,
+        header: &BlockHeader,
+    ) -> Result<(), PoolError> {
         // First deaggregate the tx based on current txpool txs.
         if entry.tx.kernels().len() > 1 {
             let txs = self
                 .txpool
                 .find_matching_transactions(entry.tx.kernels().clone());
             if !txs.is_empty() {
-                entry.tx = transaction::deaggregate(entry.tx, txs, self.verifier_cache.clone())?;
+                let tx = transaction::deaggregate(entry.tx, txs)?;
+                tx.validate(self.verifier_cache.clone())?;
+                entry.tx = tx;
                 entry.src.debug_name = "deagg".to_string();
             }
         }
-        self.txpool.add_to_pool(entry.clone(), vec![], block_hash)?;
+        self.txpool.add_to_pool(entry.clone(), vec![], header)?;

         // We now need to reconcile the stempool based on the new state of the txpool.
         // Some stempool txs may no longer be valid and we need to evict them.
         let txpool_tx = self.txpool.aggregate_transaction()?;
-        self.stempool.reconcile(txpool_tx, block_hash)?;
+        self.stempool.reconcile(txpool_tx, header)?;

         self.adapter.tx_accepted(&entry.tx);
         Ok(())
@@ -100,7 +106,7 @@ impl TransactionPool {
         src: TxSource,
         tx: Transaction,
         stem: bool,
-        block_hash: &Hash,
+        header: &BlockHeader,
     ) -> Result<(), PoolError> {
         // Quick check to deal with common case of seeing the *same* tx
         // broadcast from multiple peers simultaneously.
@@ -129,9 +135,9 @@ impl TransactionPool {
         };

         if stem {
-            self.add_to_stempool(entry, block_hash)?;
+            self.add_to_stempool(entry, header)?;
         } else {
-            self.add_to_txpool(entry, block_hash)?;
+            self.add_to_txpool(entry, header)?;
         }
         Ok(())
     }
@@ -141,12 +147,12 @@ impl TransactionPool {
     pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
         // First reconcile the txpool.
         self.txpool.reconcile_block(block)?;
-        self.txpool.reconcile(None, &block.hash())?;
+        self.txpool.reconcile(None, &block.header)?;

         // Then reconcile the stempool, accounting for the txpool txs.
         let txpool_tx = self.txpool.aggregate_transaction()?;
         self.stempool.reconcile_block(block)?;
-        self.stempool.reconcile(txpool_tx, &block.hash())?;
+        self.stempool.reconcile(txpool_tx, &block.header)?;

         Ok(())
     }
@@ -191,7 +197,7 @@ impl TransactionPool {

     /// Returns a vector of transactions from the txpool so we can build a
     /// block from them.
-    pub fn prepare_mineable_transactions(&self) -> Vec<Transaction> {
+    pub fn prepare_mineable_transactions(&self) -> Result<Vec<Transaction>, PoolError> {
         self.txpool.prepare_mineable_transactions()
     }
 }
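Across these hunks the pool API now threads a full BlockHeader instead of a bare block hash, so validation code can read header fields directly rather than re-fetching them by hash. A small self-contained sketch of why that matters; the lock-height rule below is illustrative, not grin's exact maturity check:

struct BlockHeader {
    height: u64,
}

struct Tx {
    lock_height: u64,
}

// With the header in hand, a validity check can use height directly;
// an opaque hash would force a store lookup first.
fn validate_tx(tx: &Tx, header: &BlockHeader) -> Result<(), String> {
    if tx.lock_height > header.height {
        return Err("immature transaction".to_string());
    }
    Ok(())
}

fn main() {
    let header = BlockHeader { height: 100 };
    assert!(validate_tx(&Tx { lock_height: 50 }, &header).is_ok());
    assert!(validate_tx(&Tx { lock_height: 150 }, &header).is_err());
}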
@@ -18,9 +18,11 @@
 use chrono::prelude::{DateTime, Utc};

 use core::consensus;
+use core::core::committed;
 use core::core::hash::Hash;
 use core::core::transaction::{self, Transaction};
-use core::core::BlockHeader;
+use core::core::{BlockHeader, BlockSums};
+use keychain;

 /// Dandelion relay timer
 const DANDELION_RELAY_SECS: u64 = 600;
@@ -161,6 +163,10 @@ pub struct TxSource {
 pub enum PoolError {
     /// An invalid pool entry caused by underlying tx validation error
     InvalidTx(transaction::Error),
+    /// Underlying keychain error.
+    Keychain(keychain::Error),
+    /// Underlying "committed" error.
+    Committed(committed::Error),
     /// Attempt to add a transaction to the pool with lock_height
     /// greater than height of current block
     ImmatureTransaction,
@@ -186,17 +192,20 @@ impl From<transaction::Error> for PoolError {
     }
 }

+impl From<keychain::Error> for PoolError {
+    fn from(e: keychain::Error) -> PoolError {
+        PoolError::Keychain(e)
+    }
+}
+
+impl From<committed::Error> for PoolError {
+    fn from(e: committed::Error) -> PoolError {
+        PoolError::Committed(e)
+    }
+}
+
 /// Interface that the pool requires from a blockchain implementation.
 pub trait BlockChain: Sync + Send {
-    /// Validate a vec of txs against known chain state at specific block
-    /// after applying the pre_tx to the chain state.
-    fn validate_raw_txs(
-        &self,
-        txs: Vec<transaction::Transaction>,
-        pre_tx: Option<transaction::Transaction>,
-        block_hash: &Hash,
-    ) -> Result<Vec<transaction::Transaction>, PoolError>;
-
     /// Verify any coinbase outputs being spent
     /// have matured sufficiently.
     fn verify_coinbase_maturity(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;
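The two From impls added above exist so that keychain and committed errors propagate through the ? operator inside pool code without explicit map_err calls. A minimal self-contained illustration of the same conversion pattern with toy error types (not grin's):

#[derive(Debug)]
struct KeychainError;
#[derive(Debug)]
struct CommittedError;

#[derive(Debug)]
enum PoolError {
    Keychain(KeychainError),
    Committed(CommittedError),
}

impl From<KeychainError> for PoolError {
    fn from(e: KeychainError) -> PoolError {
        PoolError::Keychain(e)
    }
}

impl From<CommittedError> for PoolError {
    fn from(e: CommittedError) -> PoolError {
        PoolError::Committed(e)
    }
}

// A lower-level check that fails with its own error type.
fn verify_sums() -> Result<(), CommittedError> {
    Err(CommittedError)
}

// `?` lifts CommittedError into PoolError via the From impl.
fn validate() -> Result<(), PoolError> {
    verify_sums()?;
    Ok(())
}

fn main() {
    assert!(matches!(validate(), Err(PoolError::Committed(_))));
}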
@@ -205,7 +214,12 @@ pub trait BlockChain: Sync + Send {
     /// have matured sufficiently.
     fn verify_tx_lock_height(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;

+    fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), PoolError>;
+
     fn chain_head(&self) -> Result<BlockHeader, PoolError>;
+
+    fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, PoolError>;
+    fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, PoolError>;
 }

 /// Bridge between the transaction pool and the rest of the system. Handles
@@ -27,12 +27,8 @@ pub mod common;

 use std::sync::{Arc, RwLock};

-use core::core::{Block, BlockHeader};
-
-use chain::txhashset;
-use chain::types::Tip;
-use core::core::hash::Hashed;
 use core::core::verifier_cache::LruVerifierCache;
+use core::core::{Block, BlockHeader, Transaction};
 use core::pow::Difficulty;

 use keychain::{ExtKeychain, Keychain};
@@ -47,53 +43,35 @@ fn test_transaction_pool_block_building() {

     let db_root = ".grin_block_building".to_string();
     clean_output_dir(db_root.clone());
-    let chain = ChainAdapter::init(db_root.clone()).unwrap();
+    let mut chain = ChainAdapter::init(db_root.clone()).unwrap();

     let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

     // Initialize the chain/txhashset with an initial block
     // so we have a non-empty UTXO set.
-    let add_block = |height, txs| {
+    let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
+        let height = prev_header.height + 1;
         let key_id = keychain.derive_key_id(height as u32).unwrap();
-        let reward = libtx::reward::output(&keychain, &key_id, 0, height).unwrap();
-        let mut block =
-            Block::new(&BlockHeader::default(), txs, Difficulty::one(), reward).unwrap();
+        let fee = txs.iter().map(|x| x.fee()).sum();
+        let reward = libtx::reward::output(&keychain, &key_id, fee, height).unwrap();
+        let block = Block::new(&prev_header, txs, Difficulty::one(), reward).unwrap();

-        let mut txhashset = chain.txhashset.write().unwrap();
-        let mut batch = chain.store.batch().unwrap();
-        txhashset::extending(&mut txhashset, &mut batch, |extension| {
-            extension.apply_block(&block)?;
-
-            // Now set the roots and sizes as necessary on the block header.
-            let roots = extension.roots();
-            block.header.output_root = roots.output_root;
-            block.header.range_proof_root = roots.rproof_root;
-            block.header.kernel_root = roots.kernel_root;
-            let sizes = extension.sizes();
-            block.header.output_mmr_size = sizes.0;
-            block.header.kernel_mmr_size = sizes.2;
-
-            Ok(())
-        }).unwrap();
-
-        let tip = Tip::from_block(&block.header);
-        batch.save_block_header(&block.header).unwrap();
-        batch.save_head(&tip).unwrap();
-        batch.commit().unwrap();
+        chain.update_db_for_block(&block);

         block.header
     };
-    let header = add_block(1, vec![]);

-    // Initialize a new pool with our chain adapter.
-    let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
+    let header = add_block(BlockHeader::default(), vec![], &mut chain);

     // Now create tx to spend that first coinbase (now matured).
     // Provides us with some useful outputs to test with.
     let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);

     // Mine that initial tx so we can spend it with multiple txs
-    let header = add_block(2, vec![initial_tx]);
+    let header = add_block(header, vec![initial_tx], &mut chain);
+
+    // Initialize a new pool with our chain adapter.
+    let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));

     let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
     let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
@@ -107,21 +85,21 @@ fn test_transaction_pool_block_building() {

         // Add the three root txs to the pool.
         write_pool
-            .add_to_pool(test_source(), root_tx_1, false, &header.hash())
+            .add_to_pool(test_source(), root_tx_1, false, &header)
             .unwrap();
         write_pool
-            .add_to_pool(test_source(), root_tx_2, false, &header.hash())
+            .add_to_pool(test_source(), root_tx_2, false, &header)
             .unwrap();
         write_pool
-            .add_to_pool(test_source(), root_tx_3, false, &header.hash())
+            .add_to_pool(test_source(), root_tx_3, false, &header)
             .unwrap();

         // Now add the two child txs to the pool.
         write_pool
-            .add_to_pool(test_source(), child_tx_1.clone(), false, &header.hash())
+            .add_to_pool(test_source(), child_tx_1.clone(), false, &header)
             .unwrap();
         write_pool
-            .add_to_pool(test_source(), child_tx_2.clone(), false, &header.hash())
+            .add_to_pool(test_source(), child_tx_2.clone(), false, &header)
             .unwrap();

         assert_eq!(write_pool.total_size(), 5);
@@ -129,41 +107,19 @@ fn test_transaction_pool_block_building() {

     let txs = {
         let read_pool = pool.read().unwrap();
-        read_pool.prepare_mineable_transactions()
+        read_pool.prepare_mineable_transactions().unwrap()
     };
     // children should have been aggregated into parents
     assert_eq!(txs.len(), 3);

-    let mut block = {
+    let block = {
         let key_id = keychain.derive_key_id(2).unwrap();
         let fees = txs.iter().map(|tx| tx.fee()).sum();
         let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
         Block::new(&header, txs, Difficulty::one(), reward)
     }.unwrap();

-    {
-        let mut batch = chain.store.batch().unwrap();
-        let mut txhashset = chain.txhashset.write().unwrap();
-        txhashset::extending(&mut txhashset, &mut batch, |extension| {
-            extension.apply_block(&block)?;
-
-            // Now set the roots and sizes as necessary on the block header.
-            let roots = extension.roots();
-            block.header.output_root = roots.output_root;
-            block.header.range_proof_root = roots.rproof_root;
-            block.header.kernel_root = roots.kernel_root;
-            let sizes = extension.sizes();
-            block.header.output_mmr_size = sizes.0;
-            block.header.kernel_mmr_size = sizes.2;
-
-            Ok(())
-        }).unwrap();
-
-        let tip = Tip::from_block(&block.header);
-        batch.save_block_header(&block.header).unwrap();
-        batch.save_head(&tip).unwrap();
-        batch.commit().unwrap();
-    }
+    chain.update_db_for_block(&block);

     // Now reconcile the transaction pool with the new block
     // and check the resulting contents of the pool are what we expect.
@@ -27,15 +27,9 @@ pub mod common;

 use std::sync::{Arc, RwLock};

-use core::core::hash::Hashed;
 use core::core::{Block, BlockHeader};

-use chain::txhashset;
-use chain::types::Tip;
-use common::{
-    clean_output_dir, test_setup, test_source, test_transaction,
-    test_transaction_spending_coinbase, ChainAdapter,
-};
+use common::*;
 use core::core::verifier_cache::LruVerifierCache;
 use core::pow::Difficulty;
 use keychain::{ExtKeychain, Keychain};
@@ -47,84 +41,41 @@ fn test_transaction_pool_block_reconciliation() {

     let db_root = ".grin_block_reconciliation".to_string();
     clean_output_dir(db_root.clone());
-    let chain = ChainAdapter::init(db_root.clone()).unwrap();
+    let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());

     let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

-    // Initialize the chain/txhashset with an initial block
-    // so we have a non-empty UTXO set.
+    // Initialize a new pool with our chain adapter.
+    let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));

     let header = {
         let height = 1;
         let key_id = keychain.derive_key_id(height as u32).unwrap();
         let reward = libtx::reward::output(&keychain, &key_id, 0, height).unwrap();
-        let mut block =
-            Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();
-
-        let mut batch = chain.store.batch().unwrap();
-        let mut txhashset = chain.txhashset.write().unwrap();
-        txhashset::extending(&mut txhashset, &mut batch, |extension| {
-            extension.apply_block(&block)?;
-
-            // Now set the roots and sizes as necessary on the block header.
-            let roots = extension.roots();
-            block.header.output_root = roots.output_root;
-            block.header.range_proof_root = roots.rproof_root;
-            block.header.kernel_root = roots.kernel_root;
-            let sizes = extension.sizes();
-            block.header.output_mmr_size = sizes.0;
-            block.header.kernel_mmr_size = sizes.2;
-
-            Ok(())
-        }).unwrap();
-
-        let tip = Tip::from_block(&block.header);
-        batch.save_block_header(&block.header).unwrap();
-        batch.save_head(&tip).unwrap();
-        batch.commit().unwrap();
+        let block = Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();
+
+        chain.update_db_for_block(&block);

         block.header
     };

-    // Initialize a new pool with our chain adapter.
-    let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache.clone()));
-
     // Now create tx to spend that first coinbase (now matured).
     // Provides us with some useful outputs to test with.
     let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);

-    let header = {
+    let block = {
         let key_id = keychain.derive_key_id(2).unwrap();
         let fees = initial_tx.fee();
         let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
-        let mut block = Block::new(&header, vec![initial_tx], Difficulty::one(), reward).unwrap();
-
-        let mut batch = chain.store.batch().unwrap();
-        {
-            let mut txhashset = chain.txhashset.write().unwrap();
-            txhashset::extending(&mut txhashset, &mut batch, |extension| {
-                extension.apply_block(&block)?;
-
-                // Now set the roots and sizes as necessary on the block header.
-                let roots = extension.roots();
-                block.header.output_root = roots.output_root;
-                block.header.range_proof_root = roots.rproof_root;
-                block.header.kernel_root = roots.kernel_root;
-                let sizes = extension.sizes();
-                block.header.output_mmr_size = sizes.0;
-                block.header.kernel_mmr_size = sizes.2;
-
-                Ok(())
-            }).unwrap();
-        }
-
-        let tip = Tip::from_block(&block.header);
-        batch.save_block_header(&block.header).unwrap();
-        batch.save_head(&tip).unwrap();
-        batch.commit().unwrap();
-
-        block.header
+        let block = Block::new(&header, vec![initial_tx], Difficulty::one(), reward).unwrap();
+
+        chain.update_db_for_block(&block);
+
+        block
     };

+    let header = block.header;
+
     // Preparation: We will introduce three root pool transactions.
     // 1. A transaction that should be invalidated because it is exactly
     // contained in the block.
@@ -181,7 +132,7 @@ fn test_transaction_pool_block_reconciliation() {

     for tx in &txs_to_add {
         write_pool
-            .add_to_pool(test_source(), tx.clone(), false, &header.hash())
+            .add_to_pool(test_source(), tx.clone(), false, &header)
             .unwrap();
     }
@@ -198,6 +149,7 @@ fn test_transaction_pool_block_reconciliation() {
     let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
     // - Output conflict w/ 8
     let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);

     let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];

     // Now apply this block.
@@ -205,34 +157,9 @@ fn test_transaction_pool_block_reconciliation() {
         let key_id = keychain.derive_key_id(3).unwrap();
         let fees = block_txs.iter().map(|tx| tx.fee()).sum();
         let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
-        let mut block = Block::new(&header, block_txs, Difficulty::one(), reward).unwrap();
-
-        {
-            let mut batch = chain.store.batch().unwrap();
-            let mut txhashset = chain.txhashset.write().unwrap();
-            txhashset::extending(&mut txhashset, &mut batch, |extension| {
-                extension.apply_block(&block)?;
-
-                // Now set the roots and sizes as necessary on the block header.
-                let roots = extension.roots();
-                block.header.output_root = roots.output_root;
-                block.header.range_proof_root = roots.rproof_root;
-                block.header.kernel_root = roots.kernel_root;
-                let sizes = extension.sizes();
-                block.header.output_mmr_size = sizes.0;
-                block.header.kernel_mmr_size = sizes.2;
-
-                Ok(())
-            }).unwrap();
-            batch.commit().unwrap();
-        }
-
-        let tip = Tip::from_block(&block.header);
-        let batch = chain.store.batch().unwrap();
-        batch.save_block_header(&block.header).unwrap();
-        batch.save_head(&tip).unwrap();
-        batch.commit().unwrap();
+        let block = Block::new(&header, block_txs, Difficulty::one(), reward).unwrap();
+
+        chain.update_db_for_block(&block);

         block
     };
@@ -27,10 +27,10 @@ pub mod common;

 use std::sync::{Arc, RwLock};

-use common::{test_setup, test_source, test_transaction};
+use common::*;
 use core::core::hash::Hash;
 use core::core::verifier_cache::LruVerifierCache;
-use core::core::{BlockHeader, Transaction};
+use core::core::{BlockHeader, BlockSums, Transaction};
 use keychain::{ExtKeychain, Keychain};
 use pool::types::{BlockChain, PoolError};
@@ -48,12 +48,15 @@ impl BlockChain for CoinbaseMaturityErrorChainAdapter {
         unimplemented!();
     }

-    fn validate_raw_txs(
-        &self,
-        _txs: Vec<Transaction>,
-        _pre_tx: Option<Transaction>,
-        _block_hash: &Hash,
-    ) -> Result<Vec<Transaction>, PoolError> {
+    fn get_block_header(&self, _hash: &Hash) -> Result<BlockHeader, PoolError> {
+        unimplemented!();
+    }
+
+    fn get_block_sums(&self, _hash: &Hash) -> Result<BlockSums, PoolError> {
+        unimplemented!();
+    }
+
+    fn validate_tx(&self, _tx: &Transaction, _header: &BlockHeader) -> Result<(), PoolError> {
         unimplemented!();
     }
@@ -82,7 +85,7 @@ fn test_coinbase_maturity() {
     {
         let mut write_pool = pool.write().unwrap();
         let tx = test_transaction(&keychain, vec![50], vec![49]);
-        match write_pool.add_to_pool(test_source(), tx.clone(), true, &Hash::default()) {
+        match write_pool.add_to_pool(test_source(), tx.clone(), true, &BlockHeader::default()) {
             Err(PoolError::ImmatureCoinbase) => {}
             _ => panic!("Expected an immature coinbase error here."),
         }
@@ -26,16 +26,16 @@ extern crate grin_wallet as wallet;
 extern crate chrono;
 extern crate rand;

+use std::collections::HashSet;
 use std::fs;
 use std::sync::{Arc, RwLock};

-use core::core::hash::Hash;
+use core::core::hash::{Hash, Hashed};
 use core::core::verifier_cache::VerifierCache;
-use core::core::{BlockHeader, Transaction};
+use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction};

 use chain::store::ChainStore;
-use chain::txhashset;
-use chain::txhashset::TxHashSet;
+use chain::types::Tip;
 use pool::*;

 use keychain::Keychain;
@@ -43,11 +43,12 @@ use wallet::libtx;

 use pool::types::*;
 use pool::TransactionPool;
+use util::secp::pedersen::Commitment;

 #[derive(Clone)]
 pub struct ChainAdapter {
-    pub txhashset: Arc<RwLock<TxHashSet>>,
     pub store: Arc<ChainStore>,
+    pub utxo: Arc<RwLock<HashSet<Commitment>>>,
 }

 impl ChainAdapter {
@@ -57,13 +58,54 @@ impl ChainAdapter {
         let chain_store =
             ChainStore::new(db_env).map_err(|e| format!("failed to init chain_store, {:?}", e))?;
         let store = Arc::new(chain_store);
-        let txhashset = TxHashSet::open(target_dir.clone(), store.clone(), None)
-            .map_err(|e| format!("failed to init txhashset, {}", e))?;
+        let utxo = Arc::new(RwLock::new(HashSet::new()));

-        Ok(ChainAdapter {
-            txhashset: Arc::new(RwLock::new(txhashset)),
-            store: store.clone(),
-        })
+        Ok(ChainAdapter { store, utxo })
+    }
+
+    pub fn update_db_for_block(&self, block: &Block) {
+        let header = &block.header;
+        let batch = self.store.batch().unwrap();
+        let tip = Tip::from_block(&header);
+        batch.save_block_header(&header).unwrap();
+        batch.save_head(&tip).unwrap();
+
+        // Retrieve previous block_sums from the db.
+        let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&header.previous) {
+            prev_sums
+        } else {
+            BlockSums::default()
+        };
+
+        // Overage is based purely on the new block.
+        // Previous block_sums have taken all previous overage into account.
+        let overage = header.overage();
+
+        // Offset on the other hand is the total kernel offset from the new block.
+        let offset = header.total_kernel_offset();
+
+        // Verify the kernel sums for the block_sums with the new block applied.
+        let (utxo_sum, kernel_sum) = (prev_sums, block as &Committed)
+            .verify_kernel_sums(overage, offset)
+            .unwrap();
+
+        let block_sums = BlockSums {
+            utxo_sum,
+            kernel_sum,
+        };
+        batch.save_block_sums(&header.hash(), &block_sums).unwrap();
+
+        batch.commit().unwrap();
+
+        {
+            let mut utxo = self.utxo.write().unwrap();
+            for x in block.inputs() {
+                utxo.remove(&x.commitment());
+            }
+            for x in block.outputs() {
+                utxo.insert(x.commitment());
+            }
+        }
     }
 }
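For orientation on update_db_for_block: with Pedersen commitments the kernel-sum check asserts, roughly, that sum(outputs) - sum(inputs) - overage·H equals sum(kernel excesses) + offset·G, and the resulting BlockSums carry those running totals forward block by block. A toy scalar model of the bookkeeping; real sums are elliptic-curve points, and the grouping of overage and offset here follows the comments above only loosely:

// Toy scalar stand-in for Pedersen commitment sums.
#[derive(Clone, Copy, Default)]
struct Sums {
    utxo_sum: i64,   // running sum(outputs) - sum(inputs) - total overage
    kernel_sum: i64, // running sum(kernel excesses) + total offset
}

struct ToyBlock {
    outputs: i64,
    inputs: i64,
    kernels: i64,
    overage: i64, // this block's overage only
    offset: i64,  // this block's kernel offset only
}

fn apply_block(prev: Sums, b: &ToyBlock) -> Sums {
    let utxo_sum = prev.utxo_sum + b.outputs - b.inputs - b.overage;
    let kernel_sum = prev.kernel_sum + b.kernels + b.offset;
    // The "verify" step asserts both sides commit to the same total.
    assert_eq!(utxo_sum, kernel_sum);
    Sums { utxo_sum, kernel_sum }
}

fn main() {
    let genesis = Sums::default();
    let block = ToyBlock { outputs: 70, inputs: 10, kernels: 55, overage: 0, offset: 5 };
    let sums = apply_block(genesis, &block);
    assert_eq!(sums.utxo_sum, 60);
}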
@@ -74,24 +116,34 @@ impl BlockChain for ChainAdapter {
             .map_err(|_| PoolError::Other(format!("failed to get chain head")))
     }

-    fn validate_raw_txs(
-        &self,
-        txs: Vec<Transaction>,
-        pre_tx: Option<Transaction>,
-        block_hash: &Hash,
-    ) -> Result<Vec<Transaction>, PoolError> {
-        let header = self
-            .store
-            .get_block_header(&block_hash)
-            .map_err(|_| PoolError::Other(format!("failed to get header")))?;
-
-        let mut txhashset = self.txhashset.write().unwrap();
-        let res = txhashset::extending_readonly(&mut txhashset, |extension| {
-            extension.rewind(&header)?;
-            let valid_txs = extension.validate_raw_txs(txs, pre_tx)?;
-            Ok(valid_txs)
-        }).map_err(|e| PoolError::Other(format!("Error: test chain adapter: {:?}", e)))?;
-        Ok(res)
+    fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, PoolError> {
+        self.store
+            .get_block_header(hash)
+            .map_err(|_| PoolError::Other(format!("failed to get block header")))
+    }
+
+    fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, PoolError> {
+        self.store
+            .get_block_sums(hash)
+            .map_err(|_| PoolError::Other(format!("failed to get block sums")))
+    }
+
+    fn validate_tx(&self, tx: &Transaction, _header: &BlockHeader) -> Result<(), pool::PoolError> {
+        let utxo = self.utxo.read().unwrap();
+
+        for x in tx.outputs() {
+            if utxo.contains(&x.commitment()) {
+                return Err(PoolError::Other(format!("output commitment not unique")));
+            }
+        }
+
+        for x in tx.inputs() {
+            if !utxo.contains(&x.commitment()) {
+                return Err(PoolError::Other(format!("not in utxo set")));
+            }
+        }
+
+        Ok(())
     }

     // Mocking this check out for these tests.
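The test adapter above swaps the full TxHashSet for a plain HashSet of unspent commitments, so validate_tx reduces to set membership: outputs must be new, inputs must be unspent. A self-contained sketch of that rule with a toy commitment type (illustrative names):

use std::collections::HashSet;

// Toy commitment: grin uses Pedersen commitments; a u64 suffices to
// model the set-membership rule the mock adapter implements.
type Commitment = u64;

struct Tx {
    inputs: Vec<Commitment>,
    outputs: Vec<Commitment>,
}

// Inputs must exist in the UTXO set; outputs must not already exist.
fn validate_tx(utxo: &HashSet<Commitment>, tx: &Tx) -> Result<(), String> {
    for out in &tx.outputs {
        if utxo.contains(out) {
            return Err(format!("output commitment not unique: {}", out));
        }
    }
    for inp in &tx.inputs {
        if !utxo.contains(inp) {
            return Err(format!("not in utxo set: {}", inp));
        }
    }
    Ok(())
}

fn main() {
    let utxo: HashSet<Commitment> = vec![1, 2, 3].into_iter().collect();
    assert!(validate_tx(&utxo, &Tx { inputs: vec![1], outputs: vec![9] }).is_ok());
    assert!(validate_tx(&utxo, &Tx { inputs: vec![7], outputs: vec![9] }).is_err());
}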
@@ -27,13 +27,7 @@ pub mod common;

 use std::sync::{Arc, RwLock};

-use chain::txhashset;
-use chain::types::Tip;
-use common::{
-    clean_output_dir, test_setup, test_source, test_transaction,
-    test_transaction_spending_coinbase, ChainAdapter,
-};
-use core::core::hash::Hashed;
+use common::*;
 use core::core::verifier_cache::LruVerifierCache;
 use core::core::{transaction, Block, BlockHeader};
 use core::pow::Difficulty;
@@ -47,12 +41,13 @@ fn test_the_transaction_pool() {

     let db_root = ".grin_transaction_pool".to_string();
     clean_output_dir(db_root.clone());
-    let chain = ChainAdapter::init(db_root.clone()).unwrap();
+    let chain = Arc::new(ChainAdapter::init(db_root.clone()).unwrap());

     let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

-    // Initialize the chain/txhashset with a few blocks,
-    // so we have a non-empty UTXO set.
+    // Initialize a new pool with our chain adapter.
+    let pool = RwLock::new(test_setup(chain.clone(), verifier_cache.clone()));

     let header = {
         let height = 1;
         let key_id = keychain.derive_key_id(height as u32).unwrap();
|
@ -60,34 +55,11 @@ fn test_the_transaction_pool() {
|
||||||
let mut block =
|
let mut block =
|
||||||
Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();
|
Block::new(&BlockHeader::default(), vec![], Difficulty::one(), reward).unwrap();
|
||||||
|
|
||||||
let mut txhashset = chain.txhashset.write().unwrap();
|
chain.update_db_for_block(&block);
|
||||||
let mut batch = chain.store.batch().unwrap();
|
|
||||||
txhashset::extending(&mut txhashset, &mut batch, |extension| {
|
|
||||||
extension.apply_block(&block)?;
|
|
||||||
|
|
||||||
// Now set the roots and sizes as necessary on the block header.
|
|
||||||
let roots = extension.roots();
|
|
||||||
block.header.output_root = roots.output_root;
|
|
||||||
block.header.range_proof_root = roots.rproof_root;
|
|
||||||
block.header.kernel_root = roots.kernel_root;
|
|
||||||
let sizes = extension.sizes();
|
|
||||||
block.header.output_mmr_size = sizes.0;
|
|
||||||
block.header.kernel_mmr_size = sizes.2;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}).unwrap();
|
|
||||||
|
|
||||||
let tip = Tip::from_block(&block.header);
|
|
||||||
batch.save_block_header(&block.header).unwrap();
|
|
||||||
batch.save_head(&tip).unwrap();
|
|
||||||
batch.commit().unwrap();
|
|
||||||
|
|
||||||
block.header
|
block.header
|
||||||
};
|
};
|
||||||
|
|
||||||
// Initialize a new pool with our chain adapter.
|
|
||||||
let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache.clone()));
|
|
||||||
|
|
||||||
// Now create tx to spend a coinbase, giving us some useful outputs for testing
|
// Now create tx to spend a coinbase, giving us some useful outputs for testing
|
||||||
// with.
|
// with.
|
||||||
let initial_tx = {
|
let initial_tx = {
|
||||||
|
@ -102,7 +74,7 @@ fn test_the_transaction_pool() {
|
||||||
{
|
{
|
||||||
let mut write_pool = pool.write().unwrap();
|
let mut write_pool = pool.write().unwrap();
|
||||||
write_pool
|
write_pool
|
||||||
.add_to_pool(test_source(), initial_tx, false, &header.hash())
|
.add_to_pool(test_source(), initial_tx, false, &header)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(write_pool.total_size(), 1);
|
assert_eq!(write_pool.total_size(), 1);
|
||||||
}
|
}
|
||||||
|
@ -121,14 +93,14 @@ fn test_the_transaction_pool() {
|
||||||
|
|
||||||
// First, add a simple tx to the pool in "stem" mode.
|
// First, add a simple tx to the pool in "stem" mode.
|
||||||
write_pool
|
write_pool
|
||||||
.add_to_pool(test_source(), tx1.clone(), true, &header.hash())
|
.add_to_pool(test_source(), tx1.clone(), true, &header)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(write_pool.total_size(), 1);
|
assert_eq!(write_pool.total_size(), 1);
|
||||||
assert_eq!(write_pool.stempool.size(), 1);
|
assert_eq!(write_pool.stempool.size(), 1);
|
||||||
|
|
||||||
// Add another tx spending outputs from the previous tx.
|
// Add another tx spending outputs from the previous tx.
|
||||||
write_pool
|
write_pool
|
||||||
.add_to_pool(test_source(), tx2.clone(), true, &header.hash())
|
.add_to_pool(test_source(), tx2.clone(), true, &header)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(write_pool.total_size(), 1);
|
assert_eq!(write_pool.total_size(), 1);
|
||||||
assert_eq!(write_pool.stempool.size(), 2);
|
assert_eq!(write_pool.stempool.size(), 2);
|
||||||
|
@@ -141,7 +113,7 @@ fn test_the_transaction_pool() {
         let mut write_pool = pool.write().unwrap();
         assert!(
             write_pool
-                .add_to_pool(test_source(), tx1.clone(), true, &header.hash())
+                .add_to_pool(test_source(), tx1.clone(), true, &header)
                 .is_err()
         );
     }
@@ -153,7 +125,7 @@ fn test_the_transaction_pool() {
         let mut write_pool = pool.write().unwrap();
         assert!(
             write_pool
-                .add_to_pool(test_source(), tx1a, true, &header.hash())
+                .add_to_pool(test_source(), tx1a, true, &header)
                 .is_err()
         );
     }
@@ -164,7 +136,7 @@ fn test_the_transaction_pool() {
         let mut write_pool = pool.write().unwrap();
         assert!(
             write_pool
-                .add_to_pool(test_source(), bad_tx, true, &header.hash())
+                .add_to_pool(test_source(), bad_tx, true, &header)
                 .is_err()
         );
     }
@@ -178,7 +150,7 @@ fn test_the_transaction_pool() {
         let mut write_pool = pool.write().unwrap();
         assert!(
             write_pool
-                .add_to_pool(test_source(), tx, true, &header.hash())
+                .add_to_pool(test_source(), tx, true, &header)
                 .is_err()
         );
     }
|
@ -189,7 +161,7 @@ fn test_the_transaction_pool() {
|
||||||
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
|
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
|
||||||
assert!(
|
assert!(
|
||||||
write_pool
|
write_pool
|
||||||
.add_to_pool(test_source(), tx3, true, &header.hash())
|
.add_to_pool(test_source(), tx3, true, &header)
|
||||||
.is_err()
|
.is_err()
|
||||||
);
|
);
|
||||||
assert_eq!(write_pool.total_size(), 1);
|
assert_eq!(write_pool.total_size(), 1);
|
||||||
|
@ -207,7 +179,7 @@ fn test_the_transaction_pool() {
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(agg_tx.kernels().len(), 2);
|
assert_eq!(agg_tx.kernels().len(), 2);
|
||||||
write_pool
|
write_pool
|
||||||
.add_to_pool(test_source(), agg_tx, false, &header.hash())
|
.add_to_pool(test_source(), agg_tx, false, &header)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
assert_eq!(write_pool.total_size(), 2);
|
assert_eq!(write_pool.total_size(), 2);
|
||||||
}
|
}
|
||||||
|
@@ -222,11 +194,12 @@ fn test_the_transaction_pool() {
         let tx4 = test_transaction(&keychain, vec![800], vec![799]);
         // tx1 and tx2 are already in the txpool (in aggregated form)
         // tx4 is the "new" part of this aggregated tx that we care about
-        let agg_tx =
-            transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4], verifier_cache.clone())
-                .unwrap();
+        let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
+
+        agg_tx.validate(verifier_cache.clone()).unwrap();

         write_pool
-            .add_to_pool(test_source(), agg_tx, false, &header.hash())
+            .add_to_pool(test_source(), agg_tx, false, &header)
             .unwrap();
         assert_eq!(write_pool.total_size(), 3);
         let entry = write_pool.txpool.entries.last().unwrap();
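As with deaggregate earlier in this commit, transaction::aggregate no longer takes a verifier cache; validating the aggregated tx is an explicit second step for the caller. A toy model of that two-step shape (stand-in functions, not grin's):

#[derive(Debug)]
struct AggError;

// Stand-in for aggregate: combine several txs into one.
fn aggregate(txs: Vec<u64>) -> Result<u64, AggError> {
    if txs.is_empty() {
        Err(AggError)
    } else {
        Ok(txs.iter().sum())
    }
}

// Validation is a separate, explicit step after aggregation.
fn validate(tx: u64) -> Result<(), AggError> {
    if tx > 0 {
        Ok(())
    } else {
        Err(AggError)
    }
}

fn main() -> Result<(), AggError> {
    let agg_tx = aggregate(vec![10, 20, 30])?;
    validate(agg_tx)?;
    Ok(())
}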
@@ -245,19 +218,14 @@ fn test_the_transaction_pool() {
         // check we cannot add a double spend to the stempool
         assert!(
             write_pool
-                .add_to_pool(test_source(), double_spend_tx.clone(), true, &header.hash())
+                .add_to_pool(test_source(), double_spend_tx.clone(), true, &header)
                 .is_err()
         );

         // check we cannot add a double spend to the txpool
         assert!(
             write_pool
-                .add_to_pool(
-                    test_source(),
-                    double_spend_tx.clone(),
-                    false,
-                    &header.hash()
-                )
+                .add_to_pool(test_source(), double_spend_tx.clone(), false, &header)
                 .is_err()
         );
     }
@@ -27,7 +27,7 @@ use common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStat
 use core::core::hash::{Hash, Hashed};
 use core::core::transaction::Transaction;
 use core::core::verifier_cache::VerifierCache;
-use core::core::{BlockHeader, CompactBlock};
+use core::core::{BlockHeader, BlockSums, CompactBlock};
 use core::pow::Difficulty;
 use core::{core, global};
 use p2p;
@@ -46,7 +46,7 @@ fn wo<T>(weak_one: &OneTime<Weak<T>>) -> Arc<T> {
     w(weak_one.borrow().deref())
 }

-/// Implementation of the NetAdapter for the blockchain. Gets notified when new
+/// Implementation of the NetAdapter for the . Gets notified when new
 /// blocks and transactions are received and forwards to the chain and pool
 /// implementations.
 pub struct NetToChainAdapter {
@@ -80,7 +80,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
         };

         let tx_hash = tx.hash();
-        let block_hash = w(&self.chain).head_header().unwrap().hash();
+        let header = w(&self.chain).head_header().unwrap();

         debug!(
             LOGGER,
@@ -93,7 +93,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {

         let res = {
             let mut tx_pool = self.tx_pool.write().unwrap();
-            tx_pool.add_to_pool(source, tx, stem, &block_hash)
+            tx_pool.add_to_pool(source, tx, stem, &header)
         };

         if let Err(e) = res {
@@ -617,7 +617,7 @@ impl NetToChainAdapter {
 }

 /// Implementation of the ChainAdapter for the network. Gets notified when the
-/// blockchain accepted a new block, asking the pool to update its state and
+/// accepted a new block, asking the pool to update its state and
 /// the network to broadcast the block
 pub struct ChainToPoolAndNetAdapter {
     sync_state: Arc<SyncState>,
@@ -708,7 +708,7 @@ impl PoolToNetAdapter {
     }
 }

-/// Implements the view of the blockchain required by the TransactionPool to
+/// Implements the view of the required by the TransactionPool to
 /// operate. Mostly needed to break any direct lifecycle or implementation
 /// dependency between the pool and the chain.
 #[derive(Clone)]
@@ -732,25 +732,27 @@ impl PoolToChainAdapter {

 impl pool::BlockChain for PoolToChainAdapter {
     fn chain_head(&self) -> Result<BlockHeader, pool::PoolError> {
-        wo(&self.chain).head_header().map_err(|e| {
-            pool::PoolError::Other(format!(
-                "Chain adapter failed to retrieve chain head: {:?}",
-                e
-            ))
-        })
+        wo(&self.chain)
+            .head_header()
+            .map_err(|_| pool::PoolError::Other(format!("failed to get head_header")))
     }

-    fn validate_raw_txs(
-        &self,
-        txs: Vec<Transaction>,
-        pre_tx: Option<Transaction>,
-        block_hash: &Hash,
-    ) -> Result<(Vec<Transaction>), pool::PoolError> {
+    fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, pool::PoolError> {
         wo(&self.chain)
-            .validate_raw_txs(txs, pre_tx, block_hash)
-            .map_err(|e| {
-                pool::PoolError::Other(format!("Chain adapter failed to validate_raw_txs: {:?}", e))
-            })
+            .get_block_header(hash)
+            .map_err(|_| pool::PoolError::Other(format!("failed to get block_header")))
+    }
+
+    fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, pool::PoolError> {
+        wo(&self.chain)
+            .get_block_sums(hash)
+            .map_err(|_| pool::PoolError::Other(format!("failed to get block_sums")))
+    }
+
+    fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), pool::PoolError> {
+        wo(&self.chain)
+            .validate_tx(tx, header)
+            .map_err(|_| pool::PoolError::Other(format!("failed to validate tx")))
     }

     fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
@@ -93,12 +93,15 @@ fn process_stem_phase(
     let header = tx_pool.blockchain.chain_head()?;

     let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
-    let stem_txs = tx_pool.stempool.select_valid_transactions(
-        PoolEntryState::ToStem,
-        PoolEntryState::Stemmed,
-        txpool_tx,
-        &header.hash(),
-    )?;
+    let stem_txs = tx_pool
+        .stempool
+        .get_transactions_in_state(PoolEntryState::ToStem);
+    let stem_txs = tx_pool
+        .stempool
+        .select_valid_transactions(stem_txs, txpool_tx, &header)?;
+    tx_pool
+        .stempool
+        .transition_to_state(&stem_txs, PoolEntryState::Stemmed);

     if stem_txs.len() > 0 {
         debug!(
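The monitor now splits the old single select_valid_transactions call into three steps: collect entries in a given state, keep the ones that still validate, then transition only the selected set. A self-contained toy model of that pipeline (illustrative types; the validity rule is a stand-in):

#[derive(Clone, Copy, PartialEq)]
enum State {
    ToStem,
    Stemmed,
}

#[derive(Clone)]
struct Entry {
    id: u64,
    state: State,
}

struct Stempool {
    entries: Vec<Entry>,
}

impl Stempool {
    fn get_in_state(&self, s: State) -> Vec<Entry> {
        self.entries.iter().filter(|e| e.state == s).cloned().collect()
    }

    // Stand-in for select_valid_transactions: keep even ids only.
    fn select_valid(&self, candidates: Vec<Entry>) -> Vec<Entry> {
        candidates.into_iter().filter(|e| e.id % 2 == 0).collect()
    }

    fn transition(&mut self, selected: &[Entry], s: State) {
        for e in &mut self.entries {
            if selected.iter().any(|x| x.id == e.id) {
                e.state = s;
            }
        }
    }
}

fn main() {
    let mut pool = Stempool {
        entries: vec![
            Entry { id: 1, state: State::ToStem },
            Entry { id: 2, state: State::ToStem },
        ],
    };
    let todo = pool.get_in_state(State::ToStem);
    let selected = pool.select_valid(todo);
    pool.transition(&selected, State::Stemmed);
    assert_eq!(pool.get_in_state(State::Stemmed).len(), 1);
}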
@@ -107,7 +110,8 @@ fn process_stem_phase(
             stem_txs.len()
         );

-        let agg_tx = transaction::aggregate(stem_txs, verifier_cache.clone())?;
+        let agg_tx = transaction::aggregate(stem_txs)?;
+        agg_tx.validate(verifier_cache.clone())?;

         let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
         if res.is_err() {
@@ -121,7 +125,7 @@ fn process_stem_phase(
             identifier: "?.?.?.?".to_string(),
         };

-            tx_pool.add_to_pool(src, agg_tx, false, &header.hash())?;
+            tx_pool.add_to_pool(src, agg_tx, false, &header)?;
         }
     }
     Ok(())
|
@ -136,12 +140,15 @@ fn process_fluff_phase(
|
||||||
let header = tx_pool.blockchain.chain_head()?;
|
let header = tx_pool.blockchain.chain_head()?;
|
||||||
|
|
||||||
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
|
let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
|
||||||
let stem_txs = tx_pool.stempool.select_valid_transactions(
|
let stem_txs = tx_pool
|
||||||
PoolEntryState::ToFluff,
|
.stempool
|
||||||
PoolEntryState::Fluffed,
|
.get_transactions_in_state(PoolEntryState::ToFluff);
|
||||||
txpool_tx,
|
let stem_txs = tx_pool
|
||||||
&header.hash(),
|
.stempool
|
||||||
)?;
|
.select_valid_transactions(stem_txs, txpool_tx, &header)?;
|
||||||
|
tx_pool
|
||||||
|
.stempool
|
||||||
|
.transition_to_state(&stem_txs, PoolEntryState::Fluffed);
|
||||||
|
|
||||||
if stem_txs.len() > 0 {
|
if stem_txs.len() > 0 {
|
||||||
debug!(
|
debug!(
|
||||||
|
@ -150,14 +157,15 @@ fn process_fluff_phase(
|
||||||
stem_txs.len()
|
stem_txs.len()
|
||||||
);
|
);
|
||||||
|
|
||||||
let agg_tx = transaction::aggregate(stem_txs, verifier_cache.clone())?;
|
let agg_tx = transaction::aggregate(stem_txs)?;
|
||||||
|
agg_tx.validate(verifier_cache.clone())?;
|
||||||
|
|
||||||
let src = TxSource {
|
let src = TxSource {
|
||||||
debug_name: "fluff".to_string(),
|
debug_name: "fluff".to_string(),
|
||||||
identifier: "?.?.?.?".to_string(),
|
identifier: "?.?.?.?".to_string(),
|
||||||
};
|
};
|
||||||
|
|
||||||
tx_pool.add_to_pool(src, agg_tx, false, &header.hash())?;
|
tx_pool.add_to_pool(src, agg_tx, false, &header)?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@@ -238,7 +246,7 @@ fn process_expired_entries(
                 debug_name: "embargo_expired".to_string(),
                 identifier: "?.?.?.?".to_string(),
             };
-            match tx_pool.add_to_pool(src, entry.tx, false, &header.hash()) {
+            match tx_pool.add_to_pool(src, entry.tx, false, &header) {
                 Ok(_) => debug!(
                     LOGGER,
                     "dand_mon: embargo expired, fluffed tx successfully."
@@ -109,7 +109,12 @@ fn build_block(
     let difficulty = consensus::next_difficulty(diff_iter).unwrap();

     // extract current transaction from the pool
-    let txs = tx_pool.read().unwrap().prepare_mineable_transactions();
+    // TODO - we have a lot of unwrap() going on in this fn...
+    let txs = tx_pool
+        .read()
+        .unwrap()
+        .prepare_mineable_transactions()
+        .unwrap();

     // build the coinbase and the block itself
     let fees = txs.iter().map(|tx| tx.fee()).sum();
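prepare_mineable_transactions now returns a Result, which build_block unwraps for the moment (per the TODO above). A tiny self-contained sketch of propagating such an error with ? instead, purely illustrative and not the commit's code:

#[derive(Debug)]
struct PoolError;

// Stand-in for the now-fallible pool call.
fn prepare_mineable_transactions() -> Result<Vec<u64>, PoolError> {
    Ok(vec![1, 2, 3])
}

// Propagating the Result keeps pool/db failures out of panics.
fn build_block() -> Result<usize, PoolError> {
    let txs = prepare_mineable_transactions()?;
    Ok(txs.len())
}

fn main() {
    assert_eq!(build_block().unwrap(), 3);
}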
@@ -121,14 +126,7 @@ fn build_block(
     };

     let (output, kernel, block_fees) = get_coinbase(wallet_listener_url, block_fees)?;
-    let mut b = core::Block::with_reward(
-        &head,
-        txs,
-        output,
-        kernel,
-        difficulty.clone(),
-        verifier_cache.clone(),
-    )?;
+    let mut b = core::Block::with_reward(&head, txs, output, kernel, difficulty.clone())?;

     // making sure we're not spending time mining a useless block
     b.validate(