Mirror of https://github.com/mimblewimble/grin.git (synced 2025-04-24 03:11:14 +03:00)
Pool tx weight verification (#2466)
parent a97ab376cb · commit c8fd0575ed
9 changed files with 215 additions and 101 deletions
Changed paths: core, pool, servers/src/grin
@@ -27,7 +27,9 @@ use crate::core::committed::{self, Committed};
 use crate::core::compact_block::{CompactBlock, CompactBlockBody};
 use crate::core::hash::{Hash, Hashed, ZERO_HASH};
 use crate::core::verifier_cache::VerifierCache;
-use crate::core::{transaction, Commitment, Input, Output, Transaction, TransactionBody, TxKernel};
+use crate::core::{
+    transaction, Commitment, Input, Output, Transaction, TransactionBody, TxKernel, Weighting,
+};
 use crate::global;
 use crate::keychain::{self, BlindingFactor};
 use crate::pow::{Difficulty, Proof, ProofOfWork};
@@ -377,7 +379,7 @@ impl Readable for Block {
         // Treat any validation issues as data corruption.
         // An example of this would be reading a block
         // that exceeded the allowed number of inputs.
-        body.validate_read(true)
+        body.validate_read(Weighting::AsBlock)
             .map_err(|_| ser::Error::CorruptedData)?;

         Ok(Block { header, body })
@@ -608,7 +610,7 @@ impl Block {
     /// * coinbase sum verification
     /// * kernel sum verification
     pub fn validate_read(&self) -> Result<(), Error> {
-        self.body.validate_read(true)?;
+        self.body.validate_read(Weighting::AsBlock)?;
         self.verify_kernel_lock_heights()?;
         Ok(())
     }
@@ -638,7 +640,7 @@ impl Block {
         prev_kernel_offset: &BlindingFactor,
         verifier: Arc<RwLock<dyn VerifierCache>>,
     ) -> Result<Commitment, Error> {
-        self.body.validate(true, verifier)?;
+        self.body.validate(Weighting::AsBlock, verifier)?;

         self.verify_kernel_lock_heights()?;
         self.verify_coinbase()?;
@@ -365,6 +365,22 @@ impl FixedLength for TxKernelEntry {
         + secp::constants::AGG_SIGNATURE_SIZE;
 }

+/// Enum of possible tx weight verification options -
+///
+/// * As "transaction" checks tx (as block) weight does not exceed max_block_weight.
+/// * As "block" same as above but allow for additional coinbase reward (1 output, 1 kernel).
+/// * With "no limit" to skip the weight check.
+///
+#[derive(Clone, Copy)]
+pub enum Weighting {
+    /// Tx represents a tx (max block weight, accounting for additional coinbase reward).
+    AsTransaction,
+    /// Tx represents a block (max block weight).
+    AsBlock,
+    /// No max weight limit (skip the weight check).
+    NoLimit,
+}
+
 /// TransactionBody is a common abstraction for transaction and block
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct TransactionBody {
@@ -587,11 +603,36 @@ impl TransactionBody {
             .unwrap_or(0)
     }

-    // Verify the body is not too big in terms of number of inputs|outputs|kernels.
-    fn verify_weight(&self, with_reward: bool) -> Result<(), Error> {
-        // if as_block check the body as if it was a block, with an additional output and
-        // kernel for reward
-        let reserve = if with_reward { 0 } else { 1 };
+    /// Verify the body is not too big in terms of number of inputs|outputs|kernels.
+    /// Weight rules vary depending on the "weight type" (block or tx or pool).
+    fn verify_weight(&self, weighting: Weighting) -> Result<(), Error> {
+        // If "tx" body then remember to reduce the max_block_weight for block requirements.
+        // If "block" body then verify weight based on full set of inputs|outputs|kernels.
+        // If "pool" body then skip weight verification (pool can be larger than single block).
+        //
+        // Note: Taking a max tx and building a block from it we need to allow room
+        // for the additional coinbase reward (output + kernel).
+        //
+        let reserve = match weighting {
+            Weighting::AsTransaction => 1,
+            Weighting::AsBlock => 0,
+            Weighting::NoLimit => {
+                // We do not verify "tx as pool" weight so we are done here.
+                return Ok(());
+            }
+        };
+
+        // Note: If we add a reserve then we are reducing the number of outputs and kernels
+        // allowed in the body itself.
+        // A block is allowed to be slightly weightier than a tx to account for
+        // the additional coinbase reward output and kernel.
+        // i.e. We need to reserve space for coinbase reward when verifying tx weight.
+        //
+        // In effect we are verifying the tx _can_ be included in a block without exceeding
+        // MAX_BLOCK_WEIGHT.
+        //
+        // max_block = max_tx + coinbase
+        //
         let tx_block_weight = TransactionBody::weight_as_block(
             self.inputs.len(),
             self.outputs.len() + reserve,
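The reserve logic above is the heart of the change: a body checked "as transaction" must leave room for the coinbase output and kernel the eventual block will add, a body checked "as block" is measured against the full limit, and "no limit" skips the check. A minimal, self-contained sketch of that rule follows; the per-item weights, the limit of 40 and the function shape are placeholders for illustration, not Grin's consensus constants or its actual weight_as_block implementation.

// Illustrative only: placeholder weights and limit, not consensus values.
#[derive(Clone, Copy)]
enum Weighting {
    AsTransaction,
    AsBlock,
    NoLimit,
}

fn verify_weight(inputs: usize, outputs: usize, kernels: usize, w: Weighting) -> Result<(), String> {
    // A tx must leave room for the coinbase reward (one output + one kernel)
    // that will share a block with it; a block is checked as-is.
    let reserve = match w {
        Weighting::AsTransaction => 1,
        Weighting::AsBlock => 0,
        Weighting::NoLimit => return Ok(()), // skip the weight check entirely
    };
    // Toy weights: 1 per input, 4 per output, 3 per kernel; toy limit of 40.
    let weight = inputs + (outputs + reserve) * 4 + (kernels + reserve) * 3;
    if weight > 40 {
        return Err(format!("weight {} exceeds max block weight", weight));
    }
    Ok(())
}

fn main() {
    // The same body can pass "as block" yet fail "as transaction",
    // because the tx check reserves space for the coinbase reward.
    assert!(verify_weight(2, 8, 1, Weighting::AsBlock).is_ok()); // 2 + 32 + 3 = 37
    assert!(verify_weight(2, 8, 1, Weighting::AsTransaction).is_err()); // 2 + 36 + 6 = 44
    assert!(verify_weight(2, 80, 10, Weighting::NoLimit).is_ok()); // never measured
}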
@@ -656,8 +697,8 @@ impl TransactionBody {
     /// Subset of full validation that skips expensive verification steps, specifically -
     /// * rangeproof verification
     /// * kernel signature verification
-    pub fn validate_read(&self, with_reward: bool) -> Result<(), Error> {
-        self.verify_weight(with_reward)?;
+    pub fn validate_read(&self, weighting: Weighting) -> Result<(), Error> {
+        self.verify_weight(weighting)?;
         self.verify_sorted()?;
         self.verify_cut_through()?;
         Ok(())
@@ -668,10 +709,10 @@ impl TransactionBody {
     /// output.
     pub fn validate(
         &self,
-        with_reward: bool,
+        weighting: Weighting,
         verifier: Arc<RwLock<dyn VerifierCache>>,
     ) -> Result<(), Error> {
-        self.validate_read(with_reward)?;
+        self.validate_read(weighting)?;

         // Find all the outputs that have not had their rangeproofs verified.
         let outputs = {
@@ -892,7 +933,7 @@ impl Transaction {
     /// * kernel signature verification (on the body)
     /// * kernel sum verification
     pub fn validate_read(&self) -> Result<(), Error> {
-        self.body.validate_read(false)?;
+        self.body.validate_read(Weighting::AsTransaction)?;
         self.body.verify_features()?;
         Ok(())
     }
@@ -900,8 +941,12 @@ impl Transaction {
     /// Validates all relevant parts of a fully built transaction. Checks the
     /// excess value against the signature as well as range proofs for each
     /// output.
-    pub fn validate(&self, verifier: Arc<RwLock<dyn VerifierCache>>) -> Result<(), Error> {
-        self.body.validate(false, verifier)?;
+    pub fn validate(
+        &self,
+        weighting: Weighting,
+        verifier: Arc<RwLock<dyn VerifierCache>>,
+    ) -> Result<(), Error> {
+        self.body.validate(weighting, verifier)?;
         self.body.verify_features()?;
         self.verify_kernel_sums(self.overage(), self.offset)?;
         Ok(())
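With the bool parameter gone, each call site in the rest of this diff now states which limit it wants: block validation passes Weighting::AsBlock, anything that must eventually fit in a block alongside a coinbase (wallet finalize, pool entries, dandelion aggregates, the test txs below) passes Weighting::AsTransaction, and the pool-wide aggregate passes Weighting::NoLimit. A standalone sketch of that mapping; the Caller enum is invented for illustration and is not part of the grin API.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Weighting {
    AsTransaction,
    AsBlock,
    NoLimit,
}

// Hypothetical summary of the call sites touched by this commit.
#[derive(Clone, Copy)]
enum Caller {
    BlockBody,     // Block::validate / Block::validate_read
    SingleTx,      // wallet finalize, pool add, dandelion stem/fluff aggregates
    PoolAggregate, // txpool-wide aggregate used for stempool reconciliation
}

fn weighting_for(caller: Caller) -> Weighting {
    match caller {
        Caller::BlockBody => Weighting::AsBlock,
        Caller::SingleTx => Weighting::AsTransaction,
        Caller::PoolAggregate => Weighting::NoLimit,
    }
}

fn main() {
    assert_eq!(weighting_for(Caller::BlockBody), Weighting::AsBlock);
    assert_eq!(weighting_for(Caller::SingleTx), Weighting::AsTransaction);
    assert_eq!(weighting_for(Caller::PoolAggregate), Weighting::NoLimit);
}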
@@ -256,6 +256,7 @@ mod test {
     use std::sync::Arc;

     use super::*;
+    use crate::core::transaction::Weighting;
     use crate::core::verifier_cache::{LruVerifierCache, VerifierCache};
     use crate::keychain::{ExtKeychain, ExtKeychainPath};

@@ -283,7 +284,7 @@ mod test {
         )
         .unwrap();

-        tx.validate(vc.clone()).unwrap();
+        tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
     }

     #[test]
@@ -306,7 +307,7 @@ mod test {
         )
         .unwrap();

-        tx.validate(vc.clone()).unwrap();
+        tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
     }

     #[test]
@@ -323,6 +324,6 @@ mod test {
         )
         .unwrap();

-        tx.validate(vc.clone()).unwrap();
+        tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
     }
 }
@@ -18,7 +18,7 @@
 use crate::blake2::blake2b::blake2b;
 use crate::core::amount_to_hr_string;
 use crate::core::committed::Committed;
-use crate::core::transaction::{kernel_features, kernel_sig_msg, Transaction};
+use crate::core::transaction::{kernel_features, kernel_sig_msg, Transaction, Weighting};
 use crate::core::verifier_cache::LruVerifierCache;
 use crate::keychain::{BlindSum, BlindingFactor, Keychain};
 use crate::libtx::error::{Error, ErrorKind};
@@ -472,8 +472,9 @@ impl Slate {
         final_tx.kernels()[0].verify()?;

         // confirm the overall transaction is valid (including the updated kernel)
+        // accounting for tx weight limits
         let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
-        let _ = final_tx.validate(verifier_cache)?;
+        let _ = final_tx.validate(Weighting::AsTransaction, verifier_cache)?;

         self.tx = final_tx;
         Ok(())
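Validating the finalized slate with Weighting::AsTransaction means the wallet refuses to produce a transaction that could never be mined: following the max_block = max_tx + coinbase comment earlier in this diff, the tx plus one coinbase reward must still fit under the block weight limit. A toy check of that inequality, with placeholder numbers rather than consensus constants:

// Toy numbers only; the real limits live in grin's consensus code.
fn can_ever_be_mined(tx_weight: u64, coinbase_weight: u64, max_block_weight: u64) -> bool {
    // A tx is acceptable only if its weight plus the coinbase reward
    // (one output + one kernel) still fits in a block.
    tx_weight + coinbase_weight <= max_block_weight
}

fn main() {
    let max_block_weight = 40; // placeholder
    let coinbase_weight = 7; // one output + one kernel, placeholder weights
    assert!(can_ever_be_mined(30, coinbase_weight, max_block_weight));
    assert!(!can_ever_be_mined(38, coinbase_weight, max_block_weight));
}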
@@ -20,7 +20,7 @@ use self::core::core::block::BlockHeader;
 use self::core::core::block::Error::KernelLockHeight;
 use self::core::core::hash::{Hashed, ZERO_HASH};
 use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
-use self::core::core::{aggregate, deaggregate, KernelFeatures, Output, Transaction};
+use self::core::core::{aggregate, deaggregate, KernelFeatures, Output, Transaction, Weighting};
 use self::core::libtx::build::{
     self, initial_tx, input, output, with_excess, with_fee, with_lock_height,
 };
@@ -115,7 +115,8 @@ fn build_tx_kernel() {
     .unwrap();

     // check the tx is valid
-    tx.validate(verifier_cache()).unwrap();
+    tx.validate(Weighting::AsTransaction, verifier_cache())
+        .unwrap();

     // check the kernel is also itself valid
     assert_eq!(tx.kernels().len(), 1);
@@ -133,15 +134,19 @@ fn transaction_cut_through() {
     let tx1 = tx1i2o();
     let tx2 = tx2i1o();

-    assert!(tx1.validate(verifier_cache()).is_ok());
-    assert!(tx2.validate(verifier_cache()).is_ok());
+    assert!(tx1
+        .validate(Weighting::AsTransaction, verifier_cache())
+        .is_ok());
+    assert!(tx2
+        .validate(Weighting::AsTransaction, verifier_cache())
+        .is_ok());

     let vc = verifier_cache();

     // now build a "cut_through" tx from tx1 and tx2
     let tx3 = aggregate(vec![tx1, tx2]).unwrap();

-    assert!(tx3.validate(vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
 }

 // Attempt to deaggregate a multi-kernel transaction in a different way
@@ -154,26 +159,32 @@ fn multi_kernel_transaction_deaggregation() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
-    assert!(tx3.validate(vc.clone()).is_ok());
-    assert!(tx4.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
     let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
     let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();

-    assert!(tx1234.validate(vc.clone()).is_ok());
-    assert!(tx12.validate(vc.clone()).is_ok());
-    assert!(tx34.validate(vc.clone()).is_ok());
+    assert!(tx1234
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
+    assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx34.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
-    assert!(deaggregated_tx34.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx34
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx34, deaggregated_tx34);

     let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();

-    assert!(deaggregated_tx12.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx12
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx12, deaggregated_tx12);
 }

@@ -185,18 +196,20 @@ fn multi_kernel_transaction_deaggregation_2() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
-    assert!(tx3.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
     let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();

-    assert!(tx123.validate(vc.clone()).is_ok());
-    assert!(tx12.validate(vc.clone()).is_ok());
+    assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
-    assert!(deaggregated_tx3.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx3
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx3, deaggregated_tx3);
 }

@@ -208,19 +221,21 @@ fn multi_kernel_transaction_deaggregation_3() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
-    assert!(tx3.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
     let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
     let tx2 = aggregate(vec![tx2.clone()]).unwrap();

-    assert!(tx123.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
+    assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
-    assert!(deaggregated_tx13.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx13
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx13, deaggregated_tx13);
 }

@@ -234,11 +249,11 @@ fn multi_kernel_transaction_deaggregation_4() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
-    assert!(tx3.validate(vc.clone()).is_ok());
-    assert!(tx4.validate(vc.clone()).is_ok());
-    assert!(tx5.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let tx12345 = aggregate(vec![
         tx1.clone(),
@@ -248,14 +263,18 @@ fn multi_kernel_transaction_deaggregation_4() {
         tx5.clone(),
     ])
     .unwrap();
-    assert!(tx12345.validate(vc.clone()).is_ok());
+    assert!(tx12345
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());

     let deaggregated_tx5 = deaggregate(
         tx12345.clone(),
         vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
     )
     .unwrap();
-    assert!(deaggregated_tx5.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx5
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx5, deaggregated_tx5);
 }

@@ -269,11 +288,11 @@ fn multi_kernel_transaction_deaggregation_5() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
-    assert!(tx3.validate(vc.clone()).is_ok());
-    assert!(tx4.validate(vc.clone()).is_ok());
-    assert!(tx5.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let tx12345 = aggregate(vec![
         tx1.clone(),
@@ -286,10 +305,14 @@ fn multi_kernel_transaction_deaggregation_5() {
     let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
     let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();

-    assert!(tx12345.validate(vc.clone()).is_ok());
+    assert!(tx12345
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());

     let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
-    assert!(deaggregated_tx5.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx5
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx5, deaggregated_tx5);
 }

@@ -301,22 +324,26 @@ fn basic_transaction_deaggregation() {

     let vc = verifier_cache();

-    assert!(tx1.validate(vc.clone()).is_ok());
-    assert!(tx2.validate(vc.clone()).is_ok());
+    assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
+    assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     // now build a "cut_through" tx from tx1 and tx2
     let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();

-    assert!(tx3.validate(vc.clone()).is_ok());
+    assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());

     let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();

-    assert!(deaggregated_tx1.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx1
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx1, deaggregated_tx1);

     let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();

-    assert!(deaggregated_tx2.validate(vc.clone()).is_ok());
+    assert!(deaggregated_tx2
+        .validate(Weighting::AsTransaction, vc.clone())
+        .is_ok());
     assert_eq!(tx2, deaggregated_tx2);
 }

@@ -347,7 +374,9 @@ fn hash_output() {
 #[test]
 fn blind_tx() {
     let btx = tx2i1o();
-    assert!(btx.validate(verifier_cache()).is_ok());
+    assert!(btx
+        .validate(Weighting::AsTransaction, verifier_cache())
+        .is_ok());

     // Ignored for bullet proofs, because calling range_proof_info
     // with a bullet proof causes painful errors
@@ -410,7 +439,9 @@ fn tx_build_exchange() {
     )
     .unwrap();

-    tx_final.validate(verifier_cache()).unwrap();
+    tx_final
+        .validate(Weighting::AsTransaction, verifier_cache())
+        .unwrap();
 }

 #[test]
@@ -436,7 +467,7 @@ fn reward_with_tx_block() {
     let vc = verifier_cache();

     let mut tx1 = tx2i1o();
-    tx1.validate(vc.clone()).unwrap();
+    tx1.validate(Weighting::AsTransaction, vc.clone()).unwrap();

     let previous_header = BlockHeader::default();

@@ -524,11 +555,13 @@ fn test_block_with_timelocked_tx() {
 #[test]
 pub fn test_verify_1i1o_sig() {
     let tx = tx1i1o();
-    tx.validate(verifier_cache()).unwrap();
+    tx.validate(Weighting::AsTransaction, verifier_cache())
+        .unwrap();
 }

 #[test]
 pub fn test_verify_2i1o_sig() {
     let tx = tx2i1o();
-    tx.validate(verifier_cache()).unwrap();
+    tx.validate(Weighting::AsTransaction, verifier_cache())
+        .unwrap();
 }
@@ -19,7 +19,9 @@ use self::core::core::hash::{Hash, Hashed};
 use self::core::core::id::{ShortId, ShortIdentifiable};
 use self::core::core::transaction;
 use self::core::core::verifier_cache::VerifierCache;
-use self::core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
+use self::core::core::{
+    Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel, Weighting,
+};
 use self::util::RwLock;
 use crate::types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
 use grin_core as core;
@@ -127,7 +129,11 @@ impl Pool {
         let mut flat_txs: Vec<Transaction> = tx_buckets
             .into_iter()
             .filter_map(|bucket| transaction::aggregate(bucket).ok())
-            .filter(|x| x.validate(self.verifier_cache.clone()).is_ok())
+            .filter(|x| {
+                // Here we validate the tx, subject to regular tx weight limits.
+                x.validate(Weighting::AsTransaction, self.verifier_cache.clone())
+                    .is_ok()
+            })
             .collect();

         // sort by fees over weight, multiplying by 1000 to keep some precision
@@ -143,8 +149,9 @@ impl Pool {

         // Iteratively apply the txs to the current chain state,
         // rejecting any that do not result in a valid state.
+        // Verify these txs produce an aggregated tx below max tx weight.
         // Return a vec of all the valid txs.
-        let txs = self.validate_raw_txs(flat_txs, None, &header)?;
+        let txs = self.validate_raw_txs(flat_txs, None, &header, Weighting::AsTransaction)?;
         Ok(txs)
     }

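These two hunks are the block-building path: each fee bucket is aggregated, buckets whose aggregate fails tx-weight validation are dropped, and the surviving txs are then re-checked against chain state with Weighting::AsTransaction so the block under construction cannot blow the weight limit. A self-contained sketch of the same filter pattern with toy types; aggregate_bucket, ToyTx and the weight numbers are invented for illustration.

#[derive(Debug, Clone)]
struct ToyTx {
    weight: u64,
}

// Aggregation roughly sums the parts (ignoring cut-through here).
fn aggregate_bucket(bucket: Vec<ToyTx>) -> Option<ToyTx> {
    if bucket.is_empty() {
        return None;
    }
    Some(ToyTx {
        weight: bucket.iter().map(|t| t.weight).sum(),
    })
}

fn validate_as_transaction(tx: &ToyTx) -> Result<(), String> {
    let max_tx_weight = 40; // placeholder limit
    if tx.weight > max_tx_weight {
        return Err(format!("too heavy: {}", tx.weight));
    }
    Ok(())
}

fn main() {
    let buckets = vec![
        vec![ToyTx { weight: 10 }, ToyTx { weight: 15 }], // aggregate 25: kept
        vec![ToyTx { weight: 30 }, ToyTx { weight: 30 }], // aggregate 60: dropped
    ];

    // Mirrors the filter_map + filter chain above: aggregate each bucket,
    // then keep only aggregates that validate as a regular transaction.
    let flat_txs: Vec<ToyTx> = buckets
        .into_iter()
        .filter_map(aggregate_bucket)
        .filter(|x| validate_as_transaction(x).is_ok())
        .collect();

    assert_eq!(flat_txs.len(), 1);
    assert_eq!(flat_txs[0].weight, 25);
}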
@@ -152,14 +159,19 @@ impl Pool {
         self.entries.iter().map(|x| x.tx.clone()).collect()
     }

-    pub fn aggregate_transaction(&self) -> Result<Option<Transaction>, PoolError> {
+    /// Return a single aggregate tx representing all txs in the txpool.
+    /// Returns None if the txpool is empty.
+    pub fn all_transactions_aggregate(&self) -> Result<Option<Transaction>, PoolError> {
         let txs = self.all_transactions();
         if txs.is_empty() {
             return Ok(None);
         }

         let tx = transaction::aggregate(txs)?;
-        tx.validate(self.verifier_cache.clone())?;
+
+        // Validate the single aggregate transaction "as pool", not subject to tx weight limits.
+        tx.validate(Weighting::NoLimit, self.verifier_cache.clone())?;

         Ok(Some(tx))
     }

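The renamed all_transactions_aggregate validates with Weighting::NoLimit on purpose: every tx in the pool was admitted under the tx weight limit individually, but the aggregate of the whole pool can be far heavier than anything that fits in one block, and it is only used internally (for stempool reconciliation and the dandelion monitor), never relayed or mined as-is. A toy illustration with placeholder numbers:

fn main() {
    let max_tx_weight = 40u64; // placeholder per-tx limit
    let pool: Vec<u64> = vec![30; 10]; // ten txs, each individually under the limit

    assert!(pool.iter().all(|w| *w <= max_tx_weight));

    // The pool-wide aggregate is roughly the sum of its parts, so it
    // routinely exceeds any per-tx or per-block limit. Hence Weighting::NoLimit
    // here, while block building re-checks the subsets it actually selects
    // with Weighting::AsTransaction.
    let pool_weight: u64 = pool.iter().sum();
    assert!(pool_weight > max_tx_weight);

    println!("pool aggregate weight {} vs per-tx limit {}", pool_weight, max_tx_weight);
}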
@@ -169,7 +181,7 @@ impl Pool {
         extra_tx: Option<Transaction>,
         header: &BlockHeader,
     ) -> Result<Vec<Transaction>, PoolError> {
-        let valid_txs = self.validate_raw_txs(txs, extra_tx, header)?;
+        let valid_txs = self.validate_raw_txs(txs, extra_tx, header, Weighting::NoLimit)?;
         Ok(valid_txs)
     }

@@ -216,15 +228,21 @@ impl Pool {
             // Create a single aggregated tx from the existing pool txs and the
             // new entry
             txs.push(entry.tx.clone());

-            let tx = transaction::aggregate(txs)?;
-            tx.validate(self.verifier_cache.clone())?;
-            tx
+            transaction::aggregate(txs)?
         };

-        // Validate aggregated tx against a known chain state.
-        self.validate_raw_tx(&agg_tx, header)?;
+        // Validate aggregated tx (existing pool + new tx), ignoring tx weight limits.
+        // Validate against known chain state at the provided header.
+        self.validate_raw_tx(&agg_tx, header, Weighting::NoLimit)?;
+
+        // If we get here successfully then we can safely add the entry to the pool.
+        self.log_pool_add(&entry, header);
+        self.entries.push(entry);
+
+        Ok(())
+    }
+
+    fn log_pool_add(&self, entry: &PoolEntry, header: &BlockHeader) {
         debug!(
             "add_to_pool [{}]: {} ({}) [in/out/kern: {}/{}/{}] pool: {} (at block {})",
             self.name,
@@ -236,18 +254,17 @@ impl Pool {
             self.size(),
             header.hash(),
         );
-        // If we get here successfully then we can safely add the entry to the pool.
-        self.entries.push(entry);
-
-        Ok(())
     }

     fn validate_raw_tx(
         &self,
         tx: &Transaction,
         header: &BlockHeader,
+        weighting: Weighting,
     ) -> Result<BlockSums, PoolError> {
-        tx.validate(self.verifier_cache.clone())?;
+        // Validate the tx, conditionally checking against weight limits,
+        // based on weight verification type.
+        tx.validate(weighting, self.verifier_cache.clone())?;

         // Validate the tx against current chain state.
         // Check all inputs are in the current UTXO set.
@@ -263,6 +280,7 @@ impl Pool {
         txs: Vec<Transaction>,
         extra_tx: Option<Transaction>,
         header: &BlockHeader,
+        weighting: Weighting,
     ) -> Result<Vec<Transaction>, PoolError> {
         let mut valid_txs = vec![];

@@ -278,7 +296,7 @@ impl Pool {
             let agg_tx = transaction::aggregate(candidate_txs)?;

             // We know the tx is valid if the entire aggregate tx is valid.
-            if self.validate_raw_tx(&agg_tx, header).is_ok() {
+            if self.validate_raw_tx(&agg_tx, header, weighting).is_ok() {
                 valid_txs.push(tx);
             }
         }

@@ -20,7 +20,7 @@
 use self::core::core::hash::{Hash, Hashed};
 use self::core::core::id::ShortId;
 use self::core::core::verifier_cache::VerifierCache;
-use self::core::core::{transaction, Block, BlockHeader, Transaction};
+use self::core::core::{transaction, Block, BlockHeader, Transaction, Weighting};
 use self::util::RwLock;
 use crate::pool::Pool;
 use crate::types::{
@@ -108,7 +108,10 @@ impl TransactionPool {
         let txs = self.txpool.find_matching_transactions(entry.tx.kernels());
         if !txs.is_empty() {
             let tx = transaction::deaggregate(entry.tx, txs)?;
-            tx.validate(self.verifier_cache.clone())?;
+
+            // Validate this deaggregated tx "as tx", subject to regular tx weight limits.
+            tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;
+
             entry.tx = tx;
             entry.src.debug_name = "deagg".to_string();
         }
@@ -118,7 +121,7 @@ impl TransactionPool {
         // We now need to reconcile the stempool based on the new state of the txpool.
         // Some stempool txs may no longer be valid and we need to evict them.
         {
-            let txpool_tx = self.txpool.aggregate_transaction()?;
+            let txpool_tx = self.txpool.all_transactions_aggregate()?;
             self.stempool.reconcile(txpool_tx, header)?;
         }

@@ -145,7 +148,8 @@ impl TransactionPool {
         self.is_acceptable(&tx, stem)?;

         // Make sure the transaction is valid before anything else.
-        tx.validate(self.verifier_cache.clone())
+        // Validate tx accounting for max tx weight.
+        tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())
             .map_err(PoolError::InvalidTx)?;

         // Check the tx lock_time is valid based on current chain state.
@@ -218,7 +222,7 @@ impl TransactionPool {
         // Now reconcile our stempool, accounting for the updated txpool txs.
         self.stempool.reconcile_block(block);
         {
-            let txpool_tx = self.txpool.aggregate_transaction()?;
+            let txpool_tx = self.txpool.all_transactions_aggregate()?;
             self.stempool.reconcile(txpool_tx, &block.header)?;
         }

@@ -15,7 +15,7 @@
 pub mod common;

 use self::core::core::verifier_cache::LruVerifierCache;
-use self::core::core::{transaction, Block, BlockHeader};
+use self::core::core::{transaction, Block, BlockHeader, Weighting};
 use self::core::libtx;
 use self::core::pow::Difficulty;
 use self::keychain::{ExtKeychain, Keychain};
@@ -177,7 +177,7 @@ fn test_the_transaction_pool() {
         let mut write_pool = pool.write();
         let agg_tx = write_pool
             .stempool
-            .aggregate_transaction()
+            .all_transactions_aggregate()
             .unwrap()
             .unwrap();
         assert_eq!(agg_tx.kernels().len(), 2);
@@ -219,7 +219,9 @@ fn test_the_transaction_pool() {
         // tx4 is the "new" part of this aggregated tx that we care about
        let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();

-        agg_tx.validate(verifier_cache.clone()).unwrap();
+        agg_tx
+            .validate(Weighting::AsTransaction, verifier_cache.clone())
+            .unwrap();

         write_pool
             .add_to_pool(test_source(), agg_tx, false, &header)
@@ -97,7 +97,8 @@ fn process_stem_phase(
         return Ok(());
     }

-    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
+    // Get the aggregate tx representing the entire txpool.
+    let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?;

     let stem_txs = tx_pool
         .stempool
@@ -110,7 +111,10 @@ fn process_stem_phase(
         debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());

         let agg_tx = transaction::aggregate(stem_txs)?;
-        agg_tx.validate(verifier_cache.clone())?;
+        agg_tx.validate(
+            transaction::Weighting::AsTransaction,
+            verifier_cache.clone(),
+        )?;

         let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
         if res.is_err() {
@@ -143,7 +147,8 @@ fn process_fluff_phase(
         return Ok(());
     }

-    let txpool_tx = tx_pool.txpool.aggregate_transaction()?;
+    // Get the aggregate tx representing the entire txpool.
+    let txpool_tx = tx_pool.txpool.all_transactions_aggregate()?;

     let stem_txs = tx_pool
         .stempool
@@ -156,7 +161,10 @@ fn process_fluff_phase(
         debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());

         let agg_tx = transaction::aggregate(stem_txs)?;
-        agg_tx.validate(verifier_cache.clone())?;
+        agg_tx.validate(
+            transaction::Weighting::AsTransaction,
+            verifier_cache.clone(),
+        )?;

         let src = TxSource {
             debug_name: "fluff".to_string(),