Simplify tx.validate() and transaction::aggregate() (#1436)

* simplify tx validation and aggregation
we *only* need to account for the reward when building a block from txs

* rustfmt

* cleanup and tests passing

* rustfmt

* better comments in with_reward()

* fix wallet tests
Antioch Peverell, 2018-08-28 20:00:25 +01:00, committed by GitHub
parent 3dacc6a397
commit c334c557aa
10 changed files with 120 additions and 135 deletions
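Concretely, the commit drops the `with_reward` flag from `Transaction::validate()` / `validate_read()` and the optional reward argument from `transaction::aggregate()`; only `Block::with_reward()` still deals with the reward, attaching it after aggregation. A rough before/after sketch of the affected call sites (behaviour taken from the hunks below; the import path and helper function are illustrative assumptions):

```rust
use grin_core::core::transaction::{self, Error, Transaction};

// Before this commit, callers threaded reward handling everywhere:
//     tx.validate(false)?;
//     let agg = transaction::aggregate(txs, None)?;
//     let block_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;

// After this commit, plain transactions need no reward plumbing at all; the
// block-building case (the one place a reward is involved) is shown in the
// first hunk below.
fn validate_and_aggregate(txs: Vec<Transaction>) -> Result<Transaction, Error> {
    for tx in &txs {
        tx.validate()?; // no with_reward flag any more
    }
    transaction::aggregate(txs) // no reward option; validates the aggregate internally
}
```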


@@ -431,9 +431,14 @@ impl Block {
reward_kern: TxKernel,
difficulty: Difficulty,
) -> Result<Block, Error> {
// A block is just a big transaction, aggregate as such. Note that
// aggregate also runs validation and duplicate commitment checks.
let agg_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;
// A block is just a big transaction, aggregate as such.
// Note that aggregation also runs transaction validation
// and duplicate commitment checks.
let mut agg_tx = transaction::aggregate(txs)?;
// Now add the reward output and reward kernel to the aggregate tx.
// At this point the tx is technically invalid,
// but the tx body is valid if we account for the reward (i.e. as a block).
agg_tx = agg_tx.with_output(reward_out).with_kernel(reward_kern);
// Now add the kernel offset of the previous block for a total
let total_kernel_offset =
@@ -452,6 +457,9 @@ impl Block {
let now = Utc::now().timestamp();
let timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now, 0), Utc);
// Now build the block with all the above information.
// Note: We have not validated the block here.
// Caller must validate the block as necessary.
Block {
header: BlockHeader {
height: prev.height + 1,

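The hunk above carries the one behavioural subtlety of the change: the comment notes the intermediate tx is "technically invalid". A small illustration of that claim (a sketch, not part of the diff; it relies on `verify_features()` rejecting coinbase outputs and kernels in a plain transaction, which is exactly the check the old code skipped when `with_reward` was set; import path is an assumption):

```rust
use grin_core::core::transaction::{self, Error, Output, Transaction, TxKernel};

fn reward_invalidates_plain_tx(
    txs: Vec<Transaction>,
    reward_out: Output,
    reward_kern: TxKernel,
) -> Result<(), Error> {
    // The plain aggregate passes full tx-level validation.
    let agg_tx = transaction::aggregate(txs)?;
    assert!(agg_tx.validate().is_ok());

    // Once the coinbase reward output and kernel are attached, tx-level
    // validation is expected to fail (coinbase features, and a kernel sum
    // that no longer balances against fees alone) - the result is only
    // valid when interpreted as part of a block.
    let agg_tx = agg_tx.with_output(reward_out).with_kernel(reward_kern);
    assert!(agg_tx.validate().is_err());
    Ok(())
}
```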

@@ -613,8 +613,7 @@ impl Readable for Transaction {
// Treat any validation issues as data corruption.
// An example of this would be reading a tx
// that exceeded the allowed number of inputs.
tx.validate_read(false)
.map_err(|_| ser::Error::CorruptedData)?;
tx.validate_read().map_err(|_| ser::Error::CorruptedData)?;
Ok(tx)
}
@@ -749,23 +748,19 @@ impl Transaction {
/// * rangeproof verification (on the body)
/// * kernel signature verification (on the body)
/// * kernel sum verification
pub fn validate_read(&self, with_reward: bool) -> Result<(), Error> {
self.body.validate_read(with_reward)?;
if !with_reward {
self.body.verify_features()?;
}
pub fn validate_read(&self) -> Result<(), Error> {
self.body.validate_read(false)?;
self.body.verify_features()?;
Ok(())
}
/// Validates all relevant parts of a fully built transaction. Checks the
/// excess value against the signature as well as range proofs for each
/// output.
pub fn validate(&self, with_reward: bool) -> Result<(), Error> {
self.body.validate(with_reward)?;
if !with_reward {
self.body.verify_features()?;
self.verify_kernel_sums(self.overage(), self.offset)?;
}
pub fn validate(&self) -> Result<(), Error> {
self.body.validate(false)?;
self.body.verify_features()?;
self.verify_kernel_sums(self.overage(), self.offset)?;
Ok(())
}
@@ -812,16 +807,13 @@ pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result
Ok(())
}
/// Aggregate a vec of transactions into a multi-kernel transaction with
/// cut_through. Optionally allows passing a reward output and kernel for
/// block building.
pub fn aggregate(
mut transactions: Vec<Transaction>,
reward: Option<(Output, TxKernel)>,
) -> Result<Transaction, Error> {
/// Aggregate a vec of txs into a multi-kernel tx with cut_through.
pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
// convenience short-circuiting
if reward.is_none() && transactions.len() == 1 {
return Ok(transactions.pop().unwrap());
if txs.is_empty() {
return Ok(Transaction::empty());
} else if txs.len() == 1 {
return Ok(txs.pop().unwrap());
}
let mut inputs: Vec<Input> = vec![];
@@ -832,18 +824,13 @@ pub fn aggregate(
// transaction
let mut kernel_offsets: Vec<BlindingFactor> = vec![];
for mut transaction in transactions {
for mut tx in txs {
// we will sum these later to give a single aggregate offset
kernel_offsets.push(transaction.offset);
kernel_offsets.push(tx.offset);
inputs.append(&mut transaction.body.inputs);
outputs.append(&mut transaction.body.outputs);
kernels.append(&mut transaction.body.kernels);
}
let with_reward = reward.is_some();
if let Some((out, kernel)) = reward {
outputs.push(out);
kernels.push(kernel);
inputs.append(&mut tx.body.inputs);
outputs.append(&mut tx.body.outputs);
kernels.append(&mut tx.body.kernels);
}
// Sort inputs and outputs during cut_through.
@@ -867,7 +854,7 @@ pub fn aggregate(
// The resulting tx could be invalid for a variety of reasons -
// * tx too large (too many inputs|outputs|kernels)
// * cut-through may have invalidated the sums
tx.validate(with_reward)?;
tx.validate()?;
Ok(tx)
}
@@ -883,7 +870,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
// transaction
let mut kernel_offsets = vec![];
let tx = aggregate(txs, None)?;
let tx = aggregate(txs)?;
for mk_input in mk_tx.body.inputs {
if !tx.body.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
@@ -935,8 +922,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
// Now validate the resulting tx to ensure we have not built something invalid.
tx.validate(false)?;
tx.validate()?;
Ok(tx)
}
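For reference, a compact usage sketch of the two functions after this change (behaviour as shown in the hunks above and exercised in the tests further down; `tx1`/`tx2` stand for any already-valid transactions, and the import path is an assumption):

```rust
use grin_core::core::transaction::{self, Error, Transaction};

fn aggregate_then_split(tx1: Transaction, tx2: Transaction) -> Result<(), Error> {
    // Empty and single-element vecs short-circuit without any validation work.
    let _empty = transaction::aggregate(vec![])?;            // Transaction::empty()
    let _same = transaction::aggregate(vec![tx1.clone()])?;  // tx1 returned unchanged

    // Aggregation performs cut-through across inputs/outputs, sums the kernel
    // offsets, and validates the resulting multi-kernel tx before returning it.
    let agg = transaction::aggregate(vec![tx1.clone(), tx2])?;

    // Deaggregation recovers the "other half" given a known subset; the result
    // is validated internally before being returned.
    let _recovered_tx2 = transaction::deaggregate(agg, vec![tx1])?;
    Ok(())
}
```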


@@ -106,7 +106,7 @@ fn build_tx_kernel() {
).unwrap();
// check the tx is valid
tx.validate(false).unwrap();
tx.validate().unwrap();
// check the kernel is also itself valid
assert_eq!(tx.kernels().len(), 1);
@@ -124,13 +124,13 @@ fn transaction_cut_through() {
let tx1 = tx1i2o();
let tx2 = tx2i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(vec![tx1, tx2], None).unwrap();
let tx3 = aggregate(vec![tx1, tx2]).unwrap();
assert!(tx3.validate(false).is_ok());
assert!(tx3.validate().is_ok());
}
// Attempt to deaggregate a multi-kernel transaction in a different way
@@ -141,29 +141,26 @@ fn multi_kernel_transaction_deaggregation() {
let tx3 = tx1i1o();
let tx4 = tx1i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx3.validate(false).is_ok());
assert!(tx4.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
assert!(tx3.validate().is_ok());
assert!(tx4.validate().is_ok());
let tx1234 = aggregate(
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
None,
).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
assert!(tx1234.validate(false).is_ok());
assert!(tx12.validate(false).is_ok());
assert!(tx34.validate(false).is_ok());
assert!(tx1234.validate().is_ok());
assert!(tx12.validate().is_ok());
assert!(tx34.validate().is_ok());
let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
assert!(deaggregated_tx34.validate(false).is_ok());
assert!(deaggregated_tx34.validate().is_ok());
assert_eq!(tx34, deaggregated_tx34);
let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();
assert!(deaggregated_tx12.validate(false).is_ok());
assert!(deaggregated_tx12.validate().is_ok());
assert_eq!(tx12, deaggregated_tx12);
}
@@ -173,18 +170,18 @@ fn multi_kernel_transaction_deaggregation_2() {
let tx2 = tx1i1o();
let tx3 = tx1i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx3.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
assert!(tx3.validate().is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
assert!(tx123.validate(false).is_ok());
assert!(tx12.validate(false).is_ok());
assert!(tx123.validate().is_ok());
assert!(tx12.validate().is_ok());
let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
assert!(deaggregated_tx3.validate(false).is_ok());
assert!(deaggregated_tx3.validate().is_ok());
assert_eq!(tx3, deaggregated_tx3);
}
@@ -194,19 +191,19 @@ fn multi_kernel_transaction_deaggregation_3() {
let tx2 = tx1i1o();
let tx3 = tx1i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx3.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
assert!(tx3.validate().is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
let tx13 = aggregate(vec![tx1.clone(), tx3.clone()], None).unwrap();
let tx2 = aggregate(vec![tx2.clone()], None).unwrap();
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
let tx2 = aggregate(vec![tx2.clone()]).unwrap();
assert!(tx123.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx123.validate().is_ok());
assert!(tx2.validate().is_ok());
let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
assert!(deaggregated_tx13.validate(false).is_ok());
assert!(deaggregated_tx13.validate().is_ok());
assert_eq!(tx13, deaggregated_tx13);
}
@@ -218,29 +215,26 @@ fn multi_kernel_transaction_deaggregation_4() {
let tx4 = tx1i1o();
let tx5 = tx1i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx3.validate(false).is_ok());
assert!(tx4.validate(false).is_ok());
assert!(tx5.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
assert!(tx3.validate().is_ok());
assert!(tx4.validate().is_ok());
assert!(tx5.validate().is_ok());
let tx12345 = aggregate(
vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
],
None,
).unwrap();
assert!(tx12345.validate(false).is_ok());
let tx12345 = aggregate(vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
]).unwrap();
assert!(tx12345.validate().is_ok());
let deaggregated_tx5 = deaggregate(
tx12345.clone(),
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
).unwrap();
assert!(deaggregated_tx5.validate(false).is_ok());
assert!(deaggregated_tx5.validate().is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@@ -252,29 +246,26 @@ fn multi_kernel_transaction_deaggregation_5() {
let tx4 = tx1i1o();
let tx5 = tx1i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx3.validate(false).is_ok());
assert!(tx4.validate(false).is_ok());
assert!(tx5.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
assert!(tx3.validate().is_ok());
assert!(tx4.validate().is_ok());
assert!(tx5.validate().is_ok());
let tx12345 = aggregate(
vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
],
None,
).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
let tx12345 = aggregate(vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
]).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
assert!(tx12345.validate(false).is_ok());
assert!(tx12345.validate().is_ok());
let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
assert!(deaggregated_tx5.validate(false).is_ok());
assert!(deaggregated_tx5.validate().is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@@ -284,22 +275,22 @@ fn basic_transaction_deaggregation() {
let tx1 = tx1i2o();
let tx2 = tx2i1o();
assert!(tx1.validate(false).is_ok());
assert!(tx2.validate(false).is_ok());
assert!(tx1.validate().is_ok());
assert!(tx2.validate().is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
assert!(tx3.validate(false).is_ok());
assert!(tx3.validate().is_ok());
let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();
assert!(deaggregated_tx1.validate(false).is_ok());
assert!(deaggregated_tx1.validate().is_ok());
assert_eq!(tx1, deaggregated_tx1);
let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();
assert!(deaggregated_tx2.validate(false).is_ok());
assert!(deaggregated_tx2.validate().is_ok());
assert_eq!(tx2, deaggregated_tx2);
}
@@ -329,7 +320,7 @@ fn hash_output() {
#[test]
fn blind_tx() {
let btx = tx2i1o();
assert!(btx.validate(false).is_ok());
assert!(btx.validate().is_ok());
// Ignored for bullet proofs, because calling range_proof_info
// with a bullet proof causes painful errors
@@ -391,7 +382,7 @@ fn tx_build_exchange() {
&keychain,
).unwrap();
tx_final.validate(false).unwrap();
tx_final.validate().unwrap();
}
#[test]
@@ -419,7 +410,7 @@ fn reward_with_tx_block() {
let zero_commit = secp_static::commit_to_zero_value();
let mut tx1 = tx2i1o();
tx1.validate(false).unwrap();
tx1.validate().unwrap();
let previous_header = BlockHeader::default();
@@ -505,11 +496,11 @@ fn test_block_with_timelocked_tx() {
#[test]
pub fn test_verify_1i1o_sig() {
let tx = tx1i1o();
tx.validate(false).unwrap();
tx.validate().unwrap();
}
#[test]
pub fn test_verify_2i1o_sig() {
let tx = tx2i1o();
tx.validate(false).unwrap();
tx.validate().unwrap();
}


@@ -86,7 +86,7 @@ impl Pool {
.into_iter()
.filter_map(|mut bucket| {
bucket.truncate(MAX_TX_CHAIN);
transaction::aggregate(bucket, None).ok()
transaction::aggregate(bucket).ok()
})
.collect();
@@ -118,7 +118,7 @@ impl Pool {
return Ok(None);
}
let tx = transaction::aggregate(txs, None)?;
let tx = transaction::aggregate(txs)?;
Ok(Some(tx))
}
@@ -191,7 +191,7 @@ impl Pool {
// Create a single aggregated tx from the existing pool txs and the
// new entry
txs.push(entry.tx.clone());
transaction::aggregate(txs, None)?
transaction::aggregate(txs)?
};
// Validate aggregated tx against a known chain state (via txhashset


@@ -100,7 +100,7 @@ impl TransactionPool {
self.is_acceptable(&tx)?;
// Make sure the transaction is valid before anything else.
tx.validate(false).map_err(|e| PoolError::InvalidTx(e))?;
tx.validate().map_err(|e| PoolError::InvalidTx(e))?;
// Check the tx lock_time is valid based on current chain state.
self.blockchain.verify_tx_lock_height(&tx)?;


@@ -219,7 +219,7 @@ fn test_the_transaction_pool() {
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
// tx1 and tx2 are already in the txpool (in aggregated form)
// tx4 is the "new" part of this aggregated tx that we care about
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4], None).unwrap();
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
write_pool
.add_to_pool(test_source(), agg_tx, false, &header.hash())
.unwrap();


@@ -102,7 +102,7 @@ fn process_stem_phase(tx_pool: Arc<RwLock<TransactionPool>>) -> Result<(), PoolE
stem_txs.len()
);
let agg_tx = transaction::aggregate(stem_txs, None)?;
let agg_tx = transaction::aggregate(stem_txs)?;
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
if res.is_err() {
@@ -142,7 +142,7 @@ fn process_fluff_phase(tx_pool: Arc<RwLock<TransactionPool>>) -> Result<(), Pool
stem_txs.len()
);
let agg_tx = transaction::aggregate(stem_txs, None)?;
let agg_tx = transaction::aggregate(stem_txs)?;
let src = TxSource {
debug_name: "fluff".to_string(),


@@ -299,7 +299,7 @@ mod test {
&keychain,
).unwrap();
tx.validate(false).unwrap();
tx.validate().unwrap();
}
#[test]
@@ -319,7 +319,7 @@ mod test {
&keychain,
).unwrap();
tx.validate(false).unwrap();
tx.validate().unwrap();
}
#[test]
@@ -333,6 +333,6 @@ mod test {
&keychain,
).unwrap();
tx.validate(false).unwrap();
tx.validate().unwrap();
}
}


@@ -398,7 +398,7 @@ impl Slate {
final_tx.kernels()[0].verify()?;
// confirm the overall transaction is valid (including the updated kernel)
let _ = final_tx.validate(false)?;
let _ = final_tx.validate()?;
self.tx = final_tx;
Ok(())


@@ -196,7 +196,7 @@ where
// finalize the burn transaction and send
let tx_burn = build::transaction(parts, &keychain)?;
tx_burn.validate(false)?;
tx_burn.validate()?;
Ok(tx_burn)
}