Consolidate and cleanup tx aggregation (#1332)

* Include commitment non-duplication checks in aggregate
* Remove the corresponding check from the pool
* Block building now uses tx aggregation to reduce duplication
Ignotus Peverell 2018-08-12 19:08:08 -07:00 committed by GitHub
parent 4be97abbbb
commit e9c987c075
12 changed files with 149 additions and 191 deletions
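The change pivots on a new signature for transaction::aggregate, which now optionally folds the block reward in so that block building and pool aggregation share a single code path. A minimal sketch of the two call styles after this commit (names taken from the hunks below; the surrounding setup is assumed):

    // Pool side: aggregate pending txs, no reward involved.
    let pool_tx = transaction::aggregate(txs, None)?;

    // Block side: fold the coinbase output/kernel in while aggregating.
    // aggregate() also validates and rejects duplicate output commitments.
    let agg_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;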


@@ -523,7 +523,8 @@ impl Block {
         let header = self.header.clone();
         let nonce = thread_rng().next_u64();
-        let mut out_full = self.outputs
+        let mut out_full = self
+            .outputs
             .iter()
             .filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
             .cloned()
@@ -564,55 +565,19 @@ impl Block {
         reward_kern: TxKernel,
         difficulty: Difficulty,
     ) -> Result<Block, Error> {
-        let mut kernels = vec![];
-        let mut inputs = vec![];
-        let mut outputs = vec![];
-        // we will sum these together at the end
-        // to give us the overall offset for the block
-        let mut kernel_offsets = vec![];
-        // iterate over the all the txs
-        // build the kernel for each
-        // and collect all the kernels, inputs and outputs
-        // to build the block (which we can sort of think of as one big tx?)
-        for tx in txs {
-            // validate each transaction and gather their kernels
-            // tx has an offset k2 where k = k1 + k2
-            // and the tx is signed using k1
-            // the kernel excess is k1G
-            // we will sum all the offsets later and store the total offset
-            // on the block_header
-            tx.validate()?;
-            // we will sum these later to give a single aggregate offset
-            kernel_offsets.push(tx.offset);
-            // add all tx inputs/outputs/kernels to the block
-            kernels.extend(tx.kernels.into_iter());
-            inputs.extend(tx.inputs.into_iter());
-            outputs.extend(tx.outputs.into_iter());
-        }
-        // include the reward kernel and output
-        kernels.push(reward_kern);
-        outputs.push(reward_out);
-        // now sort everything so the block is built deterministically
-        inputs.sort();
-        outputs.sort();
-        kernels.sort();
-        // now sum the kernel_offsets up to give us
-        // an aggregate offset for the entire block
-        kernel_offsets.push(prev.total_kernel_offset);
-        let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
+        // A block is just a big transaction, aggregate as such. Note that
+        // aggregate also runs validation and duplicate commitment checks.
+        let agg_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;
+        // Now add the kernel offset of the previous block for a total
+        let total_kernel_offset =
+            committed::sum_kernel_offsets(vec![agg_tx.offset, prev.total_kernel_offset], vec![])?;
         let total_kernel_sum = {
             let zero_commit = secp_static::commit_to_zero_value();
             let secp = static_secp_instance();
             let secp = secp.lock().unwrap();
-            let mut excesses = map_vec!(kernels, |x| x.excess());
+            let mut excesses = map_vec!(agg_tx.kernels, |x| x.excess());
             excesses.push(prev.total_kernel_sum);
             excesses.retain(|x| *x != zero_commit);
             secp.commit_sum(excesses, vec![])?
@@ -628,9 +593,9 @@ impl Block {
                 total_kernel_sum,
                 ..Default::default()
             },
-            inputs,
-            outputs,
-            kernels,
+            inputs: agg_tx.inputs,
+            outputs: agg_tx.outputs,
+            kernels: agg_tx.kernels,
         }.cut_through())
     }
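The offset arithmetic the new code leans on is unchanged from the deleted comments: each tx splits its blinding sum as k = k1 + k2, signs with k1 (so the kernel excess is k1*G), and carries k2 as its offset; offsets fold additively, so the header's running total is just the aggregated tx offset plus the previous block's total. Restated as a sketch (this is the same call as in the hunk above):

    // total_offset(block N) = sum(tx offsets in N) + total_offset(block N-1)
    let total_kernel_offset =
        committed::sum_kernel_offsets(vec![agg_tx.offset, prev.total_kernel_offset], vec![])?;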
@@ -655,12 +620,14 @@ impl Block {
     /// we do not want to cut-through (all coinbase must be preserved)
     ///
     pub fn cut_through(self) -> Block {
-        let in_set = self.inputs
+        let in_set = self
+            .inputs
             .iter()
             .map(|inp| inp.commitment())
             .collect::<HashSet<_>>();
-        let out_set = self.outputs
+        let out_set = self
+            .outputs
             .iter()
             .filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
             .map(|out| out.commitment())
@@ -668,12 +635,14 @@ impl Block {
         let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
-        let new_inputs = self.inputs
+        let new_inputs = self
+            .inputs
             .into_iter()
             .filter(|inp| !to_cut_through.contains(&inp.commitment()))
             .collect::<Vec<_>>();
-        let new_outputs = self.outputs
+        let new_outputs = self
+            .outputs
             .into_iter()
             .filter(|out| !to_cut_through.contains(&out.commitment()))
             .collect::<Vec<_>>();
@@ -757,7 +726,8 @@ impl Block {
     // Verify that no input is spending an output from the same block.
     fn verify_cut_through(&self) -> Result<(), Error> {
         for inp in &self.inputs {
-            if self.outputs
+            if self
+                .outputs
                 .iter()
                 .any(|out| out.commitment() == inp.commitment())
             {
@@ -800,12 +770,14 @@ impl Block {
     /// Check the sum of coinbase-marked outputs match
     /// the sum of coinbase-marked kernels accounting for fees.
     pub fn verify_coinbase(&self) -> Result<(), Error> {
-        let cb_outs = self.outputs
+        let cb_outs = self
+            .outputs
             .iter()
             .filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
             .collect::<Vec<&Output>>();
-        let cb_kerns = self.kernels
+        let cb_kerns = self
+            .kernels
             .iter()
             .filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
             .collect::<Vec<&TxKernel>>();


@@ -314,7 +314,7 @@ impl Readable for Transaction {
         // Treat any validation issues as data corruption.
         // An example of this would be reading a tx
         // that exceeded the allowed number of inputs.
-        tx.validate().map_err(|_| ser::Error::CorruptedData)?;
+        tx.validate(false).map_err(|_| ser::Error::CorruptedData)?;
         Ok(tx)
     }
@@ -446,12 +446,14 @@ impl Transaction {
     /// Validates all relevant parts of a fully built transaction. Checks the
     /// excess value against the signature as well as range proofs for each
     /// output.
-    pub fn validate(&self) -> Result<(), Error> {
-        self.verify_features()?;
-        self.verify_weight()?;
+    pub fn validate(&self, as_block: bool) -> Result<(), Error> {
+        if !as_block {
+            self.verify_features()?;
+            self.verify_weight()?;
+            self.verify_kernel_sums(self.overage(), self.offset)?;
+        }
         self.verify_sorted()?;
         self.verify_cut_through()?;
-        self.verify_kernel_sums(self.overage(), self.offset)?;
         self.verify_rangeproofs()?;
         self.verify_kernel_signatures()?;
         Ok(())
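With the new flag, the tx-only checks (features, weight, and the per-tx kernel sum) are skipped when the transaction is really a block body, since the block computes and verifies its own totals. A hedged usage sketch (the tx binding is assumed):

    // Standalone transaction: run every check, including kernel sums.
    tx.validate(false)?;
    // Acting as a block body (reward output/kernel included): skip the
    // tx-only checks; block-level sums are verified separately.
    tx.validate(true)?;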
@@ -482,7 +484,8 @@ impl Transaction {
     // Verify that no input is spending an output from the same block.
     fn verify_cut_through(&self) -> Result<(), Error> {
         for inp in &self.inputs {
-            if self.outputs
+            if self
+                .outputs
                 .iter()
                 .any(|out| out.commitment() == inp.commitment())
             {
@@ -503,7 +506,8 @@ impl Transaction {
     // Verify we have no outputs tagged as COINBASE_OUTPUT.
     fn verify_output_features(&self) -> Result<(), Error> {
-        if self.outputs
+        if self
+            .outputs
             .iter()
             .any(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
         {
@@ -514,7 +518,8 @@ impl Transaction {
     // Verify we have no kernels tagged as COINBASE_KERNEL.
     fn verify_kernel_features(&self) -> Result<(), Error> {
-        if self.kernels
+        if self
+            .kernels
             .iter()
             .any(|x| x.features.contains(KernelFeatures::COINBASE_KERNEL))
         {
@@ -525,8 +530,12 @@ impl Transaction {
 }
 /// Aggregate a vec of transactions into a multi-kernel transaction with
-/// cut_through
-pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
+/// cut_through. Optionally allows passing a reward output and kernel for
+/// block building.
+pub fn aggregate(
+    transactions: Vec<Transaction>,
+    reward: Option<(Output, TxKernel)>,
+) -> Result<Transaction, Error> {
     let mut inputs: Vec<Input> = vec![];
     let mut outputs: Vec<Output> = vec![];
     let mut kernels: Vec<TxKernel> = vec![];
@@ -543,37 +552,24 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
         outputs.append(&mut transaction.outputs);
         kernels.append(&mut transaction.kernels);
     }
+    let as_block = reward.is_some();
+    if let Some((out, kernel)) = reward {
+        outputs.push(out);
+        kernels.push(kernel);
+    }
-    // now sum the kernel_offsets up to give us an aggregate offset for the
-    // transaction
-    let total_kernel_offset = {
-        let secp = static_secp_instance();
-        let secp = secp.lock().unwrap();
-        let mut keys = kernel_offsets
-            .into_iter()
-            .filter(|x| *x != BlindingFactor::zero())
-            .filter_map(|x| x.secret_key(&secp).ok())
-            .collect::<Vec<_>>();
-        if keys.is_empty() {
-            BlindingFactor::zero()
-        } else {
-            let sum = secp.blind_sum(keys, vec![])?;
-            BlindingFactor::from_secret_key(sum)
-        }
-    };
+    // assemble output commitments set, checking they're all unique
+    let mut out_set = HashSet::new();
+    let all_uniq = { outputs.iter().all(|o| out_set.insert(o.commitment())) };
+    if !all_uniq {
+        return Err(Error::AggregationError);
+    }
     let in_set = inputs
         .iter()
         .map(|inp| inp.commitment())
         .collect::<HashSet<_>>();
-    let out_set = outputs
-        .iter()
-        .filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
-        .map(|out| out.commitment())
-        .collect::<HashSet<_>>();
     let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
     let mut new_inputs = inputs
@@ -591,6 +587,10 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
     new_outputs.sort();
     kernels.sort();
+    // now sum the kernel_offsets up to give us an aggregate offset for the
+    // transaction
+    let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
     // build a new aggregate tx from the following -
     // * cut-through inputs
     // * cut-through outputs
@@ -602,7 +602,7 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
     // The resulting tx could be invalid for a variety of reasons -
     // * tx too large (too many inputs|outputs|kernels)
     // * cut-through may have invalidated the sums
-    tx.validate()?;
+    tx.validate(as_block)?;
     Ok(tx)
 }
@@ -618,7 +618,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
     // transaction
     let mut kernel_offsets = vec![];
-    let tx = aggregate(txs)?;
+    let tx = aggregate(txs, None)?;
     for mk_input in mk_tx.inputs {
         if !tx.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
@@ -670,7 +670,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
     let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
     // Now validate the resulting tx to ensure we have not built something invalid.
-    tx.validate()?;
+    tx.validate(false)?;
     Ok(tx)
 }
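The uniqueness check is a plain HashSet insert over every output commitment, so any collision aborts aggregation with Error::AggregationError. A test-style sketch of the behaviour this buys, using the tx1i2o helper from the tests below (this test is illustrative, not part of the commit):

    // Two copies of one tx share every output commitment, so the
    // duplicate check inside aggregate() rejects the combination.
    let tx_a = tx1i2o();
    let tx_b = tx_a.clone();
    assert!(aggregate(vec![tx_a, tx_b], None).is_err());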


@@ -105,7 +105,7 @@ fn build_tx_kernel() {
     ).unwrap();
     // check the tx is valid
-    tx.validate().unwrap();
+    tx.validate(false).unwrap();
     // check the kernel is also itself valid
     assert_eq!(tx.kernels.len(), 1);
@@ -123,13 +123,13 @@ fn transaction_cut_through() {
     let tx1 = tx1i2o();
     let tx2 = tx2i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
     // now build a "cut_through" tx from tx1 and tx2
-    let tx3 = aggregate(vec![tx1, tx2]).unwrap();
-    assert!(tx3.validate().is_ok());
+    let tx3 = aggregate(vec![tx1, tx2], None).unwrap();
+    assert!(tx3.validate(false).is_ok());
 }
 // Attempt to deaggregate a multi-kernel transaction in a different way
@@ -140,26 +140,26 @@ fn multi_kernel_transaction_deaggregation() {
     let tx3 = tx1i1o();
     let tx4 = tx1i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
-    assert!(tx3.validate().is_ok());
-    assert!(tx4.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
+    assert!(tx3.validate(false).is_ok());
+    assert!(tx4.validate(false).is_ok());
-    let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
-    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
-    let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
+    let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()], None).unwrap();
+    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
+    let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
-    assert!(tx1234.validate().is_ok());
-    assert!(tx12.validate().is_ok());
-    assert!(tx34.validate().is_ok());
+    assert!(tx1234.validate(false).is_ok());
+    assert!(tx12.validate(false).is_ok());
+    assert!(tx34.validate(false).is_ok());
     let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
-    assert!(deaggregated_tx34.validate().is_ok());
+    assert!(deaggregated_tx34.validate(false).is_ok());
     assert_eq!(tx34, deaggregated_tx34);
     let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();
-    assert!(deaggregated_tx12.validate().is_ok());
+    assert!(deaggregated_tx12.validate(false).is_ok());
     assert_eq!(tx12, deaggregated_tx12);
 }
@@ -169,18 +169,18 @@ fn multi_kernel_transaction_deaggregation_2() {
     let tx2 = tx1i1o();
     let tx3 = tx1i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
-    assert!(tx3.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
+    assert!(tx3.validate(false).is_ok());
-    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
-    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
+    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
+    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
-    assert!(tx123.validate().is_ok());
-    assert!(tx12.validate().is_ok());
+    assert!(tx123.validate(false).is_ok());
+    assert!(tx12.validate(false).is_ok());
     let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
-    assert!(deaggregated_tx3.validate().is_ok());
+    assert!(deaggregated_tx3.validate(false).is_ok());
     assert_eq!(tx3, deaggregated_tx3);
 }
@@ -190,19 +190,19 @@ fn multi_kernel_transaction_deaggregation_3() {
     let tx2 = tx1i1o();
     let tx3 = tx1i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
-    assert!(tx3.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
+    assert!(tx3.validate(false).is_ok());
-    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
-    let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
-    let tx2 = aggregate(vec![tx2.clone()]).unwrap();
+    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
+    let tx13 = aggregate(vec![tx1.clone(), tx3.clone()], None).unwrap();
+    let tx2 = aggregate(vec![tx2.clone()], None).unwrap();
-    assert!(tx123.validate().is_ok());
-    assert!(tx2.validate().is_ok());
+    assert!(tx123.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
     let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
-    assert!(deaggregated_tx13.validate().is_ok());
+    assert!(deaggregated_tx13.validate(false).is_ok());
     assert_eq!(tx13, deaggregated_tx13);
 }
@@ -214,11 +214,11 @@ fn multi_kernel_transaction_deaggregation_4() {
     let tx4 = tx1i1o();
     let tx5 = tx1i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
-    assert!(tx3.validate().is_ok());
-    assert!(tx4.validate().is_ok());
-    assert!(tx5.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
+    assert!(tx3.validate(false).is_ok());
+    assert!(tx4.validate(false).is_ok());
+    assert!(tx5.validate(false).is_ok());
     let tx12345 = aggregate(vec![
         tx1.clone(),
@@ -226,14 +226,14 @@ fn multi_kernel_transaction_deaggregation_4() {
         tx3.clone(),
         tx4.clone(),
         tx5.clone(),
-    ]).unwrap();
+    ], None).unwrap();
-    assert!(tx12345.validate().is_ok());
+    assert!(tx12345.validate(false).is_ok());
     let deaggregated_tx5 = deaggregate(
         tx12345.clone(),
         vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
     ).unwrap();
-    assert!(deaggregated_tx5.validate().is_ok());
+    assert!(deaggregated_tx5.validate(false).is_ok());
     assert_eq!(tx5, deaggregated_tx5);
 }
@@ -245,11 +245,11 @@ fn multi_kernel_transaction_deaggregation_5() {
     let tx4 = tx1i1o();
     let tx5 = tx1i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
-    assert!(tx3.validate().is_ok());
-    assert!(tx4.validate().is_ok());
-    assert!(tx5.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
+    assert!(tx3.validate(false).is_ok());
+    assert!(tx4.validate(false).is_ok());
+    assert!(tx5.validate(false).is_ok());
     let tx12345 = aggregate(vec![
         tx1.clone(),
@@ -257,14 +257,14 @@ fn multi_kernel_transaction_deaggregation_5() {
         tx3.clone(),
         tx4.clone(),
         tx5.clone(),
-    ]).unwrap();
+    ], None).unwrap();
-    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
-    let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
+    let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
+    let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
-    assert!(tx12345.validate().is_ok());
+    assert!(tx12345.validate(false).is_ok());
     let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
-    assert!(deaggregated_tx5.validate().is_ok());
+    assert!(deaggregated_tx5.validate(false).is_ok());
     assert_eq!(tx5, deaggregated_tx5);
 }
@@ -274,22 +274,22 @@ fn basic_transaction_deaggregation() {
     let tx1 = tx1i2o();
     let tx2 = tx2i1o();
-    assert!(tx1.validate().is_ok());
-    assert!(tx2.validate().is_ok());
+    assert!(tx1.validate(false).is_ok());
+    assert!(tx2.validate(false).is_ok());
     // now build a "cut_through" tx from tx1 and tx2
-    let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
-    assert!(tx3.validate().is_ok());
+    let tx3 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
+    assert!(tx3.validate(false).is_ok());
     let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();
-    assert!(deaggregated_tx1.validate().is_ok());
+    assert!(deaggregated_tx1.validate(false).is_ok());
     assert_eq!(tx1, deaggregated_tx1);
     let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();
-    assert!(deaggregated_tx2.validate().is_ok());
+    assert!(deaggregated_tx2.validate(false).is_ok());
     assert_eq!(tx2, deaggregated_tx2);
 }
@@ -319,7 +319,7 @@ fn hash_output() {
 #[test]
 fn blind_tx() {
     let btx = tx2i1o();
-    assert!(btx.validate().is_ok());
+    assert!(btx.validate(false).is_ok());
     // Ignored for bullet proofs, because calling range_proof_info
     // with a bullet proof causes painful errors
@@ -381,7 +381,7 @@ fn tx_build_exchange() {
         &keychain,
     ).unwrap();
-    tx_final.validate().unwrap();
+    tx_final.validate(false).unwrap();
 }
 #[test]
@@ -408,7 +408,7 @@ fn reward_with_tx_block() {
     let zero_commit = secp_static::commit_to_zero_value();
     let mut tx1 = tx2i1o();
-    tx1.validate().unwrap();
+    tx1.validate(false).unwrap();
     let previous_header = BlockHeader::default();
@@ -493,11 +493,11 @@ fn test_block_with_timelocked_tx() {
 #[test]
 pub fn test_verify_1i1o_sig() {
     let tx = tx1i1o();
-    tx.validate().unwrap();
+    tx.validate(false).unwrap();
 }
 #[test]
 pub fn test_verify_2i1o_sig() {
     let tx = tx2i1o();
-    tx.validate().unwrap();
+    tx.validate(false).unwrap();
 }


@@ -26,13 +26,13 @@ use vec_backend::{TestElem, VecBackend};
 #[test]
 fn some_peak_map() {
-    assert_eq!(pmmr::peak_map_height(0), ( 0b0, 0));
-    assert_eq!(pmmr::peak_map_height(1), ( 0b1, 0));
-    assert_eq!(pmmr::peak_map_height(2), ( 0b1, 1));
-    assert_eq!(pmmr::peak_map_height(3), ( 0b10, 0));
-    assert_eq!(pmmr::peak_map_height(4), ( 0b11, 0));
-    assert_eq!(pmmr::peak_map_height(5), ( 0b11, 1));
-    assert_eq!(pmmr::peak_map_height(6), ( 0b11, 2));
+    assert_eq!(pmmr::peak_map_height(0), (0b0, 0));
+    assert_eq!(pmmr::peak_map_height(1), (0b1, 0));
+    assert_eq!(pmmr::peak_map_height(2), (0b1, 1));
+    assert_eq!(pmmr::peak_map_height(3), (0b10, 0));
+    assert_eq!(pmmr::peak_map_height(4), (0b11, 0));
+    assert_eq!(pmmr::peak_map_height(5), (0b11, 1));
+    assert_eq!(pmmr::peak_map_height(6), (0b11, 2));
     assert_eq!(pmmr::peak_map_height(7), (0b100, 0));
 }


@@ -16,9 +16,9 @@ extern crate croaring;
 use croaring::Bitmap;
-use core::core::BlockHeader;
 use core::core::hash::Hash;
 use core::core::pmmr::Backend;
+use core::core::BlockHeader;
 use core::ser;
 use core::ser::{PMMRable, Readable, Reader, Writeable, Writer};
@@ -118,11 +118,7 @@ where
         Ok(())
     }
-    fn rewind(
-        &mut self,
-        position: u64,
-        _rewind_rm_pos: &Bitmap,
-    ) -> Result<(), String> {
+    fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
         self.elems = self.elems[0..(position as usize) + 1].to_vec();
         Ok(())
     }


@@ -87,7 +87,7 @@ where
             return Ok(None);
         }
-        let tx = transaction::aggregate(txs)?;
+        let tx = transaction::aggregate(txs, None)?;
         Ok(Some(tx))
     }
@@ -142,20 +142,10 @@ where
             // If we have nothing to aggregate then simply return the tx itself.
             entry.tx.clone()
         } else {
-            // Create a single aggregated tx from the existing pool txs (to check pool is
-            // valid).
-            let agg_tx = transaction::aggregate(txs)?;
-            // Then check new tx would not introduce a duplicate output in the pool.
-            for x in &entry.tx.outputs {
-                if agg_tx.outputs.contains(&x) {
-                    return Err(PoolError::DuplicateCommitment);
-                }
-            }
-            // Finally aggregate the new tx with everything in the pool (with any extra
-            // txs).
-            transaction::aggregate(vec![agg_tx, entry.tx.clone()])?
+            // Create a single aggregated tx from the existing pool txs and the
+            // new entry
+            txs.push(entry.tx.clone());
+            transaction::aggregate(txs, None)?
         };
         // Validate aggregated tx against the current chain state (via txhashset
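Design note: the old path aggregated twice and scanned agg_tx.outputs linearly for every output of the new entry; the new path aggregates once and relies on the HashSet-based check inside aggregate, condensed here from the transaction.rs hunk above:

    // Inside aggregate(): every output commitment must be unique.
    let mut out_set = HashSet::new();
    if !outputs.iter().all(|o| out_set.insert(o.commitment())) {
        return Err(Error::AggregationError);
    }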


@@ -108,7 +108,7 @@ where
         self.is_acceptable(&tx)?;
         // Make sure the transaction is valid before anything else.
-        tx.validate().map_err(|e| PoolError::InvalidTx(e))?;
+        tx.validate(false).map_err(|e| PoolError::InvalidTx(e))?;
         // Check the tx lock_time is valid based on current chain state.
         self.blockchain.verify_tx_lock_height(&tx)?;


@@ -188,7 +188,7 @@ fn test_the_transaction_pool() {
     let tx4 = test_transaction(&keychain, vec![800], vec![799]);
     // tx1 and tx2 are already in the txpool (in aggregated form)
     // tx4 is the "new" part of this aggregated tx that we care about
-    let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
+    let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4], None).unwrap();
     write_pool
         .add_to_pool(test_source(), agg_tx, false)
         .unwrap();

View file

@@ -104,7 +104,7 @@ where
                 stem_txs.len()
             );
-            let agg_tx = transaction::aggregate(stem_txs)?;
+            let agg_tx = transaction::aggregate(stem_txs, None)?;
             let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
             if res.is_err() {
@@ -144,7 +144,7 @@ where
                 stem_txs.len()
             );
-            let agg_tx = transaction::aggregate(stem_txs)?;
+            let agg_tx = transaction::aggregate(stem_txs, None)?;
             let src = TxSource {
                 debug_name: "fluff".to_string(),


@@ -293,7 +293,7 @@ mod test {
             &keychain,
         ).unwrap();
-        tx.validate().unwrap();
+        tx.validate(false).unwrap();
     }
     #[test]
@@ -313,7 +313,7 @@ mod test {
             &keychain,
         ).unwrap();
-        tx.validate().unwrap();
+        tx.validate(false).unwrap();
     }
     #[test]
@@ -327,6 +327,6 @@ mod test {
             &keychain,
         ).unwrap();
-        tx.validate().unwrap();
+        tx.validate(false).unwrap();
     }
 }


@@ -388,7 +388,7 @@ impl Slate {
         final_tx.kernels[0].verify()?;
         // confirm the overall transaction is valid (including the updated kernel)
-        let _ = final_tx.validate()?;
+        let _ = final_tx.validate(false)?;
         self.tx = final_tx;
         Ok(())


@@ -198,7 +198,7 @@ where
     // finalize the burn transaction and send
     let tx_burn = build::transaction(parts, &keychain)?;
-    tx_burn.validate()?;
+    tx_burn.validate(false)?;
     Ok(tx_burn)
 }