mirror of
https://github.com/mimblewimble/grin.git
synced 2025-02-01 08:51:08 +03:00
Consolidate and cleanup tx aggregation (#1332)
* Include commitment non-duplicate checks in aggregate * Remove said check from the pool * Block building now uses tx aggregation to reduce duplication
This commit is contained in:
parent
4be97abbbb
commit
e9c987c075
12 changed files with 149 additions and 191 deletions
|
@ -523,7 +523,8 @@ impl Block {
|
|||
let header = self.header.clone();
|
||||
let nonce = thread_rng().next_u64();
|
||||
|
||||
let mut out_full = self.outputs
|
||||
let mut out_full = self
|
||||
.outputs
|
||||
.iter()
|
||||
.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
|
||||
.cloned()
|
||||
|
@ -564,55 +565,19 @@ impl Block {
|
|||
reward_kern: TxKernel,
|
||||
difficulty: Difficulty,
|
||||
) -> Result<Block, Error> {
|
||||
let mut kernels = vec![];
|
||||
let mut inputs = vec![];
|
||||
let mut outputs = vec![];
|
||||
// A block is just a big transaction, aggregate as such. Note that
|
||||
// aggregate also runs validation and duplicate commitment checks.
|
||||
let agg_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;
|
||||
|
||||
// we will sum these together at the end
|
||||
// to give us the overall offset for the block
|
||||
let mut kernel_offsets = vec![];
|
||||
|
||||
// iterate over the all the txs
|
||||
// build the kernel for each
|
||||
// and collect all the kernels, inputs and outputs
|
||||
// to build the block (which we can sort of think of as one big tx?)
|
||||
for tx in txs {
|
||||
// validate each transaction and gather their kernels
|
||||
// tx has an offset k2 where k = k1 + k2
|
||||
// and the tx is signed using k1
|
||||
// the kernel excess is k1G
|
||||
// we will sum all the offsets later and store the total offset
|
||||
// on the block_header
|
||||
tx.validate()?;
|
||||
|
||||
// we will sum these later to give a single aggregate offset
|
||||
kernel_offsets.push(tx.offset);
|
||||
|
||||
// add all tx inputs/outputs/kernels to the block
|
||||
kernels.extend(tx.kernels.into_iter());
|
||||
inputs.extend(tx.inputs.into_iter());
|
||||
outputs.extend(tx.outputs.into_iter());
|
||||
}
|
||||
|
||||
// include the reward kernel and output
|
||||
kernels.push(reward_kern);
|
||||
outputs.push(reward_out);
|
||||
|
||||
// now sort everything so the block is built deterministically
|
||||
inputs.sort();
|
||||
outputs.sort();
|
||||
kernels.sort();
|
||||
|
||||
// now sum the kernel_offsets up to give us
|
||||
// an aggregate offset for the entire block
|
||||
kernel_offsets.push(prev.total_kernel_offset);
|
||||
let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
|
||||
// Now add the kernel offset of the previous block for a total
|
||||
let total_kernel_offset =
|
||||
committed::sum_kernel_offsets(vec![agg_tx.offset, prev.total_kernel_offset], vec![])?;
|
||||
|
||||
let total_kernel_sum = {
|
||||
let zero_commit = secp_static::commit_to_zero_value();
|
||||
let secp = static_secp_instance();
|
||||
let secp = secp.lock().unwrap();
|
||||
let mut excesses = map_vec!(kernels, |x| x.excess());
|
||||
let mut excesses = map_vec!(agg_tx.kernels, |x| x.excess());
|
||||
excesses.push(prev.total_kernel_sum);
|
||||
excesses.retain(|x| *x != zero_commit);
|
||||
secp.commit_sum(excesses, vec![])?
|
||||
|
@ -628,9 +593,9 @@ impl Block {
|
|||
total_kernel_sum,
|
||||
..Default::default()
|
||||
},
|
||||
inputs,
|
||||
outputs,
|
||||
kernels,
|
||||
inputs: agg_tx.inputs,
|
||||
outputs: agg_tx.outputs,
|
||||
kernels: agg_tx.kernels,
|
||||
}.cut_through())
|
||||
}
|
||||
|
||||
|
@ -655,12 +620,14 @@ impl Block {
|
|||
/// we do not want to cut-through (all coinbase must be preserved)
|
||||
///
|
||||
pub fn cut_through(self) -> Block {
|
||||
let in_set = self.inputs
|
||||
let in_set = self
|
||||
.inputs
|
||||
.iter()
|
||||
.map(|inp| inp.commitment())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let out_set = self.outputs
|
||||
let out_set = self
|
||||
.outputs
|
||||
.iter()
|
||||
.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
|
||||
.map(|out| out.commitment())
|
||||
|
@ -668,12 +635,14 @@ impl Block {
|
|||
|
||||
let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
|
||||
|
||||
let new_inputs = self.inputs
|
||||
let new_inputs = self
|
||||
.inputs
|
||||
.into_iter()
|
||||
.filter(|inp| !to_cut_through.contains(&inp.commitment()))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let new_outputs = self.outputs
|
||||
let new_outputs = self
|
||||
.outputs
|
||||
.into_iter()
|
||||
.filter(|out| !to_cut_through.contains(&out.commitment()))
|
||||
.collect::<Vec<_>>();
|
||||
|
@ -757,7 +726,8 @@ impl Block {
|
|||
// Verify that no input is spending an output from the same block.
|
||||
fn verify_cut_through(&self) -> Result<(), Error> {
|
||||
for inp in &self.inputs {
|
||||
if self.outputs
|
||||
if self
|
||||
.outputs
|
||||
.iter()
|
||||
.any(|out| out.commitment() == inp.commitment())
|
||||
{
|
||||
|
@ -800,12 +770,14 @@ impl Block {
|
|||
/// Check the sum of coinbase-marked outputs match
|
||||
/// the sum of coinbase-marked kernels accounting for fees.
|
||||
pub fn verify_coinbase(&self) -> Result<(), Error> {
|
||||
let cb_outs = self.outputs
|
||||
let cb_outs = self
|
||||
.outputs
|
||||
.iter()
|
||||
.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
|
||||
.collect::<Vec<&Output>>();
|
||||
|
||||
let cb_kerns = self.kernels
|
||||
let cb_kerns = self
|
||||
.kernels
|
||||
.iter()
|
||||
.filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
|
||||
.collect::<Vec<&TxKernel>>();
|
||||
|
|
|
@ -314,7 +314,7 @@ impl Readable for Transaction {
|
|||
// Treat any validation issues as data corruption.
|
||||
// An example of this would be reading a tx
|
||||
// that exceeded the allowed number of inputs.
|
||||
tx.validate().map_err(|_| ser::Error::CorruptedData)?;
|
||||
tx.validate(false).map_err(|_| ser::Error::CorruptedData)?;
|
||||
|
||||
Ok(tx)
|
||||
}
|
||||
|
@ -446,12 +446,14 @@ impl Transaction {
|
|||
/// Validates all relevant parts of a fully built transaction. Checks the
|
||||
/// excess value against the signature as well as range proofs for each
|
||||
/// output.
|
||||
pub fn validate(&self) -> Result<(), Error> {
|
||||
self.verify_features()?;
|
||||
self.verify_weight()?;
|
||||
pub fn validate(&self, as_block: bool) -> Result<(), Error> {
|
||||
if !as_block {
|
||||
self.verify_features()?;
|
||||
self.verify_weight()?;
|
||||
self.verify_kernel_sums(self.overage(), self.offset)?;
|
||||
}
|
||||
self.verify_sorted()?;
|
||||
self.verify_cut_through()?;
|
||||
self.verify_kernel_sums(self.overage(), self.offset)?;
|
||||
self.verify_rangeproofs()?;
|
||||
self.verify_kernel_signatures()?;
|
||||
Ok(())
|
||||
|
@ -482,7 +484,8 @@ impl Transaction {
|
|||
// Verify that no input is spending an output from the same block.
|
||||
fn verify_cut_through(&self) -> Result<(), Error> {
|
||||
for inp in &self.inputs {
|
||||
if self.outputs
|
||||
if self
|
||||
.outputs
|
||||
.iter()
|
||||
.any(|out| out.commitment() == inp.commitment())
|
||||
{
|
||||
|
@ -503,7 +506,8 @@ impl Transaction {
|
|||
|
||||
// Verify we have no outputs tagged as COINBASE_OUTPUT.
|
||||
fn verify_output_features(&self) -> Result<(), Error> {
|
||||
if self.outputs
|
||||
if self
|
||||
.outputs
|
||||
.iter()
|
||||
.any(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
|
||||
{
|
||||
|
@ -514,7 +518,8 @@ impl Transaction {
|
|||
|
||||
// Verify we have no kernels tagged as COINBASE_KERNEL.
|
||||
fn verify_kernel_features(&self) -> Result<(), Error> {
|
||||
if self.kernels
|
||||
if self
|
||||
.kernels
|
||||
.iter()
|
||||
.any(|x| x.features.contains(KernelFeatures::COINBASE_KERNEL))
|
||||
{
|
||||
|
@ -525,8 +530,12 @@ impl Transaction {
|
|||
}
|
||||
|
||||
/// Aggregate a vec of transactions into a multi-kernel transaction with
|
||||
/// cut_through
|
||||
pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
|
||||
/// cut_through. Optionally allows passing a reward output and kernel for
|
||||
/// block building.
|
||||
pub fn aggregate(
|
||||
transactions: Vec<Transaction>,
|
||||
reward: Option<(Output, TxKernel)>,
|
||||
) -> Result<Transaction, Error> {
|
||||
let mut inputs: Vec<Input> = vec![];
|
||||
let mut outputs: Vec<Output> = vec![];
|
||||
let mut kernels: Vec<TxKernel> = vec![];
|
||||
|
@ -543,37 +552,24 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
|
|||
outputs.append(&mut transaction.outputs);
|
||||
kernels.append(&mut transaction.kernels);
|
||||
}
|
||||
let as_block = reward.is_some();
|
||||
if let Some((out, kernel)) = reward {
|
||||
outputs.push(out);
|
||||
kernels.push(kernel);
|
||||
}
|
||||
|
||||
// now sum the kernel_offsets up to give us an aggregate offset for the
|
||||
// transaction
|
||||
let total_kernel_offset = {
|
||||
let secp = static_secp_instance();
|
||||
let secp = secp.lock().unwrap();
|
||||
let mut keys = kernel_offsets
|
||||
.into_iter()
|
||||
.filter(|x| *x != BlindingFactor::zero())
|
||||
.filter_map(|x| x.secret_key(&secp).ok())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if keys.is_empty() {
|
||||
BlindingFactor::zero()
|
||||
} else {
|
||||
let sum = secp.blind_sum(keys, vec![])?;
|
||||
BlindingFactor::from_secret_key(sum)
|
||||
}
|
||||
};
|
||||
// assemble output commitments set, checking they're all unique
|
||||
let mut out_set = HashSet::new();
|
||||
let all_uniq = { outputs.iter().all(|o| out_set.insert(o.commitment())) };
|
||||
if !all_uniq {
|
||||
return Err(Error::AggregationError);
|
||||
}
|
||||
|
||||
let in_set = inputs
|
||||
.iter()
|
||||
.map(|inp| inp.commitment())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let out_set = outputs
|
||||
.iter()
|
||||
.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
|
||||
.map(|out| out.commitment())
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
|
||||
|
||||
let mut new_inputs = inputs
|
||||
|
@ -591,6 +587,10 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
|
|||
new_outputs.sort();
|
||||
kernels.sort();
|
||||
|
||||
// now sum the kernel_offsets up to give us an aggregate offset for the
|
||||
// transaction
|
||||
let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
|
||||
|
||||
// build a new aggregate tx from the following -
|
||||
// * cut-through inputs
|
||||
// * cut-through outputs
|
||||
|
@ -602,7 +602,7 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
|
|||
// The resulting tx could be invalid for a variety of reasons -
|
||||
// * tx too large (too many inputs|outputs|kernels)
|
||||
// * cut-through may have invalidated the sums
|
||||
tx.validate()?;
|
||||
tx.validate(as_block)?;
|
||||
|
||||
Ok(tx)
|
||||
}
|
||||
|
@ -618,7 +618,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
|
|||
// transaction
|
||||
let mut kernel_offsets = vec![];
|
||||
|
||||
let tx = aggregate(txs)?;
|
||||
let tx = aggregate(txs, None)?;
|
||||
|
||||
for mk_input in mk_tx.inputs {
|
||||
if !tx.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
|
||||
|
@ -670,7 +670,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
|
|||
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
|
||||
|
||||
// Now validate the resulting tx to ensure we have not built something invalid.
|
||||
tx.validate()?;
|
||||
tx.validate(false)?;
|
||||
|
||||
Ok(tx)
|
||||
}
|
||||
|
|
|
@ -105,7 +105,7 @@ fn build_tx_kernel() {
|
|||
).unwrap();
|
||||
|
||||
// check the tx is valid
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
|
||||
// check the kernel is also itself valid
|
||||
assert_eq!(tx.kernels.len(), 1);
|
||||
|
@ -123,13 +123,13 @@ fn transaction_cut_through() {
|
|||
let tx1 = tx1i2o();
|
||||
let tx2 = tx2i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
|
||||
// now build a "cut_through" tx from tx1 and tx2
|
||||
let tx3 = aggregate(vec![tx1, tx2]).unwrap();
|
||||
let tx3 = aggregate(vec![tx1, tx2], None).unwrap();
|
||||
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
}
|
||||
|
||||
// Attempt to deaggregate a multi-kernel transaction in a different way
|
||||
|
@ -140,26 +140,26 @@ fn multi_kernel_transaction_deaggregation() {
|
|||
let tx3 = tx1i1o();
|
||||
let tx4 = tx1i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx4.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
assert!(tx4.validate(false).is_ok());
|
||||
|
||||
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
|
||||
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
|
||||
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()], None).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
|
||||
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
|
||||
|
||||
assert!(tx1234.validate().is_ok());
|
||||
assert!(tx12.validate().is_ok());
|
||||
assert!(tx34.validate().is_ok());
|
||||
assert!(tx1234.validate(false).is_ok());
|
||||
assert!(tx12.validate(false).is_ok());
|
||||
assert!(tx34.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
|
||||
assert!(deaggregated_tx34.validate().is_ok());
|
||||
assert!(deaggregated_tx34.validate(false).is_ok());
|
||||
assert_eq!(tx34, deaggregated_tx34);
|
||||
|
||||
let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();
|
||||
|
||||
assert!(deaggregated_tx12.validate().is_ok());
|
||||
assert!(deaggregated_tx12.validate(false).is_ok());
|
||||
assert_eq!(tx12, deaggregated_tx12);
|
||||
}
|
||||
|
||||
|
@ -169,18 +169,18 @@ fn multi_kernel_transaction_deaggregation_2() {
|
|||
let tx2 = tx1i1o();
|
||||
let tx3 = tx1i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
|
||||
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
|
||||
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
|
||||
|
||||
assert!(tx123.validate().is_ok());
|
||||
assert!(tx12.validate().is_ok());
|
||||
assert!(tx123.validate(false).is_ok());
|
||||
assert!(tx12.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
|
||||
assert!(deaggregated_tx3.validate().is_ok());
|
||||
assert!(deaggregated_tx3.validate(false).is_ok());
|
||||
assert_eq!(tx3, deaggregated_tx3);
|
||||
}
|
||||
|
||||
|
@ -190,19 +190,19 @@ fn multi_kernel_transaction_deaggregation_3() {
|
|||
let tx2 = tx1i1o();
|
||||
let tx3 = tx1i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
|
||||
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
|
||||
let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
|
||||
let tx2 = aggregate(vec![tx2.clone()]).unwrap();
|
||||
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()], None).unwrap();
|
||||
let tx13 = aggregate(vec![tx1.clone(), tx3.clone()], None).unwrap();
|
||||
let tx2 = aggregate(vec![tx2.clone()], None).unwrap();
|
||||
|
||||
assert!(tx123.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx123.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
|
||||
assert!(deaggregated_tx13.validate().is_ok());
|
||||
assert!(deaggregated_tx13.validate(false).is_ok());
|
||||
assert_eq!(tx13, deaggregated_tx13);
|
||||
}
|
||||
|
||||
|
@ -214,11 +214,11 @@ fn multi_kernel_transaction_deaggregation_4() {
|
|||
let tx4 = tx1i1o();
|
||||
let tx5 = tx1i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx4.validate().is_ok());
|
||||
assert!(tx5.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
assert!(tx4.validate(false).is_ok());
|
||||
assert!(tx5.validate(false).is_ok());
|
||||
|
||||
let tx12345 = aggregate(vec![
|
||||
tx1.clone(),
|
||||
|
@ -226,14 +226,14 @@ fn multi_kernel_transaction_deaggregation_4() {
|
|||
tx3.clone(),
|
||||
tx4.clone(),
|
||||
tx5.clone(),
|
||||
]).unwrap();
|
||||
assert!(tx12345.validate().is_ok());
|
||||
], None).unwrap();
|
||||
assert!(tx12345.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx5 = deaggregate(
|
||||
tx12345.clone(),
|
||||
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
|
||||
).unwrap();
|
||||
assert!(deaggregated_tx5.validate().is_ok());
|
||||
assert!(deaggregated_tx5.validate(false).is_ok());
|
||||
assert_eq!(tx5, deaggregated_tx5);
|
||||
}
|
||||
|
||||
|
@ -245,11 +245,11 @@ fn multi_kernel_transaction_deaggregation_5() {
|
|||
let tx4 = tx1i1o();
|
||||
let tx5 = tx1i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx4.validate().is_ok());
|
||||
assert!(tx5.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
assert!(tx4.validate(false).is_ok());
|
||||
assert!(tx5.validate(false).is_ok());
|
||||
|
||||
let tx12345 = aggregate(vec![
|
||||
tx1.clone(),
|
||||
|
@ -257,14 +257,14 @@ fn multi_kernel_transaction_deaggregation_5() {
|
|||
tx3.clone(),
|
||||
tx4.clone(),
|
||||
tx5.clone(),
|
||||
]).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
|
||||
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
|
||||
], None).unwrap();
|
||||
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
|
||||
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
|
||||
|
||||
assert!(tx12345.validate().is_ok());
|
||||
assert!(tx12345.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
|
||||
assert!(deaggregated_tx5.validate().is_ok());
|
||||
assert!(deaggregated_tx5.validate(false).is_ok());
|
||||
assert_eq!(tx5, deaggregated_tx5);
|
||||
}
|
||||
|
||||
|
@ -274,22 +274,22 @@ fn basic_transaction_deaggregation() {
|
|||
let tx1 = tx1i2o();
|
||||
let tx2 = tx2i1o();
|
||||
|
||||
assert!(tx1.validate().is_ok());
|
||||
assert!(tx2.validate().is_ok());
|
||||
assert!(tx1.validate(false).is_ok());
|
||||
assert!(tx2.validate(false).is_ok());
|
||||
|
||||
// now build a "cut_through" tx from tx1 and tx2
|
||||
let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
|
||||
let tx3 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
|
||||
|
||||
assert!(tx3.validate().is_ok());
|
||||
assert!(tx3.validate(false).is_ok());
|
||||
|
||||
let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();
|
||||
|
||||
assert!(deaggregated_tx1.validate().is_ok());
|
||||
assert!(deaggregated_tx1.validate(false).is_ok());
|
||||
assert_eq!(tx1, deaggregated_tx1);
|
||||
|
||||
let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();
|
||||
|
||||
assert!(deaggregated_tx2.validate().is_ok());
|
||||
assert!(deaggregated_tx2.validate(false).is_ok());
|
||||
assert_eq!(tx2, deaggregated_tx2);
|
||||
}
|
||||
|
||||
|
@ -319,7 +319,7 @@ fn hash_output() {
|
|||
#[test]
|
||||
fn blind_tx() {
|
||||
let btx = tx2i1o();
|
||||
assert!(btx.validate().is_ok());
|
||||
assert!(btx.validate(false).is_ok());
|
||||
|
||||
// Ignored for bullet proofs, because calling range_proof_info
|
||||
// with a bullet proof causes painful errors
|
||||
|
@ -381,7 +381,7 @@ fn tx_build_exchange() {
|
|||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
tx_final.validate().unwrap();
|
||||
tx_final.validate(false).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -408,7 +408,7 @@ fn reward_with_tx_block() {
|
|||
let zero_commit = secp_static::commit_to_zero_value();
|
||||
|
||||
let mut tx1 = tx2i1o();
|
||||
tx1.validate().unwrap();
|
||||
tx1.validate(false).unwrap();
|
||||
|
||||
let previous_header = BlockHeader::default();
|
||||
|
||||
|
@ -493,11 +493,11 @@ fn test_block_with_timelocked_tx() {
|
|||
#[test]
|
||||
pub fn test_verify_1i1o_sig() {
|
||||
let tx = tx1i1o();
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn test_verify_2i1o_sig() {
|
||||
let tx = tx2i1o();
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
}
|
||||
|
|
|
@ -26,13 +26,13 @@ use vec_backend::{TestElem, VecBackend};
|
|||
|
||||
#[test]
|
||||
fn some_peak_map() {
|
||||
assert_eq!(pmmr::peak_map_height(0), ( 0b0, 0));
|
||||
assert_eq!(pmmr::peak_map_height(1), ( 0b1, 0));
|
||||
assert_eq!(pmmr::peak_map_height(2), ( 0b1, 1));
|
||||
assert_eq!(pmmr::peak_map_height(3), ( 0b10, 0));
|
||||
assert_eq!(pmmr::peak_map_height(4), ( 0b11, 0));
|
||||
assert_eq!(pmmr::peak_map_height(5), ( 0b11, 1));
|
||||
assert_eq!(pmmr::peak_map_height(6), ( 0b11, 2));
|
||||
assert_eq!(pmmr::peak_map_height(0), (0b0, 0));
|
||||
assert_eq!(pmmr::peak_map_height(1), (0b1, 0));
|
||||
assert_eq!(pmmr::peak_map_height(2), (0b1, 1));
|
||||
assert_eq!(pmmr::peak_map_height(3), (0b10, 0));
|
||||
assert_eq!(pmmr::peak_map_height(4), (0b11, 0));
|
||||
assert_eq!(pmmr::peak_map_height(5), (0b11, 1));
|
||||
assert_eq!(pmmr::peak_map_height(6), (0b11, 2));
|
||||
assert_eq!(pmmr::peak_map_height(7), (0b100, 0));
|
||||
}
|
||||
|
||||
|
|
|
@ -16,9 +16,9 @@ extern crate croaring;
|
|||
|
||||
use croaring::Bitmap;
|
||||
|
||||
use core::core::BlockHeader;
|
||||
use core::core::hash::Hash;
|
||||
use core::core::pmmr::Backend;
|
||||
use core::core::BlockHeader;
|
||||
use core::ser;
|
||||
use core::ser::{PMMRable, Readable, Reader, Writeable, Writer};
|
||||
|
||||
|
@ -118,11 +118,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn rewind(
|
||||
&mut self,
|
||||
position: u64,
|
||||
_rewind_rm_pos: &Bitmap,
|
||||
) -> Result<(), String> {
|
||||
fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
|
||||
self.elems = self.elems[0..(position as usize) + 1].to_vec();
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -87,7 +87,7 @@ where
|
|||
return Ok(None);
|
||||
}
|
||||
|
||||
let tx = transaction::aggregate(txs)?;
|
||||
let tx = transaction::aggregate(txs, None)?;
|
||||
Ok(Some(tx))
|
||||
}
|
||||
|
||||
|
@ -142,20 +142,10 @@ where
|
|||
// If we have nothing to aggregate then simply return the tx itself.
|
||||
entry.tx.clone()
|
||||
} else {
|
||||
// Create a single aggregated tx from the existing pool txs (to check pool is
|
||||
// valid).
|
||||
let agg_tx = transaction::aggregate(txs)?;
|
||||
|
||||
// Then check new tx would not introduce a duplicate output in the pool.
|
||||
for x in &entry.tx.outputs {
|
||||
if agg_tx.outputs.contains(&x) {
|
||||
return Err(PoolError::DuplicateCommitment);
|
||||
}
|
||||
}
|
||||
|
||||
// Finally aggregate the new tx with everything in the pool (with any extra
|
||||
// txs).
|
||||
transaction::aggregate(vec![agg_tx, entry.tx.clone()])?
|
||||
// Create a single aggregated tx from the existing pool txs and the
|
||||
// new entry
|
||||
txs.push(entry.tx.clone());
|
||||
transaction::aggregate(txs, None)?
|
||||
};
|
||||
|
||||
// Validate aggregated tx against the current chain state (via txhashset
|
||||
|
|
|
@ -108,7 +108,7 @@ where
|
|||
self.is_acceptable(&tx)?;
|
||||
|
||||
// Make sure the transaction is valid before anything else.
|
||||
tx.validate().map_err(|e| PoolError::InvalidTx(e))?;
|
||||
tx.validate(false).map_err(|e| PoolError::InvalidTx(e))?;
|
||||
|
||||
// Check the tx lock_time is valid based on current chain state.
|
||||
self.blockchain.verify_tx_lock_height(&tx)?;
|
||||
|
|
|
@ -188,7 +188,7 @@ fn test_the_transaction_pool() {
|
|||
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
|
||||
// tx1 and tx2 are already in the txpool (in aggregated form)
|
||||
// tx4 is the "new" part of this aggregated tx that we care about
|
||||
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
|
||||
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4], None).unwrap();
|
||||
write_pool
|
||||
.add_to_pool(test_source(), agg_tx, false)
|
||||
.unwrap();
|
||||
|
|
|
@ -104,7 +104,7 @@ where
|
|||
stem_txs.len()
|
||||
);
|
||||
|
||||
let agg_tx = transaction::aggregate(stem_txs)?;
|
||||
let agg_tx = transaction::aggregate(stem_txs, None)?;
|
||||
|
||||
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
|
||||
if res.is_err() {
|
||||
|
@ -144,7 +144,7 @@ where
|
|||
stem_txs.len()
|
||||
);
|
||||
|
||||
let agg_tx = transaction::aggregate(stem_txs)?;
|
||||
let agg_tx = transaction::aggregate(stem_txs, None)?;
|
||||
|
||||
let src = TxSource {
|
||||
debug_name: "fluff".to_string(),
|
||||
|
|
|
@ -293,7 +293,7 @@ mod test {
|
|||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -313,7 +313,7 @@ mod test {
|
|||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -327,6 +327,6 @@ mod test {
|
|||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
tx.validate().unwrap();
|
||||
tx.validate(false).unwrap();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -388,7 +388,7 @@ impl Slate {
|
|||
final_tx.kernels[0].verify()?;
|
||||
|
||||
// confirm the overall transaction is valid (including the updated kernel)
|
||||
let _ = final_tx.validate()?;
|
||||
let _ = final_tx.validate(false)?;
|
||||
|
||||
self.tx = final_tx;
|
||||
Ok(())
|
||||
|
|
|
@ -198,7 +198,7 @@ where
|
|||
|
||||
// finalize the burn transaction and send
|
||||
let tx_burn = build::transaction(parts, &keychain)?;
|
||||
tx_burn.validate()?;
|
||||
tx_burn.validate(false)?;
|
||||
Ok(tx_burn)
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue