diff --git a/core/src/core/block.rs b/core/src/core/block.rs
index 44b0880f1..59a85e629 100644
--- a/core/src/core/block.rs
+++ b/core/src/core/block.rs
@@ -523,7 +523,8 @@ impl Block {
 		let header = self.header.clone();
 		let nonce = thread_rng().next_u64();
 
-		let mut out_full = self.outputs
+		let mut out_full = self
+			.outputs
 			.iter()
 			.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.cloned()
@@ -564,55 +565,19 @@ impl Block {
 		reward_kern: TxKernel,
 		difficulty: Difficulty,
 	) -> Result<Block, Error> {
-		let mut kernels = vec![];
-		let mut inputs = vec![];
-		let mut outputs = vec![];
+		// A block is just a big transaction, aggregate as such. Note that
+		// aggregate also runs validation and duplicate commitment checks.
+		let agg_tx = transaction::aggregate(txs, Some((reward_out, reward_kern)))?;
 
-		// we will sum these together at the end
-		// to give us the overall offset for the block
-		let mut kernel_offsets = vec![];
-
-		// iterate over the all the txs
-		// build the kernel for each
-		// and collect all the kernels, inputs and outputs
-		// to build the block (which we can sort of think of as one big tx?)
-		for tx in txs {
-			// validate each transaction and gather their kernels
-			// tx has an offset k2 where k = k1 + k2
-			// and the tx is signed using k1
-			// the kernel excess is k1G
-			// we will sum all the offsets later and store the total offset
-			// on the block_header
-			tx.validate()?;
-
-			// we will sum these later to give a single aggregate offset
-			kernel_offsets.push(tx.offset);
-
-			// add all tx inputs/outputs/kernels to the block
-			kernels.extend(tx.kernels.into_iter());
-			inputs.extend(tx.inputs.into_iter());
-			outputs.extend(tx.outputs.into_iter());
-		}
-
-		// include the reward kernel and output
-		kernels.push(reward_kern);
-		outputs.push(reward_out);
-
-		// now sort everything so the block is built deterministically
-		inputs.sort();
-		outputs.sort();
-		kernels.sort();
-
-		// now sum the kernel_offsets up to give us
-		// an aggregate offset for the entire block
-		kernel_offsets.push(prev.total_kernel_offset);
-		let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
+		// Now add the kernel offset of the previous block for a total
+		let total_kernel_offset =
+			committed::sum_kernel_offsets(vec![agg_tx.offset, prev.total_kernel_offset], vec![])?;
 
 		let total_kernel_sum = {
 			let zero_commit = secp_static::commit_to_zero_value();
 			let secp = static_secp_instance();
 			let secp = secp.lock().unwrap();
-			let mut excesses = map_vec!(kernels, |x| x.excess());
+			let mut excesses = map_vec!(agg_tx.kernels, |x| x.excess());
 			excesses.push(prev.total_kernel_sum);
 			excesses.retain(|x| *x != zero_commit);
 			secp.commit_sum(excesses, vec![])?
@@ -628,9 +593,9 @@ impl Block {
 				total_kernel_sum,
 				..Default::default()
 			},
-			inputs,
-			outputs,
-			kernels,
+			inputs: agg_tx.inputs,
+			outputs: agg_tx.outputs,
+			kernels: agg_tx.kernels,
 		}.cut_through())
 	}
 
@@ -655,12 +620,14 @@ impl Block {
 	/// we do not want to cut-through (all coinbase must be preserved)
 	///
 	pub fn cut_through(self) -> Block {
-		let in_set = self.inputs
+		let in_set = self
+			.inputs
 			.iter()
 			.map(|inp| inp.commitment())
 			.collect::<HashSet<_>>();
 
-		let out_set = self.outputs
+		let out_set = self
+			.outputs
 			.iter()
 			.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.map(|out| out.commitment())
@@ -668,12 +635,14 @@ impl Block {
 
 		let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
 
-		let new_inputs = self.inputs
+		let new_inputs = self
+			.inputs
 			.into_iter()
 			.filter(|inp| !to_cut_through.contains(&inp.commitment()))
 			.collect::<Vec<_>>();
 
-		let new_outputs = self.outputs
+		let new_outputs = self
+			.outputs
 			.into_iter()
 			.filter(|out| !to_cut_through.contains(&out.commitment()))
 			.collect::<Vec<_>>();
@@ -757,7 +726,8 @@ impl Block {
 	// Verify that no input is spending an output from the same block.
 	fn verify_cut_through(&self) -> Result<(), Error> {
 		for inp in &self.inputs {
-			if self.outputs
+			if self
+				.outputs
 				.iter()
 				.any(|out| out.commitment() == inp.commitment())
 			{
@@ -800,12 +770,14 @@ impl Block {
 	/// Check the sum of coinbase-marked outputs match
 	/// the sum of coinbase-marked kernels accounting for fees.
 	pub fn verify_coinbase(&self) -> Result<(), Error> {
-		let cb_outs = self.outputs
+		let cb_outs = self
+			.outputs
 			.iter()
 			.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.collect::<Vec<&Output>>();
 
-		let cb_kerns = self.kernels
+		let cb_kerns = self
+			.kernels
 			.iter()
 			.filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
 			.collect::<Vec<&TxKernel>>();
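The key change in `with_reward` above is that a block body is now literally one aggregated transaction, built via `transaction::aggregate(txs, Some((reward_out, reward_kern)))`, with only the offset and kernel-sum bookkeeping left in block.rs. The `cut_through` logic itself is unchanged, just reformatted: inputs and non-coinbase outputs become commitment sets, and anything in the intersection is dropped from both sides. A minimal std-only sketch of that rule, with plain strings standing in for Pedersen commitments (an assumption purely for illustration):

use std::collections::HashSet;

// Sketch of the cut-through rule from Block::cut_through: an output spent
// by an input of the same block cancels out, so both sides are dropped.
// (In the real code, coinbase outputs are filtered out of `out_set` first
// so they can never be cut through.)
fn cut_through<'a>(inputs: Vec<&'a str>, outputs: Vec<&'a str>) -> (Vec<&'a str>, Vec<&'a str>) {
    let in_set: HashSet<&str> = inputs.iter().cloned().collect();
    let out_set: HashSet<&str> = outputs.iter().cloned().collect();
    let to_cut: HashSet<&str> = in_set.intersection(&out_set).cloned().collect();
    (
        inputs.into_iter().filter(|c| !to_cut.contains(c)).collect(),
        outputs.into_iter().filter(|c| !to_cut.contains(c)).collect(),
    )
}

fn main() {
    let (ins, outs) = cut_through(vec!["in_a", "spent_b"], vec!["spent_b", "out_c"]);
    assert_eq!(ins, vec!["in_a"]);
    assert_eq!(outs, vec!["out_c"]);
}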
diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index 2e57ffb3f..3673fe7d2 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -314,7 +314,7 @@ impl Readable for Transaction {
 		// Treat any validation issues as data corruption.
 		// An example of this would be reading a tx
 		// that exceeded the allowed number of inputs.
-		tx.validate().map_err(|_| ser::Error::CorruptedData)?;
+		tx.validate(false).map_err(|_| ser::Error::CorruptedData)?;
 
 		Ok(tx)
 	}
@@ -446,12 +446,14 @@ impl Transaction {
 	/// Validates all relevant parts of a fully built transaction. Checks the
 	/// excess value against the signature as well as range proofs for each
 	/// output.
-	pub fn validate(&self) -> Result<(), Error> {
-		self.verify_features()?;
-		self.verify_weight()?;
+	pub fn validate(&self, as_block: bool) -> Result<(), Error> {
+		if !as_block {
+			self.verify_features()?;
+			self.verify_weight()?;
+			self.verify_kernel_sums(self.overage(), self.offset)?;
+		}
 		self.verify_sorted()?;
 		self.verify_cut_through()?;
-		self.verify_kernel_sums(self.overage(), self.offset)?;
 		self.verify_rangeproofs()?;
 		self.verify_kernel_signatures()?;
 		Ok(())
@@ -482,7 +484,8 @@ impl Transaction {
 	// Verify that no input is spending an output from the same block.
 	fn verify_cut_through(&self) -> Result<(), Error> {
 		for inp in &self.inputs {
-			if self.outputs
+			if self
+				.outputs
 				.iter()
 				.any(|out| out.commitment() == inp.commitment())
 			{
@@ -503,7 +506,8 @@ impl Transaction {
 
 	// Verify we have no outputs tagged as COINBASE_OUTPUT.
 	fn verify_output_features(&self) -> Result<(), Error> {
-		if self.outputs
+		if self
+			.outputs
 			.iter()
 			.any(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
 		{
@@ -514,7 +518,8 @@ impl Transaction {
 
 	// Verify we have no kernels tagged as COINBASE_KERNEL.
 	fn verify_kernel_features(&self) -> Result<(), Error> {
-		if self.kernels
+		if self
+			.kernels
 			.iter()
 			.any(|x| x.features.contains(KernelFeatures::COINBASE_KERNEL))
 		{
@@ -525,8 +530,12 @@ impl Transaction {
 }
 
 /// Aggregate a vec of transactions into a multi-kernel transaction with
-/// cut_through
-pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
+/// cut_through. Optionally allows passing a reward output and kernel for
+/// block building.
+pub fn aggregate(
+	transactions: Vec<Transaction>,
+	reward: Option<(Output, TxKernel)>,
+) -> Result<Transaction, Error> {
 	let mut inputs: Vec<Input> = vec![];
 	let mut outputs: Vec<Output> = vec![];
 	let mut kernels: Vec<TxKernel> = vec![];
@@ -543,37 +552,24 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
 		outputs.append(&mut transaction.outputs);
 		kernels.append(&mut transaction.kernels);
 	}
+	let as_block = reward.is_some();
+	if let Some((out, kernel)) = reward {
+		outputs.push(out);
+		kernels.push(kernel);
+	}
 
-	// now sum the kernel_offsets up to give us an aggregate offset for the
-	// transaction
-	let total_kernel_offset = {
-		let secp = static_secp_instance();
-		let secp = secp.lock().unwrap();
-		let mut keys = kernel_offsets
-			.into_iter()
-			.filter(|x| *x != BlindingFactor::zero())
-			.filter_map(|x| x.secret_key(&secp).ok())
-			.collect::<Vec<_>>();
-
-		if keys.is_empty() {
-			BlindingFactor::zero()
-		} else {
-			let sum = secp.blind_sum(keys, vec![])?;
-			BlindingFactor::from_secret_key(sum)
-		}
-	};
+	// assemble output commitments set, checking they're all unique
+	let mut out_set = HashSet::new();
+	let all_uniq = { outputs.iter().all(|o| out_set.insert(o.commitment())) };
+	if !all_uniq {
+		return Err(Error::AggregationError);
+	}
 
 	let in_set = inputs
 		.iter()
 		.map(|inp| inp.commitment())
 		.collect::<HashSet<_>>();
 
-	let out_set = outputs
-		.iter()
-		.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
-		.map(|out| out.commitment())
-		.collect::<HashSet<_>>();
-
 	let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
 
 	let mut new_inputs = inputs
@@ -591,6 +587,10 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
 	new_outputs.sort();
 	kernels.sort();
 
+	// now sum the kernel_offsets up to give us an aggregate offset for the
+	// transaction
+	let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
+
 	// build a new aggregate tx from the following -
 	//   * cut-through inputs
 	//   * cut-through outputs
@@ -602,7 +602,7 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
 	// The resulting tx could be invalid for a variety of reasons -
 	//   * tx too large (too many inputs|outputs|kernels)
 	//   * cut-through may have invalidated the sums
-	tx.validate()?;
+	tx.validate(as_block)?;
 
 	Ok(tx)
 }
@@ -618,7 +618,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transaction, Error> {
-	let tx = aggregate(txs)?;
+	let tx = aggregate(txs, None)?;
diff --git a/core/tests/pmmr.rs b/core/tests/pmmr.rs
--- a/core/tests/pmmr.rs
+++ b/core/tests/pmmr.rs
-	fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String> {
+	fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
 		self.elems = self.elems[0..(position as usize) + 1].to_vec();
 		Ok(())
 	}
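The `validate(as_block)` split above gates the tx-only rules: coinbase-tagged outputs and kernels, the per-tx weight cap, and the plain-tx kernel sum (which no longer balances once a reward output and kernel are folded in) are skipped when the aggregate is really a block body, while structural checks still run in both modes. A stub sketch of that gating, with hypothetical `verify_*` stubs rather than grin's real implementations:

struct Tx;

impl Tx {
    fn validate(&self, as_block: bool) -> Result<(), String> {
        if !as_block {
            // tx-only rules: a block body may legitimately carry coinbase
            // features and a reward, so these checks move to block validation
            self.verify_features()?;
            self.verify_weight()?;
            self.verify_kernel_sums()?;
        }
        // structural rules that hold for both txs and block bodies
        self.verify_sorted()?;
        self.verify_cut_through()?;
        Ok(())
    }
    fn verify_features(&self) -> Result<(), String> { Ok(()) }
    fn verify_weight(&self) -> Result<(), String> { Ok(()) }
    fn verify_kernel_sums(&self) -> Result<(), String> { Ok(()) }
    fn verify_sorted(&self) -> Result<(), String> { Ok(()) }
    fn verify_cut_through(&self) -> Result<(), String> { Ok(()) }
}

fn main() {
    assert!(Tx.validate(false).is_ok()); // standalone tx: all checks
    assert!(Tx.validate(true).is_ok()); // block body: structural checks only
}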
diff --git a/pool/src/pool.rs b/pool/src/pool.rs
index a62b26791..51672d201 100644
--- a/pool/src/pool.rs
+++ b/pool/src/pool.rs
@@ -87,7 +87,7 @@ where
 			return Ok(None);
 		}
 
-		let tx = transaction::aggregate(txs)?;
+		let tx = transaction::aggregate(txs, None)?;
 
 		Ok(Some(tx))
 	}
@@ -142,20 +142,10 @@ where
 			// If we have nothing to aggregate then simply return the tx itself.
 			entry.tx.clone()
 		} else {
-			// Create a single aggregated tx from the existing pool txs (to check pool is
-			// valid).
-			let agg_tx = transaction::aggregate(txs)?;
-
-			// Then check new tx would not introduce a duplicate output in the pool.
-			for x in &entry.tx.outputs {
-				if agg_tx.outputs.contains(&x) {
-					return Err(PoolError::DuplicateCommitment);
-				}
-			}
-
-			// Finally aggregate the new tx with everything in the pool (with any extra
-			// txs).
-			transaction::aggregate(vec![agg_tx, entry.tx.clone()])?
+			// Create a single aggregated tx from the existing pool txs and the
+			// new entry
+			txs.push(entry.tx.clone());
+			transaction::aggregate(txs, None)?
 		};
 
 		// Validate aggregated tx against the current chain state (via txhashset
diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs
index 68f1723cb..ad8e1a70f 100644
--- a/pool/src/transaction_pool.rs
+++ b/pool/src/transaction_pool.rs
@@ -108,7 +108,7 @@ where
 		self.is_acceptable(&tx)?;
 
 		// Make sure the transaction is valid before anything else.
-		tx.validate().map_err(|e| PoolError::InvalidTx(e))?;
+		tx.validate(false).map_err(|e| PoolError::InvalidTx(e))?;
 
 		// Check the tx lock_time is valid based on current chain state.
 		self.blockchain.verify_tx_lock_height(&tx)?;
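With the pool change above, duplicate-output detection is no longer a separate pre-scan; the pool simply pushes the new entry onto the candidate list and lets a single `aggregate` call both merge and reject repeated commitments. The check itself is just `HashSet::insert` inside `all`, which short-circuits on the first duplicate. A runnable sketch with `u64` ids standing in for output commitments (an illustration, not grin's types):

use std::collections::HashSet;

// Mirrors the uniqueness check added to transaction::aggregate: insert()
// returns false on a repeated value, so all() stops at the first duplicate.
fn all_unique(commitments: &[u64]) -> bool {
    let mut seen = HashSet::new();
    commitments.iter().all(|c| seen.insert(*c))
}

fn main() {
    assert!(all_unique(&[1, 2, 3]));
    // a duplicate output commitment maps to Error::AggregationError
    assert!(!all_unique(&[1, 2, 2]));
}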
diff --git a/pool/tests/transaction_pool.rs b/pool/tests/transaction_pool.rs
index beb4e3924..f90764b43 100644
--- a/pool/tests/transaction_pool.rs
+++ b/pool/tests/transaction_pool.rs
@@ -188,7 +188,7 @@ fn test_the_transaction_pool() {
 		let tx4 = test_transaction(&keychain, vec![800], vec![799]);
 		// tx1 and tx2 are already in the txpool (in aggregated form)
 		// tx4 is the "new" part of this aggregated tx that we care about
-		let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
+		let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4], None).unwrap();
 		write_pool
 			.add_to_pool(test_source(), agg_tx, false)
 			.unwrap();
diff --git a/servers/src/grin/dandelion_monitor.rs b/servers/src/grin/dandelion_monitor.rs
index d03af023c..8bd042c11 100644
--- a/servers/src/grin/dandelion_monitor.rs
+++ b/servers/src/grin/dandelion_monitor.rs
@@ -104,7 +104,7 @@ where
 			stem_txs.len()
 		);
 
-		let agg_tx = transaction::aggregate(stem_txs)?;
+		let agg_tx = transaction::aggregate(stem_txs, None)?;
 
 		let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
 		if res.is_err() {
@@ -144,7 +144,7 @@ where
 			stem_txs.len()
 		);
 
-		let agg_tx = transaction::aggregate(stem_txs)?;
+		let agg_tx = transaction::aggregate(stem_txs, None)?;
 
 		let src = TxSource {
 			debug_name: "fluff".to_string(),
diff --git a/wallet/src/libtx/build.rs b/wallet/src/libtx/build.rs
index 9828136aa..31d493a26 100644
--- a/wallet/src/libtx/build.rs
+++ b/wallet/src/libtx/build.rs
@@ -293,7 +293,7 @@ mod test {
 			&keychain,
 		).unwrap();
 
-		tx.validate().unwrap();
+		tx.validate(false).unwrap();
 	}
 
 	#[test]
@@ -313,7 +313,7 @@ mod test {
 			&keychain,
 		).unwrap();
 
-		tx.validate().unwrap();
+		tx.validate(false).unwrap();
 	}
 
 	#[test]
@@ -327,6 +327,6 @@ mod test {
 			&keychain,
 		).unwrap();
 
-		tx.validate().unwrap();
+		tx.validate(false).unwrap();
 	}
 }
diff --git a/wallet/src/libtx/slate.rs b/wallet/src/libtx/slate.rs
index ff49b6c7f..fc6eb4ab0 100644
--- a/wallet/src/libtx/slate.rs
+++ b/wallet/src/libtx/slate.rs
@@ -388,7 +388,7 @@ impl Slate {
 		final_tx.kernels[0].verify()?;
 
 		// confirm the overall transaction is valid (including the updated kernel)
-		let _ = final_tx.validate()?;
+		let _ = final_tx.validate(false)?;
 
 		self.tx = final_tx;
 		Ok(())
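All remaining call sites follow the same migration: pool, dandelion, and wallet paths pass `None` for the reward, while block building passes `Some((reward_out, reward_kern))`. A toy sketch of that calling convention, with string stand-ins rather than grin's `Output` and `TxKernel` types:

// Toy model of the new aggregate() signature: the optional reward is simply
// folded into the parts being aggregated when present.
fn aggregate(mut parts: Vec<&'static str>, reward: Option<(&'static str, &'static str)>) -> Vec<&'static str> {
    if let Some((out, kern)) = reward {
        parts.push(out); // reward output, as in block building
        parts.push(kern); // reward kernel
    }
    parts
}

fn main() {
    // pool / dandelion / wallet call sites: no reward
    assert_eq!(aggregate(vec!["tx1", "tx2"], None).len(), 2);
    // block building: reward output + kernel included
    assert_eq!(aggregate(vec!["tx1"], Some(("reward_out", "reward_kern"))).len(), 3);
}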
diff --git a/wallet/src/libwallet/internal/tx.rs b/wallet/src/libwallet/internal/tx.rs
index 0a78f7f6e..73a44273f 100644
--- a/wallet/src/libwallet/internal/tx.rs
+++ b/wallet/src/libwallet/internal/tx.rs
@@ -198,7 +198,7 @@ where
 
 	// finalize the burn transaction and send
 	let tx_burn = build::transaction(parts, &keychain)?;
-	tx_burn.validate()?;
+	tx_burn.validate(false)?;
 
 	Ok(tx_burn)
 }