Anti-aggregation mechanism for multi-kernel transaction (#984)

* Test multi kernel deaggregation

* Add aggregate without cut_through and deaggregate function

* Add deaggregate function in pool and test

* Rustfmt

* Add deaggregate_and_add_to_memory_pool

* Deaggregate regular multi kernel transaction by default

* Rustfmt

* Add error type FailedDeaggregation

* Add find candidates function

* Rustfmt

* Use intersection of sets instead of for comparisons

* Rustfmt

* Removed unnecessary if

* Stricter verification with is_subset

* Rustfmt
This commit is contained in:
Quentin Le Sceller 2018-04-24 15:47:13 -04:00 committed by GitHub
parent 2b2e13be63
commit 55f6e3e63f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 498 additions and 12 deletions

View file

@ -353,11 +353,173 @@ mod test {
assert!(tx2.validate().is_ok()); assert!(tx2.validate().is_ok());
// now build a "cut_through" tx from tx1 and tx2 // now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(vec![tx1, tx2]).unwrap(); let tx3 = aggregate_with_cut_through(vec![tx1, tx2]).unwrap();
assert!(tx3.validate().is_ok()); assert!(tx3.validate().is_ok());
} }
// Attempt to deaggregate a multi-kernel transaction in a different way
#[test]
fn multi_kernel_transaction_deaggregation() {
    let tx1 = tx1i1o();
    let tx2 = tx1i1o();
    let tx3 = tx1i1o();
    let tx4 = tx1i1o();

    // every component transaction must be valid on its own
    for tx in [&tx1, &tx2, &tx3, &tx4].iter() {
        assert!(tx.validate().is_ok());
    }

    // aggregate all four, plus each half separately
    let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
    let tx12 = aggregate(vec![tx1, tx2]).unwrap();
    let tx34 = aggregate(vec![tx3, tx4]).unwrap();
    assert!(tx1234.validate().is_ok());
    assert!(tx12.validate().is_ok());
    assert!(tx34.validate().is_ok());

    // subtracting one half must leave exactly the other half
    let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
    assert!(deaggregated_tx34.validate().is_ok());
    assert_eq!(tx34, deaggregated_tx34);

    let deaggregated_tx12 = deaggregate(tx1234, vec![tx34]).unwrap();
    assert!(deaggregated_tx12.validate().is_ok());
    assert_eq!(tx12, deaggregated_tx12);
}
#[test]
fn multi_kernel_transaction_deaggregation_2() {
    let tx1 = tx1i1o();
    let tx2 = tx1i1o();
    let tx3 = tx1i1o();

    // every component transaction must be valid on its own
    for tx in [&tx1, &tx2, &tx3].iter() {
        assert!(tx.validate().is_ok());
    }

    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
    let tx12 = aggregate(vec![tx1, tx2]).unwrap();
    assert!(tx123.validate().is_ok());
    assert!(tx12.validate().is_ok());

    // subtracting tx1+tx2 from tx1+tx2+tx3 must leave exactly tx3
    let deaggregated_tx3 = deaggregate(tx123, vec![tx12]).unwrap();
    assert!(deaggregated_tx3.validate().is_ok());
    assert_eq!(tx3, deaggregated_tx3);
}
#[test]
fn multi_kernel_transaction_deaggregation_3() {
    let tx1 = tx1i1o();
    let tx2 = tx1i1o();
    let tx3 = tx1i1o();

    // every component transaction must be valid on its own
    for tx in [&tx1, &tx2, &tx3].iter() {
        assert!(tx.validate().is_ok());
    }

    let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
    let tx13 = aggregate(vec![tx1, tx3]).unwrap();
    // wrap tx2 in a single-element aggregate so we deaggregate with an
    // aggregated transaction rather than the raw one
    let tx2 = aggregate(vec![tx2]).unwrap();
    assert!(tx123.validate().is_ok());
    assert!(tx2.validate().is_ok());

    // subtracting tx2 from tx1+tx2+tx3 must leave exactly tx1+tx3
    let deaggregated_tx13 = deaggregate(tx123, vec![tx2]).unwrap();
    assert!(deaggregated_tx13.validate().is_ok());
    assert_eq!(tx13, deaggregated_tx13);
}
#[test]
fn multi_kernel_transaction_deaggregation_4() {
    let tx1 = tx1i1o();
    let tx2 = tx1i1o();
    let tx3 = tx1i1o();
    let tx4 = tx1i1o();
    let tx5 = tx1i1o();

    // every component transaction must be valid on its own
    for tx in [&tx1, &tx2, &tx3, &tx4, &tx5].iter() {
        assert!(tx.validate().is_ok());
    }

    let tx12345 = aggregate(vec![
        tx1.clone(),
        tx2.clone(),
        tx3.clone(),
        tx4.clone(),
        tx5.clone(),
    ]).unwrap();
    assert!(tx12345.validate().is_ok());

    // subtracting the four raw transactions must leave exactly tx5
    let deaggregated_tx5 = deaggregate(tx12345, vec![tx1, tx2, tx3, tx4]).unwrap();
    assert!(deaggregated_tx5.validate().is_ok());
    assert_eq!(tx5, deaggregated_tx5);
}
#[test]
fn multi_kernel_transaction_deaggregation_5() {
    let tx1 = tx1i1o();
    let tx2 = tx1i1o();
    let tx3 = tx1i1o();
    let tx4 = tx1i1o();
    let tx5 = tx1i1o();

    // every component transaction must be valid on its own
    for tx in [&tx1, &tx2, &tx3, &tx4, &tx5].iter() {
        assert!(tx.validate().is_ok());
    }

    let tx12345 = aggregate(vec![
        tx1.clone(),
        tx2.clone(),
        tx3.clone(),
        tx4.clone(),
        tx5.clone(),
    ]).unwrap();
    let tx12 = aggregate(vec![tx1, tx2]).unwrap();
    let tx34 = aggregate(vec![tx3, tx4]).unwrap();
    assert!(tx12345.validate().is_ok());

    // subtracting two aggregated pairs must leave exactly tx5
    let deaggregated_tx5 = deaggregate(tx12345, vec![tx12, tx34]).unwrap();
    assert!(deaggregated_tx5.validate().is_ok());
    assert_eq!(tx5, deaggregated_tx5);
}
// Attempt to deaggregate a two-kernel transaction into its component parts
#[test]
fn basic_transaction_deaggregation() {
    let tx1 = tx1i2o();
    let tx2 = tx2i1o();
    assert!(tx1.validate().is_ok());
    assert!(tx2.validate().is_ok());

    // build a multi-kernel tx (no cut-through) from tx1 and tx2
    let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
    assert!(tx3.validate().is_ok());

    // subtracting either component must yield exactly the other one
    let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();
    assert!(deaggregated_tx1.validate().is_ok());
    assert_eq!(tx1, deaggregated_tx1);

    let deaggregated_tx2 = deaggregate(tx3, vec![tx1]).unwrap();
    assert!(deaggregated_tx2.validate().is_ok());
    assert_eq!(tx2, deaggregated_tx2);
}
#[test] #[test]
fn hash_output() { fn hash_output() {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();

View file

@ -234,6 +234,14 @@ pub struct Transaction {
pub offset: BlindingFactor, pub offset: BlindingFactor,
} }
/// Two transactions are equal when all of their constituent parts
/// (inputs, outputs, kernels and offset) match.
impl PartialEq for Transaction {
    fn eq(&self, other: &Transaction) -> bool {
        self.offset == other.offset
            && self.inputs == other.inputs
            && self.outputs == other.outputs
            && self.kernels == other.kernels
    }
}
/// Implementation of Writeable for a fully blinded transaction, defines how to /// Implementation of Writeable for a fully blinded transaction, defines how to
/// write the transaction as binary. /// write the transaction as binary.
impl Writeable for Transaction { impl Writeable for Transaction {
@ -482,8 +490,9 @@ impl Transaction {
} }
} }
/// Aggregate a vec of transactions into a multi-kernel transaction /// Aggregate a vec of transactions into a multi-kernel transaction with
pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> { /// cut_through
pub fn aggregate_with_cut_through(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
let mut inputs: Vec<Input> = vec![]; let mut inputs: Vec<Input> = vec![];
let mut outputs: Vec<Output> = vec![]; let mut outputs: Vec<Output> = vec![];
let mut kernels: Vec<TxKernel> = vec![]; let mut kernels: Vec<TxKernel> = vec![];
@ -556,6 +565,121 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
Ok(tx.with_offset(total_kernel_offset)) Ok(tx.with_offset(total_kernel_offset))
} }
/// Aggregate a vec of transactions into a single multi-kernel transaction.
///
/// Unlike `aggregate_with_cut_through` this preserves every input and
/// output, so the result can later be deaggregated again via `deaggregate`.
/// The individual kernel offsets are summed into a single aggregate offset.
pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
    let mut inputs: Vec<Input> = vec![];
    let mut outputs: Vec<Output> = vec![];
    let mut kernels: Vec<TxKernel> = vec![];

    // we will sum these together at the end to give us the overall offset for the
    // transaction
    let mut kernel_offsets = vec![];

    for mut transaction in transactions {
        // we will sum these later to give a single aggregate offset
        kernel_offsets.push(transaction.offset);

        inputs.append(&mut transaction.inputs);
        outputs.append(&mut transaction.outputs);
        kernels.append(&mut transaction.kernels);
    }

    // now sum the kernel_offsets up to give us an aggregate offset for the
    // transaction
    let total_kernel_offset = {
        let secp = static_secp_instance();
        let secp = secp.lock().unwrap();
        // zero offsets contribute nothing and secp rejects them, so filter first
        let keys = kernel_offsets
            .iter()
            .cloned()
            .filter(|x| *x != BlindingFactor::zero())
            .filter_map(|x| x.secret_key(&secp).ok())
            .collect::<Vec<_>>();
        if keys.is_empty() {
            BlindingFactor::zero()
        } else {
            let sum = secp.blind_sum(keys, vec![])?;
            BlindingFactor::from_secret_key(sum)
        }
    };

    // sort them lexicographically
    inputs.sort();
    outputs.sort();
    kernels.sort();

    let tx = Transaction::new(inputs, outputs, kernels);
    Ok(tx.with_offset(total_kernel_offset))
}
/// Attempt to deaggregate a multi-kernel transaction: subtract the inputs,
/// outputs, kernels and offsets of `txs` from `mk_tx`, returning the
/// remaining transaction.
///
/// Errors from aggregating `txs` (or from summing offsets) are propagated
/// to the caller instead of panicking.
pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transaction, Error> {
    let mut inputs: Vec<Input> = vec![];
    let mut outputs: Vec<Output> = vec![];
    let mut kernels: Vec<TxKernel> = vec![];

    // we will subtract these at the end to give us the overall offset for the
    // transaction
    let mut kernel_offsets = vec![];

    // aggregate the transactions to be subtracted into a single tx,
    // propagating (rather than unwrapping) any aggregation error
    let tx = aggregate(txs)?;

    // remember the multi-kernel tx offset before its fields are moved out below
    let mk_offset = mk_tx.offset.clone();

    // keep every input/output/kernel of mk_tx that is not part of the
    // aggregated tx (deduplicating as we go)
    for mk_input in mk_tx.inputs {
        if !tx.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
            inputs.push(mk_input);
        }
    }
    for mk_output in mk_tx.outputs {
        if !tx.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
            outputs.push(mk_output);
        }
    }
    for mk_kernel in mk_tx.kernels {
        if !tx.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
            kernels.push(mk_kernel);
        }
    }

    kernel_offsets.push(tx.offset);

    // now compute the total kernel offset:
    // the multi-kernel tx offset minus the offsets of the subtracted txs
    let total_kernel_offset = {
        let secp = static_secp_instance();
        let secp = secp.lock().unwrap();
        let positive_keys = vec![mk_offset]
            .iter()
            .cloned()
            .filter(|x| *x != BlindingFactor::zero())
            .filter_map(|x| x.secret_key(&secp).ok())
            .collect::<Vec<_>>();
        let negative_keys = kernel_offsets
            .iter()
            .cloned()
            .filter(|x| *x != BlindingFactor::zero())
            .filter_map(|x| x.secret_key(&secp).ok())
            .collect::<Vec<_>>();
        if positive_keys.is_empty() && negative_keys.is_empty() {
            BlindingFactor::zero()
        } else {
            let sum = secp.blind_sum(positive_keys, negative_keys)?;
            BlindingFactor::from_secret_key(sum)
        }
    };

    // sorting them lexicographically
    inputs.sort();
    outputs.sort();
    kernels.sort();

    let tx = Transaction::new(inputs, outputs, kernels);
    Ok(tx.with_offset(total_kernel_offset))
}
/// A transaction input. /// A transaction input.
/// ///
/// Primarily a reference to an output being spent by the transaction. /// Primarily a reference to an output being spent by the transaction.

View file

@ -24,7 +24,7 @@ use core::core::hash::Hash;
use core::core::hash::Hashed; use core::core::hash::Hashed;
use core::core::id::ShortIdentifiable; use core::core::id::ShortIdentifiable;
use core::core::transaction; use core::core::transaction;
use core::core::{OutputIdentifier, Transaction}; use core::core::{OutputIdentifier, Transaction, TxKernel};
use core::core::{block, hash}; use core::core::{block, hash};
use util::LOGGER; use util::LOGGER;
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
@ -409,6 +409,84 @@ where
} }
} }
/// Attempt to deaggregate a transaction against the current pool content
/// and add the result to the mempool. If deaggregation fails for any
/// reason, fall back to adding the transaction unchanged.
pub fn deaggregate_and_add_to_memory_pool(
    &mut self,
    tx_source: TxSource,
    tx: transaction::Transaction,
    stem: bool,
) -> Result<(), PoolError> {
    match self.deaggregate_transaction(tx.clone()) {
        Ok(deaggregated_tx) => self.add_to_memory_pool(tx_source, deaggregated_tx, stem),
        Err(e) => {
            // best effort: log and add the original transaction instead
            debug!(
                LOGGER,
                "Could not deaggregate multi-kernel transaction: {:?}", e
            );
            self.add_to_memory_pool(tx_source, tx, stem)
        }
    }
}
/// Attempt to deaggregate a multi-kernel transaction as much as possible
/// based on the content of the mempool.
pub fn deaggregate_transaction(
    &self,
    tx: transaction::Transaction,
) -> Result<Transaction, PoolError> {
    // look for candidate txs in the pool and try to subtract them
    if let Some(candidates_txs) = self.find_candidates(tx.clone()) {
        transaction::deaggregate(tx, candidates_txs).map_err(|e| {
            debug!(LOGGER, "Could not deaggregate transaction: {}", e);
            PoolError::FailedDeaggregation
        })
    } else {
        debug!(
            LOGGER,
            "Could not deaggregate transaction: no candidate transaction found"
        );
        Err(PoolError::FailedDeaggregation)
    }
}
/// Find candidate transactions for deaggregating a multi-kernel transaction.
///
/// While inputs and outputs may have been cut through during aggregation,
/// kernels always stay intact, so we look for pool transactions whose
/// kernel set is a subset of the multi-kernel transaction's kernels.
fn find_candidates(&self, tx: transaction::Transaction) -> Option<Vec<Transaction>> {
    let mut found_txs: Vec<Transaction> = vec![];
    // Gather all the kernels of the multi-kernel transaction in one set
    let kernels_set: HashSet<TxKernel> = tx.kernels.iter().cloned().collect::<HashSet<_>>();
    // Check each transaction in the pool (renamed from `tx` to avoid
    // shadowing the multi-kernel transaction parameter)
    for (_, pool_tx) in &self.transactions {
        let candidate_kernels_set: HashSet<TxKernel> =
            pool_tx.kernels.iter().cloned().collect::<HashSet<_>>();
        let kernels_set_intersection: HashSet<&TxKernel> =
            kernels_set.intersection(&candidate_kernels_set).collect();
        // Consider the transaction only if all the kernels match and if it is
        // indeed a subset
        if kernels_set_intersection.len() == pool_tx.kernels.len()
            && candidate_kernels_set.is_subset(&kernels_set)
        {
            debug!(LOGGER, "Found a transaction with the same kernel");
            found_txs.push(*pool_tx.clone());
        }
    }
    if !found_txs.is_empty() {
        Some(found_txs)
    } else {
        None
    }
}
/// Check the output for a conflict with an existing output. /// Check the output for a conflict with an existing output.
/// ///
/// Checks the output (by commitment) against outputs in the blockchain /// Checks the output (by commitment) against outputs in the blockchain
@ -964,7 +1042,7 @@ mod tests {
let child_transaction = test_transaction(vec![11, 3], vec![12]); let child_transaction = test_transaction(vec![11, 3], vec![12]);
let txs = vec![parent_transaction, child_transaction]; let txs = vec![parent_transaction, child_transaction];
let multi_kernel_transaction = transaction::aggregate(txs).unwrap(); let multi_kernel_transaction = transaction::aggregate_with_cut_through(txs).unwrap();
dummy_chain.update_output_set(new_output); dummy_chain.update_output_set(new_output);
@ -1000,7 +1078,84 @@ mod tests {
} }
#[test] #[test]
/// Attempt to add a bad multi kernel transaction to the mempool should get rejected /// Attempt to deaggregate a multi_kernel transaction
/// Push the parent transaction in the mempool then send a multikernel tx containing it and a
/// child transaction In the end, the pool should contain both transactions.
fn test_multikernel_deaggregate() {
    let mut dummy_chain = DummyChainImpl::new();
    let head_header = block::BlockHeader {
        height: 1,
        ..block::BlockHeader::default()
    };
    dummy_chain.store_head_header(&head_header);

    let transaction1 = test_transaction_with_offset(vec![5], vec![1]);
    // was a leftover debug println!; assert validity instead
    assert!(transaction1.validate().is_ok());
    let transaction2 = test_transaction_with_offset(vec![8], vec![2]);
    assert!(transaction2.validate().is_ok());

    // We want these transactions to be rooted in the blockchain.
    let new_output = DummyOutputSet::empty()
        .with_output(test_output(5))
        .with_output(test_output(8));
    dummy_chain.update_output_set(new_output);

    // To mirror how this construction is intended to be used, the pool
    // is placed inside a RwLock.
    let pool = RwLock::new(test_setup(&Arc::new(dummy_chain)));

    // Take the write lock and add a pool entry
    {
        let mut write_pool = pool.write().unwrap();
        assert_eq!(write_pool.total_size(), 0);

        // First, add the first transaction
        let result = write_pool.add_to_memory_pool(test_source(), transaction1.clone(), false);
        if result.is_err() {
            panic!("got an error adding tx 1: {:?}", result.err().unwrap());
        }
    }

    let txs = vec![transaction1.clone(), transaction2.clone()];
    let multi_kernel_transaction = transaction::aggregate(txs).unwrap();

    let found_tx: Transaction;
    // Now take the read lock and attempt to deaggregate the transaction
    {
        let read_pool = pool.read().unwrap();
        found_tx = read_pool
            .deaggregate_transaction(multi_kernel_transaction)
            .unwrap();

        // Test the retrieved transaction
        assert_eq!(transaction2, found_tx);
    }

    // Take the write lock and add a pool entry
    {
        let mut write_pool = pool.write().unwrap();
        assert_eq!(write_pool.total_size(), 1);

        // Add the deaggregated transaction
        let result = write_pool.add_to_memory_pool(test_source(), found_tx.clone(), false);
        if result.is_err() {
            panic!("got an error adding child tx: {:?}", result.err().unwrap());
        }
    }

    // Now take the read lock and use a few exposed methods to check consistency
    {
        let read_pool = pool.read().unwrap();
        assert_eq!(read_pool.total_size(), 2);
        expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 1, 2);
        expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 5, 8);
        expect_output_parent!(read_pool, Parent::Unknown, 11, 3, 20);
    }
}
#[test]
/// Attempt to add a bad multi kernel transaction to the mempool should get
/// rejected
fn test_bad_multikernel_pool_add() { fn test_bad_multikernel_pool_add() {
let mut dummy_chain = DummyChainImpl::new(); let mut dummy_chain = DummyChainImpl::new();
let head_header = block::BlockHeader { let head_header = block::BlockHeader {
@ -1740,6 +1895,34 @@ mod tests {
build::transaction(tx_elements, &keychain).unwrap() build::transaction(tx_elements, &keychain).unwrap()
} }
/// Build a test transaction (with a kernel offset) spending `input_values`
/// into `output_values`; the difference becomes the fee.
fn test_transaction_with_offset(
    input_values: Vec<u64>,
    output_values: Vec<u64>,
) -> transaction::Transaction {
    let keychain = keychain_for_tests();

    // fee is whatever is left after funding the outputs; must be non-negative
    let total_in = input_values.iter().sum::<u64>() as i64;
    let total_out = output_values.iter().sum::<u64>() as i64;
    let fees: i64 = total_in - total_out;
    assert!(fees >= 0);

    let mut tx_elements = Vec::new();

    for input_value in input_values {
        let key_id = keychain.derive_key_id(input_value as u32).unwrap();
        tx_elements.push(build::input(input_value, key_id));
    }
    for output_value in output_values {
        let key_id = keychain.derive_key_id(output_value as u32).unwrap();
        tx_elements.push(build::output(output_value, key_id));
    }
    tx_elements.push(build::with_fee(fees as u64));

    build::transaction_with_offset(tx_elements, &keychain).unwrap()
}
fn test_transaction_with_coinbase_input( fn test_transaction_with_coinbase_input(
input_value: u64, input_value: u64,
input_block_hash: Hash, input_block_hash: Hash,

View file

@ -141,6 +141,8 @@ pub enum PoolError {
/// The spent output /// The spent output
spent_output: Commitment, spent_output: Commitment,
}, },
/// A failed deaggregation error
FailedDeaggregation,
/// Attempt to add a transaction to the pool with lock_height /// Attempt to add a transaction to the pool with lock_height
/// greater than height of current block /// greater than height of current block
ImmatureTransaction { ImmatureTransaction {

View file

@ -78,12 +78,27 @@ impl p2p::ChainAdapter for NetToChainAdapter {
); );
let h = tx.hash(); let h = tx.hash();
if let Err(e) = self.tx_pool
.write() if !stem && tx.kernels.len() != 1 {
.unwrap() debug!(
.add_to_memory_pool(source, tx, stem) LOGGER,
{ "Received regular multi-kernel transaction will attempt to deaggregate"
debug!(LOGGER, "Transaction {} rejected: {:?}", h, e); );
if let Err(e) = self.tx_pool
.write()
.unwrap()
.deaggregate_and_add_to_memory_pool(source, tx, stem)
{
debug!(LOGGER, "Transaction {} rejected: {:?}", h, e);
}
} else {
if let Err(e) = self.tx_pool
.write()
.unwrap()
.add_to_memory_pool(source, tx, stem)
{
debug!(LOGGER, "Transaction {} rejected: {:?}", h, e);
}
} }
} }