2018-03-05 22:33:44 +03:00
|
|
|
// Copyright 2018 The Grin Developers
|
2017-05-19 18:22:08 +03:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
//! Transaction pool implementation.
|
|
|
|
//! Used for both the txpool and stempool layers in the pool.
|
2017-05-19 18:22:08 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
use std::collections::HashSet;
|
2018-03-20 06:18:54 +03:00
|
|
|
use std::sync::Arc;
|
2017-05-19 18:22:08 +03:00
|
|
|
|
2018-02-16 18:42:27 +03:00
|
|
|
use core::core::hash::Hashed;
|
|
|
|
use core::core::id::ShortIdentifiable;
|
2018-02-10 01:32:16 +03:00
|
|
|
use core::core::transaction;
|
2018-05-30 23:57:13 +03:00
|
|
|
use core::core::{Block, CompactBlock, Transaction, TxKernel};
|
2018-06-14 15:16:14 +03:00
|
|
|
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
|
2018-05-30 23:57:13 +03:00
|
|
|
use util::LOGGER;
|
2017-05-19 18:22:08 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
/// A generic transaction pool keyed off a blockchain implementation `T`.
/// The same type backs both the txpool and stempool layers.
pub struct Pool<T> {
	/// Entries in the pool (tx + info + timer) in simple insertion order.
	pub entries: Vec<PoolEntry>,
	/// The blockchain
	pub blockchain: Arc<T>,
	/// Label for this pool instance, used to tag log output
	/// (presumably "txpool" vs "stempool" — TODO confirm against callers).
	pub name: String,
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
impl<T> Pool<T>
|
2017-09-29 21:44:25 +03:00
|
|
|
where
|
|
|
|
T: BlockChain,
|
|
|
|
{
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn new(chain: Arc<T>, name: String) -> Pool<T> {
|
|
|
|
Pool {
|
|
|
|
entries: vec![],
|
|
|
|
blockchain: chain.clone(),
|
|
|
|
name,
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-16 18:42:27 +03:00
|
|
|
/// Query the tx pool for all known txs based on kernel short_ids
|
|
|
|
/// from the provided compact_block.
|
|
|
|
/// Note: does not validate that we return the full set of required txs.
|
|
|
|
/// The caller will need to validate that themselves.
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn retrieve_transactions(&self, cb: &CompactBlock) -> Vec<Transaction> {
|
2018-02-16 18:42:27 +03:00
|
|
|
let mut txs = vec![];
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
for x in &self.entries {
|
|
|
|
for kernel in &x.tx.kernels {
|
2018-02-16 18:42:27 +03:00
|
|
|
// rehash each kernel to calculate the block specific short_id
|
2018-03-01 21:25:33 +03:00
|
|
|
let short_id = kernel.short_id(&cb.hash(), cb.nonce);
|
2018-02-16 18:42:27 +03:00
|
|
|
|
|
|
|
// if any kernel matches then keep the tx for later
|
|
|
|
if cb.kern_ids.contains(&short_id) {
|
2018-05-30 23:57:13 +03:00
|
|
|
txs.push(x.tx.clone());
|
2018-02-16 18:42:27 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
txs
|
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
/// Take the first num_to_fetch txs based on insertion order.
|
|
|
|
pub fn prepare_mineable_transactions(&self, num_to_fetch: u32) -> Vec<Transaction> {
|
|
|
|
self.entries
|
|
|
|
.iter()
|
|
|
|
.take(num_to_fetch as usize)
|
|
|
|
.map(|x| x.tx.clone())
|
|
|
|
.collect()
|
2018-03-20 06:18:54 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn all_transactions(&self) -> Vec<Transaction> {
|
|
|
|
self.entries.iter().map(|x| x.tx.clone()).collect()
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn aggregate_transaction(&self) -> Result<Option<Transaction>, PoolError> {
|
|
|
|
let txs = self.all_transactions();
|
|
|
|
if txs.is_empty() {
|
|
|
|
return Ok(None);
|
|
|
|
}
|
2017-09-29 21:44:25 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
let tx = transaction::aggregate(txs)?;
|
|
|
|
Ok(Some(tx))
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn select_valid_transactions(
|
2017-09-29 21:44:25 +03:00
|
|
|
&mut self,
|
2018-05-30 23:57:13 +03:00
|
|
|
from_state: PoolEntryState,
|
|
|
|
to_state: PoolEntryState,
|
|
|
|
extra_tx: Option<Transaction>,
|
|
|
|
) -> Result<Vec<Transaction>, PoolError> {
|
|
|
|
let entries = &mut self.entries
|
|
|
|
.iter_mut()
|
|
|
|
.filter(|x| x.state == from_state)
|
|
|
|
.collect::<Vec<_>>();
|
2017-09-29 21:44:25 +03:00
|
|
|
|
2018-06-14 20:37:21 +03:00
|
|
|
let candidate_txs: Vec<Transaction> = entries.iter().map(|x| x.tx.clone()).collect();
|
|
|
|
if candidate_txs.is_empty() {
|
|
|
|
return Ok(vec![]);
|
|
|
|
}
|
2018-05-30 23:57:13 +03:00
|
|
|
let valid_txs = self.blockchain.validate_raw_txs(candidate_txs, extra_tx)?;
|
2017-10-11 21:12:01 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Update state on all entries included in final vec of valid txs.
|
|
|
|
for x in &mut entries.iter_mut() {
|
|
|
|
if valid_txs.contains(&x.tx) {
|
|
|
|
x.state = to_state.clone();
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
Ok(valid_txs)
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Aggregate this new tx with all existing txs in the pool.
|
|
|
|
// If we can validate the aggregated tx against the current chain state
|
|
|
|
// then we can safely add the tx to the pool.
|
|
|
|
pub fn add_to_pool(
|
2018-04-24 22:47:13 +03:00
|
|
|
&mut self,
|
2018-05-30 23:57:13 +03:00
|
|
|
entry: PoolEntry,
|
|
|
|
extra_txs: Vec<Transaction>,
|
2018-04-24 22:47:13 +03:00
|
|
|
) -> Result<(), PoolError> {
|
2018-05-30 23:57:13 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
2018-05-31 18:58:59 +03:00
|
|
|
"pool [{}]: add_to_pool: {}, {:?}",
|
2018-05-30 23:57:13 +03:00
|
|
|
self.name,
|
|
|
|
entry.tx.hash(),
|
|
|
|
entry.src,
|
|
|
|
);
|
2018-04-24 22:47:13 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Combine all the txs from the pool with any extra txs provided.
|
|
|
|
let mut txs = self.all_transactions();
|
|
|
|
txs.extend(extra_txs);
|
2018-04-24 22:47:13 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
let agg_tx = if txs.is_empty() {
|
|
|
|
// If we have nothing to aggregate then simply return the tx itself.
|
|
|
|
entry.tx.clone()
|
2018-04-24 22:47:13 +03:00
|
|
|
} else {
|
2018-05-30 23:57:13 +03:00
|
|
|
// Create a single aggregated tx from the existing pool txs (to check pool is
|
|
|
|
// valid).
|
|
|
|
let agg_tx = transaction::aggregate(txs)?;
|
|
|
|
|
|
|
|
// Then check new tx would not introduce a duplicate output in the pool.
|
|
|
|
for x in &entry.tx.outputs {
|
|
|
|
if agg_tx.outputs.contains(&x) {
|
|
|
|
return Err(PoolError::DuplicateCommitment);
|
|
|
|
}
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Finally aggregate the new tx with everything in the pool (with any extra
|
|
|
|
// txs).
|
|
|
|
transaction::aggregate(vec![agg_tx, entry.tx.clone()])?
|
2018-03-20 06:18:54 +03:00
|
|
|
};
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Validate aggregated tx against the current chain state (via txhashset
|
|
|
|
// extension).
|
|
|
|
self.blockchain.validate_raw_txs(vec![], Some(agg_tx))?;
|
2017-09-29 21:44:25 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// If we get here successfully then we can safely add the entry to the pool.
|
|
|
|
self.entries.push(entry);
|
2017-09-29 21:44:25 +03:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn reconcile(&mut self, extra_tx: Option<Transaction>) -> Result<(), PoolError> {
|
|
|
|
let candidate_txs = self.all_transactions();
|
|
|
|
let existing_len = candidate_txs.len();
|
2017-09-29 21:44:25 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
if candidate_txs.is_empty() {
|
|
|
|
return Ok(());
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Go through the candidate txs and keep everything that validates incrementally
|
|
|
|
// against the current chain state, accounting for the "extra tx" as necessary.
|
|
|
|
let valid_txs = self.blockchain.validate_raw_txs(candidate_txs, extra_tx)?;
|
|
|
|
self.entries.retain(|x| valid_txs.contains(&x.tx));
|
2018-03-20 06:18:54 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"pool [{}]: reconcile: existing txs {}, retained txs {}",
|
|
|
|
self.name,
|
|
|
|
existing_len,
|
|
|
|
self.entries.len(),
|
|
|
|
);
|
2017-10-18 23:42:51 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
Ok(())
|
2017-09-29 21:44:25 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Filter txs in the pool based on the latest block.
|
|
|
|
// Reject any txs where we see a matching tx kernel in the block.
|
|
|
|
// Also reject any txs where we see a conflicting tx,
|
|
|
|
// where an input is spent in a different tx.
|
|
|
|
fn remaining_transactions(&self, block: &Block) -> Vec<Transaction> {
|
|
|
|
self.entries
|
2017-09-29 21:44:25 +03:00
|
|
|
.iter()
|
2018-05-30 23:57:13 +03:00
|
|
|
.filter(|x| !x.tx.kernels.iter().any(|y| block.kernels.contains(y)))
|
|
|
|
.filter(|x| !x.tx.inputs.iter().any(|y| block.inputs.contains(y)))
|
|
|
|
.map(|x| x.tx.clone())
|
2017-09-29 21:44:25 +03:00
|
|
|
.collect()
|
|
|
|
}
|
2017-10-07 21:24:11 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn find_matching_transactions(&self, kernels: Vec<TxKernel>) -> Vec<Transaction> {
|
|
|
|
// While the inputs outputs can be cut-through the kernel will stay intact
|
|
|
|
// In order to deaggregate tx we look for tx with the same kernel
|
|
|
|
let mut found_txs = vec![];
|
2018-03-20 06:18:54 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Gather all the kernels of the multi-kernel transaction in one set
|
|
|
|
let kernel_set = kernels.into_iter().collect::<HashSet<_>>();
|
2017-10-10 20:30:34 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
// Check each transaction in the pool
|
|
|
|
for entry in &self.entries {
|
|
|
|
let entry_kernel_set = entry.tx.kernels.iter().cloned().collect::<HashSet<_>>();
|
|
|
|
if entry_kernel_set.is_subset(&kernel_set) {
|
|
|
|
found_txs.push(entry.tx.clone());
|
2017-10-07 21:24:11 +03:00
|
|
|
}
|
|
|
|
}
|
2018-05-30 23:57:13 +03:00
|
|
|
found_txs
|
2017-10-07 21:24:11 +03:00
|
|
|
}
|
2018-03-20 06:18:54 +03:00
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
/// Quick reconciliation step - we can evict any txs in the pool where
|
|
|
|
/// inputs or kernels intersect with the block.
|
|
|
|
pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
|
|
|
|
let candidate_txs = self.remaining_transactions(block);
|
|
|
|
self.entries.retain(|x| candidate_txs.contains(&x.tx));
|
2018-03-20 06:18:54 +03:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
pub fn size(&self) -> usize {
|
|
|
|
self.entries.len()
|
2018-03-20 06:18:54 +03:00
|
|
|
}
|
2017-05-19 18:22:08 +03:00
|
|
|
}
|