// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Transaction pool implementation.
//! Used for both the txpool and stempool layers in the pool.

use self::core::core::hash::{Hash, Hashed};
use self::core::core::id::{ShortId, ShortIdentifiable};
use self::core::core::transaction;
use self::core::core::verifier_cache::VerifierCache;
use self::core::core::{
	Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel, Weighting,
};
use self::util::RwLock;
use crate::types::{BlockChain, PoolEntry, PoolError};
use grin_core as core;
use grin_util as util;
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

pub struct Pool {
	/// Entries in the pool (tx + info + timer) in simple insertion order.
	pub entries: Vec<PoolEntry>,
	/// The blockchain
	pub blockchain: Arc<dyn BlockChain>,
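	/// Shared verifier cache, used to avoid repeating expensive verification work.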
	pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
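	/// Pool name, used in logging (e.g. "txpool", "stempool").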
	pub name: String,
}

impl Pool {
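	/// Create a new transaction pool for the given chain and verifier cache.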
	pub fn new(
		chain: Arc<dyn BlockChain>,
		verifier_cache: Arc<RwLock<dyn VerifierCache>>,
		name: String,
	) -> Pool {
		Pool {
			entries: vec![],
			blockchain: chain,
			verifier_cache,
			name,
		}
	}

	/// Does the transaction pool contain an entry for the given transaction?
	pub fn contains_tx(&self, hash: Hash) -> bool {
		self.entries.iter().any(|x| x.tx.hash() == hash)
	}

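	/// Query the pool for an individual tx matching the given tx hash.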
	pub fn get_tx(&self, hash: Hash) -> Option<Transaction> {
		self.entries
			.iter()
			.find(|x| x.tx.hash() == hash)
			.map(|x| x.tx.clone())
	}

	/// Query the tx pool for an individual tx matching the given kernel hash.
	pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Option<Transaction> {
		for x in &self.entries {
			for k in x.tx.kernels() {
				if k.hash() == hash {
					return Some(x.tx.clone());
				}
			}
		}
		None
	}

	/// Query the tx pool for all known txs based on kernel short_ids
	/// from the provided compact_block.
	/// Note: does not validate that we return the full set of required txs.
	/// The caller will need to validate that themselves.
	pub fn retrieve_transactions(
		&self,
		hash: Hash,
		nonce: u64,
		kern_ids: &[ShortId],
	) -> (Vec<Transaction>, Vec<ShortId>) {
		let mut txs = vec![];
		let mut found_ids = vec![];

		// Rehash all entries in the pool using short_ids based on provided hash and nonce.
		'outer: for x in &self.entries {
			for k in x.tx.kernels() {
				// rehash each kernel to calculate the block specific short_id
				let short_id = k.short_id(&hash, nonce);
				if kern_ids.contains(&short_id) {
					txs.push(x.tx.clone());
					found_ids.push(short_id);
				}
				if found_ids.len() == kern_ids.len() {
					break 'outer;
				}
			}
		}
		txs.dedup();
		(
			txs,
			kern_ids
				.iter()
				.filter(|id| !found_ids.contains(id))
				.cloned()
				.collect(),
		)
	}

	/// Take pool transactions, filtering and ordering them in a way that's
	/// appropriate to put in a mined block. Aggregates chains of dependent
	/// transactions, orders by fee over weight and ensures the total weight
	/// does not exceed the provided max_weight (miner defined block weight).
	pub fn prepare_mineable_transactions(
		&self,
		max_weight: usize,
	) -> Result<Vec<Transaction>, PoolError> {
		let weighting = Weighting::AsLimitedTransaction(max_weight);

		// Sort the txs in the pool via the "bucket" logic to -
		// * maintain dependency ordering
		// * maximize cut-through
		// * maximize overall fees
		let txs = self.bucket_transactions(weighting);

		// Iteratively apply the txs to the current chain state,
		// rejecting any that do not result in a valid state.
		// Verify these txs produce an aggregated tx below max_weight.
		// Return a vec of all the valid txs.
		let header = self.blockchain.chain_head()?;
		let valid_txs = self.validate_raw_txs(&txs, None, &header, weighting)?;
		Ok(valid_txs)
	}

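	/// Returns a copy of all transactions currently in the pool.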
	pub fn all_transactions(&self) -> Vec<Transaction> {
		self.entries.iter().map(|x| x.tx.clone()).collect()
	}

	/// Return a single aggregate tx representing all txs in the txpool.
	/// Returns None if the txpool is empty.
	pub fn all_transactions_aggregate(&self) -> Result<Option<Transaction>, PoolError> {
		let txs = self.all_transactions();
		if txs.is_empty() {
			return Ok(None);
		}

		let tx = transaction::aggregate(txs)?;

		// Validate the single aggregate transaction "as pool", not subject to tx weight limits.
		tx.validate(Weighting::NoLimit, self.verifier_cache.clone())?;

		Ok(Some(tx))
	}

	// Aggregate this new tx with all existing txs in the pool.
	// If we can validate the aggregated tx against the current chain state
	// then we can safely add the tx to the pool.
	pub fn add_to_pool(
		&mut self,
		entry: PoolEntry,
		extra_txs: Vec<Transaction>,
		header: &BlockHeader,
	) -> Result<(), PoolError> {
		// Combine all the txs from the pool with any extra txs provided.
		let mut txs = self.all_transactions();

		// Quick check to see if we have seen this tx before.
		if txs.contains(&entry.tx) {
			return Err(PoolError::DuplicateTx);
		}

		txs.extend(extra_txs);

		let agg_tx = if txs.is_empty() {
			// If we have nothing to aggregate then simply return the tx itself.
			entry.tx.clone()
		} else {
			// Create a single aggregated tx from the existing pool txs and the
			// new entry
			txs.push(entry.tx.clone());
			transaction::aggregate(txs)?
		};

		// Validate aggregated tx (existing pool + new tx), ignoring tx weight limits.
		// Validate against known chain state at the provided header.
		self.validate_raw_tx(&agg_tx, header, Weighting::NoLimit)?;

		// If we get here successfully then we can safely add the entry to the pool.
		self.log_pool_add(&entry, header);
		self.entries.push(entry);

		Ok(())
	}

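	/// Log (at debug level) an entry being added to the pool.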
	fn log_pool_add(&self, entry: &PoolEntry, header: &BlockHeader) {
		debug!(
			"add_to_pool [{}]: {} ({:?}) [in/out/kern: {}/{}/{}] pool: {} (at block {})",
			self.name,
			entry.tx.hash(),
			entry.src,
			entry.tx.inputs().len(),
			entry.tx.outputs().len(),
			entry.tx.kernels().len(),
			self.size(),
			header.hash(),
		);
	}

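	/// Validate a single (possibly aggregated) tx against the chain state
	/// at the given header, returning the resulting block sums if valid.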
	fn validate_raw_tx(
		&self,
		tx: &Transaction,
		header: &BlockHeader,
		weighting: Weighting,
	) -> Result<BlockSums, PoolError> {
		// Validate the tx, conditionally checking against weight limits,
		// based on weight verification type.
		tx.validate(weighting, self.verifier_cache.clone())?;

		// Validate the tx against current chain state.
		// Check all inputs are in the current UTXO set.
		// Check all outputs are unique in current UTXO set.
		self.blockchain.validate_tx(tx)?;

		let new_sums = self.apply_tx_to_block_sums(tx, header)?;
		Ok(new_sums)
	}

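	/// Validate a set of txs against the chain state at the given header.
	/// Validation is cumulative: each candidate tx is aggregated with the
	/// optional extra_tx and all previously accepted txs, and is only kept
	/// if the resulting aggregate tx validates successfully.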
	pub fn validate_raw_txs(
		&self,
		txs: &[Transaction],
		extra_tx: Option<Transaction>,
		header: &BlockHeader,
		weighting: Weighting,
	) -> Result<Vec<Transaction>, PoolError> {
		let mut valid_txs = vec![];

		for tx in txs {
			let mut candidate_txs = vec![];
			if let Some(extra_tx) = extra_tx.clone() {
				candidate_txs.push(extra_tx);
			};
			candidate_txs.extend(valid_txs.clone());
			candidate_txs.push(tx.clone());

			// Build a single aggregate tx from candidate txs.
			let agg_tx = transaction::aggregate(candidate_txs)?;

			// We know the tx is valid if the entire aggregate tx is valid.
			if self.validate_raw_tx(&agg_tx, header, weighting).is_ok() {
				valid_txs.push(tx.clone());
			}
		}

		Ok(valid_txs)
	}

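	/// Apply the tx to the block sums at the given header, verifying the
	/// kernel sums still balance (accounting for overage and kernel offset)
	/// and returning the updated block sums.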
	fn apply_tx_to_block_sums(
		&self,
		tx: &Transaction,
		header: &BlockHeader,
	) -> Result<BlockSums, PoolError> {
		let overage = tx.overage();
		let offset = (header.total_kernel_offset() + tx.offset.clone())?;

		let block_sums = self.blockchain.get_block_sums(&header.hash())?;

		// Verify the kernel sums for the block_sums with the new tx applied,
		// accounting for overage and offset.
		let (utxo_sum, kernel_sum) =
			(block_sums, tx as &dyn Committed).verify_kernel_sums(overage, offset)?;

		Ok(BlockSums {
			utxo_sum,
			kernel_sum,
		})
	}

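	/// Reconcile the pool against updated chain state (and optional extra tx).
	/// Existing entries are cleared and re-added one by one via add_to_pool,
	/// silently dropping any that no longer validate.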
	pub fn reconcile(
		&mut self,
		extra_tx: Option<Transaction>,
		header: &BlockHeader,
	) -> Result<(), PoolError> {
		let existing_entries = self.entries.clone();
		self.entries.clear();

		let mut extra_txs = vec![];
		if let Some(extra_tx) = extra_tx {
			extra_txs.push(extra_tx);
		}

		for x in existing_entries {
			let _ = self.add_to_pool(x, extra_txs.clone(), header);
		}

		Ok(())
	}

	/// Buckets consist of a vec of txs and track the aggregate fee_to_weight.
	/// We aggregate (cut-through) dependent transactions within a bucket *unless* adding a tx
	/// would reduce the aggregate fee_to_weight, in which case we start a new bucket.
	/// Note this new bucket will by definition have a lower fee_to_weight than the bucket
	/// containing the tx it depends on.
	/// Sorting the buckets by fee_to_weight will therefore preserve dependency ordering,
	/// maximizing both cut-through and overall fees.
	pub fn bucket_transactions(&self, weighting: Weighting) -> Vec<Transaction> {
		let mut tx_buckets: Vec<Bucket> = Vec::new();
		let mut output_commits = HashMap::new();
		let mut rejected = HashSet::new();

		for entry in &self.entries {
			// check the commits index to find parents and their position
			// if single parent then we are good, we can bucket it with its parent
			// if multiple parents then we need to combine buckets, but for now simply reject it (rare case)
			let mut insert_pos = None;
			let mut is_rejected = false;

			for input in entry.tx.inputs() {
				if rejected.contains(&input.commitment()) {
					// Depends on a rejected tx, so reject this one.
					is_rejected = true;
					continue;
				} else if let Some(pos) = output_commits.get(&input.commitment()) {
					if insert_pos.is_some() {
						// Multiple dependencies so reject this tx (pick it up in next block).
						is_rejected = true;
						continue;
					} else {
						// Track the pos of the bucket we fall into.
						insert_pos = Some(*pos);
					}
				}
			}

			// If this tx is rejected then store all output commitments in our rejected set.
			if is_rejected {
				for out in entry.tx.outputs() {
					rejected.insert(out.commitment());
				}

				// Done with this entry (rejected), continue to next entry.
				continue;
			}

			match insert_pos {
				None => {
					// No parent tx, just add to the end in its own bucket.
					// This is the common case for non 0-conf txs in the txpool.
					// We assume the tx is valid here as we validated it on the way into the txpool.
					insert_pos = Some(tx_buckets.len());
					tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
				}
				Some(pos) => {
					// We found a single parent tx, so aggregate in the bucket
					// if the aggregate tx is a valid tx.
					// Otherwise discard and let the next block pick this tx up.
					let bucket = &tx_buckets[pos];

					if let Ok(new_bucket) = bucket.aggregate_with_tx(
						entry.tx.clone(),
						weighting,
						self.verifier_cache.clone(),
					) {
						if new_bucket.fee_to_weight >= bucket.fee_to_weight {
							// Only aggregate if it would not reduce the fee_to_weight ratio.
							tx_buckets[pos] = new_bucket;
						} else {
							// Otherwise put it in its own bucket at the end.
							// Note: This bucket will have a lower fee_to_weight
							// than the bucket it depends on.
							tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
						}
					} else {
						// Aggregation failed so discard this new tx.
						is_rejected = true;
					}
				}
			}

			if is_rejected {
				for out in entry.tx.outputs() {
					rejected.insert(out.commitment());
				}
			} else if let Some(insert_pos) = insert_pos {
				// We successfully added this tx to our set of buckets.
				// Update commits index for subsequent txs.
				for out in entry.tx.outputs() {
					output_commits.insert(out.commitment(), insert_pos);
				}
			}
		}

		// Sort buckets by fee_to_weight (descending) and age (oldest first).
		// Txs with the highest fee_to_weight will be prioritized.
		// Aggregation that increases the fee_to_weight of a bucket will prioritize the bucket.
		// Oldest (based on pool insertion time) will then be prioritized.
		tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_to_weight), x.age_idx));

		tx_buckets.into_iter().flat_map(|x| x.raw_txs).collect()
	}

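	/// Look up txs in the pool whose kernels are all contained in the provided
	/// kernel set, used when deaggregating a multi-kernel transaction.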
	pub fn find_matching_transactions(&self, kernels: &[TxKernel]) -> Vec<Transaction> {
		// While inputs and outputs may be cut-through during aggregation, the kernels stay intact.
		// So to deaggregate a tx we look for pool txs with the same kernels.
		let mut found_txs = vec![];

		// Gather all the kernels of the multi-kernel transaction in one set.
		let kernel_set = kernels.iter().collect::<HashSet<_>>();

		// Check each transaction in the pool.
		for entry in &self.entries {
			let entry_kernel_set = entry.tx.kernels().iter().collect::<HashSet<_>>();
			if entry_kernel_set.is_subset(&kernel_set) {
				found_txs.push(entry.tx.clone());
			}
		}
		found_txs
	}

	/// Quick reconciliation step - we can evict any txs in the pool where
	/// inputs or kernels intersect with the block.
	pub fn reconcile_block(&mut self, block: &Block) {
		// Filter txs in the pool based on the latest block.
		// Reject any txs where we see a matching tx kernel in the block.
		// Also reject any txs where we see a conflicting tx,
		// where an input is spent in a different tx.
		self.entries.retain(|x| {
			!x.tx.kernels().iter().any(|y| block.kernels().contains(y))
				&& !x.tx.inputs().iter().any(|y| block.inputs().contains(y))
		});
	}

	/// Size of the pool.
	pub fn size(&self) -> usize {
		self.entries.len()
	}

	/// Number of transaction kernels in the pool.
	/// This may differ from the size (number of transactions) due to tx aggregation.
	pub fn kernel_count(&self) -> usize {
		self.entries.iter().map(|x| x.tx.kernels().len()).sum()
	}

	/// Is the pool empty?
	pub fn is_empty(&self) -> bool {
		self.entries.is_empty()
	}
}

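/// A bucket of one or more (dependent) txs, tracking the aggregate fee_to_weight
/// and an age index used as a tie-breaker when sorting buckets.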
struct Bucket {
	raw_txs: Vec<Transaction>,
	fee_to_weight: u64,
	age_idx: usize,
}

impl Bucket {
	/// Construct a new bucket with the given tx.
	/// Also specifies an "age_idx" so we can sort buckets by age
	/// as well as fee_to_weight. Txs are maintained in the pool in insert order,
	/// so buckets with a low age_idx contain the oldest txs.
	fn new(tx: Transaction, age_idx: usize) -> Bucket {
		Bucket {
			fee_to_weight: tx.fee_to_weight(),
			raw_txs: vec![tx],
			age_idx,
		}
	}

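	/// Attempt to aggregate the new tx into this bucket, validating the
	/// resulting aggregate tx. Returns a new bucket with the updated
	/// raw_txs and fee_to_weight on success.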
	fn aggregate_with_tx(
		&self,
		new_tx: Transaction,
		weighting: Weighting,
		verifier_cache: Arc<RwLock<dyn VerifierCache>>,
	) -> Result<Bucket, PoolError> {
		let mut raw_txs = self.raw_txs.clone();
		raw_txs.push(new_tx);
		let agg_tx = transaction::aggregate(raw_txs.clone())?;
		agg_tx.validate(weighting, verifier_cache)?;
		Ok(Bucket {
			fee_to_weight: agg_tx.fee_to_weight(),
			raw_txs,
			age_idx: self.age_idx,
		})
	}
}