// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Transaction pool implementation.
//! Used for both the txpool and stempool layers.

use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};

use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::id::{ShortId, ShortIdentifiable};
use core::core::transaction;
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, Transaction, TxKernel};
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
use util::LOGGER;

// Max mineable tx weight, leaving minimum space for a coinbase (one output and one kernel).
const MAX_MINEABLE_WEIGHT: usize =
	consensus::MAX_BLOCK_WEIGHT - consensus::BLOCK_OUTPUT_WEIGHT - consensus::BLOCK_KERNEL_WEIGHT;

// Longest chain of dependent transactions that can be included in a block.
const MAX_TX_CHAIN: usize = 20;

pub struct Pool {
	/// Entries in the pool (tx + info + timer) in simple insertion order.
	pub entries: Vec<PoolEntry>,
	/// The blockchain
	pub blockchain: Arc<BlockChain>,
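	/// Cache of verification results, used to avoid re-verifying tx data unnecessarily.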
	pub verifier_cache: Arc<RwLock<VerifierCache>>,
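	/// The name of this pool (e.g. "txpool" or "stempool"), used in log output.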
	pub name: String,
}

impl Pool {
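	/// Creates a new, empty pool backed by the given blockchain and verifier cache.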
	pub fn new(
		chain: Arc<BlockChain>,
		verifier_cache: Arc<RwLock<VerifierCache>>,
		name: String,
	) -> Pool {
		Pool {
			entries: vec![],
			blockchain: chain.clone(),
			verifier_cache: verifier_cache.clone(),
			name,
		}
	}

	/// Does the transaction pool contain an entry for the given transaction?
	pub fn contains_tx(&self, hash: Hash) -> bool {
		self.entries.iter().any(|x| x.tx.hash() == hash)
	}
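
	/// Returns the transaction in the pool matching the provided hash, if it exists.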
	pub fn get_tx(&self, hash: Hash) -> Option<Transaction> {
		self.entries
			.iter()
			.find(|x| x.tx.hash() == hash)
			.map(|x| x.tx.clone())
	}

	/// Query the tx pool for all known txs based on kernel short_ids
	/// from the provided compact_block.
	/// Note: does not validate that we return the full set of required txs.
	/// The caller will need to validate that themselves.
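	///
	/// A minimal usage sketch (not compiled; the `cb` accessors for the block
	/// hash, nonce and kernel short_ids are assumptions, not part of this module):
	///
	/// ```ignore
	/// // Given a compact block `cb` received from a peer:
	/// let (txs, missing) = pool.retrieve_transactions(cb.hash(), cb.nonce, &cb.kern_ids());
	/// // `txs` can be used towards reconstructing the full block,
	/// // while `missing` short_ids still need to be requested from the peer.
	/// ```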
	pub fn retrieve_transactions(
		&self,
		hash: Hash,
		nonce: u64,
		kern_ids: &Vec<ShortId>,
	) -> (Vec<Transaction>, Vec<ShortId>) {
		let mut rehashed = HashMap::new();

		// Rehash all entries in the pool using short_ids based on the provided hash and nonce.
		for x in &self.entries {
			for k in x.tx.kernels() {
				// rehash each kernel to calculate the block-specific short_id
				let short_id = k.short_id(&hash, nonce);
				rehashed.insert(short_id, x.tx.hash());
			}
		}

		// Retrieve the txs from the pool by the set of unique hashes.
		let hashes: HashSet<_> = rehashed.values().collect();
		let txs = hashes.into_iter().filter_map(|x| self.get_tx(*x)).collect();

		// Calculate the missing ids based on the ids passed in
		// and the ids that successfully matched txs.
		let matched_ids: HashSet<_> = rehashed.keys().collect();
		let all_ids: HashSet<_> = kern_ids.iter().collect();
		let missing_ids = all_ids
			.difference(&matched_ids)
			.map(|x| *x)
			.cloned()
			.collect();

		(txs, missing_ids)
	}

	/// Take pool transactions, filtering and ordering them in a way that's
	/// appropriate to put in a mined block. Aggregates chains of dependent
	/// transactions, orders by fee over weight and ensures the total weight
	/// doesn't exceed block limits.
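	///
	/// A minimal sketch of how a miner might use this when building a block
	/// template (not compiled; `pool` is an assumed handle to this Pool):
	///
	/// ```ignore
	/// // Pull a weight-limited, fee-ordered set of txs to include in the block:
	/// let txs = pool.prepare_mineable_transactions();
	/// ```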
	pub fn prepare_mineable_transactions(&self) -> Vec<Transaction> {
		let header = self.blockchain.chain_head().unwrap();

		let tx_buckets = self.bucket_transactions();

		// flatten buckets using aggregate (with cut-through)
		let mut flat_txs: Vec<Transaction> = tx_buckets
			.into_iter()
			.filter_map(|mut bucket| {
				bucket.truncate(MAX_TX_CHAIN);
				transaction::aggregate(bucket, self.verifier_cache.clone()).ok()
			}).collect();

		// sort by fee over weight, multiplying by 1000 to keep some precision
		// (we don't expect to ever see a fee > max_u64/1000)
		flat_txs.sort_unstable_by_key(|tx| tx.fee() * 1000 / tx.tx_weight() as u64);

		// accumulate as long as we're not above the block weight limit
		let mut weight = 0;
		flat_txs.retain(|tx| {
			weight += tx.tx_weight_as_block() as usize;
			weight < MAX_MINEABLE_WEIGHT
		});

		// make sure those txs are all valid together, no error is expected
		// when passing None
		self.blockchain
			.validate_raw_txs(flat_txs, None, &header.hash())
			.expect("should never happen")
	}
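
	/// Returns all transactions currently in the pool, in insertion order.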
	pub fn all_transactions(&self) -> Vec<Transaction> {
		self.entries.iter().map(|x| x.tx.clone()).collect()
	}
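
	/// Aggregates all transactions in the pool into a single aggregate transaction.
	/// Returns `None` if the pool is empty.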
	pub fn aggregate_transaction(&self) -> Result<Option<Transaction>, PoolError> {
		let txs = self.all_transactions();
		if txs.is_empty() {
			return Ok(None);
		}

		let tx = transaction::aggregate(txs, self.verifier_cache.clone())?;
		Ok(Some(tx))
	}
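
	/// Selects all pool entries currently in `from_state`, validates their txs
	/// against the chain state (optionally combined with `extra_tx`), transitions
	/// the entries whose txs validated to `to_state` and returns those valid txs.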
	pub fn select_valid_transactions(
		&mut self,
		from_state: PoolEntryState,
		to_state: PoolEntryState,
		extra_tx: Option<Transaction>,
		block_hash: &Hash,
	) -> Result<Vec<Transaction>, PoolError> {
		let entries = &mut self
			.entries
			.iter_mut()
			.filter(|x| x.state == from_state)
			.collect::<Vec<_>>();

		let candidate_txs: Vec<Transaction> = entries.iter().map(|x| x.tx.clone()).collect();
		if candidate_txs.is_empty() {
			return Ok(vec![]);
		}
		let valid_txs = self
			.blockchain
			.validate_raw_txs(candidate_txs, extra_tx, block_hash)?;

		// Update state on all entries included in final vec of valid txs.
		for x in &mut entries.iter_mut() {
			if valid_txs.contains(&x.tx) {
				x.state = to_state.clone();
			}
		}

		Ok(valid_txs)
	}

	/// Aggregate this new tx with all existing txs in the pool.
	/// If we can validate the aggregated tx against the current chain state
	/// then we can safely add the tx to the pool.
	pub fn add_to_pool(
		&mut self,
		entry: PoolEntry,
		extra_txs: Vec<Transaction>,
		block_hash: &Hash,
	) -> Result<(), PoolError> {
		debug!(
			LOGGER,
			"pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})",
			self.name,
			entry.tx.hash(),
			entry.src,
			entry.tx.inputs().len(),
			entry.tx.outputs().len(),
			entry.tx.kernels().len(),
			block_hash,
		);

		// Combine all the txs from the pool with any extra txs provided.
		let mut txs = self.all_transactions();

		// Quick check to see if we have seen this tx before.
		if txs.contains(&entry.tx) {
			return Err(PoolError::DuplicateTx);
		}

		txs.extend(extra_txs);

		let agg_tx = if txs.is_empty() {
			// If we have nothing to aggregate then simply return the tx itself.
			entry.tx.clone()
		} else {
			// Create a single aggregated tx from the existing pool txs and the
			// new entry.
			txs.push(entry.tx.clone());
			transaction::aggregate(txs, self.verifier_cache.clone())?
		};

		// Validate the aggregated tx against a known chain state (via txhashset
		// extension).
		self.blockchain
			.validate_raw_txs(vec![], Some(agg_tx), block_hash)?;

		// If we get here successfully then we can safely add the entry to the pool.
		self.entries.push(entry);

		Ok(())
	}
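
	/// Reconciles the pool against the provided chain state: every pool tx is
	/// re-validated (optionally together with `extra_tx`) and any tx that no
	/// longer validates is dropped from the pool.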
	pub fn reconcile(
		&mut self,
		extra_tx: Option<Transaction>,
		block_hash: &Hash,
	) -> Result<(), PoolError> {
		let candidate_txs = self.all_transactions();
		let existing_len = candidate_txs.len();

		if candidate_txs.is_empty() {
			return Ok(());
		}

		// Go through the candidate txs and keep everything that validates incrementally
		// against a known chain state, accounting for the "extra tx" as necessary.
		let valid_txs = self
			.blockchain
			.validate_raw_txs(candidate_txs, extra_tx, block_hash)?;
		self.entries.retain(|x| valid_txs.contains(&x.tx));

		debug!(
			LOGGER,
			"pool [{}]: reconcile: existing txs {}, retained txs {}",
			self.name,
			existing_len,
			self.entries.len(),
		);

		Ok(())
	}

	// Group dependent transactions into buckets (vectors); each bucket is
	// therefore independent of the others. Relies on the entries Vec having
	// parent transactions first (which should always be the case).
	fn bucket_transactions(&self) -> Vec<Vec<Transaction>> {
		let mut tx_buckets = vec![];
		let mut output_commits = HashMap::new();

		for entry in &self.entries {
			// check the commits index to find parents and their position,
			// picking the last one for the bucket (so all parents come first)
			let mut insert_pos: i32 = -1;
			for input in entry.tx.inputs() {
				if let Some(pos) = output_commits.get(&input.commitment()) {
					if *pos > insert_pos {
						insert_pos = *pos;
					}
				}
			}
			if insert_pos == -1 {
				// no parent, just add to the end in its own bucket
				insert_pos = tx_buckets.len() as i32;
				tx_buckets.push(vec![entry.tx.clone()]);
			} else {
				// parent found, add to its bucket
				tx_buckets[insert_pos as usize].push(entry.tx.clone());
			}

			// update the commits index
			for out in entry.tx.outputs() {
				output_commits.insert(out.commitment(), insert_pos);
			}
		}
		tx_buckets
	}

	// Filter txs in the pool based on the latest block.
	// Reject any txs where we see a matching tx kernel in the block.
	// Also reject any txs that conflict with the block, i.e. where an input
	// has already been spent by a different tx in the block.
	fn remaining_transactions(&self, block: &Block) -> Vec<Transaction> {
		self.entries
			.iter()
			.filter(|x| !x.tx.kernels().iter().any(|y| block.kernels().contains(y)))
			.filter(|x| !x.tx.inputs().iter().any(|y| block.inputs().contains(y)))
			.map(|x| x.tx.clone())
			.collect()
	}
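
	/// Returns the txs in the pool whose kernels are all contained in the
	/// provided set of kernels. Used to identify the individual txs that made
	/// up a previously aggregated transaction.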
	pub fn find_matching_transactions(&self, kernels: Vec<TxKernel>) -> Vec<Transaction> {
		// While the inputs/outputs may have been cut-through, the kernels stay intact,
		// so in order to de-aggregate a tx we look for txs with the same kernels.
		let mut found_txs = vec![];

		// Gather all the kernels of the multi-kernel transaction in one set.
		let kernel_set = kernels.into_iter().collect::<HashSet<_>>();

		// Check each transaction in the pool.
		for entry in &self.entries {
			let entry_kernel_set = entry.tx.kernels().iter().cloned().collect::<HashSet<_>>();
			if entry_kernel_set.is_subset(&kernel_set) {
				found_txs.push(entry.tx.clone());
			}
		}
		found_txs
	}

	/// Quick reconciliation step - we can evict any txs in the pool where
	/// inputs or kernels intersect with the block.
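	///
	/// A minimal sketch (not compiled; `pool` and `b` are assumed bindings):
	///
	/// ```ignore
	/// // After accepting a new block `b` into the chain:
	/// pool.reconcile_block(&b)?;
	/// ```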
	pub fn reconcile_block(&mut self, block: &Block) -> Result<(), PoolError> {
		let candidate_txs = self.remaining_transactions(block);
		self.entries.retain(|x| candidate_txs.contains(&x.tx));
		Ok(())
	}
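
	/// Number of transaction entries currently in the pool.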
	pub fn size(&self) -> usize {
		self.entries.len()
	}
}