Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-21 03:21:08 +03:00)
update_roots on pool to keep it consistent after reconciliation (#182)
* failing test case that exercises prepare_mineable_transactions and reconcile_block, and highlights what appears to be unexpected behavior
* adjust the failing test - the failure state is where we have a tx in the pool *but* it is not in the list of roots in the pool
* zero confirmation txs are now working; introduce update_roots on the graph to ensure the pool is consistent after calling remove_pool_transaction
* move update_roots to sweep_transactions so we only call it once; rework update_roots to be more efficient
* use HashSet in reconcile_block to avoid marking txs multiple times
* use HashSet and not HashMap; return from mark_transaction early if already seen tx
This commit is contained in:
parent 6fc48dbae2
commit bab7bd7060

3 changed files with 146 additions and 19 deletions
pool/src/graph.rs

@@ -15,7 +15,7 @@
 //! Base types for the transaction pool's Directed Acyclic Graphs
 
 use std::vec::Vec;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 use secp::pedersen::Commitment;
 
@@ -28,6 +28,7 @@ use core::core::hash::Hashed;
 
 /// An entry in the transaction pool.
 /// These are the vertices of both of the graph structures
+#[derive(Debug, PartialEq, Clone)]
 pub struct PoolEntry {
     // Core data
     /// Unique identifier of this pool entry and the corresponding transaction
@@ -179,6 +180,32 @@ impl DirectedGraph {
         }
     }
 
+    /// Promote any non-root vertices to roots based on current edges.
+    /// For a given tx, if there are no edges with that tx as destination then it is a root.
+    pub fn update_roots(&mut self) {
+        let mut new_vertices: Vec<PoolEntry> = vec![];
+
+        // first find the set of all destinations from the edges in the graph
+        // a root is a vertex that is not a destination of any edge
+        let destinations = self.edges
+            .values()
+            .filter_map(|edge| edge.destination)
+            .collect::<HashSet<_>>();
+
+        // now iterate over the current non-root vertices
+        // and check if it is now a root based on the set of edge destinations
+        for x in &self.vertices {
+            if destinations.contains(&x.transaction_hash) {
+                new_vertices.push(x.clone());
+            } else {
+                self.roots.push(x.clone());
+            }
+        }
+
+        // now update our vertices to reflect the updated list
+        self.vertices = new_vertices;
+    }
+
     /// Adds a vertex and a set of incoming edges to the graph.
     ///
     /// The PoolEntry at vertex is added to the graph; depending on the
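The heart of the patch is `update_roots` above: collect every edge destination into a `HashSet`, then promote any vertex that no edge points at. Below is a minimal, self-contained sketch of that root-promotion idea; plain `u64` values stand in for transaction hashes and a simplified graph replaces the pool's `PoolEntry`/`Edge` types, so the names are illustrative rather than the crate's API.

```rust
use std::collections::{HashMap, HashSet};

/// Simplified stand-in for the pool's DAG: a vertex is just a tx hash,
/// and an edge maps an output commitment to the tx that spends it (if any).
struct MiniGraph {
    roots: Vec<u64>,                  // txs spending no in-pool outputs
    vertices: Vec<u64>,               // txs spending at least one in-pool output
    edges: HashMap<u64, Option<u64>>, // commitment -> spending tx
}

impl MiniGraph {
    /// Promote any non-root vertex that is no longer an edge destination.
    fn update_roots(&mut self) {
        // The set of txs some edge still points at.
        let destinations: HashSet<u64> =
            self.edges.values().filter_map(|d| *d).collect();

        let mut still_internal = Vec::new();
        for v in self.vertices.drain(..) {
            if destinations.contains(&v) {
                still_internal.push(v); // still spends an in-pool output
            } else {
                self.roots.push(v); // nothing points at it any more: a root
            }
        }
        self.vertices = still_internal;
    }
}

fn main() {
    // tx 2 spends an output (commitment 100) of tx 1, so tx 2 is not a root.
    let mut g = MiniGraph {
        roots: vec![],
        vertices: vec![2],
        edges: HashMap::from([(100, Some(2))]),
    };
    // Suppose tx 1 is mined and removed: the edge into tx 2 disappears...
    g.edges.clear();
    // ...and update_roots must now promote tx 2.
    g.update_roots();
    assert_eq!(g.roots, vec![2]);
    assert!(g.vertices.is_empty());
}
```

A single pass with an O(1) set-membership test per vertex is presumably the efficiency rework the commit message mentions, as opposed to re-scanning the edge list once per vertex.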
pool/src/pool.rs (117 lines changed)

@@ -26,7 +26,7 @@ use secp;
 use secp::pedersen::Commitment;
 
 use std::sync::Arc;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 /// The pool itself.
 /// The transactions HashMap holds ownership of all transactions in the pool,
@@ -455,16 +455,19 @@
     //
     // After the pool has been successfully processed, an orphans
     // reconciliation job is triggered.
-    let mut marked_transactions: HashMap<hash::Hash, ()> = HashMap::new();
+    let mut marked_transactions: HashSet<hash::Hash> = HashSet::new();
 
     {
-        let mut conflicting_txs: Vec<hash::Hash> = block
+        // find all conflicting txs based on inputs to the block
+        let conflicting_txs: HashSet<hash::Hash> = block
             .inputs
             .iter()
             .filter_map(|x| self.pool.get_external_spent_output(&x.commitment()))
-            .map(|x| x.destination_hash().unwrap())
+            .filter_map(|x| x.destination_hash())
             .collect();
 
-        let mut conflicting_outputs: Vec<hash::Hash> = block
+        // find all outputs that conflict - potential for duplicates so use a HashSet here
+        let conflicting_outputs: HashSet<hash::Hash> = block
             .outputs
             .iter()
             .filter_map(|x: &transaction::Output| {
@@ -472,12 +475,12 @@
                 self.pool.get_available_output(&x.commitment()),
                 )
             })
-            .map(|x| x.source_hash().unwrap())
+            .filter_map(|x| x.source_hash())
             .collect();
 
-        conflicting_txs.append(&mut conflicting_outputs);
-
-        for txh in conflicting_txs {
+        // now iterate over all conflicting hashes from both txs and outputs
+        // we can just use the union of the two sets here to remove duplicates
+        for &txh in conflicting_txs.union(&conflicting_outputs) {
            self.mark_transaction(txh, &mut marked_transactions);
        }
    }
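Collecting the conflicting inputs and outputs into two `HashSet`s and iterating over their `union` guarantees each conflicting tx is handed to `mark_transaction` exactly once, where the old `Vec` plus `append` approach could contain the same hash twice. A tiny standalone illustration with made-up `u64` hashes:

```rust
use std::collections::HashSet;

fn main() {
    // Hypothetical tx hashes: tx 7 conflicts via a block input *and* an output.
    let conflicting_txs: HashSet<u64> = [3, 7].into_iter().collect();
    let conflicting_outputs: HashSet<u64> = [7, 9].into_iter().collect();

    // union() yields each element once, so tx 7 would only be marked once.
    let visited: Vec<u64> = conflicting_txs
        .union(&conflicting_outputs)
        .copied()
        .collect();
    assert_eq!(visited.len(), 3); // {3, 7, 9}, no duplicate 7
}
```

Note the parallel change from `.map(|x| x.destination_hash().unwrap())` to `.filter_map(|x| x.destination_hash())` (and likewise for `source_hash`), which skips edges without a hash instead of panicking on the `unwrap`.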
@@ -501,10 +504,14 @@
     fn mark_transaction(
         &self,
         conflicting_tx: hash::Hash,
-        marked_txs: &mut HashMap<hash::Hash, ()>,
+        marked_txs: &mut HashSet<hash::Hash>,
     ) {
+        // we can stop recursively visiting txs if we have already seen this one
+        if marked_txs.contains(&conflicting_tx) {
+            return;
+        }
 
-        marked_txs.insert(conflicting_tx, ());
+        marked_txs.insert(conflicting_tx);
 
         let tx_ref = self.transactions.get(&conflicting_tx);
 
@@ -532,13 +539,13 @@
     /// Additional bookkeeping in the mark step could optimize that away.
     fn sweep_transactions(
         &mut self,
-        marked_transactions: HashMap<hash::Hash, ()>,
+        marked_transactions: HashSet<hash::Hash>,
     ) -> Vec<Box<transaction::Transaction>> {
 
         let mut removed_txs = Vec::new();
 
-        for tx_hash in marked_transactions.keys() {
-            let removed_tx = self.transactions.remove(tx_hash).unwrap();
+        for tx_hash in &marked_transactions {
+            let removed_tx = self.transactions.remove(&tx_hash).unwrap();
 
             self.pool.remove_pool_transaction(
                 &removed_tx,
@@ -547,6 +554,11 @@
 
             removed_txs.push(removed_tx);
         }
 
+        // final step is to update the pool to reflect the new set of roots
+        // a tx that was non-root may now be root based on the txs removed
+        self.pool.update_roots();
+
         removed_txs
     }
 
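`mark_transaction` and `sweep_transactions` form a mark-and-sweep pass over the pool, and the new early return makes the recursive mark step stop cleanly when a tx is reachable along more than one path. Below is a minimal sketch of that visited-set pattern, with a hypothetical `children` map standing in for the pool's edge lookups; unlike the patch, which calls `contains` and then `insert`, it folds both into one call by using the `bool` that `HashSet::insert` returns:

```rust
use std::collections::{HashMap, HashSet};

/// Recursively mark `tx` and everything that spends its outputs.
fn mark(tx: u64, children: &HashMap<u64, Vec<u64>>, marked: &mut HashSet<u64>) {
    // insert() returns false if the tx was already marked: stop recursing,
    // mirroring the early return added to mark_transaction.
    if !marked.insert(tx) {
        return;
    }
    for &child in children.get(&tx).into_iter().flatten() {
        mark(child, children, marked);
    }
}

fn main() {
    // A diamond: tx 1 feeds txs 2 and 3, which both feed tx 4.
    let children = HashMap::from([(1, vec![2, 3]), (2, vec![4]), (3, vec![4])]);
    let mut marked = HashSet::new();
    mark(1, &children, &mut marked);
    assert_eq!(marked.len(), 4); // tx 4 is marked once, not twice
}
```

Without the early return, the old code would revisit tx 4 (and, in a pool with longer chains, everything downstream of it) once per path.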
@@ -868,6 +880,83 @@ mod tests {
         // TODO we need a test here
     }
 
+    #[test]
+    fn test_zero_confirmation_reconciliation() {
+        let mut dummy_chain = DummyChainImpl::new();
+        let head_header = block::BlockHeader {
+            height: 1,
+            ..block::BlockHeader::default()
+        };
+        dummy_chain.store_head_header(&head_header);
+
+        // single UTXO
+        let new_utxo = DummyUtxoSet::empty()
+            .with_output(test_output(100));
+
+        dummy_chain.update_utxo_set(new_utxo);
+        let chain_ref = Arc::new(dummy_chain);
+        let pool = RwLock::new(test_setup(&chain_ref));
+
+        // now create two txs
+        // tx1 spends the UTXO
+        // tx2 spends output from tx1
+        let tx1 = test_transaction(vec![100], vec![90]);
+        let tx2 = test_transaction(vec![90], vec![80]);
+
+        {
+            let mut write_pool = pool.write().unwrap();
+            assert_eq!(write_pool.total_size(), 0);
+
+            // now add both txs to the pool (tx2 spends tx1 with zero confirmations)
+            // both should be accepted if tx1 is added before tx2
+            write_pool.add_to_memory_pool(test_source(), tx1).unwrap();
+            write_pool.add_to_memory_pool(test_source(), tx2).unwrap();
+
+            assert_eq!(write_pool.pool_size(), 2);
+        }
+
+        let txs: Vec<transaction::Transaction>;
+        {
+            let read_pool = pool.read().unwrap();
+            let mut mineable_txs = read_pool.prepare_mineable_transactions(3);
+            txs = mineable_txs.drain(..).map(|x| *x).collect();
+
+            // confirm we are only preparing a single tx for mining here
+            // even though we have 2 txs in the pool
+            assert_eq!(txs.len(), 1);
+        }
+
+        let keychain = Keychain::from_random_seed().unwrap();
+        let key_id = keychain.derive_key_id(1).unwrap();
+
+        // now "mine" the block passing in the mineable txs from earlier
+        let block = block::Block::new(
+            &block::BlockHeader::default(),
+            txs.iter().collect(),
+            &keychain,
+            &key_id,
+        ).unwrap();
+
+        // now apply the block to ensure the chainstate is updated before we reconcile
+        chain_ref.apply_block(&block);
+
+        // now reconcile the block
+        {
+            let mut write_pool = pool.write().unwrap();
+            let evicted_transactions = write_pool.reconcile_block(&block).unwrap();
+            assert_eq!(evicted_transactions.len(), 1);
+        }
+
+        // check the pool is consistent after reconciling the block
+        // we should have 1 tx in the pool and it should
+        // be in the list of roots (will not be mineable otherwise)
+        {
+            let read_pool = pool.write().unwrap();
+            assert_eq!(read_pool.pool.len_vertices(), 1);
+            assert_eq!(read_pool.pool.len_roots(), 1);
+        }
+    }
+
     #[test]
     /// Testing block reconciliation
     fn test_block_reconciliation() {
pool/src/types.rs

@@ -16,7 +16,7 @@
 //! and its top-level members.
 
 use std::vec::Vec;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::iter::Iterator;
 use std::fmt;
 
@@ -206,6 +206,13 @@ impl Pool {
             .map(|x| x.destination_hash().unwrap())
     }
 
+    pub fn len_roots(&self) -> usize {
+        self.graph.len_roots()
+    }
+
+    pub fn len_vertices(&self) -> usize {
+        self.graph.len_vertices()
+    }
+
     pub fn get_blockchain_spent(&self, c: &Commitment) -> Option<&graph::Edge> {
         self.consumed_blockchain_outputs.get(c)
@@ -251,10 +258,14 @@ impl Pool {
         }
     }
 
+    pub fn update_roots(&mut self) {
+        self.graph.update_roots()
+    }
+
     pub fn remove_pool_transaction(
         &mut self,
         tx: &transaction::Transaction,
-        marked_txs: &HashMap<hash::Hash, ()>,
+        marked_txs: &HashSet<hash::Hash>,
     ) {
 
         self.graph.remove_vertex(graph::transaction_identifier(tx));
@@ -262,7 +273,7 @@ impl Pool {
         for input in tx.inputs.iter().map(|x| x.commitment()) {
             match self.graph.remove_edge_by_commitment(&input) {
                 Some(x) => {
-                    if !marked_txs.contains_key(&x.source_hash().unwrap()) {
+                    if !marked_txs.contains(&x.source_hash().unwrap()) {
                         self.available_outputs.insert(
                             x.output_commitment(),
                             x.with_destination(None),
@@ -278,7 +289,7 @@ impl Pool {
         for output in tx.outputs.iter().map(|x| x.commitment()) {
             match self.graph.remove_edge_by_commitment(&output) {
                 Some(x) => {
-                    if !marked_txs.contains_key(&x.destination_hash().unwrap()) {
+                    if !marked_txs.contains(&x.destination_hash().unwrap()) {
 
                         self.consumed_blockchain_outputs.insert(
                             x.output_commitment(),