tx pool lookup for kernel short ids (compact block hydration) (#710)
* wip - basic tx pool lookup for kernel short ids (compact block hydration)
* use the nonce in the compact_block to correctly generate short_ids for lookup
* query the tx pool based on kernel short_ids
* tests passing
* cleanup some logging
* cleanup logging
This commit is contained in:
parent b82fa0ea1d
commit 5572fa075e
7 changed files with 155 additions and 68 deletions
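For orientation, a toy model of the lookup this commit implements: every kernel gets a block-specific short id derived by hashing it keyed on the compact block's hash (which, per the serialization change below, commits to a fresh random nonce), and the pool is scanned for transactions whose kernels produce a matching id. std's DefaultHasher and the 48-bit truncation below are stand-ins for grin's real short_id scheme, not the actual implementation.

// Toy model of the kernel short_id lookup; DefaultHasher and the 48-bit
// truncation are stand-ins for grin's real short_id scheme.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// a block-specific short id: keying on the block hash is what the compact
// block nonce feeds into upstream (cb.hash() commits to the nonce)
fn short_id(block_hash: u64, kernel: &str) -> u64 {
    let mut h = DefaultHasher::new();
    block_hash.hash(&mut h);
    kernel.hash(&mut h);
    h.finish() & 0xffff_ffff_ffff // short ids keep only a few bytes
}

fn main() {
    let pool = vec!["kernel_a", "kernel_b", "kernel_c"]; // stand-in tx pool
    let block_hash = 0xdead_beef_u64;
    let kern_ids = vec![short_id(block_hash, "kernel_b")]; // from the compact block

    // the pool-side lookup: rehash every pooled kernel with this block's hash
    // and keep anything whose short_id appears in the compact block
    let matched: Vec<_> = pool
        .iter()
        .filter(|k| kern_ids.contains(&short_id(block_hash, k)))
        .collect();
    assert_eq!(matched, vec![&"kernel_b"]);
}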
@@ -482,7 +482,7 @@ impl Chain {
 		let header_head = self.get_header_head().unwrap();
 		if header_head.height - head.height < global::cut_through_horizon() as u64 {
 			return Err(Error::InvalidSumtree("not needed".to_owned()));
 		}

 		let header = self.store.get_block_header(&h)?;
 		sumtree::zip_write(self.db_root.clone(), sumtree_data)?;
@@ -235,13 +235,15 @@ pub struct CompactBlock {
 /// Implementation of Writeable for a compact block, defines how to write the block to a
 /// binary writer. Differentiates between writing the block for the purpose of
 /// full serialization and the one of just extracting a hash.
+/// Note: compact block hash uses both the header *and* the nonce.
 impl Writeable for CompactBlock {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		try!(self.header.write(writer));
+		try!(writer.write_u64(self.nonce));

 		if writer.serialization_mode() != ser::SerializationMode::Hash {
 			ser_multiwrite!(
 				writer,
-				[write_u64, self.nonce],
 				[write_u64, self.out_full.len() as u64],
 				[write_u64, self.kern_full.len() as u64],
 				[write_u64, self.kern_ids.len() as u64]
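The effect of that change, in a minimal sketch with invented types: the header and nonce are now written unconditionally, so a Hash-mode writer sees them and cb.hash() commits to the nonce, while the body counts remain part of full serialization only.

// Minimal sketch of the serialization split (Mode, Cb and the u64 "writer"
// are invented stand-ins for grin's Writer/SerializationMode machinery).
#[derive(PartialEq)]
enum Mode { Full, Hash }

struct Cb { header: u64, nonce: u64, kern_ids: Vec<u64> }

impl Cb {
    fn write(&self, mode: Mode, out: &mut Vec<u64>) {
        out.push(self.header);
        out.push(self.nonce); // unconditional: part of the hash preimage
        if mode != Mode::Hash {
            out.push(self.kern_ids.len() as u64); // full serialization only
            out.extend(&self.kern_ids);
        }
    }
}

fn main() {
    let cb = Cb { header: 7, nonce: 42, kern_ids: vec![1, 2] };
    let (mut h, mut f) = (Vec::new(), Vec::new());
    cb.write(Mode::Hash, &mut h);
    cb.write(Mode::Full, &mut f);
    assert_eq!(h, vec![7, 42]);          // hash preimage: header + nonce
    assert_eq!(f, vec![7, 42, 2, 1, 2]); // full: plus counts and ids
}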
@@ -402,44 +404,43 @@ impl Block {
 	}

 	/// Hydrate a block from a compact block.
-	///
-	/// TODO - only supporting empty compact blocks for now (coinbase output/kernel only)
-	/// eventually we want to support any compact blocks
-	/// we need to differentiate between a block with missing entries (not in tx pool)
-	/// and a truly invalid block (which will get the peer banned)
-	/// so we need to consider how to do this safely/robustly
-	/// presumably at this point we are confident we can generate a full block with no
-	/// missing pieces, but we cannot fully validate it until we push it through the pipeline
-	/// at which point the peer runs the risk of getting banned
-	pub fn hydrate_from(
-		cb: CompactBlock,
-		_inputs: Vec<Input>,
-		_outputs: Vec<Output>,
-		_kernels: Vec<TxKernel>,
-	) -> Block {
+	/// Note: caller must validate the block themselves, we do not validate it here.
+	pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Block {
 		debug!(
 			LOGGER,
-			"block: hydrate_from: {}, {} cb outputs, {} cb kernels, {} tx kern_ids",
+			"block: hydrate_from: {}, {} txs",
 			cb.hash(),
-			cb.out_full.len(),
-			cb.kern_full.len(),
-			cb.kern_ids.len(),
+			txs.len(),
 		);

-		// we only support "empty" compact block for now
-		assert!(cb.kern_ids.is_empty());
-
-		let mut all_inputs = vec![];
-		let mut all_outputs = vec![];
-		let mut all_kernels = vec![];
+		let mut all_inputs = HashSet::new();
+		let mut all_outputs = HashSet::new();
+		let mut all_kernels = HashSet::new();
+
+		// collect all the inputs, outputs and kernels from the txs
+		for tx in txs {
+			all_inputs.extend(tx.inputs);
+			all_outputs.extend(tx.outputs);
+			all_kernels.extend(tx.kernels);
+		}

+		// include the coinbase output(s) and kernel(s) from the compact_block
 		all_outputs.extend(cb.out_full);
 		all_kernels.extend(cb.kern_full);

+		// convert the sets to vecs
+		let mut all_inputs = all_inputs.iter().cloned().collect::<Vec<_>>();
+		let mut all_outputs = all_outputs.iter().cloned().collect::<Vec<_>>();
+		let mut all_kernels = all_kernels.iter().cloned().collect::<Vec<_>>();

+		// sort them all lexicographically
 		all_inputs.sort();
 		all_outputs.sort();
 		all_kernels.sort();

+		// finally return the full block
+		// Note: we have not actually validated the block here
+		// leave it to the caller to actually validate the block
 		Block {
 			header: cb.header,
 			inputs: all_inputs,
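The hydration bookkeeping above reduces to dedup-then-sort: HashSets drop any duplicates across the collected txs and the coinbase parts, and the final vecs are sorted because validate()/verify_sorted() below expects lexicographic order. A self-contained sketch of that step, with u64 stand-ins for inputs/outputs/kernels:

// dedup via a set, then sort, mirroring hydrate_from's set -> vec -> sort
use std::collections::HashSet;

fn dedup_and_sort(items: Vec<u64>) -> Vec<u64> {
    let set: HashSet<u64> = items.into_iter().collect(); // drop duplicates
    let mut v: Vec<u64> = set.into_iter().collect();
    v.sort(); // block validation expects sorted inputs/outputs/kernels
    v
}

fn main() {
    assert_eq!(dedup_and_sort(vec![3, 1, 3, 2]), vec![1, 2, 3]);
}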
@@ -640,10 +641,6 @@ impl Block {
 	/// Validates all the elements in a block that can be checked without
 	/// additional data. Includes commitment sums and kernels, Merkle
 	/// trees, reward, etc.
-	///
-	/// TODO - performs various verification steps - discuss renaming this to "verify"
-	/// as all the steps within are verify steps.
-	///
 	pub fn validate(&self) -> Result<(), Error> {
 		self.verify_weight()?;
 		self.verify_sorted()?;
@@ -1150,6 +1147,26 @@ mod test {
 		);
 	}

+	#[test]
+	fn compact_block_hash_with_nonce() {
+		let keychain = Keychain::from_random_seed().unwrap();
+		let tx = tx1i2o();
+		let b = new_block(vec![&tx], &keychain);
+		let cb1 = b.as_compact_block();
+		let cb2 = b.as_compact_block();
+
+		// random nonce included in hash each time we generate a compact_block
+		// so the hash will always be unique (we use this to generate unique short_ids)
+		assert!(cb1.hash() != cb2.hash());
+
+		assert!(cb1.kern_ids[0] != cb2.kern_ids[0]);
+
+		// check we can identify the specified kernel from the short_id
+		// in either of the compact_blocks
+		assert_eq!(cb1.kern_ids[0], tx.kernels[0].short_id(&cb1.hash()));
+		assert_eq!(cb2.kern_ids[0], tx.kernels[0].short_id(&cb2.hash()));
+	}
+
 	#[test]
 	fn convert_block_to_compact_block() {
 		let keychain = Keychain::from_random_seed().unwrap();
@@ -1157,8 +1174,6 @@ mod test {
 		let b = new_block(vec![&tx1], &keychain);
 		let cb = b.as_compact_block();

-		let hash = (b.header, cb.nonce).hash();
-
 		assert_eq!(cb.out_full.len(), 1);
 		assert_eq!(cb.kern_full.len(), 1);
 		assert_eq!(cb.kern_ids.len(), 1);
@@ -1169,7 +1184,7 @@ mod test {
 				.iter()
 				.find(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL))
 				.unwrap()
-				.short_id(&hash)
+				.short_id(&cb.hash())
 		);
 	}

@@ -1178,7 +1193,7 @@ mod test {
 		let keychain = Keychain::from_random_seed().unwrap();
 		let b = new_block(vec![], &keychain);
 		let cb = b.as_compact_block();
-		let hb = Block::hydrate_from(cb, vec![], vec![], vec![]);
+		let hb = Block::hydrate_from(cb, vec![]);
 		assert_eq!(hb.header, b.header);
 		assert_eq!(hb.outputs, b.outputs);
 		assert_eq!(hb.kernels, b.kernels);
@@ -138,6 +138,15 @@ pub struct TxKernel {

 hashable_ord!(TxKernel);

+/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations?
+impl ::std::hash::Hash for TxKernel {
+	fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
+		let mut vec = Vec::new();
+		ser::serialize(&mut vec, &self).expect("serialization failed");
+		::std::hash::Hash::hash(&vec, state);
+	}
+}
+
 impl Writeable for TxKernel {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		ser_multiwrite!(
@@ -206,7 +215,7 @@ impl TxKernel {
 			..self
 		}
 	}

 	/// Size in bytes of a kernel, necessary for binary storage
 	pub fn size() -> usize {
 		17 + // features plus fee and lock_height
@@ -449,7 +458,7 @@ impl Transaction {
 /// Primarily a reference to an output being spent by the transaction.
 /// But also information required to verify coinbase maturity through
 /// the lock_height hashed in the switch_commit_hash.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, Hash)]
 pub struct Input{
 	/// The features of the output being spent.
 	/// We will check maturity for coinbase output.
@@ -571,7 +580,7 @@ impl SwitchCommitHashKey {
 }

 /// Definition of the switch commitment hash
-#[derive(Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[derive(Copy, Clone, Hash, PartialEq, Serialize, Deserialize)]
 pub struct SwitchCommitHash([u8; SWITCH_COMMIT_HASH_SIZE]);

 /// Implementation of Writeable for a switch commitment hash
@@ -678,6 +687,15 @@ pub struct Output {

 hashable_ord!(Output);

+/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations?
+impl ::std::hash::Hash for Output {
+	fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
+		let mut vec = Vec::new();
+		ser::serialize(&mut vec, &self).expect("serialization failed");
+		::std::hash::Hash::hash(&vec, state);
+	}
+}
+
 /// Implementation of Writeable for a transaction Output, defines how to write
 /// an Output as binary.
 impl Writeable for Output {
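The bridge pattern used in both impls above, in miniature: implement std::hash::Hash by feeding the canonical serialization to the hasher, so HashSet membership agrees with serialization-level equality. Kernel, its fields and to_bytes below are invented stand-ins for the ser::serialize call in the diff:

use std::collections::HashSet;

#[derive(PartialEq, Eq)]
struct Kernel { fee: u64, lock_height: u64 }

impl Kernel {
    // stand-in for ser::serialize: a canonical byte encoding of the fields
    fn to_bytes(&self) -> Vec<u8> {
        let mut v = Vec::new();
        v.extend(&self.fee.to_be_bytes());
        v.extend(&self.lock_height.to_be_bytes());
        v
    }
}

// same shape as the impls in the diff: hash the serialized bytes
impl std::hash::Hash for Kernel {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        std::hash::Hash::hash(&self.to_bytes(), state);
    }
}

fn main() {
    let mut set = HashSet::new();
    set.insert(Kernel { fee: 2, lock_height: 0 });
    set.insert(Kernel { fee: 2, lock_height: 0 }); // duplicate: dropped
    assert_eq!(set.len(), 1);
}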
@@ -608,14 +608,6 @@ impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
 		return self.plen;
 	}
 }
-// // TODO - is this (single byte) so we do not ever serialize a secret_key?
-// // Note: we *can* serialize a blinding_factor built from a secret_key
-// // but this needs to be done explicitly (tx kernel offset for example)
-// impl AsFixedBytes for ::util::secp::key::SecretKey {
-// 	fn len(&self) -> usize {
-// 		return 1;
-// 	}
-// }
 impl AsFixedBytes for ::util::secp::Signature {
 	fn len(&self) -> usize {
 		return 64;
@@ -103,20 +103,41 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		);

 		if cb.kern_ids.is_empty() {
-			let block = core::Block::hydrate_from(cb, vec![], vec![], vec![]);
+			let block = core::Block::hydrate_from(cb, vec![]);

 			// push the freshly hydrated block through the chain pipeline
 			self.process_block(block)
 		} else {
-			// TODO - do we need to validate the header here to be sure it is not total garbage?
+			// TODO - do we need to validate the header here?

+			let kernel_count = cb.kern_ids.len();
+
+			let txs = {
+				let tx_pool = self.tx_pool.read().unwrap();
+				tx_pool.retrieve_transactions(&cb)
+			};
+
 			debug!(
 				LOGGER,
-				"*** cannot hydrate non-empty compact block (not yet implemented), \
-				falling back to requesting full block",
+				"adapter: txs from tx pool - {}",
+				txs.len(),
 			);
-			self.request_block(&cb.header, &addr);
-			true
+
+			// TODO - 3 scenarios here -
+			// 1) we hydrate a valid block (good to go)
+			// 2) we hydrate an invalid block (txs legit missing from our pool)
+			// 3) we hydrate an invalid block (peer sent us a "bad" compact block) - [TBD]
+
+			let block = core::Block::hydrate_from(cb.clone(), txs);
+
+			if let Ok(()) = block.validate() {
+				debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
+				self.process_block(block)
+			} else {
+				debug!(LOGGER, "adapter: block invalid after hydration, requesting full block");
+				self.request_block(&cb.header, &addr);
+				true
+			}
 		}
 	}

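A stand-in sketch of the adapter's decision flow above, making the three TODO scenarios concrete: hydrate from pool txs, validate, then either process the block or fall back to requesting the full block. All types and functions below are stubs, not grin's API:

struct Block { complete: bool }
impl Block {
    // stub: stands in for full block validation after hydration
    fn validate(&self) -> Result<(), ()> {
        if self.complete { Ok(()) } else { Err(()) }
    }
}

// stub for core::Block::hydrate_from: only "complete" if every tx was found
fn hydrate(txs: &[u32], wanted: usize) -> Block { Block { complete: txs.len() == wanted } }
fn process_block() -> bool { true } // stand-in for the chain pipeline
fn request_block() -> bool { true } // stand-in for the full-block fallback

fn on_compact_block(pool_txs: &[u32], kern_ids: usize) -> bool {
    let block = hydrate(pool_txs, kern_ids);
    if let Ok(()) = block.validate() {
        // scenario 1: hydrated a valid block, push it through the pipeline
        process_block()
    } else {
        // scenarios 2 and 3 look identical here (txs legitimately missing
        // from our pool vs a bad compact block), so fall back safely
        request_block()
    }
}

fn main() {
    assert!(on_compact_block(&[1, 2], 2)); // all txs found: process
    assert!(on_compact_block(&[1], 2));    // missing tx: request full block
}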
@@ -424,15 +445,15 @@ pub struct ChainToPoolAndNetAdapter {

 impl ChainAdapter for ChainToPoolAndNetAdapter {
 	fn block_accepted(&self, b: &core::Block, opts: Options) {
-		{
-			if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
-				error!(
-					LOGGER,
-					"Pool could not update itself at block {}: {:?}",
-					b.hash(),
-					e
-				);
-			}
-		}
+		debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());
+
+		if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
+			error!(
+				LOGGER,
+				"Pool could not update itself at block {}: {:?}",
+				b.hash(),
+				e,
+			);
+		}

 		// If we mined the block then we want to broadcast the block itself.
@@ -553,4 +574,3 @@ impl pool::BlockChain for PoolToChainAdapter {
 			.map_err(|_| pool::PoolError::GenericPoolError)
 	}
 }
-
@@ -153,7 +153,7 @@ impl Server {
 				Ok(added)
 			}
 			Err(e) => {
-				debug!(LOGGER, "Couldn not connect to {}: {:?}", addr, e);
+				debug!(LOGGER, "Could not connect to {}: {:?}", addr, e);
 				Err(Error::Connection(e))
 			}
 		}
@@ -235,4 +235,3 @@ impl NetAdapter for DummyAdapter {
 	fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
 	fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _:u64) {}
 }
-
@@ -17,11 +17,13 @@
 use std::sync::Arc;
 use std::collections::{HashMap, HashSet};

-use util::secp::pedersen::Commitment;
+use core::core::hash::Hashed;
+use core::core::id::ShortIdentifiable;
 use core::core::transaction;
-use core::core::{block, hash, OutputIdentifier};
-use core::global;
+use core::core::{OutputIdentifier, Transaction};
+use core::core::{block, hash};
+use util::LOGGER;
+use util::secp::pedersen::Commitment;

 use types::*;
 pub use graph;
@@ -49,7 +51,11 @@ where
 	T: BlockChain,
 {
 	/// Create a new transaction pool
-	pub fn new(config: PoolConfig, chain: Arc<T>, adapter: Arc<PoolAdapter>) -> TransactionPool<T> {
+	pub fn new(
+		config: PoolConfig,
+		chain: Arc<T>,
+		adapter: Arc<PoolAdapter>,
+	) -> TransactionPool<T> {
 		TransactionPool {
 			config: config,
 			transactions: HashMap::new(),
@@ -60,6 +66,43 @@ where
 		}
 	}

+	/// Query the tx pool for all known txs based on kernel short_ids
+	/// from the provided compact_block.
+	/// Note: does not validate that we return the full set of required txs.
+	/// The caller will need to validate that themselves.
+	pub fn retrieve_transactions(&self, cb: &block::CompactBlock) -> Vec<Transaction> {
+		debug!(
+			LOGGER,
+			"pool: retrieve_transactions: kern_ids - {:?}, txs - {}, {:?}",
+			cb.kern_ids,
+			self.transactions.len(),
+			self.transactions.keys(),
+		);
+
+		let mut txs = vec![];
+
+		for tx in self.transactions.values() {
+			for kernel in &tx.kernels {
+				// rehash each kernel to calculate the block specific short_id
+				let short_id = kernel.short_id(&cb.hash());
+
+				// if any kernel matches then keep the tx for later
+				if cb.kern_ids.contains(&short_id) {
+					txs.push(*tx.clone());
+					break;
+				}
+			}
+		}
+
+		debug!(
+			LOGGER,
+			"pool: retrieve_transactions: matching txs from pool - {}",
+			txs.len(),
+		);
+
+		txs
+	}
+
 	/// Searches for an output, designated by its commitment, from the current
 	/// best UTXO view, presented by taking the best blockchain UTXO set (as
 	/// determined by the blockchain component) and rectifying pool spent and
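Since retrieve_transactions deliberately does not guarantee it returns the full set of required txs, a caller could pre-check coverage before attempting hydration. This check is not part of this commit, which relies on full block validation and the full-block fallback instead; the u64 ids below are stand-ins for short_ids:

// Coverage pre-check sketch: are all of the compact block's kern_ids present
// among the kernels of the returned txs?
use std::collections::HashSet;

fn covered(kern_ids: &[u64], returned_kernel_ids: &[u64]) -> bool {
    let have: HashSet<_> = returned_kernel_ids.iter().collect();
    kern_ids.iter().all(|id| have.contains(id))
}

fn main() {
    assert!(covered(&[1, 2], &[2, 1, 9]));
    assert!(!covered(&[1, 2, 3], &[1, 2])); // a tx is missing from the pool
}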