never speak of the verifier cache again

This commit is contained in:
Antioch Peverell 2021-04-01 15:04:53 +01:00 committed by GitHub
parent cccaf98493
commit f6ec77a592
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
37 changed files with 189 additions and 735 deletions

View file

@ -17,7 +17,6 @@
use crate::chain::{Chain, SyncState};
use crate::core::core::hash::Hash;
use crate::core::core::transaction::Transaction;
use crate::core::core::verifier_cache::VerifierCache;
use crate::handlers::blocks_api::{BlockHandler, HeaderHandler};
use crate::handlers::chain_api::{ChainHandler, KernelHandler, OutputHandler};
use crate::handlers::pool_api::PoolHandler;
@ -39,22 +38,20 @@ use std::sync::Weak;
/// Methods in this API are intended to be 'single use'.
///
pub struct Foreign<B, P, V>
pub struct Foreign<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub chain: Weak<Chain>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
pub sync_state: Weak<SyncState>,
}
impl<B, P, V> Foreign<B, P, V>
impl<B, P> Foreign<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
/// Create a new API instance with the chain, transaction pool, peers and `sync_state`. All subsequent
/// API calls will operate on this instance of node API.
@ -71,7 +68,7 @@ where
pub fn new(
chain: Weak<Chain>,
tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
sync_state: Weak<SyncState>,
) -> Self {
Foreign {

View file

@ -16,7 +16,6 @@
use crate::core::core::hash::Hash;
use crate::core::core::transaction::Transaction;
use crate::core::core::verifier_cache::VerifierCache;
use crate::foreign::Foreign;
use crate::pool::PoolEntry;
use crate::pool::{BlockChain, PoolAdapter};
@ -742,11 +741,10 @@ pub trait ForeignRpc: Sync + Send {
fn push_transaction(&self, tx: Transaction, fluff: Option<bool>) -> Result<(), ErrorKind>;
}
impl<B, P, V> ForeignRpc for Foreign<B, P, V>
impl<B, P> ForeignRpc for Foreign<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
fn get_header(
&self,
@ -856,7 +854,7 @@ macro_rules! doctest_helper_json_rpc_foreign_assert_response {
// create temporary grin server, run jsonrpc request on node api, delete server, return
// json response.
{
{
/*use grin_servers::test_framework::framework::run_doctest;
use grin_util as util;
use serde_json;
@ -890,6 +888,6 @@ macro_rules! doctest_helper_json_rpc_foreign_assert_response {
serde_json::to_string_pretty(&expected_response).unwrap()
);
}*/
}
}
};
}

View file

@ -26,7 +26,6 @@ use crate::auth::{
};
use crate::chain;
use crate::chain::{Chain, SyncState};
use crate::core::core::verifier_cache::VerifierCache;
use crate::foreign::Foreign;
use crate::foreign_rpc::ForeignRpc;
use crate::owner::Owner;
@ -48,10 +47,10 @@ use std::sync::{Arc, Weak};
/// Listener version, providing same API but listening for requests on a
/// port and wrapping the calls
pub fn node_apis<B, P, V>(
pub fn node_apis<B, P>(
addr: &str,
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P, V>>>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P>>>,
peers: Arc<p2p::Peers>,
sync_state: Arc<chain::SyncState>,
api_secret: Option<String>,
@ -61,7 +60,6 @@ pub fn node_apis<B, P, V>(
where
B: BlockChain + 'static,
P: PoolAdapter + 'static,
V: VerifierCache + 'static,
{
let mut router = Router::new();
@ -173,27 +171,25 @@ impl crate::router::Handler for OwnerAPIHandlerV2 {
}
/// V2 API Handler/Wrapper for foreign functions
pub struct ForeignAPIHandlerV2<B, P, V>
pub struct ForeignAPIHandlerV2<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub chain: Weak<Chain>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
pub sync_state: Weak<SyncState>,
}
impl<B, P, V> ForeignAPIHandlerV2<B, P, V>
impl<B, P> ForeignAPIHandlerV2<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
/// Create a new foreign API handler for GET methods
pub fn new(
chain: Weak<Chain>,
tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
sync_state: Weak<SyncState>,
) -> Self {
ForeignAPIHandlerV2 {
@ -204,11 +200,10 @@ where
}
}
impl<B, P, V> crate::router::Handler for ForeignAPIHandlerV2<B, P, V>
impl<B, P> crate::router::Handler for ForeignAPIHandlerV2<B, P>
where
B: BlockChain + 'static,
P: PoolAdapter + 'static,
V: VerifierCache + 'static,
{
fn post(&self, req: Request<Body>) -> ResponseFuture {
let api = Foreign::new(

View file

@ -14,7 +14,6 @@
use super::utils::w;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Transaction;
use crate::core::ser::{self, ProtocolVersion};
use crate::pool::{self, BlockChain, PoolAdapter, PoolEntry};
@ -30,20 +29,18 @@ use std::sync::Weak;
/// Get basic information about the transaction pool.
/// GET /v1/pool
pub struct PoolInfoHandler<B, P, V>
pub struct PoolInfoHandler<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
}
impl<B, P, V> Handler for PoolInfoHandler<B, P, V>
impl<B, P> Handler for PoolInfoHandler<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let pool_arc = w_fut!(&self.tx_pool);
@ -55,20 +52,18 @@ where
}
}
pub struct PoolHandler<B, P, V>
pub struct PoolHandler<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
}
impl<B, P, V> PoolHandler<B, P, V>
impl<B, P> PoolHandler<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub fn get_pool_size(&self) -> Result<usize, Error> {
let pool_arc = w(&self.tx_pool)?;
@ -117,23 +112,21 @@ struct TxWrapper {
/// Push new transaction to our local transaction pool.
/// POST /v1/pool/push_tx
pub struct PoolPushHandler<B, P, V>
pub struct PoolPushHandler<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
pub tx_pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
}
async fn update_pool<B, P, V>(
pool: Weak<RwLock<pool::TransactionPool<B, P, V>>>,
async fn update_pool<B, P>(
pool: Weak<RwLock<pool::TransactionPool<B, P>>>,
req: Request<Body>,
) -> Result<(), Error>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
let pool = w(&pool)?;
let params = QueryParams::from(req.uri().query());
@ -169,11 +162,10 @@ where
Ok(())
}
impl<B, P, V> Handler for PoolPushHandler<B, P, V>
impl<B, P> Handler for PoolPushHandler<B, P>
where
B: BlockChain + 'static,
P: PoolAdapter + 'static,
V: VerifierCache + 'static,
{
fn post(&self, req: Request<Body>) -> ResponseFuture {
let pool = self.tx_pool.clone();

View file

@ -17,7 +17,6 @@
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier,
SegmentIdentifier, Transaction, TxKernel,
@ -149,7 +148,6 @@ pub struct Chain {
orphans: Arc<OrphanBlockPool>,
txhashset: Arc<RwLock<txhashset::TxHashSet>>,
header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
pibd_segmenter: Arc<RwLock<Option<Segmenter>>>,
// POW verification function
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
@ -166,7 +164,6 @@ impl Chain {
adapter: Arc<dyn ChainAdapter + Send + Sync>,
genesis: Block,
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
archive_mode: bool,
) -> Result<Chain, Error> {
let store = Arc::new(store::ChainStore::new(&db_root)?);
@ -201,7 +198,6 @@ impl Chain {
header_pmmr: Arc::new(RwLock::new(header_pmmr)),
pibd_segmenter: Arc::new(RwLock::new(None)),
pow_verifier,
verifier_cache,
archive_mode,
genesis: genesis.header,
};
@ -434,7 +430,6 @@ impl Chain {
Ok(pipe::BlockContext {
opts,
pow_verifier: self.pow_verifier,
verifier_cache: self.verifier_cache.clone(),
header_pmmr,
txhashset,
batch,

View file

@ -16,7 +16,6 @@
use crate::core::consensus;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Committed;
use crate::core::core::{
block, Block, BlockHeader, BlockSums, HeaderVersion, OutputIdentifier, TransactionBody,
@ -27,8 +26,6 @@ use crate::error::{Error, ErrorKind};
use crate::store;
use crate::txhashset;
use crate::types::{CommitPos, Options, Tip};
use crate::util::RwLock;
use std::sync::Arc;
/// Contextual information required to process a new block and either reject or
/// accept it.
@ -43,8 +40,6 @@ pub struct BlockContext<'a> {
pub header_pmmr: &'a mut txhashset::PMMRHandle<BlockHeader>,
/// The active batch to use for block processing.
pub batch: store::Batch<'a>,
/// The verifier cache (caching verifier for rangeproofs and kernel signatures)
pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
}
// If this block has greater total difficulty than treat as unknown in current context.
@ -419,7 +414,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(
fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
let prev = ctx.batch.get_previous_header(&block.header)?;
block
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.validate(&prev.total_kernel_offset)
.map_err(ErrorKind::InvalidBlockProof)?;
Ok(())
}

View file

@ -16,19 +16,16 @@ use self::chain::types::NoopAdapter;
use self::chain::types::Options;
use self::chain::Chain;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::Block;
use self::core::genesis;
use self::core::global::ChainTypes;
use self::core::libtx::{self, reward};
use self::core::{consensus, global, pow};
use self::keychain::{ExtKeychainPath, Keychain};
use self::util::RwLock;
use chrono::Duration;
use grin_chain as chain;
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use std::fs;
use std::sync::Arc;
@ -37,13 +34,11 @@ pub fn clean_output_dir(dir_name: &str) {
}
pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis,
pow::verify_size,
verifier_cache,
false,
)
.unwrap()

View file

@ -15,7 +15,6 @@
use self::chain::types::{NoopAdapter, Tip};
use self::chain::Chain;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, KernelFeatures, Transaction};
use self::core::global::ChainTypes;
use self::core::libtx::{self, build, ProofBuilder};
@ -56,13 +55,11 @@ impl ChainAdapter for StatusAdapter {
fn setup_with_status_adapter(dir_name: &str, genesis: Block, adapter: Arc<StatusAdapter>) -> Chain {
util::init_test_logger();
clean_output_dir(dir_name);
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let chain = chain::Chain::init(
dir_name.to_string(),
adapter,
genesis,
pow::verify_size,
verifier_cache,
false,
)
.unwrap();
@ -904,13 +901,11 @@ where
fn actual_diff_iter_output() {
global::set_local_chain_type(ChainTypes::AutomatedTesting);
let genesis_block = pow::mine_genesis_block().unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let chain = chain::Chain::init(
"../.grin".to_string(),
Arc::new(NoopAdapter {}),
genesis_block,
pow::verify_size,
verifier_cache,
false,
)
.unwrap();

View file

@ -21,15 +21,12 @@ use grin_util as util;
use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain};
use crate::chain::{pipe, Chain, Options};
use crate::core::core::verifier_cache::LruVerifierCache;
use crate::core::core::{block, pmmr, transaction};
use crate::core::core::{Block, FeeFields, KernelFeatures, Transaction, Weighting};
use crate::core::libtx::{build, reward, ProofBuilder};
use crate::core::{consensus, global, pow};
use crate::keychain::{ExtKeychain, ExtKeychainPath, Keychain, SwitchCommitmentType};
use crate::util::RwLock;
use chrono::Duration;
use std::sync::Arc;
fn build_block<K>(
chain: &Chain,
@ -128,12 +125,10 @@ fn process_block_cut_through() -> Result<(), chain::Error> {
.iter()
.any(|output| output.commitment() == commit));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction is invalid due to cut-through.
let height = 7;
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
tx.validate(Weighting::AsTransaction, height),
Err(transaction::Error::CutThrough),
);
@ -149,7 +144,7 @@ fn process_block_cut_through() -> Result<(), chain::Error> {
// The block is invalid due to cut-through.
let prev = chain.head_header()?;
assert_eq!(
block.validate(&prev.total_kernel_offset(), verifier_cache),
block.validate(&prev.total_kernel_offset()),
Err(block::Error::Transaction(transaction::Error::CutThrough))
);

View file

@ -14,13 +14,11 @@
use self::chain::types::NoopAdapter;
use self::chain::ErrorKind;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::KernelFeatures;
use self::core::global::{self, ChainTypes};
use self::core::libtx::{self, build, ProofBuilder};
use self::core::{consensus, pow};
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use self::util::RwLock;
use chrono::Duration;
use grin_chain as chain;
use grin_core as core;
@ -42,15 +40,12 @@ fn test_coinbase_maturity() {
let genesis_block = pow::mine_genesis_block().unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
{
let chain = chain::Chain::init(
chain_dir.to_string(),
Arc::new(NoopAdapter {}),
genesis_block,
pow::verify_size,
verifier_cache,
false,
)
.unwrap();

View file

@ -23,7 +23,6 @@ pub mod id;
pub mod merkle_proof;
pub mod pmmr;
pub mod transaction;
pub mod verifier_cache;
use crate::consensus::GRIN_BASE;
use util::secp::pedersen::Commitment;

View file

@ -18,7 +18,6 @@ use crate::consensus::{self, reward, REWARD};
use crate::core::committed::{self, Committed};
use crate::core::compact_block::CompactBlock;
use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH};
use crate::core::verifier_cache::VerifierCache;
use crate::core::{
pmmr, transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody,
TxKernel, Weighting,
@ -34,9 +33,7 @@ use chrono::Duration;
use keychain::{self, BlindingFactor};
use std::convert::TryInto;
use std::fmt;
use std::sync::Arc;
use util::from_hex;
use util::RwLock;
use util::{secp, static_secp_instance};
/// Errors thrown by Block validation
@ -732,12 +729,8 @@ impl Block {
/// Validates all the elements in a block that can be checked without
/// additional data. Includes commitment sums and kernels, Merkle
/// trees, reward, etc.
pub fn validate(
&self,
prev_kernel_offset: &BlindingFactor,
verifier: Arc<RwLock<dyn VerifierCache>>,
) -> Result<(), Error> {
self.body.validate(Weighting::AsBlock, verifier)?;
pub fn validate(&self, prev_kernel_offset: &BlindingFactor) -> Result<(), Error> {
self.body.validate(Weighting::AsBlock)?;
self.verify_kernel_lock_heights()?;
self.verify_nrd_kernels_for_header_version()?;

View file

@ -16,7 +16,6 @@
use crate::core::block::HeaderVersion;
use crate::core::hash::{DefaultHashable, Hashed};
use crate::core::verifier_cache::VerifierCache;
use crate::core::{committed, Committed};
use crate::libtx::{aggsig, secp_ser};
use crate::ser::{
@ -32,12 +31,10 @@ use std::cmp::Ordering;
use std::cmp::{max, min};
use std::convert::{TryFrom, TryInto};
use std::fmt::Display;
use std::sync::Arc;
use std::{error, fmt};
use util::secp;
use util::secp::pedersen::{Commitment, RangeProof};
use util::static_secp_instance;
use util::RwLock;
use util::ToHex;
/// Fee fields as in fix-fees RFC: { future_use: 20, fee_shift: 4, fee: 40 }
@ -1243,11 +1240,7 @@ impl TransactionBody {
/// Validates all relevant parts of a transaction body. Checks the
/// excess value against the signature as well as range proofs for each
/// output.
pub fn validate(
&self,
weighting: Weighting,
_verifier: Arc<RwLock<dyn VerifierCache>>,
) -> Result<(), Error> {
pub fn validate(&self, weighting: Weighting) -> Result<(), Error> {
self.validate_read(weighting)?;
// Now batch verify all those unverified rangeproofs
@ -1458,14 +1451,9 @@ impl Transaction {
/// Validates all relevant parts of a fully built transaction. Checks the
/// excess value against the signature as well as range proofs for each
/// output.
pub fn validate(
&self,
weighting: Weighting,
verifier: Arc<RwLock<dyn VerifierCache>>,
height: u64,
) -> Result<(), Error> {
pub fn validate(&self, weighting: Weighting, height: u64) -> Result<(), Error> {
self.body.verify_features()?;
self.body.validate(weighting, verifier)?;
self.body.validate(weighting)?;
self.verify_kernel_sums(self.overage(height), self.offset.clone())?;
Ok(())
}

View file

@ -1,103 +0,0 @@
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! VerifierCache trait for batch verifying outputs and kernels.
//! We pass a "caching verifier" into the block validation processing with this.
use crate::core::hash::{Hash, Hashed};
use crate::core::{Output, TxKernel};
use lru_cache::LruCache;
/// Verifier cache for caching expensive verification results.
/// Specifically the following -
/// * kernel signature verification
/// * output rangeproof verification
pub trait VerifierCache: Sync + Send {
	/// Takes a vec of tx kernels and returns those kernels
	/// that have not yet been verified.
	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel>;

	/// Takes a vec of tx outputs and returns those outputs
	/// that have not yet had their rangeproofs verified.
	fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output>;

	/// Adds a vec of tx kernels to the cache (used in conjunction with the filter above).
	fn add_kernel_sig_verified(&mut self, kernels: Vec<TxKernel>);

	/// Adds a vec of outputs to the cache (used in conjunction with the filter above).
	fn add_rangeproof_verified(&mut self, outputs: Vec<Output>);
}
/// An implementation of verifier_cache using lru_cache.
/// Caches tx kernels by kernel hash.
/// Caches outputs by output rangeproof hash (rangeproofs are committed to separately).
pub struct LruVerifierCache {
	// Kernels whose signatures have been verified, keyed by kernel hash.
	kernel_sig_verification_cache: LruCache<Hash, ()>,
	// Outputs whose rangeproofs have been verified, keyed by rangeproof hash.
	rangeproof_verification_cache: LruCache<Hash, ()>,
}
impl LruVerifierCache {
	/// Creates a verifier cache with a fixed capacity of 50,000 entries for
	/// each of the two caches (kernel signatures and rangeproofs).
	/// TODO how big should these caches be?
	/// They need to be *at least* large enough to cover a maxed out block.
	pub fn new() -> LruVerifierCache {
		LruVerifierCache {
			kernel_sig_verification_cache: LruCache::new(50_000),
			rangeproof_verification_cache: LruCache::new(50_000),
		}
	}
}

// `new()` takes no arguments, so provide the conventional `Default`
// implementation as well (clippy: new_without_default).
impl Default for LruVerifierCache {
	fn default() -> Self {
		Self::new()
	}
}
impl VerifierCache for LruVerifierCache {
	/// Returns the subset of `kernels` whose signatures are not yet cached
	/// as verified.
	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel> {
		let mut unverified = Vec::with_capacity(kernels.len());
		for kernel in kernels {
			if !self
				.kernel_sig_verification_cache
				.contains_key(&kernel.hash())
			{
				unverified.push(kernel.clone());
			}
		}
		trace!(
			"lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
			kernels.len(),
			unverified.len()
		);
		unverified
	}

	/// Returns the subset of `outputs` whose rangeproofs are not yet cached
	/// as verified.
	fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output> {
		let mut unverified = Vec::with_capacity(outputs.len());
		for output in outputs {
			if !self
				.rangeproof_verification_cache
				.contains_key(&output.proof.hash())
			{
				unverified.push(output.clone());
			}
		}
		trace!(
			"lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}",
			outputs.len(),
			unverified.len()
		);
		unverified
	}

	/// Records each kernel (by kernel hash) as signature-verified.
	fn add_kernel_sig_verified(&mut self, kernels: Vec<TxKernel>) {
		kernels.into_iter().for_each(|kernel| {
			self.kernel_sig_verification_cache.insert(kernel.hash(), ());
		});
	}

	/// Records each output (by rangeproof hash) as rangeproof-verified.
	fn add_rangeproof_verified(&mut self, outputs: Vec<Output>) {
		outputs.into_iter().for_each(|output| {
			self.rangeproof_verification_cache
				.insert(output.proof.hash(), ());
		});
	}
}

View file

@ -253,20 +253,12 @@ where
// Just a simple test, most exhaustive tests in the core.
#[cfg(test)]
mod test {
use std::sync::Arc;
use util::RwLock;
use super::*;
use crate::core::transaction::Weighting;
use crate::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::global;
use crate::libtx::ProofBuilder;
use keychain::{ExtKeychain, ExtKeychainPath};
fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
}
#[test]
fn blind_simple_tx() {
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
@ -276,8 +268,6 @@ mod test {
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
@ -287,8 +277,7 @@ mod test {
.unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
}
#[test]
@ -300,8 +289,6 @@ mod test {
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
@ -311,8 +298,7 @@ mod test {
.unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
}
#[test]
@ -323,8 +309,6 @@ mod test {
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 4.into() },
&[input(6, key_id1), output(2, key_id2)],
@ -334,7 +318,6 @@ mod test {
.unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
}
}

View file

@ -22,7 +22,6 @@ use crate::core::core::transaction::{
self, FeeFields, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier,
Transaction,
};
use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::core::core::{Committed, CompactBlock};
use crate::core::libtx::build::{self, input, output};
use crate::core::libtx::ProofBuilder;
@ -30,8 +29,7 @@ use crate::core::{global, pow, ser};
use chrono::Duration;
use grin_core as core;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use std::sync::Arc;
use util::{secp, RwLock, ToHex};
use util::{secp, ToHex};
// Setup test with AutomatedTesting chain_type;
fn test_setup() {
@ -39,10 +37,6 @@ fn test_setup() {
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
}
fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
}
#[test]
fn too_large_block() {
test_setup();
@ -72,9 +66,7 @@ fn too_large_block() {
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(&[tx], &keychain, &builder, &prev, &key_id);
assert!(b
.validate(&BlindingFactor::zero(), verifier_cache())
.is_err());
assert!(b.validate(&BlindingFactor::zero()).is_err());
}
#[test]
@ -130,7 +122,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
// Block is invalid at header version 3 if it contains an NRD kernel.
assert_eq!(b.header.version, HeaderVersion(3));
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::NRDKernelPreHF3)
);
@ -151,9 +143,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
// Block is valid at header version 4 (at HF height) if it contains an NRD kernel.
assert_eq!(b.header.height, 3 * TESTING_HARD_FORK_INTERVAL);
assert_eq!(b.header.version, HeaderVersion(4));
assert!(b
.validate(&BlindingFactor::zero(), verifier_cache())
.is_ok());
assert!(b.validate(&BlindingFactor::zero()).is_ok());
let prev_height = 3 * TESTING_HARD_FORK_INTERVAL;
let prev = BlockHeader {
@ -171,9 +161,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
// Block is valid at header version 4 if it contains an NRD kernel.
assert_eq!(b.header.version, HeaderVersion(4));
assert!(b
.validate(&BlindingFactor::zero(), verifier_cache())
.is_ok());
assert!(b.validate(&BlindingFactor::zero()).is_ok());
}
#[test]
@ -216,7 +204,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
// Block is invalid as NRD not enabled.
assert_eq!(b.header.version, HeaderVersion(3));
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::NRDKernelNotEnabled)
);
@ -238,7 +226,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
assert_eq!(b.header.height, 3 * TESTING_HARD_FORK_INTERVAL);
assert_eq!(b.header.version, HeaderVersion(4));
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::NRDKernelNotEnabled)
);
@ -259,7 +247,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
// Block is invalid as NRD not enabled.
assert_eq!(b.header.version, HeaderVersion(4));
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::NRDKernelNotEnabled)
);
}
@ -292,8 +280,7 @@ fn block_with_cut_through() {
// block should have been automatically compacted (including reward
// output) and should still be valid
b.validate(&BlindingFactor::zero(), verifier_cache())
.unwrap();
b.validate(&BlindingFactor::zero()).unwrap();
assert_eq!(b.inputs().len(), 3);
assert_eq!(b.outputs().len(), 3);
}
@ -329,9 +316,7 @@ fn empty_block_with_coinbase_is_valid() {
// the block should be valid here (single coinbase output with corresponding
// txn kernel)
assert!(b
.validate(&BlindingFactor::zero(), verifier_cache())
.is_ok());
assert!(b.validate(&BlindingFactor::zero()).is_ok());
}
#[test]
@ -357,7 +342,7 @@ fn remove_coinbase_output_flag() {
.verify_kernel_sums(b.header.overage(), b.header.total_kernel_offset())
.is_ok());
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::CoinbaseSumMismatch)
);
}
@ -388,7 +373,7 @@ fn remove_coinbase_kernel_flag() {
// Also results in the block no longer validating correctly
// because the message being signed on each tx kernel includes the kernel features.
assert_eq!(
b.validate(&BlindingFactor::zero(), verifier_cache()),
b.validate(&BlindingFactor::zero()),
Err(Error::Transaction(transaction::Error::IncorrectSignature))
);
}
@ -778,7 +763,7 @@ fn same_amount_outputs_copy_range_proof() {
// block should have been automatically compacted (including reward
// output) and should still be valid
match b.validate(&BlindingFactor::zero(), verifier_cache()) {
match b.validate(&BlindingFactor::zero()) {
Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {}
_ => panic!("Bad range proof should be invalid"),
}
@ -830,7 +815,7 @@ fn wrong_amount_range_proof() {
// block should have been automatically compacted (including reward
// output) and should still be valid
match b.validate(&BlindingFactor::zero(), verifier_cache()) {
match b.validate(&BlindingFactor::zero()) {
Err(Error::Transaction(transaction::Error::Secp(secp::Error::InvalidRangeProof))) => {}
_ => panic!("Bad range proof should be invalid"),
}
@ -907,7 +892,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
// The block should fail validation due to cut-through.
assert_eq!(
block.validate(&BlindingFactor::zero(), verifier_cache()),
block.validate(&BlindingFactor::zero()),
Err(Error::Transaction(transaction::Error::CutThrough))
);
@ -928,7 +913,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
.replace_outputs(outputs);
// Block validates successfully after applying cut-through.
block.validate(&BlindingFactor::zero(), verifier_cache())?;
block.validate(&BlindingFactor::zero())?;
// Block validates via lightweight "read" validation.
block.validate_read()?;
@ -973,7 +958,7 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
// The block should fail validation due to cut-through.
assert_eq!(
block.validate(&BlindingFactor::zero(), verifier_cache()),
block.validate(&BlindingFactor::zero()),
Err(Error::Transaction(transaction::Error::CutThrough))
);
@ -994,7 +979,7 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
.replace_outputs(outputs);
// Block validates successfully after applying cut-through.
block.validate(&BlindingFactor::zero(), verifier_cache())?;
block.validate(&BlindingFactor::zero())?;
// Block validates via lightweight "read" validation.
block.validate_read()?;

View file

@ -19,7 +19,6 @@ pub mod common;
use self::core::core::block::BlockHeader;
use self::core::core::block::Error::KernelLockHeight;
use self::core::core::hash::{Hashed, ZERO_HASH};
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{
aggregate, deaggregate, FeeFields, KernelFeatures, Output, OutputFeatures, OutputIdentifier,
Transaction, TxKernel, Weighting,
@ -30,9 +29,7 @@ use self::core::{global, ser};
use crate::common::{new_block, tx1i1o, tx1i2o, tx2i1o};
use grin_core as core;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use std::sync::Arc;
use util::static_secp_instance;
use util::RwLock;
// Setup test with AutomatedTesting chain_type;
fn test_setup() {
@ -141,10 +138,6 @@ fn test_zero_commit_fails() {
assert!(res.is_err());
}
fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
}
#[test]
fn build_tx_kernel() {
test_setup();
@ -165,8 +158,7 @@ fn build_tx_kernel() {
// check the tx is valid
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
// check the kernel is also itself valid
assert_eq!(tx.kernels().len(), 1);
@ -229,15 +221,9 @@ fn build_two_half_kernels() {
.unwrap();
let height = 42; // arbitrary
assert_eq!(
tx1.validate(Weighting::AsTransaction, verifier_cache(), height),
Ok(()),
);
assert_eq!(tx1.validate(Weighting::AsTransaction, height), Ok(()),);
assert_eq!(
tx2.validate(Weighting::AsTransaction, verifier_cache(), height),
Ok(()),
);
assert_eq!(tx2.validate(Weighting::AsTransaction, height), Ok(()),);
// The transactions share an identical kernel.
assert_eq!(tx1.kernels()[0], tx2.kernels()[0]);
@ -262,21 +248,13 @@ fn transaction_cut_through() {
let tx2 = tx2i1o();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
}
// Attempt to deaggregate a multi-kernel transaction in a different way
@ -288,46 +266,30 @@ fn multi_kernel_transaction_deaggregation() {
let tx3 = tx1i1o();
let tx4 = tx1i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, height).is_ok());
let tx1234 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx1234
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx34
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1234.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx12.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx34.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx34 = deaggregate(tx1234.clone(), &[tx12.clone()]).unwrap();
assert!(deaggregated_tx34
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx34, deaggregated_tx34);
let deaggregated_tx12 = deaggregate(tx1234, &[tx34]).unwrap();
assert!(deaggregated_tx12
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx12, deaggregated_tx12);
}
@ -339,32 +301,20 @@ fn multi_kernel_transaction_deaggregation_2() {
let tx2 = tx1i1o();
let tx3 = tx1i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx123
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx123.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx12.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx3 = deaggregate(tx123, &[tx12]).unwrap();
assert!(deaggregated_tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx3, deaggregated_tx3);
}
@ -376,33 +326,21 @@ fn multi_kernel_transaction_deaggregation_3() {
let tx2 = tx1i1o();
let tx3 = tx1i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(&[tx1, tx3]).unwrap();
let tx2 = aggregate(&[tx2]).unwrap();
assert!(tx123
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx123.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx13 = deaggregate(tx123, &[tx2]).unwrap();
assert!(deaggregated_tx13
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx13, deaggregated_tx13);
}
@ -416,24 +354,12 @@ fn multi_kernel_transaction_deaggregation_4() {
let tx4 = tx1i1o();
let tx5 = tx1i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, height).is_ok());
let tx12345 = aggregate(&[
tx1.clone(),
@ -443,13 +369,11 @@ fn multi_kernel_transaction_deaggregation_4() {
tx5.clone(),
])
.unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12345.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx5 = deaggregate(tx12345, &[tx1, tx2, tx3, tx4]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@ -463,24 +387,12 @@ fn multi_kernel_transaction_deaggregation_5() {
let tx4 = tx1i1o();
let tx5 = tx1i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, height).is_ok());
let tx12345 = aggregate(&[
tx1.clone(),
@ -493,13 +405,11 @@ fn multi_kernel_transaction_deaggregation_5() {
let tx12 = aggregate(&[tx1, tx2]).unwrap();
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12345.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx5 = deaggregate(tx12345, &[tx12, tx34]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@ -511,34 +421,26 @@ fn basic_transaction_deaggregation() {
let tx1 = tx1i2o();
let tx2 = tx2i1o();
let vc = verifier_cache();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx1.validate(Weighting::AsTransaction, height).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, height).is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(&[tx1.clone(), tx2.clone()]).unwrap();
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3.validate(Weighting::AsTransaction, height).is_ok());
let deaggregated_tx1 = deaggregate(tx3.clone(), &[tx2.clone()]).unwrap();
assert!(deaggregated_tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx1, deaggregated_tx1);
let deaggregated_tx2 = deaggregate(tx3, &[tx1]).unwrap();
assert!(deaggregated_tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.validate(Weighting::AsTransaction, height)
.is_ok());
assert_eq!(tx2, deaggregated_tx2);
}
@ -569,9 +471,7 @@ fn hash_output() {
fn blind_tx() {
let btx = tx2i1o();
let height = 42; // arbitrary
assert!(btx
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
assert!(btx.validate(Weighting::AsTransaction, height).is_ok());
// Ignored for bullet proofs, because calling range_proof_info
// with a bullet proof causes painful errors
@ -642,9 +542,7 @@ fn tx_build_exchange() {
.unwrap();
let height = 42; // arbitrary
tx_final
.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
tx_final.validate(Weighting::AsTransaction, height).unwrap();
}
#[test]
@ -658,8 +556,7 @@ fn reward_empty_block() {
let b = new_block(&[], &keychain, &builder, &previous_header, &key_id);
b.validate(&BlindingFactor::zero(), verifier_cache())
.unwrap();
b.validate(&BlindingFactor::zero()).unwrap();
}
#[test]
@ -669,19 +566,13 @@ fn reward_with_tx_block() {
let builder = ProofBuilder::new(&keychain);
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let vc = verifier_cache();
let tx1 = tx2i1o();
let previous_header = BlockHeader::default();
tx1.validate(
Weighting::AsTransaction,
vc.clone(),
previous_header.height + 1,
)
.unwrap();
tx1.validate(Weighting::AsTransaction, previous_header.height + 1)
.unwrap();
let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id);
block.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
block.validate(&BlindingFactor::zero()).unwrap();
}
#[test]
@ -691,15 +582,13 @@ fn simple_block() {
let builder = ProofBuilder::new(&keychain);
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let vc = verifier_cache();
let tx1 = tx2i1o();
let tx2 = tx1i1o();
let previous_header = BlockHeader::default();
let b = new_block(&[tx1, tx2], &keychain, &builder, &previous_header, &key_id);
b.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
b.validate(&BlindingFactor::zero()).unwrap();
}
#[test]
@ -711,8 +600,6 @@ fn test_block_with_timelocked_tx() {
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let vc = verifier_cache();
// first check we can add a timelocked tx where lock height matches current
// block height and that the resulting block is valid
let tx1 = build::transaction(
@ -735,7 +622,7 @@ fn test_block_with_timelocked_tx() {
&previous_header,
&key_id3.clone(),
);
b.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
b.validate(&BlindingFactor::zero()).unwrap();
// now try adding a timelocked tx where lock height is greater than current
// block height
@ -753,7 +640,7 @@ fn test_block_with_timelocked_tx() {
let previous_header = BlockHeader::default();
let b = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id3);
match b.validate(&BlindingFactor::zero(), vc.clone()) {
match b.validate(&BlindingFactor::zero()) {
Err(KernelLockHeight(height)) => {
assert_eq!(height, 2);
}
@ -766,8 +653,7 @@ pub fn test_verify_1i1o_sig() {
test_setup();
let tx = tx1i1o();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
}
#[test]
@ -775,6 +661,5 @@ pub fn test_verify_2i1o_sig() {
test_setup();
let tx = tx2i1o();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
tx.validate(Weighting::AsTransaction, height).unwrap();
}

View file

@ -17,7 +17,6 @@
pub mod common;
use crate::common::tx1i10_v2_compatible;
use crate::core::core::transaction::{self, Error};
use crate::core::core::verifier_cache::LruVerifierCache;
use crate::core::core::{
FeeFields, KernelFeatures, Output, OutputFeatures, Transaction, TxKernel, Weighting,
};
@ -27,8 +26,6 @@ use crate::core::libtx::{build, tx_fee};
use crate::core::{consensus, ser};
use grin_core as core;
use keychain::{ExtKeychain, Keychain};
use std::sync::Arc;
use util::RwLock;
// We use json serialization between wallet->node when pushing transactions to the network.
// This test ensures we exercise this serialization/deserialization code.
@ -111,12 +108,10 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
)
.expect("valid tx");
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction should fail validation due to cut-through.
let height = 42; // arbitrary
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
tx.validate(Weighting::AsTransaction, height),
Err(Error::CutThrough),
);
@ -134,7 +129,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
.replace_outputs(outputs);
// Transaction validates successfully after applying cut-through.
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height)?;
tx.validate(Weighting::AsTransaction, height)?;
// Transaction validates via lightweight "read" validation as well.
tx.validate_read()?;
@ -173,12 +168,10 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
)
.expect("valid tx");
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction should fail validation due to cut-through.
let height = 42; // arbitrary
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
tx.validate(Weighting::AsTransaction, height),
Err(Error::CutThrough),
);
@ -196,7 +189,7 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
.replace_outputs(outputs);
// Transaction validates successfully after applying cut-through.
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height)?;
tx.validate(Weighting::AsTransaction, height)?;
// Transaction validates via lightweight "read" validation as well.
tx.validate_read()?;

View file

@ -1,61 +0,0 @@
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod common;
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{Output, OutputFeatures};
use self::core::libtx::proof;
use grin_core as core;
use keychain::{ExtKeychain, Keychain, SwitchCommitmentType};
use std::sync::Arc;
use util::RwLock;
fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
Arc::new(RwLock::new(LruVerifierCache::new()))
}
#[test]
fn test_verifier_cache_rangeproofs() {
let cache = verifier_cache();
let keychain = ExtKeychain::from_random_seed(false).unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let switch = SwitchCommitmentType::Regular;
let commit = keychain.commit(5, &key_id, switch).unwrap();
let builder = proof::ProofBuilder::new(&keychain);
let proof = proof::create(&keychain, &builder, 5, &key_id, switch, commit, None).unwrap();
let out = Output::new(OutputFeatures::Plain, commit, proof);
// Check our output is not verified according to the cache.
{
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&[out]);
assert_eq!(unverified, vec![out]);
}
// Add our output to the cache.
{
let mut cache = cache.write();
cache.add_rangeproof_verified(vec![out]);
}
// Check it shows as verified according to the cache.
{
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&[out]);
assert_eq!(unverified, vec![]);
}
}

View file

@ -18,11 +18,9 @@
use self::core::core::hash::{Hash, Hashed};
use self::core::core::id::{ShortId, ShortIdentifiable};
use self::core::core::transaction;
use self::core::core::verifier_cache::VerifierCache;
use self::core::core::{
Block, BlockHeader, BlockSums, Committed, OutputIdentifier, Transaction, TxKernel, Weighting,
};
use self::util::RwLock;
use crate::types::{BlockChain, PoolEntry, PoolError};
use grin_core as core;
use grin_util as util;
@ -31,29 +29,25 @@ use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use util::static_secp_instance;
pub struct Pool<B, V>
pub struct Pool<B>
where
B: BlockChain,
V: VerifierCache,
{
/// Entries in the pool (tx + info + timer) in simple insertion order.
pub entries: Vec<PoolEntry>,
/// The blockchain
pub blockchain: Arc<B>,
pub verifier_cache: Arc<RwLock<V>>,
pub name: String,
}
impl<B, V> Pool<B, V>
impl<B> Pool<B>
where
B: BlockChain,
V: VerifierCache + 'static,
{
pub fn new(chain: Arc<B>, verifier_cache: Arc<RwLock<V>>, name: String) -> Self {
pub fn new(chain: Arc<B>, name: String) -> Self {
Pool {
entries: vec![],
blockchain: chain,
verifier_cache,
name,
}
}
@ -162,11 +156,7 @@ where
// Validate the single aggregate transaction "as pool", not subject to tx weight limits.
let header = self.blockchain.chain_head()?;
tx.validate(
Weighting::NoLimit,
self.verifier_cache.clone(),
header.height,
)?;
tx.validate(Weighting::NoLimit, header.height)?;
Ok(Some(tx))
}
@ -234,7 +224,7 @@ where
) -> Result<BlockSums, PoolError> {
// Validate the tx, conditionally checking against weight limits,
// based on weight verification type.
tx.validate(weighting, self.verifier_cache.clone(), header.height)?;
tx.validate(weighting, header.height)?;
// Validate the tx against current chain state.
// Check all inputs are in the current UTXO set.
@ -414,12 +404,9 @@ where
// Otherwise discard and let the next block pick this tx up.
let bucket = &tx_buckets[pos];
if let Ok(new_bucket) = bucket.aggregate_with_tx(
entry.tx.clone(),
weighting,
self.verifier_cache.clone(),
height,
) {
if let Ok(new_bucket) =
bucket.aggregate_with_tx(entry.tx.clone(), weighting, height)
{
if new_bucket.fee_rate >= bucket.fee_rate {
// Only aggregate if it would not reduce the fee_rate ratio.
tx_buckets[pos] = new_bucket;
@ -536,13 +523,12 @@ impl Bucket {
&self,
new_tx: Transaction,
weighting: Weighting,
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
height: u64,
) -> Result<Bucket, PoolError> {
let mut raw_txs = self.raw_txs.clone();
raw_txs.push(new_tx);
let agg_tx = transaction::aggregate(&raw_txs)?;
agg_tx.validate(weighting, verifier_cache, height)?;
agg_tx.validate(weighting, height)?;
Ok(Bucket {
fee_rate: agg_tx.fee_rate(height),
raw_txs: raw_txs,

View file

@ -19,7 +19,6 @@
use self::core::core::hash::{Hash, Hashed};
use self::core::core::id::ShortId;
use self::core::core::verifier_cache::VerifierCache;
use self::core::core::{
transaction, Block, BlockHeader, HeaderVersion, OutputIdentifier, Transaction, Weighting,
};
@ -34,51 +33,38 @@ use std::collections::VecDeque;
use std::sync::Arc;
/// Transaction pool implementation.
pub struct TransactionPool<B, P, V>
pub struct TransactionPool<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache,
{
/// Pool Config
pub config: PoolConfig,
/// Our transaction pool.
pub txpool: Pool<B, V>,
pub txpool: Pool<B>,
/// Our Dandelion "stempool".
pub stempool: Pool<B, V>,
pub stempool: Pool<B>,
/// Cache of previous txs in case of a re-org.
pub reorg_cache: Arc<RwLock<VecDeque<PoolEntry>>>,
/// The blockchain
pub blockchain: Arc<B>,
pub verifier_cache: Arc<RwLock<V>>,
/// The pool adapter
pub adapter: Arc<P>,
}
impl<B, P, V> TransactionPool<B, P, V>
impl<B, P> TransactionPool<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
/// Create a new transaction pool
pub fn new(
config: PoolConfig,
chain: Arc<B>,
verifier_cache: Arc<RwLock<V>>,
adapter: Arc<P>,
) -> Self {
pub fn new(config: PoolConfig, chain: Arc<B>, adapter: Arc<P>) -> Self {
TransactionPool {
config,
txpool: Pool::new(chain.clone(), verifier_cache.clone(), "txpool".to_string()),
stempool: Pool::new(
chain.clone(),
verifier_cache.clone(),
"stempool".to_string(),
),
txpool: Pool::new(chain.clone(), "txpool".to_string()),
stempool: Pool::new(chain.clone(), "stempool".to_string()),
reorg_cache: Arc::new(RwLock::new(VecDeque::new())),
blockchain: chain,
verifier_cache,
adapter,
}
}
@ -193,12 +179,8 @@ where
// Make sure the transaction is valid before anything else.
// Validate tx accounting for max tx weight.
tx.validate(
Weighting::AsTransaction,
self.verifier_cache.clone(),
header.height,
)
.map_err(PoolError::InvalidTx)?;
tx.validate(Weighting::AsTransaction, header.height)
.map_err(PoolError::InvalidTx)?;
// Check the tx lock_time is valid based on current chain state.
self.blockchain.verify_tx_lock_height(tx)?;
@ -279,11 +261,7 @@ where
// Validate the tx to ensure our converted inputs are correct.
let header = self.chain_head()?;
tx.validate(
Weighting::AsTransaction,
self.verifier_cache.clone(),
header.height,
)?;
tx.validate(Weighting::AsTransaction, header.height)?;
Ok(PoolEntry::new(tx, entry.src))
}

View file

@ -15,7 +15,6 @@
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::PoolError;
@ -39,15 +38,11 @@ fn test_transaction_pool_block_building() -> Result<(), PoolError> {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// mine enough blocks to get past HF4
add_some_blocks(&chain, 4 * 3, &keychain);

View file

@ -16,7 +16,6 @@
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::util::RwLock;
@ -39,15 +38,11 @@ fn test_block_building_max_weight() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// mine past HF4 to see effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);

View file

@ -15,7 +15,6 @@
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::util::RwLock;
@ -38,15 +37,11 @@ fn test_transaction_pool_block_reconciliation() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// mine past HF4 to see effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);

View file

@ -14,11 +14,9 @@
pub mod common;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
@ -39,15 +37,11 @@ fn test_coinbase_maturity() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// Add a single block, introducing coinbase output to be spent later.
add_block(&chain, &[], &keychain);

View file

@ -18,7 +18,6 @@ use self::chain::types::{NoopAdapter, Options};
use self::chain::Chain;
use self::core::consensus;
use self::core::core::hash::Hash;
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{
Block, BlockHeader, BlockSums, Inputs, KernelFeatures, OutputIdentifier, Transaction, TxKernel,
};
@ -29,13 +28,11 @@ use self::core::pow;
use self::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Keychain};
use self::pool::types::*;
use self::pool::TransactionPool;
use self::util::RwLock;
use chrono::Duration;
use grin_chain as chain;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::convert::TryInto;
use std::fs;
use std::sync::Arc;
@ -52,13 +49,11 @@ where
}
pub fn init_chain(dir_name: &str, genesis: Block) -> Chain {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
Chain::init(
dir_name.to_string(),
Arc::new(NoopAdapter {}),
genesis,
pow::verify_size,
verifier_cache,
false,
)
.unwrap()
@ -157,13 +152,9 @@ impl BlockChain for ChainAdapter {
}
}
pub fn init_transaction_pool<B, V>(
chain: Arc<B>,
verifier_cache: Arc<RwLock<V>>,
) -> TransactionPool<B, NoopPoolAdapter, V>
pub fn init_transaction_pool<B>(chain: Arc<B>) -> TransactionPool<B, NoopPoolAdapter>
where
B: BlockChain,
V: VerifierCache + 'static,
{
TransactionPool::new(
PoolConfig {
@ -174,7 +165,6 @@ where
mineable_max_weight: 10_000,
},
chain.clone(),
verifier_cache.clone(),
Arc::new(NoopPoolAdapter {}),
)
}

View file

@ -16,7 +16,6 @@ pub mod common;
use self::core::consensus;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight, TxKernel};
use self::core::global;
use self::core::libtx::aggsig;
@ -44,15 +43,11 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
add_some_blocks(&chain, 3, &keychain);

View file

@ -15,7 +15,6 @@
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
@ -41,15 +40,11 @@ fn test_nrd_kernels_disabled() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);

View file

@ -15,7 +15,6 @@
pub mod common;
use self::core::consensus;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
@ -42,15 +41,11 @@ fn test_nrd_kernels_enabled() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache,
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// Add some blocks.
add_some_blocks(&chain, 3, &keychain);

View file

@ -14,12 +14,10 @@
pub mod common;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{transaction, Weighting};
use self::core::global;
use self::keychain::{ExtKeychain, Keychain};
use self::pool::TxSource;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
@ -40,15 +38,11 @@ fn test_the_transaction_pool() {
let genesis = genesis_block(&keychain);
let chain = Arc::new(init_chain(db_root, genesis));
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Initialize a new pool with our chain adapter.
let mut pool = init_transaction_pool(
Arc::new(ChainAdapter {
chain: chain.clone(),
}),
verifier_cache.clone(),
);
let mut pool = init_transaction_pool(Arc::new(ChainAdapter {
chain: chain.clone(),
}));
// mine past HF4 to see effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);
@ -194,9 +188,7 @@ fn test_the_transaction_pool() {
let agg_tx = transaction::aggregate(&[tx1.clone(), tx2.clone(), tx4]).unwrap();
let height = 12 + 1;
agg_tx
.validate(Weighting::AsTransaction, verifier_cache.clone(), height)
.unwrap();
agg_tx.validate(Weighting::AsTransaction, height).unwrap();
pool.add_to_pool(test_source(), agg_tx, false, &header)
.unwrap();

View file

@ -30,7 +30,6 @@ use crate::common::hooks::{ChainEvents, NetEvents};
use crate::common::types::{ChainValidationMode, DandelionEpoch, ServerConfig};
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::transaction::Transaction;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
BlockHeader, BlockSums, CompactBlock, Inputs, OutputIdentifier, Segment, SegmentIdentifier,
TxKernel,
@ -56,26 +55,23 @@ const RANGEPROOF_SEGMENT_HEIGHT_RANGE: Range<u8> = 7..12;
/// Implementation of the NetAdapter for the . Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
/// implementations.
pub struct NetToChainAdapter<B, P, V>
pub struct NetToChainAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
sync_state: Arc<SyncState>,
chain: Weak<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P, V>>>,
verifier_cache: Arc<RwLock<V>>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P>>>,
peers: OneTime<Weak<p2p::Peers>>,
config: ServerConfig,
hooks: Vec<Box<dyn NetEvents + Send + Sync>>,
}
impl<B, P, V> p2p::ChainAdapter for NetToChainAdapter<B, P, V>
impl<B, P> p2p::ChainAdapter for NetToChainAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
fn total_difficulty(&self) -> Result<Difficulty, chain::Error> {
Ok(self.chain().head()?.total_difficulty)
@ -245,10 +241,7 @@ where
};
if let Ok(prev) = self.chain().get_previous_header(&cb.header) {
if block
.validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok()
{
if block.validate(&prev.total_kernel_offset).is_ok() {
debug!(
"successfully hydrated block: {} at {} ({})",
block.header.hash(),
@ -552,18 +545,16 @@ where
}
}
impl<B, P, V> NetToChainAdapter<B, P, V>
impl<B, P> NetToChainAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
/// Construct a new NetToChainAdapter instance
pub fn new(
sync_state: Arc<SyncState>,
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P, V>>>,
verifier_cache: Arc<RwLock<V>>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P>>>,
config: ServerConfig,
hooks: Vec<Box<dyn NetEvents + Send + Sync>>,
) -> Self {
@ -571,7 +562,6 @@ where
sync_state,
chain: Arc::downgrade(&chain),
tx_pool,
verifier_cache,
peers: OneTime::new(),
config,
hooks,
@ -787,22 +777,20 @@ where
/// Implementation of the ChainAdapter for the network. Gets notified when the
/// blockchain accepted a new block, asking the pool to update its state and
/// the network to broadcast the block
pub struct ChainToPoolAndNetAdapter<B, P, V>
pub struct ChainToPoolAndNetAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
tx_pool: Arc<RwLock<pool::TransactionPool<B, P, V>>>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P>>>,
peers: OneTime<Weak<p2p::Peers>>,
hooks: Vec<Box<dyn ChainEvents + Send + Sync>>,
}
impl<B, P, V> ChainAdapter for ChainToPoolAndNetAdapter<B, P, V>
impl<B, P> ChainAdapter for ChainToPoolAndNetAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
fn block_accepted(&self, b: &core::Block, status: BlockStatus, opts: Options) {
// Trigger all registered "on_block_accepted" hooks (logging and webhooks).
@ -845,15 +833,14 @@ where
}
}
impl<B, P, V> ChainToPoolAndNetAdapter<B, P, V>
impl<B, P> ChainToPoolAndNetAdapter<B, P>
where
B: BlockChain,
P: PoolAdapter,
V: VerifierCache + 'static,
{
/// Construct a ChainToPoolAndNetAdapter instance.
pub fn new(
tx_pool: Arc<RwLock<pool::TransactionPool<B, P, V>>>,
tx_pool: Arc<RwLock<pool::TransactionPool<B, P>>>,
hooks: Vec<Box<dyn ChainEvents + Send + Sync>>,
) -> Self {
ChainToPoolAndNetAdapter {

View file

@ -21,10 +21,9 @@ use std::time::{Duration, Instant};
use crate::common::adapters::DandelionAdapter;
use crate::core::core::hash::Hashed;
use crate::core::core::transaction;
use crate::core::core::verifier_cache::VerifierCache;
use crate::pool::{BlockChain, DandelionConfig, Pool, PoolEntry, PoolError, TxSource};
use crate::util::StopState;
use crate::{ServerTxPool, ServerVerifierCache};
use crate::ServerTxPool;
/// A process to monitor transactions in the stempool.
/// With Dandelion, transaction can be broadcasted in stem or fluff phase.
@ -38,7 +37,6 @@ pub fn monitor_transactions(
dandelion_config: DandelionConfig,
tx_pool: ServerTxPool,
adapter: Arc<dyn DandelionAdapter>,
verifier_cache: ServerVerifierCache,
stop_state: Arc<StopState>,
) -> std::io::Result<thread::JoinHandle<()>> {
debug!("Started Dandelion transaction monitor.");
@ -58,15 +56,11 @@ pub fn monitor_transactions(
if last_run.elapsed() > run_interval {
if !adapter.is_stem() {
let _ = process_fluff_phase(
&dandelion_config,
&tx_pool,
&adapter,
&verifier_cache,
)
.map_err(|e| {
error!("dand_mon: Problem processing fluff phase. {:?}", e);
});
let _ = process_fluff_phase(&dandelion_config, &tx_pool, &adapter).map_err(
|e| {
error!("dand_mon: Problem processing fluff phase. {:?}", e);
},
);
}
// Now find all expired entries based on embargo timer.
@ -91,10 +85,9 @@ pub fn monitor_transactions(
// Query the pool for transactions older than the cutoff.
// Used for both periodic fluffing and handling expired embargo timer.
fn select_txs_cutoff<B, V>(pool: &Pool<B, V>, cutoff_secs: u16) -> Vec<PoolEntry>
fn select_txs_cutoff<B>(pool: &Pool<B>, cutoff_secs: u16) -> Vec<PoolEntry>
where
B: BlockChain,
V: VerifierCache,
{
let cutoff = Utc::now().timestamp() - cutoff_secs as i64;
pool.entries
@ -108,7 +101,6 @@ fn process_fluff_phase(
dandelion_config: &DandelionConfig,
tx_pool: &ServerTxPool,
adapter: &Arc<dyn DandelionAdapter>,
verifier_cache: &ServerVerifierCache,
) -> Result<(), PoolError> {
// Take a write lock on the txpool for the duration of this processing.
let mut tx_pool = tx_pool.write();
@ -147,11 +139,7 @@ fn process_fluff_phase(
);
let agg_tx = transaction::aggregate(&fluffable_txs)?;
agg_tx.validate(
transaction::Weighting::AsTransaction,
verifier_cache.clone(),
header.height,
)?;
agg_tx.validate(transaction::Weighting::AsTransaction, header.height)?;
tx_pool.add_to_pool(TxSource::Fluff, agg_tx, false, &header)?;
Ok(())

View file

@ -41,7 +41,6 @@ use crate::common::stats::{
};
use crate::common::types::{Error, ServerConfig, StratumServerConfig};
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::LruVerifierCache;
use crate::core::ser::ProtocolVersion;
use crate::core::{consensus, genesis, global, pow};
use crate::grin::{dandelion_monitor, seed, sync};
@ -55,10 +54,7 @@ use crate::util::{RwLock, StopState};
use grin_util::logger::LogEntry;
/// Arcified thread-safe TransactionPool with type parameters used by server components
pub type ServerTxPool =
Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter, LruVerifierCache>>>;
/// Arcified thread-safe LruVerifierCache
pub type ServerVerifierCache = Arc<RwLock<LruVerifierCache>>;
pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>;
/// Grin server holding internal structures.
pub struct Server {
@ -70,9 +66,6 @@ pub struct Server {
pub chain: Arc<chain::Chain>,
/// in-memory transaction pool
pub tx_pool: ServerTxPool,
/// Shared cache for verification results when
/// verifying rangeproof and kernel signatures.
verifier_cache: ServerVerifierCache,
/// Whether we're currently syncing
pub sync_state: Arc<SyncState>,
/// To be passed around to collect stats and info
@ -165,16 +158,11 @@ impl Server {
let stop_state = Arc::new(StopState::new());
// Shared cache for verification results.
// We cache rangeproof verification and kernel signature verification.
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone()));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
verifier_cache.clone(),
pool_net_adapter.clone(),
)));
@ -199,7 +187,6 @@ impl Server {
chain_adapter.clone(),
genesis.clone(),
pow::verify_size,
verifier_cache.clone(),
archive_mode,
)?);
@ -209,7 +196,6 @@ impl Server {
sync_state.clone(),
shared_chain.clone(),
tx_pool.clone(),
verifier_cache.clone(),
config.clone(),
init_net_hooks(&config),
));
@ -320,7 +306,6 @@ impl Server {
config.dandelion_config.clone(),
tx_pool.clone(),
pool_net_adapter,
verifier_cache.clone(),
stop_state.clone(),
)?;
@ -330,7 +315,6 @@ impl Server {
p2p: p2p_server,
chain: shared_chain,
tx_pool,
verifier_cache,
sync_state,
state_info: ServerStateInfo {
..Default::default()
@ -377,7 +361,6 @@ impl Server {
config,
self.chain.clone(),
self.tx_pool.clone(),
self.verifier_cache.clone(),
self.state_info.stratum_stats.clone(),
);
let _ = thread::Builder::new()
@ -415,7 +398,6 @@ impl Server {
config,
self.chain.clone(),
self.tx_pool.clone(),
self.verifier_cache.clone(),
stop_state,
sync_state,
);

View file

@ -41,4 +41,4 @@ mod mining;
pub use crate::common::stats::{DiffBlock, PeerStats, ServerStats, StratumStats, WorkerStats};
pub use crate::common::types::{ServerConfig, StratumServerConfig};
pub use crate::grin::server::{Server, ServerTxPool, ServerVerifierCache};
pub use crate::grin::server::{Server, ServerTxPool};

View file

@ -30,7 +30,7 @@ use crate::core::libtx::secp_ser;
use crate::core::libtx::ProofBuilder;
use crate::core::{consensus, core, global};
use crate::keychain::{ExtKeychain, Identifier, Keychain};
use crate::{ServerTxPool, ServerVerifierCache};
use crate::ServerTxPool;
/// Fees in block to use for coinbase amount calculation
/// (Duplicated from Grin wallet project)
@ -70,19 +70,12 @@ pub struct CbData {
pub fn get_block(
chain: &Arc<chain::Chain>,
tx_pool: &ServerTxPool,
verifier_cache: ServerVerifierCache,
key_id: Option<Identifier>,
wallet_listener_url: Option<String>,
) -> (core::Block, BlockFees) {
let wallet_retry_interval = 5;
// get the latest chain state and build a block on top of it
let mut result = build_block(
chain,
tx_pool,
verifier_cache.clone(),
key_id.clone(),
wallet_listener_url.clone(),
);
let mut result = build_block(chain, tx_pool, key_id.clone(), wallet_listener_url.clone());
while let Err(e) = result {
let mut new_key_id = key_id.to_owned();
match e {
@ -116,13 +109,7 @@ pub fn get_block(
thread::sleep(Duration::from_millis(100));
}
result = build_block(
chain,
tx_pool,
verifier_cache.clone(),
new_key_id,
wallet_listener_url.clone(),
);
result = build_block(chain, tx_pool, new_key_id, wallet_listener_url.clone());
}
return result.unwrap();
}
@ -132,7 +119,6 @@ pub fn get_block(
fn build_block(
chain: &Arc<chain::Chain>,
tx_pool: &ServerTxPool,
verifier_cache: ServerVerifierCache,
key_id: Option<Identifier>,
wallet_listener_url: Option<String>,
) -> Result<(core::Block, BlockFees), Error> {
@ -178,7 +164,7 @@ fn build_block(
let mut b = core::Block::from_reward(&head, &txs, output, kernel, difficulty.difficulty)?;
// making sure we're not spending time mining a useless block
b.validate(&head.total_kernel_offset, verifier_cache)?;
b.validate(&head.total_kernel_offset)?;
b.header.pow.nonce = thread_rng().gen();
b.header.pow.secondary_scaling = difficulty.secondary_scaling;

View file

@ -41,7 +41,7 @@ use crate::core::{pow, ser};
use crate::keychain;
use crate::mining::mine_block;
use crate::util::ToHex;
use crate::{ServerTxPool, ServerVerifierCache};
use crate::ServerTxPool;
type Tx = mpsc::UnboundedSender<String>;
@ -522,12 +522,7 @@ impl Handler {
self.workers.broadcast(job_request_json);
}
pub fn run(
&self,
config: &StratumServerConfig,
tx_pool: &ServerTxPool,
verifier_cache: ServerVerifierCache,
) {
pub fn run(&self, config: &StratumServerConfig, tx_pool: &ServerTxPool) {
debug!("Run main loop");
let mut deadline: i64 = 0;
let mut head = self.chain.head().unwrap();
@ -558,7 +553,6 @@ impl Handler {
let (new_block, block_fees) = mine_block::get_block(
&self.chain,
tx_pool,
verifier_cache.clone(),
state.current_key_id.clone(),
wallet_listener_url,
);
@ -826,7 +820,6 @@ pub struct StratumServer {
config: StratumServerConfig,
chain: Arc<chain::Chain>,
pub tx_pool: ServerTxPool,
verifier_cache: ServerVerifierCache,
sync_state: Arc<SyncState>,
stratum_stats: Arc<RwLock<StratumStats>>,
}
@ -837,7 +830,6 @@ impl StratumServer {
config: StratumServerConfig,
chain: Arc<chain::Chain>,
tx_pool: ServerTxPool,
verifier_cache: ServerVerifierCache,
stratum_stats: Arc<RwLock<StratumStats>>,
) -> StratumServer {
StratumServer {
@ -845,7 +837,6 @@ impl StratumServer {
config,
chain,
tx_pool,
verifier_cache,
sync_state: Arc::new(SyncState::new()),
stratum_stats: stratum_stats,
}
@ -897,7 +888,7 @@ impl StratumServer {
thread::sleep(Duration::from_millis(50));
}
handler.run(&self.config, &self.tx_pool, self.verifier_cache.clone());
handler.run(&self.config, &self.tx_pool);
} // fn run_loop()
} // StratumServer

View file

@ -27,7 +27,7 @@ use crate::core::core::{Block, BlockHeader};
use crate::core::global;
use crate::mining::mine_block;
use crate::util::StopState;
use crate::{ServerTxPool, ServerVerifierCache};
use crate::ServerTxPool;
use grin_chain::SyncState;
use std::thread;
use std::time::Duration;
@ -36,7 +36,6 @@ pub struct Miner {
config: StratumServerConfig,
chain: Arc<chain::Chain>,
tx_pool: ServerTxPool,
verifier_cache: ServerVerifierCache,
stop_state: Arc<StopState>,
sync_state: Arc<SyncState>,
// Just to hold the port we're on, so this miner can be identified
@ -51,7 +50,6 @@ impl Miner {
config: StratumServerConfig,
chain: Arc<chain::Chain>,
tx_pool: ServerTxPool,
verifier_cache: ServerVerifierCache,
stop_state: Arc<StopState>,
sync_state: Arc<SyncState>,
) -> Miner {
@ -59,7 +57,6 @@ impl Miner {
config,
chain,
tx_pool,
verifier_cache,
debug_output_id: String::from("none"),
stop_state,
sync_state,
@ -156,7 +153,6 @@ impl Miner {
let (mut b, block_fees) = mine_block::get_block(
&self.chain,
&self.tx_pool,
self.verifier_cache.clone(),
key_id.clone(),
wallet_listener_url.clone(),
);