use an lru_cache for the block_hashes_cache ()

Antioch Peverell 2018-09-28 17:27:31 +01:00, committed by GitHub
parent e64f4fbcd1
commit 2cad812b29
2 changed files with 13 additions and 12 deletions
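In short, the change replaces a hand-rolled bounded cache (push each new block hash onto the front of a VecDeque, truncate back to a fixed length, scan linearly on lookup) with the lru_cache crate's LruCache, which caps the map at a fixed capacity and evicts the least-recently-used entry on insert. A minimal sketch of the before-and-after pattern, with u64 standing in for the Hash type and an assumed value for HASHES_CACHE_SIZE:

    extern crate lru_cache; // lru-cache crate, as added to the imports below

    use std::collections::VecDeque;
    use lru_cache::LruCache;

    const HASHES_CACHE_SIZE: usize = 200; // assumed value; the real constant lives in chain.rs

    fn main() {
        // Before: bounded deque, manual truncation, O(n) membership scan.
        let mut old_cache: VecDeque<u64> = VecDeque::with_capacity(HASHES_CACHE_SIZE);
        old_cache.push_front(42);
        old_cache.truncate(HASHES_CACHE_SIZE);
        assert!(old_cache.contains(&42));

        // After: hashed O(1) lookups, eviction handled by the cache itself.
        let mut new_cache: LruCache<u64, bool> = LruCache::new(HASHES_CACHE_SIZE);
        new_cache.insert(42, true);
        assert!(new_cache.contains_key(&42));
    }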

chain/src/chain.rs

@@ -15,13 +15,14 @@
 //! Facade and handler for the rest of the blockchain implementation
 //! and mostly the chain pipeline.

-use std::collections::{HashMap, VecDeque};
+use std::collections::HashMap;
 use std::fs::File;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::{Duration, Instant};

 use lmdb;
+use lru_cache::LruCache;

 use core::core::hash::{Hash, Hashed};
 use core::core::merkle_proof::MerkleProof;
@@ -150,7 +151,7 @@ pub struct Chain {
     orphans: Arc<OrphanBlockPool>,
     txhashset: Arc<RwLock<txhashset::TxHashSet>>,
     // Recently processed blocks to avoid double-processing
-    block_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
+    block_hashes_cache: Arc<RwLock<LruCache<Hash, bool>>>,
     verifier_cache: Arc<RwLock<VerifierCache>>,
     // POW verification function
     pow_verifier: fn(&BlockHeader, u8) -> Result<(), pow::Error>,
@@ -202,7 +203,7 @@ impl Chain {
         txhashset: Arc::new(RwLock::new(txhashset)),
         pow_verifier,
         verifier_cache,
-        block_hashes_cache: Arc::new(RwLock::new(VecDeque::with_capacity(HASHES_CACHE_SIZE))),
+        block_hashes_cache: Arc::new(RwLock::new(LruCache::new(HASHES_CACHE_SIZE))),
         archive_mode,
     })
 }
@@ -244,8 +245,7 @@ impl Chain {
     // only add to hash cache below if block is definitively accepted
     // or rejected
     let mut cache = self.block_hashes_cache.write().unwrap();
-    cache.push_front(bhash);
-    cache.truncate(HASHES_CACHE_SIZE);
+    cache.insert(bhash, true);
 };

 match res {
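One behavioral nuance of the new write path: VecDeque::truncate always dropped the oldest hashes in insertion order (FIFO), whereas LruCache evicts by recency of access, so a hash that keeps being looked up stays cached longer. A small sketch of the eviction behavior, again with toy u64 keys:

    extern crate lru_cache;

    use lru_cache::LruCache;

    fn main() {
        let mut cache: LruCache<u64, bool> = LruCache::new(2);
        cache.insert(1, true);
        cache.insert(2, true);
        let _ = cache.contains_key(&1); // touching key 1 refreshes its recency
        cache.insert(3, true);          // at capacity: evicts key 2, now the least recently used
        assert!(cache.contains_key(&1));
        assert!(!cache.contains_key(&2));
        assert!(cache.contains_key(&3));
    }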

chain/src/pipe.rs

@@ -14,12 +14,13 @@
 //! Implementation of the chain block acceptance (or refusal) pipeline.

-use std::collections::VecDeque;
 use std::sync::{Arc, RwLock};

 use chrono::prelude::Utc;
 use chrono::Duration;
+use lru_cache::LruCache;

 use chain::OrphanBlockPool;
 use core::consensus;
 use core::core::hash::{Hash, Hashed};
@@ -53,7 +54,7 @@ pub struct BlockContext {
     /// MMR sum tree states
     pub txhashset: Arc<RwLock<txhashset::TxHashSet>>,
     /// Recently processed blocks to avoid double-processing
-    pub block_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
+    pub block_hashes_cache: Arc<RwLock<LruCache<Hash, bool>>>,
     /// Recent orphan blocks to avoid double-processing
     pub orphans: Arc<OrphanBlockPool>,
 }
@@ -290,8 +291,8 @@ fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 /// Keeps duplicates from the network in check.
 /// Checks against the cache of recently processed block hashes.
 fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-    let cache = ctx.block_hashes_cache.read().unwrap();
-    if cache.contains(&header.hash()) {
+    let mut cache = ctx.block_hashes_cache.write().unwrap();
+    if cache.contains_key(&header.hash()) {
         return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
     }
     Ok(())
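The subtle part of this hunk is the lock change from read() to write(): in the lru_cache crate, contains_key takes &mut self because a hit refreshes the entry's recency, so even a pure membership test needs exclusive access. A hypothetical standalone version of the check, with u64 in place of Hash:

    extern crate lru_cache;

    use std::sync::{Arc, RwLock};
    use lru_cache::LruCache;

    // Hypothetical helper mirroring check_known_cache: the cache hit itself
    // mutates LRU ordering, hence write() rather than read().
    fn seen_recently(cache: &Arc<RwLock<LruCache<u64, bool>>>, hash: u64) -> bool {
        let mut cache = cache.write().unwrap();
        cache.contains_key(&hash)
    }

    fn main() {
        let cache = Arc::new(RwLock::new(LruCache::new(8)));
        cache.write().unwrap().insert(42u64, true);
        assert!(seen_recently(&cache, 42));
        assert!(!seen_recently(&cache, 7));
    }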
@@ -527,8 +528,7 @@ fn validate_block(
         &prev.total_kernel_offset,
         &prev.total_kernel_sum,
         verifier_cache,
-    )
-    .map_err(|e| ErrorKind::InvalidBlockProof(e))?;
+    ).map_err(|e| ErrorKind::InvalidBlockProof(e))?;
     Ok(())
 }
@@ -568,7 +568,8 @@ fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
     let offset = b.header.total_kernel_offset();

     // Verify the kernel sums for the block_sums with the new block applied.
-    let (utxo_sum, kernel_sum) = (block_sums, b as &Committed).verify_kernel_sums(overage, offset)?;
+    let (utxo_sum, kernel_sum) =
+        (block_sums, b as &Committed).verify_kernel_sums(overage, offset)?;

     // Save the new block_sums for the new block to the db via the batch.
     ext.batch.save_block_sums(