Fix sync hash cache ()

* Refactor hash cache handling so entries are added properly: only add to the cache once a block is definitively accepted or rejected (see the sketch below).
* Dedup of header processing now looks like premature optimization; header processing is very fast anyway. Remove the header hash cache.
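
A minimal standalone sketch of the deferred-add pattern this commit introduces (the Outcome enum, process function, and Hash alias below are simplified stand-ins for illustration, not grin's actual types): the cache push is wrapped in a closure and invoked only on the arms where the block's fate is final, so an orphan stays retryable.

use std::collections::VecDeque;
use std::sync::{Arc, RwLock};

const HASHES_CACHE_SIZE: usize = 200;

// Stand-in for grin's Hash type.
type Hash = [u8; 32];

// Simplified stand-in for the pipeline outcome: Accepted and Rejected
// are final, Orphaned is transient and must stay retryable.
enum Outcome {
    Accepted,
    Rejected,
    Orphaned,
}

fn process(outcome: Outcome, bhash: Hash, cache: &Arc<RwLock<VecDeque<Hash>>>) {
    // Deferred add: defined up front, but only invoked once we know the
    // block is definitively accepted or rejected.
    let add_to_hash_cache = || {
        let mut cache = cache.write().unwrap();
        cache.push_front(bhash);
        // Keep the dedup window bounded to the most recent entries.
        cache.truncate(HASHES_CACHE_SIZE);
    };

    match outcome {
        Outcome::Accepted | Outcome::Rejected => add_to_hash_cache(),
        // An orphan may legitimately be reprocessed once its parent
        // arrives, so it must not be deduplicated away.
        Outcome::Orphaned => {}
    }
}

fn main() {
    let cache = Arc::new(RwLock::new(VecDeque::with_capacity(HASHES_CACHE_SIZE)));
    process(Outcome::Orphaned, [7; 32], &cache);
    assert!(!cache.read().unwrap().contains(&[7; 32]));
    process(Outcome::Accepted, [7; 32], &cache);
    assert!(cache.read().unwrap().contains(&[7; 32]));
}

On this reading of the diff, the old from_cache flag becomes unnecessary: blocks re-submitted from the orphan pool were never added to the cache in the first place, so check_known cannot spuriously reject them.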
Ignotus Peverell 2018-07-22 12:43:55 -07:00 committed by GitHub
parent 75b72e48ff
commit f5b03a6d5a
2 changed files with 16 additions and 27 deletions


@@ -43,7 +43,7 @@ pub const MAX_ORPHAN_SIZE: usize = 200;
 const MAX_ORPHAN_AGE_SECS: u64 = 300;
 /// Number of recent hashes we keep to de-duplicate block or header sends
-const HASHES_CACHE_SIZE: usize = 50;
+const HASHES_CACHE_SIZE: usize = 200;
 #[derive(Debug, Clone)]
 struct Orphan {
@@ -136,8 +136,6 @@ pub struct Chain {
 	txhashset: Arc<RwLock<txhashset::TxHashSet>>,
 	// Recently processed blocks to avoid double-processing
 	block_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
-	// Recently processed headers to avoid double-processing
-	header_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
 	// POW verification function
 	pow_verifier: fn(&BlockHeader, u8) -> bool,
@@ -186,7 +184,6 @@ impl Chain {
 			txhashset: Arc::new(RwLock::new(txhashset)),
 			pow_verifier: pow_verifier,
 			block_hashes_cache: Arc::new(RwLock::new(VecDeque::with_capacity(HASHES_CACHE_SIZE))),
-			header_hashes_cache: Arc::new(RwLock::new(VecDeque::with_capacity(HASHES_CACHE_SIZE))),
 		})
 	}
@@ -197,7 +194,7 @@ impl Chain {
 		b: Block,
 		opts: Options,
 	) -> Result<(Option<Tip>, Option<Block>), Error> {
-		let res = self.process_block_no_orphans(b, opts, false);
+		let res = self.process_block_no_orphans(b, opts);
 		match res {
 			Ok((t, b)) => {
 				// We accepted a block, so see if we can accept any orphans
@@ -217,18 +214,20 @@
 		&self,
 		b: Block,
 		opts: Options,
-		from_cache: bool,
 	) -> Result<(Option<Tip>, Option<Block>), Error> {
 		let head = self.store.head()?;
+		let bhash = b.hash();
 		let mut ctx = self.ctx_from_head(head, opts)?;
-		let res = pipe::process_block(&b, &mut ctx, from_cache);
+		let res = pipe::process_block(&b, &mut ctx);
-		if !from_cache{
+		let add_to_hash_cache = || {
+			// only add to hash cache below if block is definitively accepted
+			// or rejected
 			let mut cache = self.block_hashes_cache.write().unwrap();
-			cache.push_front(b.hash());
+			cache.push_front(bhash);
 			cache.truncate(HASHES_CACHE_SIZE);
-		}
+		};
 		match res {
 			Ok(Some(ref tip)) => {
@@ -238,6 +237,7 @@
 				let mut head = chain_head.lock().unwrap();
 				*head = tip.clone();
 			}
+			add_to_hash_cache();
 			// notifying other parts of the system of the update
 			self.adapter.block_accepted(&b, opts);
@@ -245,6 +245,8 @@
 				Ok((Some(tip.clone()), Some(b)))
 			}
 			Ok(None) => {
+				add_to_hash_cache();
 				// block got accepted but we did not extend the head
 				// so its on a fork (or is the start of a new fork)
 				// broadcast the block out so everyone knows about the fork
@@ -295,6 +297,7 @@
 					b.header.height,
 					e
 				);
+				add_to_hash_cache();
 				Err(ErrorKind::Other(format!("{:?}", e).to_owned()).into())
 			}
 		}
@@ -307,11 +310,6 @@
 		let header_head = self.get_header_head()?;
 		let mut ctx = self.ctx_from_head(header_head, opts)?;
 		let res = pipe::process_block_header(bh, &mut ctx);
-		{
-			let mut cache = self.header_hashes_cache.write().unwrap();
-			cache.push_front(bh.hash());
-			cache.truncate(HASHES_CACHE_SIZE);
-		}
 		res
 	}
@@ -337,7 +335,6 @@
 			head: head,
 			pow_verifier: self.pow_verifier,
 			block_hashes_cache: self.block_hashes_cache.clone(),
-			header_hashes_cache: self.header_hashes_cache.clone(),
 			txhashset: self.txhashset.clone(),
 		})
 	}
@@ -366,7 +363,7 @@
 				height,
 				self.orphans.len(),
 			);
-			let res = self.process_block_no_orphans(orphan.block, orphan.opts, true);
+			let res = self.process_block_no_orphans(orphan.block, orphan.opts);
 			if let Ok((_, Some(b))) = res {
 				// We accepted a block, so see if we can accept any orphans
 				height = b.header.height + 1;


@@ -48,14 +48,12 @@ pub struct BlockContext {
 	pub txhashset: Arc<RwLock<txhashset::TxHashSet>>,
 	/// Recently processed blocks to avoid double-processing
 	pub block_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
-	/// Recently processed headers to avoid double-processing
-	pub header_hashes_cache: Arc<RwLock<VecDeque<Hash>>>,
 }

 /// Runs the block processing pipeline, including validation and finding a
 /// place for the new block in the chain. Returns the new chain head if
 /// updated.
-pub fn process_block(b: &Block, ctx: &mut BlockContext, from_cache: bool,) -> Result<Option<Tip>, Error> {
+pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
 	// TODO should just take a promise for a block with a full header so we don't
 	// spend resources reading the full block when its header is invalid
@@ -68,9 +66,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext, from_cache: bool,) -> Result<Option<Tip>, Error> {
 		b.outputs.len(),
 		b.kernels.len(),
 	);
-	if !from_cache {
-		check_known(b.hash(), ctx)?;
-	}
+	check_known(b.hash(), ctx)?;
 	validate_header(&b.header, ctx)?;
@@ -185,10 +181,6 @@ fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
 	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
 		return Err(ErrorKind::Unfit("already known".to_string()).into());
 	}
-	let cache = ctx.header_hashes_cache.read().unwrap();
-	if cache.contains(&bh) {
-		return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
-	}
 	Ok(())
 }
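
With the header cache removed, header dedup reduces to comparing a header's hash against the current header head and its parent. A minimal sketch of that remaining check, using simplified stand-in types (Hash, Tip, and the String error here are placeholders, not grin's definitions):

type Hash = [u8; 32];

/// Simplified stand-in for grin's chain tip (head of the header chain).
struct Tip {
    last_block_h: Hash,
    prev_block_h: Hash,
}

/// The remaining dedup check: a header is "already known" only if it is
/// the current head or its parent; no cache lookup is involved.
fn check_header_known(bh: Hash, head: &Tip) -> Result<(), String> {
    if bh == head.last_block_h || bh == head.prev_block_h {
        return Err("already known".to_string());
    }
    Ok(())
}

fn main() {
    let head = Tip { last_block_h: [1; 32], prev_block_h: [2; 32] };
    assert!(check_header_known([1; 32], &head).is_err()); // current head
    assert!(check_header_known([3; 32], &head).is_ok()); // genuinely new
}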