Integration of target calculation with header validation and pow. Enforce strictly progressing time.

Ignotus Peverell 2016-11-16 17:03:23 -08:00
parent ca26f0c3f7
commit 1e5ff0eeff
No known key found for this signature in database
GPG key ID: 99CD25F39F8F8211
4 changed files with 49 additions and 13 deletions

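The heart of the change is the pipeline's header validation (first diff below): a block's timestamp must strictly exceed its parent's, the allowed target and cuckoo size are derived from the previous header instead of a fixed MAX_TARGET, and the proof of work is then checked against that cuckoo size. A condensed sketch of the new flow, using only names that appear in the diff (the EASY_POW branch and orphan handling are omitted here):

// Condensed sketch of the new validate_header flow; all names are taken from the diff below.
let prev = try!(ctx.store.get_block_header(&header.previous).map_err(&Error::StoreErr));
if header.timestamp <= prev.timestamp {
    // time must strictly progress from parent to child
    return Err(Error::InvalidBlockTime);
}
let (diff_target, cuckoo_sz) = consensus::next_target(header.timestamp.to_timespec().sec,
                                                      prev.timestamp.to_timespec().sec,
                                                      prev.target,
                                                      prev.cuckoo_len);
if header.target > diff_target {
    // the header may not claim a target above what the chain allows
    return Err(Error::TargetTooHigh);
}
if !pow::verify_size(b, cuckoo_sz as u32) {
    // the proof of work must check out at the cuckoo size in force
    return Err(Error::InvalidPow);
}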

@@ -45,11 +45,15 @@ pub struct BlockContext<'a> {
pub enum Error {
/// The block doesn't fit anywhere in our chain
Unfit(String),
/// Target is too high either compared to ours or the block PoW hash
TargetTooHigh,
/// The proof of work is invalid
InvalidPow,
/// The block doesn't sum correctly or a tx signature is invalid
InvalidBlockProof(secp::Error),
/// Internal issue when trying to save the block
/// Block time is too old
InvalidBlockTime,
/// Internal issue when trying to save or load data from store
StoreErr(types::Error),
}
@@ -57,7 +61,7 @@ pub fn process_block(b: &Block, store: &ChainStore, opts: Options) -> Result<(),
// TODO should just take a promise for a block with a full header so we don't
// spend resources reading the full block when its header is invalid
let head = try!(store.head().map_err(&Error::StoreErr));
let mut ctx = BlockContext {
opts: opts,
store: store,
@@ -92,19 +96,30 @@ fn validate_header(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// TODO actually handle orphans and add them to a size-limited set
return Err(Error::Unfit("orphan".to_string()));
}
// TODO check time wrt to chain time, refuse older than 100 blocks or too far
// in future
// TODO check time wrt to chain time, refuse too far in future
// TODO maintain current difficulty
let diff_target = consensus::MAX_TARGET;
let prev = try!(ctx.store.get_block_header(&header.previous).map_err(&Error::StoreErr));
if header.timestamp <= prev.timestamp {
return Err(Error::InvalidBlockTime);
}
let (diff_target, cuckoo_sz) = consensus::next_target(header.timestamp.to_timespec().sec,
prev.timestamp.to_timespec().sec,
prev.target,
prev.cuckoo_len);
if header.target > diff_target {
return Err(Error::TargetTooHigh);
}
if ctx.opts.intersects(EASY_POW) {
if !pow::verify20(b, diff_target) {
if !pow::verify20(b) {
return Err(Error::InvalidPow);
}
} else if !pow::verify(b, diff_target) {
} else if !pow::verify_size(b, cuckoo_sz as u32) {
return Err(Error::InvalidPow);
}
Ok(())
}

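The body of consensus::next_target is not part of this diff; only the call site above fixes its shape. As a purely illustrative sketch of the kind of rule such a function could apply, and not Grin's actual retargeting logic, a naive single-step adjustment might look like this (the u64 target type and BLOCK_TIME_SEC constant are assumptions):

// Illustrative only: a naive retarget rule with the same shape as the
// consensus::next_target call above. The real function in this commit is not shown.
const BLOCK_TIME_SEC: i64 = 60; // assumed block spacing, not taken from this commit

fn next_target_sketch(ts: i64, prev_ts: i64, prev_target: u64, prev_cuckoo_len: u8) -> (u64, u8) {
    let elapsed = ts - prev_ts;
    let target = if elapsed > BLOCK_TIME_SEC {
        prev_target.saturating_add(prev_target / 100) // slow block: allow an easier (higher) target
    } else {
        prev_target.saturating_sub(prev_target / 100) // fast block: require a harder (lower) target
    };
    (target, prev_cuckoo_len) // cuckoo size left unchanged in this sketch
}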

@@ -17,14 +17,16 @@
use byteorder::{WriteBytesExt, BigEndian};
use types::*;
use core::core::Block;
use core::core::hash::Hash;
use core::core::{Block, BlockHeader};
use grin_store;
const STORE_PATH: &'static str = ".grin/chain";
const SEP: u8 = ':' as u8;
const BLOCK_PREFIX: u8 = 'B' as u8;
const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
const BLOCK_PREFIX: u8 = 'b' as u8;
const TIP_PREFIX: u8 = 'T' as u8;
const HEAD_PREFIX: u8 = 'H' as u8;
@@ -47,7 +49,17 @@ impl ChainStore for ChainKVStore {
}
fn save_block(&self, b: &Block) -> Result<(), Error> {
self.db.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b).map_err(&to_store_err)
try!(self.db
.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)
.map_err(&to_store_err));
self.db
.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..],
&b.header)
.map_err(&to_store_err)
}
fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())))
}
fn save_head(&self, t: &Tip) -> Result<(), Error> {

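The store now writes two records per block, both keyed by the block hash: the full block under the 'b' prefix and its header under 'h', so get_block_header can fetch a header without loading the whole block. The to_key helper itself is not shown in this diff; a hedged sketch of the key layout the SEP and prefix constants suggest:

// Hedged sketch of the key layout implied by the prefix and SEP constants above;
// the actual to_key helper is not part of this diff.
fn to_key_sketch(prefix: u8, id: &mut Vec<u8>) -> Vec<u8> {
    let mut key = Vec::with_capacity(2 + id.len());
    key.push(prefix);   // 'b' for a full block, 'h' for its header
    key.push(SEP);      // ':' separator
    key.append(id);     // the block hash bytes
    key
}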

@@ -15,7 +15,7 @@
//! Base types that the block chain pipeline requires.
use core::core::hash::Hash;
use core::core::Block;
use core::core::{Block, BlockHeader};
use core::ser;
/// The lineage of a fork, defined as a series of numbers. Each new branch gets
@@ -137,6 +137,9 @@ pub trait ChainStore {
/// Get the tip that's also the head of the chain
fn head(&self) -> Result<Tip, Error>;
/// Gets a block header by hash
fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error>;
/// Save the provided block in store
fn save_block(&self, b: &Block) -> Result<(), Error>;

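Because the pipeline talks to storage only through this ChainStore trait, helpers can be written against the trait alone. A hypothetical example, not part of this commit, that re-checks the strictly progressing time rule for a header already in the store:

// Hypothetical helper (not in this commit): uses only the trait methods declared above.
fn time_progresses(store: &ChainStore, h: &Hash) -> Result<bool, Error> {
    let header = try!(store.get_block_header(h));
    let prev = try!(store.get_block_header(&header.previous));
    Ok(header.timestamp > prev.timestamp)
}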

@@ -87,10 +87,16 @@ impl Store {
/// Gets a `Readable` value from the db, provided its key. Encapsulates
/// serialization.
pub fn get_ser<T: ser::Readable<T>>(&self, key: &[u8]) -> Result<Option<T>, Error> {
self.get_ser_limited(key, 0)
}
/// Gets a `Readable` value from the db, provided its key, allowing to extract only partial data. The underlying Readable size must align accordingly. Encapsulates serialization.
pub fn get_ser_limited<T: ser::Readable<T>>(&self, key: &[u8], len: usize) -> Result<Option<T>, Error> {
let data = try!(self.get(key));
match data {
Some(val) => {
let r = try!(ser::deserialize(&mut &val[..]).map_err(Error::SerErr));
let mut lval = if len > 0 { &val[..len] } else { &val[..] };
let r = try!(ser::deserialize(&mut lval).map_err(Error::SerErr));
Ok(Some(r))
}
None => Ok(None),
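
get_ser_limited decodes a value from only its first len bytes (a len of 0 meaning the whole value), which is presumably what allows reading just a leading portion, such as a header, out of a larger stored record. A hedged usage sketch; db, key, header_len and the assumption that a serialized header forms a clean prefix of the value are placeholders not confirmed by this diff:

// Placeholder usage: decode a BlockHeader from the first `header_len` bytes of a
// stored value. `db`, `key` and `header_len` are illustrative names, not from the diff.
let maybe_header: Option<BlockHeader> = try!(db.get_ser_limited(&key[..], header_len));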