Chain logic to support multiple forks, select the most worked fork as the head. Significantly simplified from the original direction, as it doesn't seem that explicitly materializing forks is necessary. Simple total difficulty accounting in block headers helps a lot there.

This commit is contained in:
Ignotus Peverell 2017-01-09 15:16:44 -08:00
parent 85e3255ee4
commit dda223f25b
No known key found for this signature in database
GPG key ID: 99CD25F39F8F8211
8 changed files with 104 additions and 156 deletions

View file

@ -44,7 +44,6 @@ pub struct BlockContext {
store: Arc<ChainStore>,
adapter: Arc<ChainAdapter>,
head: Tip,
tip: Option<Tip>,
}
#[derive(Debug)]
@ -63,6 +62,8 @@ pub enum Error {
InvalidBlockProof(secp::Error),
/// Block time is too old
InvalidBlockTime,
/// Block height is invalid (not previous + 1)
InvalidBlockHeight,
/// Internal issue when trying to save or load data from store
StoreErr(types::Error),
}
@ -85,7 +86,6 @@ pub fn process_block(b: &Block,
store: store,
adapter: adapter,
head: head,
tip: None,
};
info!("Starting validation pipeline for block {} at {}.",
@ -93,22 +93,19 @@ pub fn process_block(b: &Block,
b.header.height);
try!(check_known(b.hash(), &mut ctx));
try!(validate_header(&b, &mut ctx));
try!(set_tip(&b.header, &mut ctx));
try!(validate_block(b, &mut ctx));
info!("Block at {} with hash {} is valid, going to save and append.",
b.header.height,
b.hash());
try!(add_block(b, &mut ctx));
// TODO a global lock should be set before that step or even earlier
try!(update_tips(&mut ctx));
// TODO make sure we always return the head, and not a fork that just got longer
Ok(ctx.tip)
update_head(b, &mut ctx)
}
/// Quick in-memory check to fast-reject any block we've already handled
/// recently. Keeps duplicates from the network in check.
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
// TODO ring buffer of the last few blocks that came through here
if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
return Err(Error::Unfit("already known".to_string()));
}
@ -128,6 +125,9 @@ fn validate_header(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
let prev = try!(ctx.store.get_block_header(&header.previous).map_err(&Error::StoreErr));
if header.height != prev.height + 1 {
return Err(Error::InvalidBlockHeight);
}
if header.timestamp <= prev.timestamp {
// prevent time warp attacks and some timestamp manipulations by forcing strict
// time progression
@ -140,8 +140,7 @@ fn validate_header(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
return Err(Error::InvalidBlockTime);
}
if b.header.total_difficulty !=
prev.total_difficulty.clone() + Difficulty::from_hash(&prev.hash()) {
if header.total_difficulty != prev.total_difficulty.clone() + prev.pow.to_difficulty() {
return Err(Error::WrongTotalDifficulty);
}
@ -168,26 +167,17 @@ fn validate_header(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
Ok(())
}
fn set_tip(h: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
// TODO actually support more than one branch
if h.previous != ctx.head.last_block_h {
return Err(Error::Unfit("Just don't know where to put it right now".to_string()));
}
// TODO validate block header height
ctx.tip = Some(ctx.head.clone());
Ok(())
}
/// Fully validate the block content.
fn validate_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// TODO check tx merkle tree
let curve = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
try!(b.verify(&curve).map_err(&Error::InvalidBlockProof));
// TODO check every input exists
Ok(())
}
/// Officially adds the block to our chain.
fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// save the block and appends it to the selected tip
ctx.tip = ctx.tip.as_ref().map(|t| t.append(b.hash()));
ctx.store.save_block(b).map_err(&Error::StoreErr);
// broadcast the block
@ -196,7 +186,18 @@ fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
Ok(())
}
fn update_tips(ctx: &mut BlockContext) -> Result<(), Error> {
let tip = ctx.tip.as_ref().unwrap();
ctx.store.save_head(tip).map_err(&Error::StoreErr)
/// Directly updates the head if we've just appended a new block to it or handle
/// the situation where we've just added enough work to have a fork with more
/// work than the head.
fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
// if we made a fork with more work than the head (which should also be true
// when extending the head), update it
let tip = Tip::from_block(b);
if tip.total_difficulty > ctx.head.total_difficulty {
try!(ctx.store.save_head(&tip).map_err(&Error::StoreErr));
ctx.head = tip.clone();
Ok(Some(tip))
} else {
Ok(None)
}
}

View file

@ -27,7 +27,6 @@ const SEP: u8 = ':' as u8;
const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
const BLOCK_PREFIX: u8 = 'b' as u8;
const TIP_PREFIX: u8 = 'T' as u8;
const HEAD_PREFIX: u8 = 'H' as u8;
/// An implementation of the ChainStore trait backed by a simple key-value
@ -69,16 +68,8 @@ impl ChainStore for ChainKVStore {
}
fn save_head(&self, t: &Tip) -> Result<(), Error> {
try!(self.save_tip(t));
self.db.put_ser(&vec![HEAD_PREFIX], t).map_err(&to_store_err)
}
fn save_tip(&self, t: &Tip) -> Result<(), Error> {
let last_branch = t.lineage.last_branch();
let mut k = vec![TIP_PREFIX, SEP];
k.write_u32::<BigEndian>(last_branch);
self.db.put_ser(&mut k, t).map_err(&to_store_err)
}
}
fn to_key(prefix: u8, val: &mut Vec<u8>) -> &mut Vec<u8> {

View file

@ -14,56 +14,15 @@
//! Base types that the block chain pipeline requires.
use core::core::hash::Hash;
use core::core::{Block, BlockHeader};
use core::core::hash::Hash;
use core::core::target::Difficulty;
use core::ser;
/// The lineage of a fork, defined as a series of numbers. Each new branch gets
/// a new number that gets added to a fork's ancestry to form a new fork.
/// Example:
/// head [1] -> fork1 [1, 2]
/// fork2 [1, 3]
#[derive(Debug, Clone)]
pub struct Lineage(Vec<u32>);

impl Lineage {
	/// New lineage, seeded with the single root branch 0.
	pub fn new() -> Lineage {
		let root = vec![0];
		Lineage(root)
	}

	/// The last branch that was added to the lineage. Also the only branch
	/// that's unique to this lineage.
	pub fn last_branch(&self) -> u32 {
		// a lineage always holds at least the root branch, so indexing is safe
		self.0[self.0.len() - 1]
	}
}
/// Serialization for lineage, necessary to serialize fork tips.
impl ser::Writeable for Lineage {
	fn write(&self, writer: &mut ser::Writer) -> Result<(), ser::Error> {
		// length prefix followed by each branch number in order
		try!(writer.write_u32(self.0.len() as u32));
		for branch in self.0.iter() {
			try!(writer.write_u32(*branch));
		}
		Ok(())
	}
}
/// Deserialization for lineage, necessary to deserialize fork tips.
impl ser::Readable<Lineage> for Lineage {
	fn read(reader: &mut ser::Reader) -> Result<Lineage, ser::Error> {
		// read the length prefix, then that many branch numbers
		let count = try!(reader.read_u32()) as usize;
		let mut branches = Vec::with_capacity(count);
		for _ in 0..count {
			let branch = try!(reader.read_u32());
			branches.push(branch);
		}
		Ok(Lineage(branches))
	}
}
/// The tip of a fork. A handle to the fork ancestry from its leaf in the
/// blockchain tree. References both the lineage of the fork as well as its max
/// height and its latest and previous blocks for convenience.
/// blockchain tree. References the max height and the latest and previous
/// blocks
/// for convenience and the total difficulty.
#[derive(Debug, Clone)]
pub struct Tip {
/// Height of the tip (max height of the fork)
@ -72,8 +31,8 @@ pub struct Tip {
pub last_block_h: Hash,
/// Block previous to last
pub prev_block_h: Hash,
/// Lineage in branch numbers of the fork
pub lineage: Lineage,
/// Total difficulty accumulated on that fork
pub total_difficulty: Difficulty,
}
impl Tip {
@ -83,17 +42,17 @@ impl Tip {
height: 0,
last_block_h: gbh,
prev_block_h: gbh,
lineage: Lineage::new(),
total_difficulty: Difficulty::one(),
}
}
/// Append a new block hash to this tip, returning a new updated tip.
pub fn append(&self, bh: Hash) -> Tip {
/// Append a new block to this tip, returning a new updated tip.
pub fn from_block(b: &Block) -> Tip {
Tip {
height: self.height + 1,
last_block_h: bh,
prev_block_h: self.last_block_h,
lineage: self.lineage.clone(),
height: b.header.height,
last_block_h: b.hash(),
prev_block_h: b.header.previous,
total_difficulty: b.header.total_difficulty.clone() + Difficulty::from_hash(&b.hash()),
}
}
}
@ -104,7 +63,7 @@ impl ser::Writeable for Tip {
try!(writer.write_u64(self.height));
try!(writer.write_fixed_bytes(&self.last_block_h));
try!(writer.write_fixed_bytes(&self.prev_block_h));
self.lineage.write(writer)
self.total_difficulty.write(writer)
}
}
@ -113,12 +72,12 @@ impl ser::Readable<Tip> for Tip {
let height = try!(reader.read_u64());
let last = try!(Hash::read(reader));
let prev = try!(Hash::read(reader));
let line = try!(Lineage::read(reader));
let diff = try!(Difficulty::read(reader));
Ok(Tip {
height: height,
last_block_h: last,
prev_block_h: prev,
lineage: line,
total_difficulty: diff,
})
}
}
@ -148,9 +107,6 @@ pub trait ChainStore: Send + Sync {
/// Save the provided tip as the current head of our chain
fn save_head(&self, t: &Tip) -> Result<(), Error>;
/// Save the provided tip without setting it as head
fn save_tip(&self, t: &Tip) -> Result<(), Error>;
}
/// Bridge between the chain pipeline and the rest of the system. Handles

View file

@ -23,7 +23,6 @@ use rand::os::OsRng;
use grin_chain::types::*;
use grin_core::core::hash::Hashed;
use grin_core::core::target::Difficulty;
use grin_core::pow;
use grin_core::core;
use grin_core::consensus;
@ -33,42 +32,46 @@ fn mine_empty_chain() {
let mut rng = OsRng::new().unwrap();
let store = grin_chain::store::ChainKVStore::new(".grin".to_string()).unwrap();
// save a genesis block
let mut gen = grin_core::genesis::genesis();
gen.header.cuckoo_len = 16;
store.save_block(&gen).unwrap();
// save a genesis block
let mut gen = grin_core::genesis::genesis();
gen.header.cuckoo_len = 16;
let diff = gen.header.difficulty.clone();
pow::pow(&mut gen, diff).unwrap();
store.save_block(&gen).unwrap();
// setup a new head tip
let tip = Tip::new(gen.hash());
store.save_head(&tip).unwrap();
// setup a new head tip
let tip = Tip::new(gen.hash());
store.save_head(&tip).unwrap();
// mine and add a few blocks
let mut prev = gen;
// mine and add a few blocks
let mut prev = gen;
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
let arc_store = Arc::new(store);
let adapter = Arc::new(NoopAdapter{});
let arc_store = Arc::new(store);
let adapter = Arc::new(NoopAdapter {});
for n in 1..4 {
let mut b = core::Block::new(&prev.header, vec![], reward_key).unwrap();
for n in 1..4 {
let mut b = core::Block::new(&prev.header, vec![], reward_key).unwrap();
b.header.timestamp = prev.header.timestamp + time::Duration::seconds(60);
let (difficulty, _) = consensus::next_target(b.header.timestamp.to_timespec().sec,
prev.header.timestamp.to_timespec().sec,
prev.header.difficulty.clone(),
prev.header.cuckoo_len);
let (difficulty, _) = consensus::next_target(b.header.timestamp.to_timespec().sec,
prev.header.timestamp.to_timespec().sec,
prev.header.difficulty.clone(),
prev.header.cuckoo_len);
b.header.difficulty = difficulty.clone();
let (proof, nonce) = pow::pow_size(&b, difficulty.clone(), prev.header.cuckoo_len as u32).unwrap();
b.header.pow = proof;
b.header.nonce = nonce;
b.header.difficulty = difficulty;
grin_chain::pipe::process_block(&b, arc_store.clone(), adapter.clone(), grin_chain::pipe::EASY_POW).unwrap();
pow::pow(&mut b, difficulty).unwrap();
grin_chain::pipe::process_block(&b,
arc_store.clone(),
adapter.clone(),
grin_chain::pipe::EASY_POW)
.unwrap();
// checking our new head
let head = arc_store.clone().head().unwrap();
assert_eq!(head.height, n);
assert_eq!(head.last_block_h, b.hash());
// checking our new head
let head = arc_store.clone().head().unwrap();
assert_eq!(head.height, n);
assert_eq!(head.last_block_h, b.hash());
prev = b;
}
prev = b;
}
}

View file

@ -78,14 +78,14 @@ impl Writeable for BlockHeader {
[write_fixed_bytes, &self.utxo_merkle],
[write_fixed_bytes, &self.tx_merkle]);
try!(writer.write_u64(self.nonce));
try!(writer.write_u64(self.nonce));
try!(self.difficulty.write(writer));
try!(self.total_difficulty.write(writer));
if writer.serialization_mode() != ser::SerializationMode::Hash {
try!(self.pow.write(writer));
}
Ok(())
if writer.serialization_mode() != ser::SerializationMode::Hash {
try!(self.pow.write(writer));
}
Ok(())
}
}
@ -97,9 +97,9 @@ impl Readable<BlockHeader> for BlockHeader {
let (timestamp, cuckoo_len) = ser_multiread!(reader, read_i64, read_u8);
let utxo_merkle = try!(Hash::read(reader));
let tx_merkle = try!(Hash::read(reader));
let nonce = try!(reader.read_u64());
let difficulty = try!(Difficulty::read(reader));
let total_difficulty = try!(Difficulty::read(reader));
let nonce = try!(reader.read_u64());
let pow = try!(Proof::read(reader));
Ok(BlockHeader {
@ -139,22 +139,22 @@ impl Writeable for Block {
fn write(&self, writer: &mut Writer) -> Result<(), ser::Error> {
try!(self.header.write(writer));
if writer.serialization_mode() != ser::SerializationMode::Hash {
ser_multiwrite!(writer,
[write_u64, self.inputs.len() as u64],
[write_u64, self.outputs.len() as u64],
[write_u64, self.proofs.len() as u64]);
if writer.serialization_mode() != ser::SerializationMode::Hash {
ser_multiwrite!(writer,
[write_u64, self.inputs.len() as u64],
[write_u64, self.outputs.len() as u64],
[write_u64, self.proofs.len() as u64]);
for inp in &self.inputs {
try!(inp.write(writer));
}
for out in &self.outputs {
try!(out.write(writer));
}
for proof in &self.proofs {
try!(proof.write(writer));
}
}
for inp in &self.inputs {
try!(inp.write(writer));
}
for out in &self.outputs {
try!(out.write(writer));
}
for proof in &self.proofs {
try!(proof.write(writer));
}
}
Ok(())
}
}
@ -254,8 +254,7 @@ impl Block {
height: prev.height + 1,
timestamp: time::now(),
previous: prev.hash(),
total_difficulty: Difficulty::from_hash(&prev.hash()) +
prev.total_difficulty.clone(),
total_difficulty: prev.pow.to_difficulty() + prev.total_difficulty.clone(),
cuckoo_len: prev.cuckoo_len,
..Default::default()
},

View file

@ -54,7 +54,7 @@ pub fn verify_size(b: &Block, cuckoo_sz: u32) -> bool {
/// block, until the required difficulty target is reached. May take a
/// while for a low target...
pub fn pow(b: &mut Block, diff: Difficulty) -> Result<(), Error> {
let cuckoo_len = b.header.cuckoo_len as u32;
let cuckoo_len = b.header.cuckoo_len as u32;
pow_size(b, diff, cuckoo_len)
}
@ -77,7 +77,7 @@ pub fn pow_size(b: &mut Block, diff: Difficulty, sizeshift: u32) -> Result<(), E
// diff, we're all good
if let Ok(proof) = Miner::new(pow_hash.to_slice(), EASINESS, sizeshift).mine() {
if proof.to_difficulty() >= diff {
b.header.pow = proof;
b.header.pow = proof;
return Ok(());
}
}
@ -88,7 +88,7 @@ pub fn pow_size(b: &mut Block, diff: Difficulty, sizeshift: u32) -> Result<(), E
// and if we're back where we started, update the time (changes the hash as
// well)
if b.header.nonce == start_nonce {
b.header.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
b.header.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
}
}
}
@ -103,7 +103,7 @@ mod test {
#[test]
fn genesis_pow() {
let mut b = genesis::genesis();
b.header.nonce = 310;
b.header.nonce = 310;
pow20(&mut b, Difficulty::one()).unwrap();
assert!(b.header.nonce != 310);
assert!(b.header.pow.to_difficulty() >= Difficulty::one());

View file

@ -36,8 +36,8 @@ There may be several contexts in which data can be pruned:
* A fully validating node may get rid of some data it has already validated to
free space.
* A partially validating node (similar to SPV) may not do full validation and
hence not be interested in either receiving or keeping all the data.
* A partially validating node (similar to SPV) may not be interested in either
receiving or keeping all the data.
* When a new node joins the network, it may temporarily behave as a partially
validating node to make it available for use faster, even if it ultimately becomes
a fully validating node.

View file

@ -63,7 +63,6 @@ impl Miner {
latest_hash = self.chain_head.lock().unwrap().last_block_h;
}
let mut b = self.build_block(&head);
let mut pow_header = pow::PowHeader::from_block(&b);
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
@ -74,7 +73,7 @@ impl Miner {
latest_hash);
let mut iter_count = 0;
while head.hash() == latest_hash && time::get_time().sec < deadline {
let pow_hash = pow_header.hash();
let pow_hash = b.hash();
let mut miner = cuckoo::Miner::new(pow_hash.to_slice(),
consensus::EASINESS,
b.header.cuckoo_len as u32);
@ -84,7 +83,7 @@ impl Miner {
break;
}
}
pow_header.nonce += 1;
b.header.nonce += 1;
{
latest_hash = self.chain_head.lock().unwrap().last_block_h;
}
@ -95,7 +94,6 @@ impl Miner {
if let Some(proof) = sol {
info!("Found valid proof of work, adding block {}.", b.hash());
b.header.pow = proof;
b.header.nonce = pow_header.nonce;
let res = chain::process_block(&b,
self.chain_store.clone(),
self.chain_adapter.clone(),