Mirror of https://github.com/mimblewimble/grin.git
Error handling improvements (particularly in chain) (#1208)

* update error handling in chain and other modules to use error/errorkind
* sizeshift errorkind

parent f0d5406d0b
commit d2a84b7600

28 changed files with 657 additions and 435 deletions
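In outline, the commit retires the flat `Error` enum that previously lived in `chain::types` in favor of a `failure`-based `Error` wrapper around an `ErrorKind` enum (the full definition is in the new chain/src/error.rs below). A minimal sketch of the pattern, using an illustrative two-variant kind rather than the real set:

// Minimal sketch of the failure-based Error/ErrorKind pattern this commit
// adopts. Names mirror chain/src/error.rs; the two-variant enum here is
// illustrative, not the full set the commit defines.
extern crate failure;
#[macro_use]
extern crate failure_derive;

use failure::{Context, Fail};

#[derive(Debug)]
pub struct Error {
    inner: Context<ErrorKind>,
}

#[derive(Clone, Eq, PartialEq, Debug, Fail)]
pub enum ErrorKind {
    #[fail(display = "Orphan")]
    Orphan,
    #[fail(display = "Store Error: {}", _0)]
    StoreErr(String),
}

impl Error {
    // Callers branch on the kind rather than on the opaque wrapper itself.
    pub fn kind(&self) -> ErrorKind {
        self.inner.get_context().clone()
    }
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error { inner: Context::new(kind) }
    }
}

// A fallible call now reads `Err(ErrorKind::Orphan.into())` instead of
// `Err(Error::Orphan)`, and callers match via `e.kind()`.
fn process() -> Result<(), Error> {
    Err(ErrorKind::Orphan.into())
}

fn main() {
    if let Err(e) = process() {
        match e.kind() {
            ErrorKind::Orphan => println!("orphan block, queueing"),
            ErrorKind::StoreErr(msg) => println!("store failure: {}", msg),
        }
    }
}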
Cargo.lock (generated, 6 changes)

@@ -591,6 +591,8 @@ dependencies = [
 "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "croaring 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "grin_core 0.2.0",
 "grin_keychain 0.2.0",
 "grin_store 0.2.0",
@@ -626,6 +628,8 @@ dependencies = [
 "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
 "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "croaring 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "grin_keychain 0.2.0",
 "grin_util 0.2.0",
 "grin_wallet 0.2.0",
@@ -729,6 +733,8 @@ dependencies = [
 "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "croaring 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+"failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "grin_core 0.2.0",
 "grin_util 0.2.0",
 "libc 0.2.42 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -9,6 +9,8 @@ publish = false
 bitflags = "1"
 byteorder = "1"
 lmdb-zero = "0.4.4"
+failure = "0.1"
+failure_derive = "0.1"
 croaring = "0.3"
 slog = { version = "~2.2", features = ["max_level_trace", "release_max_level_trace"] }
 serde = "1"
@@ -27,13 +27,14 @@ use core::core::merkle_proof::MerkleProof;
 use core::core::target::Difficulty;
 use core::core::{Block, BlockHeader, Output, OutputIdentifier, Transaction, TxKernel};
 use core::global;
+use error::{Error, ErrorKind};
 use grin_store::Error::NotFoundErr;
 use pipe;
 use store;
 use txhashset;
-use types::{ChainAdapter, Error, Options, Tip};
-use util::LOGGER;
+use types::{ChainAdapter, Options, Tip};
 use util::secp::pedersen::{Commitment, RangeProof};
+use util::LOGGER;

 /// Orphan pool size is limited by MAX_ORPHAN_SIZE
 pub const MAX_ORPHAN_SIZE: usize = 200;
@@ -210,9 +211,7 @@ impl Chain {
         b: Block,
         opts: Options,
     ) -> Result<(Option<Tip>, Option<Block>), Error> {
-        let head = self.store
-            .head()
-            .map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
+        let head = self.store.head()?;
         let mut ctx = self.ctx_from_head(head, opts)?;

         let res = pipe::process_block(&b, &mut ctx);
@@ -252,47 +251,51 @@ impl Chain {
                 }
                 Ok((None, Some(b)))
             }
-            Err(Error::Orphan) => {
-                let block_hash = b.hash();
-                let orphan = Orphan {
-                    block: b,
-                    opts: opts,
-                    added: Instant::now(),
-                };
-
-                // In the case of a fork - it is possible to have multiple blocks
-                // that are children of a given block.
-                // We do not handle this currently for orphans (future enhancement?).
-                // We just assume "last one wins" for now.
-                &self.orphans.add(orphan);
-
-                debug!(
-                    LOGGER,
-                    "process_block: orphan: {:?}, # orphans {}",
-                    block_hash,
-                    self.orphans.len(),
-                );
-                Err(Error::Orphan)
-            }
-            Err(Error::Unfit(ref msg)) => {
-                debug!(
-                    LOGGER,
-                    "Block {} at {} is unfit at this time: {}",
-                    b.hash(),
-                    b.header.height,
-                    msg
-                );
-                Err(Error::Unfit(msg.clone()))
-            }
             Err(e) => {
-                info!(
-                    LOGGER,
-                    "Rejected block {} at {}: {:?}",
-                    b.hash(),
-                    b.header.height,
-                    e
-                );
-                Err(e)
+                match e.kind() {
+                    ErrorKind::Orphan => {
+                        let block_hash = b.hash();
+                        let orphan = Orphan {
+                            block: b,
+                            opts: opts,
+                            added: Instant::now(),
+                        };
+
+                        // In the case of a fork - it is possible to have multiple blocks
+                        // that are children of a given block.
+                        // We do not handle this currently for orphans (future enhancement?).
+                        // We just assume "last one wins" for now.
+                        &self.orphans.add(orphan);
+
+                        debug!(
+                            LOGGER,
+                            "process_block: orphan: {:?}, # orphans {}",
+                            block_hash,
+                            self.orphans.len(),
+                        );
+                        Err(ErrorKind::Orphan.into())
+                    }
+                    ErrorKind::Unfit(ref msg) => {
+                        debug!(
+                            LOGGER,
+                            "Block {} at {} is unfit at this time: {}",
+                            b.hash(),
+                            b.header.height,
+                            msg
+                        );
+                        Err(ErrorKind::Unfit(msg.clone()).into())
+                    }
+                    _ => {
+                        info!(
+                            LOGGER,
+                            "Rejected block {} at {}: {:?}",
+                            b.hash(),
+                            b.header.height,
+                            e
+                        );
+                        Err(ErrorKind::Other(format!("{:?}", e).to_owned()).into())
+                    }
+                }
             }
         }
     }
@@ -409,7 +412,7 @@ impl Chain {
         if tx.lock_height() <= height {
             Ok(())
         } else {
-            Err(Error::TxLockHeight)
+            Err(ErrorKind::TxLockHeight.into())
         }
     }
@@ -529,7 +532,7 @@ impl Chain {
         let head = self.head().unwrap();
         let header_head = self.get_header_head().unwrap();
         if header_head.height - head.height < global::cut_through_horizon() as u64 {
-            return Err(Error::InvalidTxHashSet("not needed".to_owned()));
+            return Err(ErrorKind::InvalidTxHashSet("not needed".to_owned()).into());
         }

         let header = self.store.get_block_header(&h)?;
@@ -623,17 +626,21 @@ impl Chain {
                     batch.delete_block(&b.hash())?;
                     batch.delete_block_input_bitmap(&b.hash())?;
                 }
-                Err(NotFoundErr) => {
+                Err(NotFoundErr(_)) => {
                     break;
                 }
-                Err(e) => return Err(Error::StoreErr(e, "retrieving block to compact".to_owned())),
+                Err(e) => {
+                    return Err(
+                        ErrorKind::StoreErr(e, "retrieving block to compact".to_owned()).into(),
+                    )
+                }
             }
             if current.height <= 1 {
                 break;
             }
             match self.store.get_block_header(&current.previous) {
                 Ok(h) => current = h,
-                Err(NotFoundErr) => break,
+                Err(NotFoundErr(_)) => break,
                 Err(e) => return Err(From::from(e)),
             }
         }
@@ -671,9 +678,9 @@ impl Chain {
         let outputs = txhashset.outputs_by_insertion_index(start_index, max);
         let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
         if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() {
-            return Err(Error::TxHashSetErr(String::from(
+            return Err(ErrorKind::TxHashSetErr(String::from(
                 "Output and rangeproof sets don't match",
-            )));
+            )).into());
         }
         let mut output_vec: Vec<Output> = vec![];
         for (ref x, &y) in outputs.1.iter().zip(rangeproofs.1.iter()) {
@@ -704,9 +711,7 @@ impl Chain {
     /// Reset header_head and sync_head to head of current body chain
     pub fn reset_head(&self) -> Result<(), Error> {
         let batch = self.store.batch()?;
-        batch
-            .reset_head()
-            .map_err(|e| Error::StoreErr(e, "chain reset_head".to_owned()))?;
+        batch.reset_head()?;
         batch.commit()?;
         Ok(())
     }
@@ -720,28 +725,28 @@ impl Chain {
     pub fn head_header(&self) -> Result<BlockHeader, Error> {
         self.store
             .head_header()
-            .map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain head header".to_owned()).into())
     }

     /// Gets a block by hash
     pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
         self.store
             .get_block(h)
-            .map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain get block".to_owned()).into())
     }

     /// Gets a block header by hash
     pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
         self.store
             .get_block_header(h)
-            .map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into())
     }

     /// Gets the block header at the provided height
     pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
         self.store
             .get_header_by_height(height)
-            .map_err(|e| Error::StoreErr(e, "chain get header by height".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain get header by height".to_owned()).into())
     }

     /// Verifies the given block header is actually on the current chain.
@@ -750,7 +755,7 @@ impl Chain {
     pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
         self.store
             .is_on_current_chain(header)
-            .map_err(|e| Error::StoreErr(e, "chain is_on_current_chain".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain is_on_current_chain".to_owned()).into())
     }

     /// Get the tip of the current "sync" header chain.
@@ -758,14 +763,14 @@ impl Chain {
     pub fn get_sync_head(&self) -> Result<Tip, Error> {
         self.store
             .get_sync_head()
-            .map_err(|e| Error::StoreErr(e, "chain get sync head".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain get sync head".to_owned()).into())
     }

     /// Get the tip of the header chain.
     pub fn get_header_head(&self) -> Result<Tip, Error> {
         self.store
             .get_header_head()
-            .map_err(|e| Error::StoreErr(e, "chain get header head".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain get header head".to_owned()).into())
     }

     /// Builds an iterator on blocks starting from the current chain head and
@@ -780,7 +785,7 @@ impl Chain {
     pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
         self.store
             .block_exists(&h)
-            .map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
+            .map_err(|e| ErrorKind::StoreErr(e, "chain block exists".to_owned()).into())
     }
 }
@@ -830,7 +835,7 @@ fn setup_head(
                 }
             }
         }
-        Err(NotFoundErr) => {
+        Err(NotFoundErr(_)) => {
            let tip = Tip::from_block(&genesis.header);
            batch.save_block(&genesis)?;
            batch.setup_height(&genesis.header, &tip)?;
@@ -844,7 +849,7 @@ fn setup_head(
             head = tip;
             info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
         }
-        Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
+        Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
     };

     // Initialize header_head and sync_head as necessary for chain init.
chain/src/error.rs (new file, 248 lines)

// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Error types for chain
use failure::{Backtrace, Context, Fail};
use std::fmt::{self, Display};
use std::io;

use core::core::{block, committed, transaction};
use core::ser;
use grin_store as store;
use keychain;
use util::secp;
use util::secp::pedersen::Commitment;

/// Error definition
#[derive(Debug, Fail)]
pub struct Error {
    inner: Context<ErrorKind>,
}

/// Chain error definitions
#[derive(Clone, Eq, PartialEq, Debug, Fail)]
pub enum ErrorKind {
    /// The block doesn't fit anywhere in our chain
    #[fail(display = "Block is unfit: {}", _0)]
    Unfit(String),
    /// Special case of orphan blocks
    #[fail(display = "Orphan")]
    Orphan,
    /// Difficulty is too low either compared to ours or the block PoW hash
    #[fail(display = "Difficulty is too low compared to ours or the block PoW hash")]
    DifficultyTooLow,
    /// Addition of difficulties on all previous blocks is wrong
    #[fail(display = "Addition of difficulties on all previous blocks is wrong")]
    WrongTotalDifficulty,
    /// Block header sizeshift is lower than our min
    #[fail(display = "Cuckoo Size too Low")]
    LowSizeshift,
    /// The proof of work is invalid
    #[fail(display = "Invalid PoW")]
    InvalidPow,
    /// The block doesn't sum correctly or a tx signature is invalid
    #[fail(display = "Invalid Block Proof")]
    InvalidBlockProof(block::Error),
    /// Block time is too old
    #[fail(display = "Invalid Block Time")]
    InvalidBlockTime,
    /// Block height is invalid (not previous + 1)
    #[fail(display = "Invalid Block Height")]
    InvalidBlockHeight,
    /// One of the root hashes in the block is invalid
    #[fail(display = "Invalid Root")]
    InvalidRoot,
    /// One of the MMR sizes in the block header is invalid
    #[fail(display = "Invalid MMR Size")]
    InvalidMMRSize,
    /// Error from underlying keychain impl
    #[fail(display = "Keychain Error")]
    Keychain(keychain::Error),
    /// Error from underlying secp lib
    #[fail(display = "Secp Lib Error")]
    Secp(secp::Error),
    /// One of the inputs in the block has already been spent
    #[fail(display = "Already Spent: {:?}", _0)]
    AlreadySpent(Commitment),
    /// An output with that commitment already exists (should be unique)
    #[fail(display = "Duplicate Commitment: {:?}", _0)]
    DuplicateCommitment(Commitment),
    /// Attempt to spend a coinbase output before it sufficiently matures.
    #[fail(display = "Attempt to spend immature coinbase")]
    ImmatureCoinbase,
    /// Error validating a Merkle proof (coinbase output)
    #[fail(display = "Error validating merkle proof")]
    MerkleProof,
    /// Output not found
    #[fail(display = "Output not found")]
    OutputNotFound,
    /// Output spent
    #[fail(display = "Output is spent")]
    OutputSpent,
    /// Invalid block version, either a mistake or outdated software
    #[fail(display = "Invalid Block Version: {}", _0)]
    InvalidBlockVersion(u16),
    /// We've been provided a bad txhashset
    #[fail(display = "Invalid TxHashSet: {}", _0)]
    InvalidTxHashSet(String),
    /// Internal issue when trying to save or load data from store
    #[fail(display = "Store Error: {}", _1)]
    StoreErr(store::Error, String),
    /// Internal issue when trying to save or load data from append only files
    #[fail(display = "File Read Error: {}", _0)]
    FileReadErr(String),
    /// Error serializing or deserializing a type
    #[fail(display = "Serialization Error")]
    SerErr(ser::Error),
    /// Error with the txhashset
    #[fail(display = "TxHashSetErr: {}", _0)]
    TxHashSetErr(String),
    /// Tx not valid based on lock_height.
    #[fail(display = "Transaction Lock Height")]
    TxLockHeight,
    /// No chain exists and genesis block is required
    #[fail(display = "Genesis Block Required")]
    GenesisBlockRequired,
    /// Error from underlying tx handling
    #[fail(display = "Transaction Error")]
    Transaction(transaction::Error),
    /// Anything else
    #[fail(display = "Other Error: {}", _0)]
    Other(String),
    /// Error from summing and verifying kernel sums via committed trait.
    #[fail(display = "Committed Trait: Error summing and verifying kernel sums")]
    Committed(committed::Error),
}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let cause = match self.cause() {
            Some(c) => format!("{}", c),
            None => String::from("Unknown"),
        };
        let backtrace = match self.backtrace() {
            Some(b) => format!("{}", b),
            None => String::from("Unknown"),
        };
        let output = format!(
            "{} \n Cause: {} \n Backtrace: {}",
            self.inner, cause, backtrace
        );
        Display::fmt(&output, f)
    }
}

impl Error {
    /// get kind
    pub fn kind(&self) -> ErrorKind {
        self.inner.get_context().clone()
    }
    /// get cause
    pub fn cause(&self) -> Option<&Fail> {
        self.inner.cause()
    }
    /// get backtrace
    pub fn backtrace(&self) -> Option<&Backtrace> {
        self.inner.backtrace()
    }

    /// Whether the error is due to a block that was intrinsically wrong
    pub fn is_bad_data(&self) -> bool {
        // shorter to match on all the "not the block's fault" errors
        match self.kind() {
            ErrorKind::Unfit(_)
            | ErrorKind::Orphan
            | ErrorKind::StoreErr(_, _)
            | ErrorKind::SerErr(_)
            | ErrorKind::TxHashSetErr(_)
            | ErrorKind::GenesisBlockRequired
            | ErrorKind::Other(_) => false,
            _ => true,
        }
    }
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error {
            inner: Context::new(kind),
        }
    }
}

impl From<Context<ErrorKind>> for Error {
    fn from(inner: Context<ErrorKind>) -> Error {
        Error { inner: inner }
    }
}

impl From<block::Error> for Error {
    fn from(error: block::Error) -> Error {
        let ec = error.clone();
        Error {
            inner: error.context(ErrorKind::InvalidBlockProof(ec)),
        }
    }
}

impl From<store::Error> for Error {
    fn from(error: store::Error) -> Error {
        let ec = error.clone();
        Error {
            inner: error.context(ErrorKind::StoreErr(ec.clone(), format!("{:?}", ec))),
        }
    }
}

impl From<keychain::Error> for Error {
    fn from(error: keychain::Error) -> Error {
        Error {
            inner: Context::new(ErrorKind::Keychain(error)),
        }
    }
}

impl From<transaction::Error> for Error {
    fn from(error: transaction::Error) -> Error {
        Error {
            inner: Context::new(ErrorKind::Transaction(error)),
        }
    }
}

impl From<committed::Error> for Error {
    fn from(error: committed::Error) -> Error {
        Error {
            inner: Context::new(ErrorKind::Committed(error)),
        }
    }
}

impl From<io::Error> for Error {
    fn from(e: io::Error) -> Error {
        Error {
            inner: Context::new(ErrorKind::TxHashSetErr(e.to_string())),
        }
    }
}

impl From<secp::Error> for Error {
    fn from(e: secp::Error) -> Error {
        Error {
            inner: Context::new(ErrorKind::Secp(e)),
        }
    }
}
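These `From` impls are what let several `.map_err(|e| Error::StoreErr(...))` chains elsewhere in this commit collapse to a bare `?`. A hedged, self-contained sketch of that mechanism; the types here are stand-ins, not the real grin_store API (the real wrapper also keeps the failure `Context` and backtrace):

// Sketch: `From<StoreError> for Error` means `?` performs the conversion,
// so no explicit map_err is needed at the call site.
#[derive(Debug)]
struct StoreError(String);

#[derive(Debug)]
enum ErrorKind {
    StoreErr(StoreError, String),
}

#[derive(Debug)]
struct Error {
    kind: ErrorKind,
}

impl From<StoreError> for Error {
    fn from(e: StoreError) -> Error {
        // Mirror the real impl: keep the store error and a debug rendering.
        let msg = format!("{:?}", e);
        Error { kind: ErrorKind::StoreErr(e, msg) }
    }
}

fn store_head() -> Result<u64, StoreError> {
    Err(StoreError("lmdb: not found".to_owned()))
}

// Before: store_head().map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?
// After: the From impl lets `?` do the lifting.
fn chain_head() -> Result<u64, Error> {
    let h = store_head()?;
    Ok(h)
}

fn main() {
    println!("{:?}", chain_head());
}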
@@ -31,7 +31,10 @@ extern crate serde;
 extern crate serde_derive;
 #[macro_use]
 extern crate slog;
+extern crate failure;
 extern crate time;
+#[macro_use]
+extern crate failure_derive;
+
 extern crate grin_core as core;
 extern crate grin_keychain as keychain;
@@ -39,6 +42,7 @@ extern crate grin_store;
 extern crate grin_util as util;

 mod chain;
+mod error;
 pub mod pipe;
 pub mod store;
 pub mod txhashset;
@@ -47,5 +51,6 @@ pub mod types;
 // Re-export the base interface

 pub use chain::{Chain, MAX_ORPHAN_SIZE};
+pub use error::{Error, ErrorKind};
 pub use store::ChainStore;
-pub use types::{ChainAdapter, Error, Options, Tip};
+pub use types::{ChainAdapter, Options, Tip};
@@ -23,12 +23,15 @@ use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
 use core::core::{Block, BlockHeader};
 use core::global;
+use error::{Error, ErrorKind};
 use grin_store;
 use store;
 use txhashset;
-use types::{Error, Options, Tip};
+use types::{Options, Tip};
 use util::LOGGER;

+use failure::ResultExt;
+
 /// Contextual information required to process a new block and either reject or
 /// accept it.
 pub struct BlockContext {
@@ -75,10 +78,10 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
         match ctx.store.block_exists(&b.header.previous) {
             Ok(true) => {}
             Ok(false) => {
-                return Err(Error::Orphan);
+                return Err(ErrorKind::Orphan.into());
             }
             Err(e) => {
-                return Err(Error::StoreErr(e, "pipe get previous".to_owned()));
+                return Err(ErrorKind::StoreErr(e, "pipe get previous".to_owned()).into());
             }
         }
     }
@@ -93,9 +96,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
     let mut txhashset = local_txhashset.write().unwrap();

     // update head now that we're in the lock
-    ctx.head = ctx.store
-        .head()
-        .map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;
+    ctx.head = ctx.store.head()?;

     let mut batch = ctx.store.batch()?;
@@ -176,13 +177,13 @@ pub fn process_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
     // TODO ring buffer of the last few blocks that came through here
     if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
-        return Err(Error::Unfit("already known".to_string()));
+        return Err(ErrorKind::Unfit("already known".to_string()).into());
     }
     if let Ok(h) = ctx.store.get_block_header(&bh) {
         // there is a window where a block header can be saved but the chain head not
         // updated yet, we plug that window here by re-accepting the block
         if h.total_difficulty <= ctx.head.total_difficulty {
-            return Err(Error::Unfit("already in store".to_string()));
+            return Err(ErrorKind::Unfit("already in store".to_string()).into());
         }
     }
     Ok(())
|
||||||
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
|
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
|
||||||
// TODO ring buffer of the last few blocks that came through here
|
// TODO ring buffer of the last few blocks that came through here
|
||||||
if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
|
if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
|
||||||
return Err(Error::Unfit("already known".to_string()));
|
return Err(ErrorKind::Unfit("already known".to_string()).into());
|
||||||
}
|
}
|
||||||
if let Ok(b) = ctx.store.get_block(&bh) {
|
if let Ok(b) = ctx.store.get_block(&bh) {
|
||||||
// there is a window where a block can be saved but the chain head not
|
// there is a window where a block can be saved but the chain head not
|
||||||
// updated yet, we plug that window here by re-accepting the block
|
// updated yet, we plug that window here by re-accepting the block
|
||||||
if b.header.total_difficulty <= ctx.head.total_difficulty {
|
if b.header.total_difficulty <= ctx.head.total_difficulty {
|
||||||
return Err(Error::Unfit("already in store".to_string()));
|
return Err(ErrorKind::Unfit("already in store".to_string()).into());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -216,7 +217,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
|
||||||
LOGGER,
|
LOGGER,
|
||||||
"Invalid block header version received ({}), maybe update Grin?", header.version
|
"Invalid block header version received ({}), maybe update Grin?", header.version
|
||||||
);
|
);
|
||||||
return Err(Error::InvalidBlockVersion(header.version));
|
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: remove CI check from here somehow
|
// TODO: remove CI check from here somehow
|
||||||
|
@@ -226,12 +227,12 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
     {
         // refuse blocks more than 12 blocks intervals in future (as in bitcoin)
         // TODO add warning in p2p code if local time is too different from peers
-        return Err(Error::InvalidBlockTime);
+        return Err(ErrorKind::InvalidBlockTime.into());
     }

     if !ctx.opts.contains(Options::SKIP_POW) {
         if global::min_sizeshift() > header.pow.cuckoo_sizeshift {
-            return Err(Error::LowSizeshift);
+            return Err(ErrorKind::LowSizeshift.into());
         }
         if !(ctx.pow_verifier)(header, header.pow.cuckoo_sizeshift) {
             error!(
@@ -239,31 +240,32 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
                 "pipe: validate_header failed for cuckoo shift size {}",
                 header.pow.cuckoo_sizeshift,
             );
-            return Err(Error::InvalidPow);
+            return Err(ErrorKind::InvalidPow.into());
         }
     }

     // first I/O cost, better as late as possible
     let prev = match ctx.store.get_block_header(&header.previous) {
-        Ok(prev) => Ok(prev),
-        Err(grin_store::Error::NotFoundErr) => Err(Error::Orphan),
-        Err(e) => Err(Error::StoreErr(
-            e,
-            format!("previous header {}", header.previous),
-        )),
-    }?;
+        Ok(prev) => prev,
+        Err(grin_store::Error::NotFoundErr(_)) => return Err(ErrorKind::Orphan.into()),
+        Err(e) => {
+            return Err(
+                ErrorKind::StoreErr(e, format!("previous header {}", header.previous)).into(),
+            )
+        }
+    };

     // make sure this header has a height exactly one higher than the previous
     // header
     if header.height != prev.height + 1 {
-        return Err(Error::InvalidBlockHeight);
+        return Err(ErrorKind::InvalidBlockHeight.into());
     }

     // TODO - get rid of the automated testing mode check here somehow
     if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
         // prevent time warp attacks and some timestamp manipulations by forcing strict
         // time progression (but not in CI mode)
-        return Err(Error::InvalidBlockTime);
+        return Err(ErrorKind::InvalidBlockTime.into());
     }

     // verify the proof of work and related parameters
@@ -274,27 +276,27 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
     // as the target difficulty
     if !ctx.opts.contains(Options::SKIP_POW) {
         if header.total_difficulty.clone() <= prev.total_difficulty.clone() {
-            return Err(Error::DifficultyTooLow);
+            return Err(ErrorKind::DifficultyTooLow.into());
         }

         let target_difficulty = header.total_difficulty.clone() - prev.total_difficulty.clone();

         if header.pow.to_difficulty() < target_difficulty {
-            return Err(Error::DifficultyTooLow);
+            return Err(ErrorKind::DifficultyTooLow.into());
         }

         // explicit check to ensure we are not below the minimum difficulty
         // we will also check difficulty based on next_difficulty later on
         if target_difficulty < Difficulty::one() {
-            return Err(Error::DifficultyTooLow);
+            return Err(ErrorKind::DifficultyTooLow.into());
         }

         // explicit check to ensure total_difficulty has increased by exactly
         // the _network_ difficulty of the previous block
         // (during testnet1 we use _block_ difficulty here)
         let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
-        let network_difficulty =
-            consensus::next_difficulty(diff_iter).map_err(|e| Error::Other(e.to_string()))?;
+        let network_difficulty = consensus::next_difficulty(diff_iter)
+            .context(ErrorKind::Other("network difficulty".to_owned()))?;
         if target_difficulty != network_difficulty.clone() {
             error!(
                 LOGGER,
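The `.context(...)` call above is `failure::ResultExt`, imported at the top of pipe.rs in this commit. A hedged, self-contained sketch of the mechanism; the names below are illustrative stand-ins, not the real consensus API:

// On Err, `.context(kind)` wraps the underlying failure in a
// `Context<ErrorKind>` (keeping the original error as the cause), and the
// `From<Context<ErrorKind>>` impl in error.rs lets `?` finish the conversion.
extern crate failure;
#[macro_use]
extern crate failure_derive;

use failure::{Context, ResultExt};

#[derive(Clone, Debug, Fail)]
enum ErrorKind {
    #[fail(display = "Other Error: {}", _0)]
    Other(String),
}

// Stand-in for the error type next_difficulty actually returns.
#[derive(Debug, Fail)]
#[fail(display = "consensus error: {}", _0)]
struct TargetError(String);

#[derive(Debug)]
struct Error {
    inner: Context<ErrorKind>,
}

impl From<Context<ErrorKind>> for Error {
    fn from(inner: Context<ErrorKind>) -> Error {
        Error { inner: inner }
    }
}

fn next_difficulty() -> Result<u64, TargetError> {
    Err(TargetError("not enough history".to_owned()))
}

fn network_difficulty() -> Result<u64, Error> {
    // Attach ErrorKind::Other as context, then `?` converts into Error.
    let d = next_difficulty().context(ErrorKind::Other("network difficulty".to_owned()))?;
    Ok(d)
}

fn main() {
    println!("failed: {}", network_difficulty().is_err());
}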
@@ -302,7 +304,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
             target_difficulty.to_num(),
             prev.total_difficulty.to_num() + network_difficulty.to_num()
         );
-        return Err(Error::WrongTotalDifficulty);
+        return Err(ErrorKind::WrongTotalDifficulty.into());
         }
     }

@@ -312,7 +314,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 fn validate_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
     let prev = ctx.store.get_block_header(&b.header.previous)?;
     b.validate(&prev.total_kernel_offset, &prev.total_kernel_sum)
-        .map_err(&Error::InvalidBlockProof)?;
+        .map_err(|e| ErrorKind::InvalidBlockProof(e))?;
     Ok(())
 }

@@ -329,7 +331,8 @@ fn validate_block_via_txhashset(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
     ext.apply_block(&b)?;

     let roots = ext.roots();
-    if roots.output_root != b.header.output_root || roots.rproof_root != b.header.range_proof_root
+    if roots.output_root != b.header.output_root
+        || roots.rproof_root != b.header.range_proof_root
         || roots.kernel_root != b.header.kernel_root
     {
         ext.dump(false);
@@ -353,11 +356,11 @@ fn validate_block_via_txhashset(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
             b.header.kernel_root,
         );

-        return Err(Error::InvalidRoot);
+        return Err(ErrorKind::InvalidRoot.into());
     }
     let sizes = ext.sizes();
     if b.header.output_mmr_size != sizes.0 || b.header.kernel_mmr_size != sizes.2 {
-        return Err(Error::InvalidMMRSize);
+        return Err(ErrorKind::InvalidMMRSize.into());
     }

     Ok(())
@@ -371,7 +374,7 @@ fn add_block(
 ) -> Result<(), Error> {
     batch
         .save_block(b)
-        .map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))?;
+        .map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;
     let bitmap = store.build_and_cache_block_input_bitmap(&b)?;
     batch.save_block_input_bitmap(&b.hash(), &bitmap)?;
     Ok(())
|
||||||
fn add_block_header(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
|
fn add_block_header(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
|
||||||
batch
|
batch
|
||||||
.save_block_header(bh)
|
.save_block_header(bh)
|
||||||
.map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Directly updates the head if we've just appended a new block to it or handle
|
/// Directly updates the head if we've just appended a new block to it or handle
|
||||||
|
@ -394,7 +397,7 @@ fn update_head(b: &Block, ctx: &BlockContext, batch: &store::Batch) -> Result<Op
|
||||||
// update the block height index
|
// update the block height index
|
||||||
batch
|
batch
|
||||||
.setup_height(&b.header, &ctx.head)
|
.setup_height(&b.header, &ctx.head)
|
||||||
.map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe setup height".to_owned()))?;
|
||||||
|
|
||||||
// in sync mode, only update the "body chain", otherwise update both the
|
// in sync mode, only update the "body chain", otherwise update both the
|
||||||
// "header chain" and "body chain", updating the header chain in sync resets
|
// "header chain" and "body chain", updating the header chain in sync resets
|
||||||
|
@ -403,11 +406,11 @@ fn update_head(b: &Block, ctx: &BlockContext, batch: &store::Batch) -> Result<Op
|
||||||
if ctx.opts.contains(Options::SYNC) {
|
if ctx.opts.contains(Options::SYNC) {
|
||||||
batch
|
batch
|
||||||
.save_body_head(&tip)
|
.save_body_head(&tip)
|
||||||
.map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
|
||||||
} else {
|
} else {
|
||||||
batch
|
batch
|
||||||
.save_head(&tip)
|
.save_head(&tip)
|
||||||
.map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save head".to_owned()))?;
|
||||||
}
|
}
|
||||||
debug!(
|
debug!(
|
||||||
LOGGER,
|
LOGGER,
|
||||||
|
@@ -436,7 +439,7 @@ fn update_sync_head(
     let tip = Tip::from_block(bh);
     batch
         .save_sync_head(&tip)
-        .map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
+        .map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
     ctx.head = tip.clone();
     debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height);
     Ok(Some(tip))
@@ -451,7 +454,7 @@ fn update_header_head(
     if tip.total_difficulty > ctx.head.total_difficulty {
         batch
             .save_header_head(&tip)
-            .map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
+            .map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
         ctx.head = tip.clone();
         debug!(LOGGER, "header head {} @ {}", bh.hash(), bh.height);
         Ok(Some(tip))
@@ -509,7 +512,7 @@ pub fn rewind_and_apply_fork(
     for (_, h) in fork_hashes {
         let fb = store
             .get_block(&h)
-            .map_err(|e| Error::StoreErr(e, format!("getting forked blocks")))?;
+            .map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;
         ext.apply_block(&fb)?;
     }
     Ok(())
@@ -27,7 +27,7 @@ use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
 use core::core::{Block, BlockHeader};
 use grin_store as store;
-use grin_store::{option_to_not_found, to_key, Error, u64_to_key};
+use grin_store::{option_to_not_found, to_key, u64_to_key, Error};
 use types::Tip;

 const STORE_SUBPATH: &'static str = "chain";
@@ -63,7 +63,7 @@ impl ChainStore {
 #[allow(missing_docs)]
 impl ChainStore {
     pub fn head(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]))
+        option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), "HEAD")
     }

     pub fn head_header(&self) -> Result<BlockHeader, Error> {
@@ -71,15 +71,18 @@ impl ChainStore {
     }

     pub fn get_header_head(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![HEADER_HEAD_PREFIX]))
+        option_to_not_found(self.db.get_ser(&vec![HEADER_HEAD_PREFIX]), "HEADER_HEAD")
     }

     pub fn get_sync_head(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![SYNC_HEAD_PREFIX]))
+        option_to_not_found(self.db.get_ser(&vec![SYNC_HEAD_PREFIX]), "SYNC_HEAD")
     }

     pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())))
+        option_to_not_found(
+            self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())),
+            &format!("BLOCK: {} ", h),
+        )
     }

     pub fn block_exists(&self, h: &Hash) -> Result<bool, Error> {
@@ -99,6 +102,7 @@ impl ChainStore {
         let header: Result<BlockHeader, Error> = option_to_not_found(
             self.db
                 .get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+            &format!("BLOCK HEADER: {}", h),
         );

         // cache miss - so adding to the cache for next time
@@ -121,26 +125,33 @@ impl ChainStore {

         // check we are not out ahead of the current head
         if header.height > head.height {
-            return Err(Error::NotFoundErr);
+            return Err(Error::NotFoundErr(String::from(
+                "header.height > head.height",
+            )));
         }

         let header_at_height = self.get_header_by_height(header.height)?;
         if header.hash() == header_at_height.hash() {
             Ok(())
         } else {
-            Err(Error::NotFoundErr)
+            Err(Error::NotFoundErr(String::from(
+                "header.hash == header_at_height.hash",
+            )))
         }
     }

     pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
-        option_to_not_found(self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)))
-            .and_then(|hash| self.get_block_header(&hash))
+        option_to_not_found(
+            self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)),
+            &format!("Header at height: {}", height),
+        ).and_then(|hash| self.get_block_header(&hash))
     }

     pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
         option_to_not_found(
             self.db
                 .get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())),
+            &format!("Output position for: {:?}", commit),
         )
     }
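Across these store accessors, `grin_store::Error::NotFoundErr` now carries a `String` and `option_to_not_found` takes a description of the missing key. A hedged sketch of the shape that helper takes after this change; the types are stand-ins for the real grin_store API:

// Sketch: a lookup that yields Ok(None) becomes NotFoundErr with a
// human-readable name, so "not found" errors say what was missing.
#[derive(Debug)]
pub enum Error {
    NotFoundErr(String),
}

pub fn option_to_not_found<T>(res: Result<Option<T>, Error>, field_name: &str) -> Result<T, Error> {
    match res {
        Ok(Some(o)) => Ok(o),
        Ok(None) => Err(Error::NotFoundErr(field_name.to_owned())),
        Err(e) => Err(e),
    }
}

fn main() {
    // Mirrors a call like: option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), "HEAD")
    let missing: Result<Option<u32>, Error> = Ok(None);
    println!("{:?}", option_to_not_found(missing, "HEAD"));
}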
@@ -239,7 +250,7 @@ impl<'a> Batch<'a> {
     pub fn init_sync_head(&self, t: &Tip) -> Result<(), Error> {
         let header_tip = match self.store.get_header_head() {
             Ok(hh) => hh,
-            Err(store::Error::NotFoundErr) => {
+            Err(store::Error::NotFoundErr(_)) => {
                 self.save_header_head(t)?;
                 t.clone()
             }
@@ -257,7 +268,10 @@ impl<'a> Batch<'a> {

     /// get block
     pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())))
+        option_to_not_found(
+            self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())),
+            &format!("Block with hash: {}", h),
+        )
     }

     /// Save the block and its header
@@ -303,6 +317,7 @@ impl<'a> Batch<'a> {
         option_to_not_found(
             self.db
                 .get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())),
+            &format!("Output position for commit: {:?}", commit),
         )
     }
@@ -315,6 +330,7 @@ impl<'a> Batch<'a> {
         option_to_not_found(
             self.db
                 .get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+            &format!("Block header for block: {}", h),
         )
     }
@@ -31,16 +31,18 @@ use core::core::hash::{Hash, Hashed};
 use core::core::merkle_proof::MerkleProof;
 use core::core::pmmr;
 use core::core::pmmr::PMMR;
-use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, Transaction,
-                 TxKernel};
+use core::core::{
+    Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, Transaction, TxKernel,
+};
 use core::global;
 use core::ser::{PMMRIndexHashable, PMMRable};

+use error::{Error, ErrorKind};
 use grin_store;
 use grin_store::pmmr::PMMRBackend;
 use grin_store::types::prune_noop;
 use store::{Batch, ChainStore};
-use types::{Error, TxHashSetRoots};
+use types::TxHashSetRoots;
 use util::{secp_static, zip, LOGGER};

 const TXHASHSET_SUBDIR: &'static str = "txhashset";
@@ -138,14 +140,14 @@ impl TxHashSet {
                 if hash == output_id.hash_with_index(pos - 1) {
                     Ok(hash)
                 } else {
-                    Err(Error::TxHashSetErr(format!("txhashset hash mismatch")))
+                    Err(ErrorKind::TxHashSetErr(format!("txhashset hash mismatch")).into())
                 }
             } else {
-                Err(Error::OutputNotFound)
+                Err(ErrorKind::OutputNotFound.into())
             }
         }
-        Err(grin_store::Error::NotFoundErr) => Err(Error::OutputNotFound),
-        Err(e) => Err(Error::StoreErr(e, format!("txhashset unspent check"))),
+        Err(grin_store::Error::NotFoundErr(_)) => Err(ErrorKind::OutputNotFound.into()),
+        Err(e) => Err(ErrorKind::StoreErr(e, format!("txhashset unspent check")).into()),
     }
 }
@@ -338,7 +340,7 @@ where
     Err(e) => {
         debug!(
             LOGGER,
-            "Error returned, discarding txhashset extension: {:?}", e
+            "Error returned, discarding txhashset extension: {}", e
         );
         trees.output_pmmr_h.backend.discard();
         trees.rproof_pmmr_h.backend.discard();
@@ -577,7 +579,7 @@ impl<'a> Extension<'a> {
         // If we have not yet reached 1,000 blocks then
         // we can fail immediately as coinbase cannot be mature.
         if height < global::coinbase_maturity() {
-            return Err(Error::ImmatureCoinbase);
+            return Err(ErrorKind::ImmatureCoinbase.into());
         }

         // Find the "cutoff" pos in the output MMR based on the
@@ -586,11 +588,10 @@ impl<'a> Extension<'a> {
         let cutoff_header = self.commit_index.get_header_by_height(cutoff_height)?;
         let cutoff_pos = cutoff_header.output_mmr_size;

-
         // If any output pos exceed the cutoff_pos
         // we know they have not yet sufficiently matured.
         if pos > cutoff_pos {
-            return Err(Error::ImmatureCoinbase);
+            return Err(ErrorKind::ImmatureCoinbase.into());
         }
     }
@ -641,7 +642,9 @@ impl<'a> Extension<'a> {
|
||||||
.expect("no output at pos")
|
.expect("no output at pos")
|
||||||
.hash_with_index(pos - 1);
|
.hash_with_index(pos - 1);
|
||||||
if output_id_hash != read_hash || output_id_hash != read_elem_hash {
|
if output_id_hash != read_hash || output_id_hash != read_elem_hash {
|
||||||
return Err(Error::TxHashSetErr(format!("output pmmr hash mismatch")));
|
return Err(
|
||||||
|
ErrorKind::TxHashSetErr(format!("output pmmr hash mismatch")).into(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -652,13 +655,13 @@ impl<'a> Extension<'a> {
 			Ok(true) => {
 				self.rproof_pmmr
 					.prune(pos)
-					.map_err(|s| Error::TxHashSetErr(s))?;
+					.map_err(|s| ErrorKind::TxHashSetErr(s))?;
 			}
-			Ok(false) => return Err(Error::AlreadySpent(commit)),
+			Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()),
-			Err(s) => return Err(Error::TxHashSetErr(s)),
+			Err(s) => return Err(ErrorKind::TxHashSetErr(s).into()),
 			}
 		} else {
-			return Err(Error::AlreadySpent(commit));
+			return Err(ErrorKind::AlreadySpent(commit).into());
 		}
 		Ok(())
 	}
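Note that `.map_err(|s| ErrorKind::TxHashSetErr(s))?` maps the failure to a bare `ErrorKind`; it is the `?` operator that lifts it into `Error` through the `From<ErrorKind>` impl. A minimal illustration of the mechanism, using a hypothetical stand-in for the pmmr call:

	// Sketch: `?` applies `From<ErrorKind> for Error` automatically.
	fn prune_one(pmmr_result: Result<(), String>) -> Result<(), Error> {
		pmmr_result.map_err(|s| ErrorKind::TxHashSetErr(s))?;
		Ok(())
	}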
@@ -677,21 +680,21 @@ impl<'a> Extension<'a> {
 				// We may be on a fork which may result in the entry at that pos being
 				// different to the one we expect.
 				if hash == OutputIdentifier::from_output(out).hash_with_index(pos - 1) {
-					return Err(Error::DuplicateCommitment(commit));
+					return Err(ErrorKind::DuplicateCommitment(commit).into());
 				}
 			}
 		}
 		// push new outputs in their MMR and save them in the index
 		let pos = self.output_pmmr
 			.push(OutputIdentifier::from_output(out))
-			.map_err(&Error::TxHashSetErr)?;
+			.map_err(&ErrorKind::TxHashSetErr)?;
 		self.batch.save_output_pos(&out.commitment(), pos)?;
 		self.new_output_commits.insert(out.commitment(), pos);

 		// push range proofs in their MMR and file
 		self.rproof_pmmr
 			.push(out.proof)
-			.map_err(&Error::TxHashSetErr)?;
+			.map_err(&ErrorKind::TxHashSetErr)?;
 		Ok(())
 	}

@@ -699,7 +702,7 @@ impl<'a> Extension<'a> {
 		// push kernels in their MMR and file
 		self.kernel_pmmr
 			.push(kernel.clone())
-			.map_err(&Error::TxHashSetErr)?;
+			.map_err(&ErrorKind::TxHashSetErr)?;

 		Ok(())
 	}
@@ -729,7 +732,7 @@ impl<'a> Extension<'a> {
 		let pos = self.batch.get_output_pos(&output.commit)?;
 		let merkle_proof = self.output_pmmr
 			.merkle_proof(pos)
-			.map_err(&Error::TxHashSetErr)?;
+			.map_err(&ErrorKind::TxHashSetErr)?;

 		Ok(merkle_proof)
 	}
@@ -742,10 +745,10 @@ impl<'a> Extension<'a> {
 	pub fn snapshot(&mut self, header: &BlockHeader) -> Result<(), Error> {
 		self.output_pmmr
 			.snapshot(header)
-			.map_err(|e| Error::Other(e))?;
+			.map_err(|e| ErrorKind::Other(e))?;
 		self.rproof_pmmr
 			.snapshot(header)
-			.map_err(|e| Error::Other(e))?;
+			.map_err(|e| ErrorKind::Other(e))?;
 		Ok(())
 	}

@@ -820,17 +823,17 @@ impl<'a> Extension<'a> {
 		if rewind_utxo {
 			self.output_pmmr
 				.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&Error::TxHashSetErr)?;
+				.map_err(&ErrorKind::TxHashSetErr)?;
 		}
 		if rewind_rproof {
 			self.rproof_pmmr
 				.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&Error::TxHashSetErr)?;
+				.map_err(&ErrorKind::TxHashSetErr)?;
 		}
 		if rewind_kernel {
 			self.kernel_pmmr
 				.rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
-				.map_err(&Error::TxHashSetErr)?;
+				.map_err(&ErrorKind::TxHashSetErr)?;
 		}

 		Ok(())
@@ -863,10 +866,11 @@ impl<'a> Extension<'a> {
 		}

 		let roots = self.roots();
-		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
+		if roots.output_root != header.output_root
+			|| roots.rproof_root != header.range_proof_root
 			|| roots.kernel_root != header.kernel_root
 		{
-			return Err(Error::InvalidRoot);
+			return Err(ErrorKind::InvalidRoot.into());
 		}
 		Ok(())
 	}
@@ -876,13 +880,13 @@ impl<'a> Extension<'a> {

 		// validate all hashes and sums within the trees
 		if let Err(e) = self.output_pmmr.validate() {
-			return Err(Error::InvalidTxHashSet(e));
+			return Err(ErrorKind::InvalidTxHashSet(e).into());
 		}
 		if let Err(e) = self.rproof_pmmr.validate() {
-			return Err(Error::InvalidTxHashSet(e));
+			return Err(ErrorKind::InvalidTxHashSet(e).into());
 		}
 		if let Err(e) = self.kernel_pmmr.validate() {
-			return Err(Error::InvalidTxHashSet(e));
+			return Err(ErrorKind::InvalidTxHashSet(e).into());
 		}

 		debug!(
@@ -1017,7 +1021,7 @@ impl<'a> Extension<'a> {
 			out.into_output(rp).verify_proof()?;
 		} else {
 			// TODO - rangeproof not found
-			return Err(Error::OutputNotFound);
+			return Err(ErrorKind::OutputNotFound.into());
 		}
 		proof_count += 1;

@@ -1056,10 +1060,10 @@ impl<'a> Extension<'a> {
 		// rewinding further and further back
 		self.rewind(&current, &head_header, false, true, false)?;
 		if self.kernel_pmmr.root() != current.kernel_root {
-			return Err(Error::InvalidTxHashSet(format!(
+			return Err(ErrorKind::InvalidTxHashSet(format!(
 				"Kernel root at {} does not match",
 				current.height
-			)));
+			)).into());
 		}
 	}
 	Ok(())
@@ -1075,7 +1079,7 @@ pub fn zip_read(root_dir: String) -> Result<File, Error> {
 	// create the zip archive
 	{
 		zip::compress(&txhashset_path, &File::create(zip_path.clone())?)
-			.map_err(|ze| Error::Other(ze.to_string()))?;
+			.map_err(|ze| ErrorKind::Other(ze.to_string()))?;
 	}

 	// open it again to read it back
@@ -1089,7 +1093,8 @@ pub fn zip_write(root_dir: String, txhashset_data: File) -> Result<(), Error> {
 	let txhashset_path = Path::new(&root_dir).join(TXHASHSET_SUBDIR);

 	fs::create_dir_all(txhashset_path.clone())?;
-	zip::decompress(txhashset_data, &txhashset_path).map_err(|ze| Error::Other(ze.to_string()))
+	zip::decompress(txhashset_data, &txhashset_path)
+		.map_err(|ze| ErrorKind::Other(ze.to_string()).into())
 }

 /// Given a block header to rewind to and the block header at the

@@ -14,18 +14,10 @@

 //! Base types that the block chain pipeline requires.

-use std::{error, fmt, io};
-
-use util::secp;
-use util::secp::pedersen::Commitment;
-
-use core::core::committed;
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
-use core::core::{block, transaction, Block, BlockHeader};
+use core::core::{Block, BlockHeader};
 use core::ser;
-use grin_store as store;
-use keychain;

 bitflags! {
 	/// Options for block validation
@@ -53,146 +45,6 @@ pub struct TxHashSetRoots {
 	pub kernel_root: Hash,
 }

-/// Errors
-#[derive(Debug)]
-pub enum Error {
-	/// The block doesn't fit anywhere in our chain
-	Unfit(String),
-	/// Special case of orphan blocks
-	Orphan,
-	/// Difficulty is too low either compared to ours or the block PoW hash
-	DifficultyTooLow,
-	/// Addition of difficulties on all previous block is wrong
-	WrongTotalDifficulty,
-	/// Block header sizeshift is lower than our min
-	LowSizeshift,
-	/// The proof of work is invalid
-	InvalidPow,
-	/// The block doesn't sum correctly or a tx signature is invalid
-	InvalidBlockProof(block::Error),
-	/// Block time is too old
-	InvalidBlockTime,
-	/// Block height is invalid (not previous + 1)
-	InvalidBlockHeight,
-	/// One of the root hashes in the block is invalid
-	InvalidRoot,
-	/// One of the MMR sizes in the block header is invalid
-	InvalidMMRSize,
-	/// Error from underlying keychain impl
-	Keychain(keychain::Error),
-	/// Error from underlying secp lib
-	Secp(secp::Error),
-	/// One of the inputs in the block has already been spent
-	AlreadySpent(Commitment),
-	/// An output with that commitment already exists (should be unique)
-	DuplicateCommitment(Commitment),
-	/// Attempt to spend a coinbase output before it sufficiently matures.
-	ImmatureCoinbase,
-	/// Error validating a Merkle proof (coinbase output)
-	MerkleProof,
-	/// output not found
-	OutputNotFound,
-	/// output spent
-	OutputSpent,
-	/// Invalid block version, either a mistake or outdated software
-	InvalidBlockVersion(u16),
-	/// We've been provided a bad txhashset
-	InvalidTxHashSet(String),
-	/// Internal issue when trying to save or load data from store
-	StoreErr(store::Error, String),
-	/// Internal issue when trying to save or load data from append only files
-	FileReadErr(String),
-	/// Error serializing or deserializing a type
-	SerErr(ser::Error),
-	/// Error with the txhashset
-	TxHashSetErr(String),
-	/// Tx not valid based on lock_height.
-	TxLockHeight,
-	/// No chain exists and genesis block is required
-	GenesisBlockRequired,
-	/// Error from underlying tx handling
-	Transaction(transaction::Error),
-	/// Anything else
-	Other(String),
-	/// Error from summing and verifying kernel sums via committed trait.
-	Committed(committed::Error),
-}
-
-impl error::Error for Error {
-	fn description(&self) -> &str {
-		match *self {
-			_ => "some kind of chain error",
-		}
-	}
-}
-
-impl fmt::Display for Error {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		match *self {
-			_ => write!(f, "some kind of chain error"),
-		}
-	}
-}
-
-impl From<store::Error> for Error {
-	fn from(e: store::Error) -> Error {
-		Error::StoreErr(e, "wrapped".to_owned())
-	}
-}
-
-impl From<ser::Error> for Error {
-	fn from(e: ser::Error) -> Error {
-		Error::SerErr(e)
-	}
-}
-
-impl From<io::Error> for Error {
-	fn from(e: io::Error) -> Error {
-		Error::TxHashSetErr(e.to_string())
-	}
-}
-
-impl From<keychain::Error> for Error {
-	fn from(e: keychain::Error) -> Error {
-		Error::Keychain(e)
-	}
-}
-
-impl From<secp::Error> for Error {
-	fn from(e: secp::Error) -> Error {
-		Error::Secp(e)
-	}
-}
-
-impl From<committed::Error> for Error {
-	fn from(e: committed::Error) -> Error {
-		Error::Committed(e)
-	}
-}
-
-impl Error {
-	/// Whether the error is due to a block that was intrinsically wrong
-	pub fn is_bad_data(&self) -> bool {
-		// shorter to match on all the "not the block's fault" errors
-		match *self {
-			Error::Unfit(_)
-			| Error::Orphan
-			| Error::StoreErr(_, _)
-			| Error::SerErr(_)
-			| Error::TxHashSetErr(_)
-			| Error::GenesisBlockRequired
-			| Error::Other(_) => false,
-			_ => true,
-		}
-	}
-}
-
-impl From<transaction::Error> for Error {
-	fn from(e: transaction::Error) -> Error {
-		Error::Transaction(e)
-	}
-}
-
 /// The tip of a fork. A handle to the fork ancestry from its leaf in the
 /// blockchain tree. References the max height and the latest and previous
 /// blocks
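The enum removed above is the heart of this change: the chain crate's flat `Error` enum, its hand-written `From` impls, and the `is_bad_data` classifier move out of `types.rs` and are reborn as an `ErrorKind` in a dedicated error module that this excerpt does not show. Extending the wrapper sketch given earlier in the txhashset section, and clearly as an assumption based on the variants deleted here and the constructors used throughout the diff, the replacement plausibly looks like:

	// Sketch: assumed chain ErrorKind; the variant list mirrors the deleted
	// enum, with payloads adjusted where the diff shows it (e.g. StoreErr
	// now carries the new Clone + Eq grin_store::Error).
	#[derive(Clone, Debug, Eq, PartialEq, Fail)]
	pub enum ErrorKind {
		#[fail(display = "Block is unfit: {}", _0)]
		Unfit(String),
		#[fail(display = "Orphan")]
		Orphan,
		#[fail(display = "Attempt to spend immature coinbase")]
		ImmatureCoinbase,
		#[fail(display = "Store error: {}", _1)]
		StoreErr(grin_store::Error, String),
		#[fail(display = "Serialization error")]
		SerErr(ser::Error),
		#[fail(display = "Error with the txhashset: {}", _0)]
		TxHashSetErr(String),
		#[fail(display = "Genesis block required")]
		GenesisBlockRequired,
		#[fail(display = "{}", _0)]
		Other(String),
		// ...remaining variants mirror the deleted enum...
	}

	impl Error {
		/// `is_bad_data` must survive the move: it now classifies kinds.
		pub fn is_bad_data(&self) -> bool {
			match self.kind() {
				ErrorKind::Unfit(_)
				| ErrorKind::Orphan
				| ErrorKind::StoreErr(_, _)
				| ErrorKind::SerErr(_)
				| ErrorKind::TxHashSetErr(_)
				| ErrorKind::GenesisBlockRequired
				| ErrorKind::Other(_) => false,
				_ => true,
			}
		}
	}

That `is_bad_data` still exists on the new type is confirmed by the adapter code later in this diff, which keeps calling `!e.is_bad_data()` unchanged.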

@@ -24,7 +24,8 @@ extern crate time;
 use std::fs;
 use std::sync::Arc;

-use chain::types::{Error, NoopAdapter};
+use chain::types::NoopAdapter;
+use chain::{Error, ErrorKind};
 use core::core::target::Difficulty;
 use core::core::{transaction, OutputIdentifier};
 use core::global::{self, ChainTypes};
@@ -126,8 +127,11 @@ fn test_coinbase_maturity() {
 	// Confirm the tx attempting to spend the coinbase output
 	// is not valid at the current block height given the current chain state.
 	match chain.verify_coinbase_maturity(&coinbase_txn) {
-		Err(Error::ImmatureCoinbase) => {}
-		_ => panic!("Expected transaction error with immature coinbase."),
+		Ok(_) => {}
+		Err(e) => match e.kind() {
+			ErrorKind::ImmatureCoinbase => {}
+			_ => panic!("Expected transaction error with immature coinbase."),
+		},
 	}

 	pow::pow_size(
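The test can no longer pattern-match `Err(Error::ImmatureCoinbase)` directly: the failure-style `Error` wraps a `Context<ErrorKind>` and does not implement `PartialEq`, so the concrete kind has to be recovered through `e.kind()` first. As a general pattern for asserting a specific kind in tests (a sketch, assuming the `kind()` accessor shown earlier):

	// Sketch: assert that a Result failed with a specific ErrorKind.
	fn assert_immature_coinbase(res: Result<(), Error>) {
		match res {
			Err(ref e) if e.kind() == ErrorKind::ImmatureCoinbase => {}
			other => panic!("expected ImmatureCoinbase, got {:?}", other),
		}
	}

The `==` comparison works because `ErrorKind` itself still derives `PartialEq`, even though the wrapping `Error` does not.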

@@ -10,6 +10,8 @@ bitflags = "1"
 blake2-rfc = "0.2"
 byteorder = "1"
 croaring = "0.3"
+failure = "0.1"
+failure_derive = "0.1"
 lazy_static = "1"
 num-bigint = "0.2"
 rand = "0.3"

@@ -103,7 +103,8 @@ pub const MAX_TX_KERNELS: u64 = 2048;

 /// Whether a block exceeds the maximum acceptable weight
 pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
-	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
+	input_len * BLOCK_INPUT_WEIGHT
+		+ output_len * BLOCK_OUTPUT_WEIGHT
 		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT || input_len > MAX_BLOCK_INPUTS
 }

@@ -159,14 +160,20 @@ pub const DAMP_FACTOR: u64 = 3;
 pub const INITIAL_DIFFICULTY: u64 = 1_000_000;

 /// Consensus errors
-#[derive(Clone, Debug, Eq, PartialEq)]
+#[derive(Clone, Debug, Eq, PartialEq, Fail)]
 pub enum Error {
 	/// Inputs/outputs/kernels must be sorted lexicographically.
 	SortError,
 }

+impl fmt::Display for Error {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		write!(f, "Sort Error")
+	}
+}
+
 /// Error when computing the next difficulty adjustment.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Fail)]
 pub struct TargetError(pub String);

 impl fmt::Display for TargetError {
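`#[derive(Fail)]` from failure_derive requires the type to also implement `Debug` and `Display`, which is why a manual `impl fmt::Display` is added alongside each new `Fail` derive in this file. The alternative, used by the store's error enum later in this diff, is to attach `#[fail(display = ...)]` attributes and let the derive generate `Display`. A minimal sketch of both styles, assuming `std::fmt` and the failure crates are in scope:

	// Style 1: manual Display, as in consensus::Error above
	// (the message text here is illustrative).
	#[derive(Debug, Fail)]
	pub struct TargetError(pub String);

	impl fmt::Display for TargetError {
		fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
			write!(f, "Error computing new difficulty: {}", self.0)
		}
	}

	// Style 2: derived Display via fail attributes, as in grin_store::Error below.
	// `StoreErrorSketch` is a placeholder name, not a real grin type.
	#[derive(Debug, Fail)]
	pub enum StoreErrorSketch {
		#[fail(display = "DB Not Found Error: {}", _0)]
		NotFoundErr(String),
	}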

@@ -16,6 +16,7 @@

 use rand::{thread_rng, Rng};
 use std::collections::HashSet;
+use std::fmt;
 use std::iter::FromIterator;
 use time;

@@ -24,15 +25,17 @@ use core::committed::{self, Committed};
 use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
 use core::id::ShortIdentifiable;
 use core::target::Difficulty;
-use core::{transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Proof, ShortId,
-	Transaction, TxKernel};
+use core::{
+	transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Proof, ShortId,
+	Transaction, TxKernel,
+};
 use global;
 use keychain::{self, BlindingFactor};
 use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
 use util::{secp, secp_static, static_secp_instance, LOGGER};

 /// Errors thrown by Block validation
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, Eq, PartialEq, Fail)]
 pub enum Error {
 	/// The sum of output minus input commitments does not
 	/// match the sum of kernel commitments
@@ -95,6 +98,12 @@ impl From<consensus::Error> for Error {
 	}
 }

+impl fmt::Display for Error {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		write!(f, "Block Error (display needs implementation")
+	}
+}
+
 /// Block header, fairly standard compared to other blockchains.
 #[derive(Clone, Debug, PartialEq)]
 pub struct BlockHeader {

@@ -14,8 +14,8 @@

 //! short ids for compact blocks

-use std::cmp::Ordering;
 use std::cmp::min;
+use std::cmp::Ordering;

 use byteorder::{ByteOrder, LittleEndian};
 use siphasher::sip::SipHasher24;

@@ -39,9 +39,9 @@ use std::marker;

 use croaring::Bitmap;

-use core::BlockHeader;
 use core::hash::Hash;
 use core::merkle_proof::MerkleProof;
+use core::BlockHeader;
 use ser::{PMMRIndexHashable, PMMRable};
 use util::LOGGER;


@@ -14,8 +14,8 @@

 //! Transactions

-use std::cmp::Ordering;
 use std::cmp::max;
+use std::cmp::Ordering;
 use std::collections::HashSet;
 use std::{error, fmt};

@@ -27,8 +27,9 @@ use consensus::{self, VerifySortOrder};
 use core::hash::Hashed;
 use core::{committed, Committed};
 use keychain::{self, BlindingFactor};
-use ser::{self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable,
-	WriteableSorted, Writer};
+use ser::{
+	self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer,
+};
 use util;

 bitflags! {
@@ -250,7 +251,9 @@ pub struct Transaction {
 /// PartialEq
 impl PartialEq for Transaction {
 	fn eq(&self, tx: &Transaction) -> bool {
-		self.inputs == tx.inputs && self.outputs == tx.outputs && self.kernels == tx.kernels
+		self.inputs == tx.inputs
+			&& self.outputs == tx.outputs
+			&& self.kernels == tx.kernels
 			&& self.offset == tx.offset
 	}
 }
@@ -290,7 +293,8 @@ impl Readable for Transaction {
 		let (input_len, output_len, kernel_len) =
 			ser_multiread!(reader, read_u64, read_u64, read_u64);

-		if input_len > consensus::MAX_TX_INPUTS || output_len > consensus::MAX_TX_OUTPUTS
+		if input_len > consensus::MAX_TX_INPUTS
+			|| output_len > consensus::MAX_TX_OUTPUTS
 			|| kernel_len > consensus::MAX_TX_KERNELS
 		{
 			return Err(ser::Error::CorruptedData);
@@ -436,16 +440,12 @@ impl Transaction {

 	/// Calculate transaction weight
 	pub fn tx_weight(&self) -> u32 {
-		Transaction::weight(
-			self.inputs.len(),
-			self.outputs.len(),
-		)
+		Transaction::weight(self.inputs.len(), self.outputs.len())
 	}

 	/// Calculate transaction weight from transaction details
 	pub fn weight(input_len: usize, output_len: usize) -> u32 {
-		let mut tx_weight =
-			-1 * (input_len as i32) + (4 * output_len as i32) + 1;
+		let mut tx_weight = -1 * (input_len as i32) + (4 * output_len as i32) + 1;
 		if tx_weight < 1 {
 			tx_weight = 1;
 		}
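The weight formula itself is unchanged by the reformat: weight = -inputs + 4*outputs + 1, clamped to a minimum of 1. A quick worked example: a transaction with 2 inputs and 1 output gives -2 + 4 + 1 = 3, while a sweep with 5 inputs and 1 output gives -5 + 4 + 1 = 0, which the clamp raises to 1. Inputs carry negative weight because spending outputs shrinks the UTXO set.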
@@ -669,14 +669,8 @@ impl Readable for Input {
 impl Input {
 	/// Build a new input from the data required to identify and verify an
 	/// output being spent.
-	pub fn new(
-		features: OutputFeatures,
-		commit: Commitment,
-	) -> Input {
-		Input {
-			features,
-			commit,
-		}
+	pub fn new(features: OutputFeatures, commit: Commitment) -> Input {
+		Input { features, commit }
 	}

 	/// The input commitment which _partially_ identifies the output being

@@ -17,9 +17,11 @@
 //! should be used sparingly.

 use consensus::TargetError;
-use consensus::{BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DEFAULT_MIN_SIZESHIFT,
-	DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE,
-	REFERENCE_SIZESHIFT};
+use consensus::{
+	BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DEFAULT_MIN_SIZESHIFT,
+	DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE,
+	REFERENCE_SIZESHIFT,
+};
 use core::target::Difficulty;
 /// An enum collecting sets of parameters used throughout the
 /// code wherever mining is needed. This should allow for
@@ -184,7 +186,8 @@ pub fn is_user_testing_mode() -> bool {
 /// Are we in production mode (a live public network)?
 pub fn is_production_mode() -> bool {
 	let param_ref = CHAIN_TYPE.read().unwrap();
-	ChainTypes::Testnet1 == *param_ref || ChainTypes::Testnet2 == *param_ref
+	ChainTypes::Testnet1 == *param_ref
+		|| ChainTypes::Testnet2 == *param_ref
 		|| ChainTypes::Mainnet == *param_ref
 }


@@ -38,7 +38,10 @@ extern crate serde_derive;
 extern crate siphasher;
 #[macro_use]
 extern crate slog;
+extern crate failure;
 extern crate time;
+#[macro_use]
+extern crate failure_derive;

 #[macro_use]
 pub mod macros;

@@ -171,7 +171,13 @@ impl Miner {
 		let size = 1 << sizeshift;
 		let graph = vec![0; size + 1];
 		let easiness = (ease as u64) * (size as u64) / 100;
-		Miner{easiness, cuckoo, graph, proof_size, sizeshift}
+		Miner {
+			easiness,
+			cuckoo,
+			graph,
+			proof_size,
+			sizeshift,
+		}
 	}

 	/// Searches for a solution
@@ -298,8 +304,13 @@ impl Miner {

 /// Utility to transform a 8 bytes of a byte array into a u64.
 fn u8_to_u64(p: &[u8], i: usize) -> u64 {
-	(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24
-		| (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 | (p[i + 6] as u64) << 48
+	(p[i] as u64)
+		| (p[i + 1] as u64) << 8
+		| (p[i + 2] as u64) << 16
+		| (p[i + 3] as u64) << 24
+		| (p[i + 4] as u64) << 32
+		| (p[i + 5] as u64) << 40
+		| (p[i + 6] as u64) << 48
 		| (p[i + 7] as u64) << 56
 }

@@ -400,7 +411,9 @@ mod test {
 	#[test]
 	fn validate_fail() {
 		// edge checks
-		assert!(!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20).verify(&Proof::new(vec![0; 42]), 75));
+		assert!(
+			!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20).verify(&Proof::new(vec![0; 42]), 75)
+		);
 		assert!(!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20)
 			.verify(&Proof::new(vec![0xffff; 42]), 75));
 		// wrong data for proof

@@ -25,16 +25,17 @@ use core::hash::{Hash, Hashed};
 use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
 use std::io::{self, Read, Write};
 use std::{cmp, error, fmt, mem};
-use util::secp::Signature;
-use util::secp::constants::{AGG_SIGNATURE_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
-	SECRET_KEY_SIZE};
+use util::secp::constants::{
+	AGG_SIGNATURE_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE, SECRET_KEY_SIZE,
+};
 use util::secp::pedersen::{Commitment, RangeProof};
+use util::secp::Signature;

 /// Possible errors deriving from serializing or deserializing.
-#[derive(Debug)]
+#[derive(Clone, Eq, PartialEq, Debug)]
 pub enum Error {
 	/// Wraps an io error produced when reading or writing
-	IOErr(io::Error),
+	IOErr(String, io::ErrorKind),
 	/// Expected a given value that wasn't found
 	UnexpectedData {
 		/// What we wanted
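`IOErr` changes shape for a reason worth spelling out: `std::io::Error` implements neither `Clone` nor `PartialEq`, so keeping it as a payload would make the new `#[derive(Clone, Eq, PartialEq, Debug)]` impossible. Flattening it into its message plus its `io::ErrorKind` (both cloneable and comparable) is the standard workaround:

	// Sketch: why the payload is split.
	// io::Error is neither Clone nor PartialEq, so it cannot sit inside a
	// Clone/Eq enum; (String, io::ErrorKind) is both, and the kind is kept
	// so callers can still match on e.g. io::ErrorKind::UnexpectedEof.
	let e = std::io::Error::from(std::io::ErrorKind::UnexpectedEof);
	let flattened = (format!("{}", e), e.kind());

The cost is losing the original error's cause chain, which is visible below in the `cause()` impl now returning `Some(self)` instead of the wrapped error.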
@@ -54,7 +55,7 @@ pub enum Error {

 impl From<io::Error> for Error {
 	fn from(e: io::Error) -> Error {
-		Error::IOErr(e)
+		Error::IOErr(format!("{}", e), e.kind())
 	}
 }

@@ -67,7 +68,7 @@ impl From<consensus::Error> for Error {
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
-			Error::IOErr(ref e) => write!(f, "{}", e),
+			Error::IOErr(ref e, ref _k) => write!(f, "{}", e),
 			Error::UnexpectedData {
 				expected: ref e,
 				received: ref r,
@@ -83,14 +84,14 @@ impl fmt::Display for Error {
 impl error::Error for Error {
 	fn cause(&self) -> Option<&error::Error> {
 		match *self {
-			Error::IOErr(ref e) => Some(e),
+			Error::IOErr(ref _e, ref _k) => Some(self),
 			_ => None,
 		}
 	}

 	fn description(&self) -> &str {
 		match *self {
-			Error::IOErr(ref e) => error::Error::description(e),
+			Error::IOErr(ref e, _) => e,
 			Error::UnexpectedData {
 				expected: _,
 				received: _,
@@ -265,26 +266,30 @@ struct BinReader<'a> {
 	source: &'a mut Read,
 }

+fn map_io_err(err: io::Error) -> Error {
+	Error::IOErr(format!("{}", err), err.kind())
+}
+
 /// Utility wrapper for an underlying byte Reader. Defines higher level methods
 /// to read numbers, byte vectors, hashes, etc.
 impl<'a> Reader for BinReader<'a> {
 	fn read_u8(&mut self) -> Result<u8, Error> {
-		self.source.read_u8().map_err(Error::IOErr)
+		self.source.read_u8().map_err(map_io_err)
 	}
 	fn read_u16(&mut self) -> Result<u16, Error> {
-		self.source.read_u16::<BigEndian>().map_err(Error::IOErr)
+		self.source.read_u16::<BigEndian>().map_err(map_io_err)
 	}
 	fn read_u32(&mut self) -> Result<u32, Error> {
-		self.source.read_u32::<BigEndian>().map_err(Error::IOErr)
+		self.source.read_u32::<BigEndian>().map_err(map_io_err)
 	}
 	fn read_i32(&mut self) -> Result<i32, Error> {
-		self.source.read_i32::<BigEndian>().map_err(Error::IOErr)
+		self.source.read_i32::<BigEndian>().map_err(map_io_err)
 	}
 	fn read_u64(&mut self) -> Result<u64, Error> {
-		self.source.read_u64::<BigEndian>().map_err(Error::IOErr)
+		self.source.read_u64::<BigEndian>().map_err(map_io_err)
 	}
 	fn read_i64(&mut self) -> Result<i64, Error> {
-		self.source.read_i64::<BigEndian>().map_err(Error::IOErr)
+		self.source.read_i64::<BigEndian>().map_err(map_io_err)
 	}
 	/// Read a variable size vector from the underlying Read. Expects a usize
 	fn read_vec(&mut self) -> Result<Vec<u8>, Error> {
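A small but instructive detail: the old code passed the enum constructor itself as the mapping function, which only works while the variant has exactly one field of the matching type. Once `IOErr` takes two fields, the tuple-constructor-as-function trick no longer fits `io::Error -> Error`, hence the dedicated `map_io_err` helper:

	// One-field variant: the constructor is itself a fn(io::Error) -> Error.
	//     result.map_err(Error::IOErr)   // worked before this change
	// Two-field variant: needs an explicit adapter.
	//     result.map_err(map_io_err)     // after this change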
@@ -306,7 +311,7 @@ impl<'a> Reader for BinReader<'a> {
 		self.source
 			.read_exact(&mut buf)
 			.map(move |_| buf)
-			.map_err(Error::IOErr)
+			.map_err(map_io_err)
 	}

 	fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {
@@ -459,7 +464,7 @@ where
 		let elem = T::read(reader);
 		match elem {
 			Ok(e) => buf.push(e),
-			Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => {
+			Err(Error::IOErr(ref _d, ref kind)) if *kind == io::ErrorKind::UnexpectedEof => {
 				break
 			}
 			Err(e) => return Err(e),

@@ -118,7 +118,10 @@ impl PeerStore {
 	}

 	pub fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {
-		option_to_not_found(self.db.get_ser(&peer_key(peer_addr)[..]))
+		option_to_not_found(
+			self.db.get_ser(&peer_key(peer_addr)[..]),
+			&format!("Peer at address: {}", peer_addr),
+		)
 	}

 	pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {

@@ -216,28 +216,32 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 				Ok(_) => {
 					added_hs.push(bh.hash());
 				}
-				Err(chain::Error::Unfit(s)) => {
-					info!(
-						LOGGER,
-						"Received unfit block header {} at {}: {}.",
-						bh.hash(),
-						bh.height,
-						s
-					);
-				}
-				Err(chain::Error::StoreErr(e, explanation)) => {
-					error!(
-						LOGGER,
-						"Store error processing block header {}: in {} {:?}",
-						bh.hash(),
-						explanation,
-						e
-					);
-					return;
-				}
 				Err(e) => {
-					info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
-					// TODO penalize peer somehow
+					match e.kind() {
+						chain::ErrorKind::Unfit(s) => {
+							info!(
+								LOGGER,
+								"Received unfit block header {} at {}: {}.",
+								bh.hash(),
+								bh.height,
+								s
+							);
+						}
+						chain::ErrorKind::StoreErr(e, explanation) => {
+							error!(
+								LOGGER,
+								"Store error processing block header {}: in {} {:?}",
+								bh.hash(),
+								explanation,
+								e
+							);
+							return;
+						}
+						_ => {
+							info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
+							// TODO penalize peer somehow
+						}
+					}
 				}
 			}
 		}
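This hunk shows the mechanical translation that recurs across the adapters: where code used to destructure `chain::Error` variants in the outer match, it now catches a single `Err(e)` and dispatches on `e.kind()`. One subtlety: `kind()` as sketched earlier returns a clone of the `ErrorKind`, which is what lets arms like `chain::ErrorKind::Unfit(s)` bind their payload by value while `e` itself stays usable in the fallback arm. The skeleton of the pattern, with `handle_result` as a placeholder and the `println!` calls standing in for the real loggers:

	// Sketch of the recurring rewrite pattern in this diff.
	fn handle_result(res: Result<(), chain::Error>) {
		match res {
			Ok(_) => {}
			Err(e) => match e.kind() {
				chain::ErrorKind::Unfit(s) => println!("unfit: {}", s),
				chain::ErrorKind::StoreErr(err, msg) => println!("store error in {}: {:?}", msg, err),
				_ => println!("other error: {:?}", e),
			},
		}
	}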
@@ -269,11 +273,13 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 			let header = w(&self.chain).get_header_by_height(h);
 			match header {
 				Ok(head) => headers.push(head),
-				Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => break,
-				Err(e) => {
-					error!(LOGGER, "Could not build header locator: {:?}", e);
-					return vec![];
-				}
+				Err(e) => match e.kind() {
+					chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
+					_ => {
+						error!(LOGGER, "Could not build header locator: {:?}", e);
+						return vec![];
+					}
+				},
 			}
 		}

@@ -331,7 +337,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
 		if let Err(e) =
 			w(&self.chain).txhashset_write(h, rewind_to_output, rewind_to_kernel, txhashset_data)
 		{
-			error!(LOGGER, "Failed to save txhashset archive: {:?}", e);
+			error!(LOGGER, "Failed to save txhashset archive: {}", e);
 			!e.is_bad_data()
 		} else {
 			info!(LOGGER, "Received valid txhashset data for {}.", h);
@@ -391,13 +397,15 @@ impl NetToChainAdapter {
 					self.find_common_header(locator[1..].to_vec())
 				}
 			}
-			Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => {
-				self.find_common_header(locator[1..].to_vec())
-			}
-			Err(e) => {
-				error!(LOGGER, "Could not build header locator: {:?}", e);
-				None
-			}
+			Err(e) => match e.kind() {
+				chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => {
+					self.find_common_header(locator[1..].to_vec())
+				}
+				_ => {
+					error!(LOGGER, "Could not build header locator: {:?}", e);
+					None
+				}
+			},
 		}
 	}

@@ -413,14 +421,6 @@ impl NetToChainAdapter {
 				self.check_compact(tip);
 				true
 			}
-			Err(chain::Error::Orphan) => {
-				// make sure we did not miss the parent block
-				if !chain.is_orphan(&prev_hash) && !self.currently_syncing.load(Ordering::Relaxed) {
-					debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
-					self.request_block_by_hash(prev_hash, &addr)
-				}
-				true
-			}
 			Err(ref e) if e.is_bad_data() => {
 				debug!(
 					LOGGER,
@@ -435,11 +435,25 @@ impl NetToChainAdapter {
 				false
 			}
 			Err(e) => {
-				debug!(
-					LOGGER,
-					"adapter: process_block: block {} refused by chain: {:?}", bhash, e
-				);
-				true
+				match e.kind() {
+					chain::ErrorKind::Orphan => {
+						// make sure we did not miss the parent block
+						if !chain.is_orphan(&prev_hash)
+							&& !self.currently_syncing.load(Ordering::Relaxed)
+						{
+							debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
+							self.request_block_by_hash(prev_hash, &addr)
+						}
+						true
+					}
+					_ => {
+						debug!(
+							LOGGER,
+							"adapter: process_block: block {} refused by chain: {:?}", bhash, e
+						);
+						true
+					}
+				}
 			}
 		}
 	}
@@ -451,7 +465,8 @@ impl NetToChainAdapter {
 		// down as soon as possible.
 		// Skip this if we are currently syncing (too slow).
 		let chain = w(&self.chain);
-		if chain.head().unwrap().height > 0 && !self.currently_syncing.load(Ordering::Relaxed)
+		if chain.head().unwrap().height > 0
+			&& !self.currently_syncing.load(Ordering::Relaxed)
 			&& self.config.chain_validation_mode == ChainValidationMode::EveryBlock
 		{
 			let now = Instant::now();

@@ -92,12 +92,17 @@ pub fn get_block(
 	);
 	while let Err(e) = result {
 		match e {
-			self::Error::Chain(chain::Error::DuplicateCommitment(_)) => {
-				debug!(
-					LOGGER,
-					"Duplicate commit for potential coinbase detected. Trying next derivation."
-				);
-			}
+			self::Error::Chain(c) => match c.kind() {
+				chain::ErrorKind::DuplicateCommitment(_) => {
+					debug!(
+						LOGGER,
+						"Duplicate commit for potential coinbase detected. Trying next derivation."
+					);
+				}
+				_ => {
+					error!(LOGGER, "Chain Error: {}", c);
+				}
+			},
 			self::Error::Wallet(_) => {
 				error!(
 					LOGGER,
@@ -187,17 +192,23 @@ fn build_block(
 		// If it's a duplicate commitment, it's likely trying to use
 		// a key that's already been derived but not in the wallet
 		// for some reason, allow caller to retry
-		Err(chain::Error::DuplicateCommitment(e)) => {
-			Err(Error::Chain(chain::Error::DuplicateCommitment(e)))
-		}
-
-		//Some other issue, possibly duplicate kernel
 		Err(e) => {
-			error!(
-				LOGGER,
-				"Error setting txhashset root to build a block: {:?}", e
-			);
-			Err(Error::Chain(chain::Error::Other(format!("{:?}", e))))
+			match e.kind() {
+				chain::ErrorKind::DuplicateCommitment(e) => Err(Error::Chain(
+					chain::ErrorKind::DuplicateCommitment(e).into(),
+				)),

+				//Some other issue, possibly duplicate kernel
+				_ => {
+					error!(
+						LOGGER,
+						"Error setting txhashset root to build a block: {:?}", e
+					);
+					Err(Error::Chain(
+						chain::ErrorKind::Other(format!("{:?}", e)).into(),
+					))
+				}
+			}
 		}
 	}
 }

@@ -10,6 +10,8 @@ byteorder = "1"
 croaring = "0.3"
 env_logger = "0.5"
 libc = "0.2"
+failure = "0.1"
+failure_derive = "0.1"
 lmdb-zero = "0.4.4"
 memmap = { git = "https://github.com/danburkert/memmap-rs", tag = "0.6.2" }
 serde = "1"

@@ -21,9 +21,9 @@ use std::path::Path;

 use croaring::Bitmap;

-use core::core::BlockHeader;
 use core::core::hash::Hashed;
 use core::core::pmmr;
+use core::core::BlockHeader;
 use prune_list::PruneList;
 use util::LOGGER;


@@ -29,6 +29,9 @@ extern crate memmap;
 extern crate serde;
 #[macro_use]
 extern crate slog;
+extern crate failure;
+#[macro_use]
+extern crate failure_derive;

 #[macro_use]
 extern crate grin_core as core;

@@ -19,21 +19,24 @@ use std::marker;
 use std::sync::Arc;

 use lmdb_zero as lmdb;
-use lmdb_zero::LmdbResultExt;
 use lmdb_zero::traits::CreateCursor;
+use lmdb_zero::LmdbResultExt;

 use core::ser;

 /// Main error type for this lmdb
-#[derive(Debug)]
+#[derive(Clone, Eq, PartialEq, Debug, Fail)]
 pub enum Error {
 	/// Couldn't find what we were looking for
-	NotFoundErr,
+	#[fail(display = "DB Not Found Error: {}", _0)]
+	NotFoundErr(String),
 	/// Wraps an error originating from RocksDB (which unfortunately returns
 	/// string errors).
+	#[fail(display = "LMDB error")]
 	LmdbErr(lmdb::error::Error),
 	/// Wraps a serialization error for Writeable or Readable
-	SerErr(ser::Error),
+	#[fail(display = "Serialization Error")]
+	SerErr(String),
 }

 impl From<lmdb::error::Error> for Error {
@@ -43,9 +46,9 @@ impl From<lmdb::error::Error> for Error {
 }

 /// unwraps the inner option by converting the none case to a not found error
-pub fn option_to_not_found<T>(res: Result<Option<T>, Error>) -> Result<T, Error> {
+pub fn option_to_not_found<T>(res: Result<Option<T>, Error>, field_name: &str) -> Result<T, Error> {
 	match res {
-		Ok(None) => Err(Error::NotFoundErr),
+		Ok(None) => Err(Error::NotFoundErr(field_name.to_owned())),
 		Ok(Some(o)) => Ok(o),
 		Err(e) => Err(e),
 	}
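`NotFoundErr` growing a `String` payload is what drives the `option_to_not_found` signature change: every caller now has to say what was not found, and that text flows straight into the `#[fail(display = "DB Not Found Error: {}", _0)]` message. A sketch of the call shape, matching the peer-store and wallet call sites in this diff (`db`, `key`, and `peer_addr` are placeholders):

	// Sketch: callers now supply a human-readable description of the lookup.
	let peer = option_to_not_found(
		db.get_ser(&key),
		&format!("Peer at address: {}", peer_addr),
	)?;
	// On a miss this surfaces as, illustratively:
	// "DB Not Found Error: Peer at address: 10.0.0.1:13414"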
@@ -117,9 +120,9 @@ impl Store {
 	) -> Result<Option<T>, Error> {
 		let res: lmdb::error::Result<&[u8]> = access.get(&self.db, key);
 		match res.to_opt() {
-			Ok(Some(mut res)) => match ser::deserialize(&mut res).map_err(Error::SerErr) {
+			Ok(Some(mut res)) => match ser::deserialize(&mut res) {
 				Ok(res) => Ok(Some(res)),
-				Err(e) => Err(From::from(e)),
+				Err(e) => Err(Error::SerErr(format!("{}", e))),
 			},
 			Ok(None) => Ok(None),
 			Err(e) => Err(From::from(e)),
@@ -179,7 +182,7 @@ impl<'a> Batch<'a> {
 		let ser_value = ser::ser_vec(value);
 		match ser_value {
 			Ok(data) => self.put(key, data),
-			Err(err) => Err(Error::SerErr(format!("{}", err))),
+			Err(err) => Err(Error::SerErr(format!("{}", err))),
 		}
 	}


@@ -96,7 +96,7 @@ where

 	fn get(&self, id: &Identifier) -> Result<OutputData, Error> {
 		let key = to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec());
-		option_to_not_found(self.db.get_ser(&key)).map_err(|e| e.into())
+		option_to_not_found(self.db.get_ser(&key), &format!("Key Id: {}", id)).map_err(|e| e.into())
 	}

 	fn iter<'a>(&'a self) -> Box<Iterator<Item = OutputData> + 'a> {
@@ -166,7 +166,10 @@ impl<'a, K> WalletOutputBatch for Batch<'a, K> {

 	fn get(&self, id: &Identifier) -> Result<OutputData, Error> {
 		let key = to_key(OUTPUT_PREFIX, &mut id.to_bytes().to_vec());
-		option_to_not_found(self.db.borrow().as_ref().unwrap().get_ser(&key)).map_err(|e| e.into())
+		option_to_not_found(
+			self.db.borrow().as_ref().unwrap().get_ser(&key),
+			&format!("Key ID: {}", id),
+		).map_err(|e| e.into())
 	}

 	fn iter<'b>(&'b self) -> Box<Iterator<Item = OutputData> + 'b> {