Reintroduce block sums, verify full kernel sums per block (#1559)

* block_sums and full kernel sum verification

* rustfmt

* add docs/comments

* docs

* rustfmt

* comment on fact total_kernel_sum is redundant now

* make sure we set up block_sums correctly on a fork

* rustfmt

* replace those asserts with errors

* rustfmt
This commit is contained in:
Antioch Peverell 2018-09-20 09:19:32 +01:00 committed by GitHub
parent e1c8dc5a3a
commit f042f67fcd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 247 additions and 24 deletions

View file

@ -26,7 +26,7 @@ use lmdb;
use core::core::hash::{Hash, Hashed};
use core::core::merkle_proof::MerkleProof;
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader, Output, OutputIdentifier, Transaction, TxKernel};
use core::core::{Block, BlockHeader, BlockSums, Output, OutputIdentifier, Transaction, TxKernel};
use core::global;
use core::pow::Difficulty;
use error::{Error, ErrorKind};
@ -616,16 +616,15 @@ impl Chain {
let mut txhashset =
txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), Some(&header))?;
// Validate against a read-only extension first.
// The kernel history validation requires a read-only extension
// Validate kernel history against a readonly extension first.
// Kernel history validation requires a readonly extension
// due to the internal rewind behavior.
debug!(
LOGGER,
"chain: txhashset_write: rewinding and validating (read-only)"
"chain: txhashset_write: rewinding and validating kernel history (readonly)"
);
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header)?;
extension.validate(&header, false, status)?;
// Now validate kernel sums at each historical header height
// so we know we can trust the kernel history.
@ -642,6 +641,26 @@ impl Chain {
let mut batch = self.store.batch()?;
txhashset::extending(&mut txhashset, &mut batch, |extension| {
extension.rewind(&header)?;
// Validate the extension, generating the utxo_sum and kernel_sum.
let (utxo_sum, kernel_sum) = extension.validate(&header, false, status)?;
// Now that we have block_sums the total_kernel_sum on the block_header is redundant.
if header.total_kernel_sum != kernel_sum {
return Err(
ErrorKind::Other(format!("total_kernel_sum in header does not match")).into(),
);
}
// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
extension.batch.save_block_sums(
&header.hash(),
&BlockSums {
utxo_sum,
kernel_sum,
},
)?;
extension.rebuild_index()?;
Ok(())
})?;
@ -739,6 +758,7 @@ impl Chain {
count += 1;
batch.delete_block(&b.hash())?;
batch.delete_block_input_bitmap(&b.hash())?;
batch.delete_block_sums(&b.hash())?;
}
Err(NotFoundErr(_)) => {
break;
@ -963,6 +983,33 @@ fn setup_head(
let res = txhashset::extending(txhashset, &mut batch, |extension| {
extension.rewind(&header)?;
extension.validate_roots(&header)?;
// now check we have the "block sums" for the block in question
// if we have no sums (migrating an existing node) we need to go
// back to the txhashset and sum the outputs and kernels
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{
debug!(
LOGGER,
"chain: init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
);
// Do a full (and slow) validation of the txhashset extension
// to calculate the utxo_sum and kernel_sum at this block height.
let (utxo_sum, kernel_sum) = extension.validate_kernel_sums(&header)?;
// Save the block_sums to the db for use later.
extension.batch.save_block_sums(
&header.hash(),
&BlockSums {
utxo_sum,
kernel_sum,
},
)?;
}
debug!(
LOGGER,
"chain: init: rewinding and validating before we start... {} at {}",
@ -992,6 +1039,12 @@ fn setup_head(
batch.setup_height(&genesis.header, &tip)?;
txhashset::extending(txhashset, &mut batch, |extension| {
extension.apply_block(&genesis)?;
// Save the block_sums to the db for use later.
extension
.batch
.save_block_sums(&genesis.hash(), &BlockSums::default())?;
Ok(())
})?;

View file

@ -24,7 +24,8 @@ use chain::OrphanBlockPool;
use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader};
use core::core::Committed;
use core::core::{Block, BlockHeader, BlockSums};
use core::global;
use core::pow::Difficulty;
use error::{Error, ErrorKind};
@ -132,9 +133,8 @@ pub fn process_block(
check_prev_store(&b.header, ctx)?;
}
// Validate the block itself.
// Taking advantage of the verifier_cache for
// rangeproofs and kernel signatures.
// Validate the block itself, make sure it is internally consistent.
// Use the verifier_cache for verifying rangeproofs and kernel signatures.
validate_block(b, ctx, verifier_cache)?;
// Begin a new batch as we may begin modifying the db at this point.
@ -162,6 +162,12 @@ pub fn process_block(
// to applying the new block.
verify_coinbase_maturity(b, &mut extension)?;
// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
// we can verify_kernel_sums across the full UTXO sum and full kernel sum
// accounting for inputs/outputs/kernels in this new block.
// We know there are no double-spends etc. if this verifies successfully.
verify_block_sums(b, &mut extension)?;
// Apply the block to the txhashset state.
// Validate the txhashset roots and sizes against the block header.
// Block is invalid if there are any discrepancies.
@ -495,8 +501,7 @@ fn validate_block(
&prev.total_kernel_offset,
&prev.total_kernel_sum,
verifier_cache,
)
.map_err(|e| ErrorKind::InvalidBlockProof(e))?;
).map_err(|e| ErrorKind::InvalidBlockProof(e))?;
Ok(())
}
@ -508,6 +513,48 @@ fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Re
Ok(())
}
/// Verify full kernel sums for the chain with this block applied.
/// The (BlockSums, &Committed) tuple itself implements Committed, which
/// lets us verify kernel sums across the full utxo and kernel sets based on
/// the block_sums of the previous block, accounting for the
/// inputs|outputs|kernels introduced by the new block.
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
    // Look up the running sums recorded for the previous block.
    let prev_sums = ext.batch.get_block_sums(&b.header.previous)?;

    {
        // Sanity check: with block_sums in the db the total_kernel_sum on the
        // previous header is redundant, but the two must still agree.
        let prev_header = ext.batch.get_block_header(&b.header.previous)?;
        if prev_header.total_kernel_sum != prev_sums.kernel_sum {
            return Err(
                ErrorKind::Other(format!("total_kernel_sum in header does not match")).into(),
            );
        }
    }

    // Overage comes from the new block alone — all earlier overage is
    // already folded into the previous block_sums.
    let overage = b.header.overage();

    // The kernel offset, by contrast, is taken from the new block's header.
    let offset = b.header.total_kernel_offset();

    // Verify the kernel sums with the new block applied on top of prev_sums.
    let (utxo_sum, kernel_sum) =
        (prev_sums, b as &Committed).verify_kernel_sums(overage, offset)?;

    // Persist the block_sums for the new block to the db via the batch.
    ext.batch.save_block_sums(
        &b.header.hash(),
        &BlockSums {
            utxo_sum,
            kernel_sum,
        },
    )?;

    Ok(())
}
/// Fully validate the block by applying it to the txhashset extension.
/// Check both the txhashset roots and sizes are correct after applying the block.
fn apply_block_to_txhashset(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
@ -661,7 +708,13 @@ pub fn rewind_and_apply_fork(
let fb = store
.get_block(&h)
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;
ext.apply_block(&fb)?;
// Re-verify coinbase maturity along this fork.
verify_coinbase_maturity(&fb, ext)?;
// Re-verify block_sums to set the block_sums up on this fork correctly.
verify_block_sums(&fb, ext)?;
// Re-apply the blocks.
apply_block_to_txhashset(&fb, ext)?;
}
Ok(())
}

View file

@ -24,7 +24,7 @@ use util::secp::pedersen::Commitment;
use core::consensus::TargetError;
use core::core::hash::{Hash, Hashed};
use core::core::{Block, BlockHeader};
use core::core::{Block, BlockHeader, BlockSums};
use core::pow::Difficulty;
use grin_store as store;
use grin_store::{option_to_not_found, to_key, u64_to_key, Error};
@ -40,6 +40,7 @@ const SYNC_HEAD_PREFIX: u8 = 's' as u8;
const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
const COMMIT_POS_PREFIX: u8 = 'c' as u8;
const BLOCK_INPUT_BITMAP_PREFIX: u8 = 'B' as u8;
const BLOCK_SUMS_PREFIX: u8 = 'M' as u8;
/// All chain-related database operations
pub struct ChainStore {
@ -276,7 +277,7 @@ impl<'a> Batch<'a> {
.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
}
pub fn get_block_header_db(&self, h: &Hash) -> Result<BlockHeader, Error> {
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
option_to_not_found(
self.db
.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
@ -296,6 +297,23 @@ impl<'a> Batch<'a> {
.delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()))
}
/// Store the block_sums (running utxo_sum/kernel_sum) for the given block hash.
pub fn save_block_sums(&self, bh: &Hash, sums: &BlockSums) -> Result<(), Error> {
    let key = to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec());
    self.db.put_ser(&key[..], &sums)
}
/// Fetch the block_sums for the given block hash.
/// Errors with "not found" if no sums have been saved for this block.
pub fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, Error> {
    let key = to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec());
    let res = self.db.get_ser(&key);
    option_to_not_found(res, &format!("Block sums for block: {}", bh))
}
/// Remove the block_sums entry for the given block hash (used when cleaning
/// up old blocks from the db).
pub fn delete_block_sums(&self, bh: &Hash) -> Result<(), Error> {
    let key = to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec());
    self.db.delete(&key)
}
/// Maintain consistency of the "header_by_height" index by traversing back
/// through the current chain and updating "header_by_height" until we reach
/// a block_header that is consistent with its height (everything prior to

View file

@ -482,9 +482,6 @@ impl<'a> Extension<'a> {
match self.apply_output(output) {
Ok(pos) => {
self.batch.save_output_pos(&output.commitment(), pos)?;
// We will rollback the batch later, but assert it works
// as we expect while we have it open.
assert_eq!(self.batch.get_output_pos(&output.commitment()), Ok(pos));
}
Err(e) => {
self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
@ -675,11 +672,17 @@ impl<'a> Extension<'a> {
// The output and rproof MMRs should be exactly the same size
// and we should have inserted to both in exactly the same pos.
assert_eq!(
self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size()
{
if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
return Err(
ErrorKind::Other(format!("output vs rproof MMRs different sizes")).into(),
);
assert_eq!(output_pos, rproof_pos);
}
if output_pos != rproof_pos {
return Err(ErrorKind::Other(format!("output vs rproof MMRs different pos")).into());
}
}
Ok(output_pos)
}
@ -868,6 +871,19 @@ impl<'a> Extension<'a> {
Ok(())
}
/// Validate full kernel sums against the provided header (for overage and kernel_offset).
/// This is an expensive operation as we need to retrieve all the UTXOs and kernels
/// from the respective MMRs.
/// For a significantly faster way of validating full kernel sums see BlockSums.
pub fn validate_kernel_sums(
&self,
header: &BlockHeader,
) -> Result<((Commitment, Commitment)), Error> {
let (utxo_sum, kernel_sum) =
self.verify_kernel_sums(header.total_overage(), header.total_kernel_offset())?;
Ok((utxo_sum, kernel_sum))
}
/// Validate the txhashset state against the provided block header.
pub fn validate(
&mut self,
@ -886,8 +902,7 @@ impl<'a> Extension<'a> {
// The real magicking happens here. Sum of kernel excesses should equal
// sum of unspent outputs minus total supply.
let (output_sum, kernel_sum) =
self.verify_kernel_sums(header.total_overage(), header.total_kernel_offset())?;
let (output_sum, kernel_sum) = self.validate_kernel_sums(header)?;
// This is an expensive verification step.
self.verify_kernel_signatures(status)?;

View file

@ -0,0 +1,82 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! BlockSums per-block running totals for utxo_sum and kernel_sum.
//! Allows fast "full" verification of kernel sums at a given block height.
use core::committed::Committed;
use ser::{self, Readable, Reader, Writeable, Writer};
use util::secp::pedersen::Commitment;
use util::secp_static;
/// The output_sum and kernel_sum for a given block.
/// This is used to validate the next block being processed by applying
/// the inputs, outputs, kernels and kernel_offset from the new block
/// and checking everything sums correctly.
/// Persisted to the db per block so full kernel sums can be verified cheaply.
#[derive(Debug, Clone)]
pub struct BlockSums {
/// The sum of the unspent outputs.
pub utxo_sum: Commitment,
/// The sum of all kernels.
pub kernel_sum: Commitment,
}
impl Writeable for BlockSums {
// Serialization order (utxo_sum, then kernel_sum) must match Readable::read.
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_fixed_bytes(&self.utxo_sum)?;
writer.write_fixed_bytes(&self.kernel_sum)?;
Ok(())
}
}
impl Readable for BlockSums {
// Deserialization order (utxo_sum, then kernel_sum) mirrors Writeable::write.
fn read(reader: &mut Reader) -> Result<BlockSums, ser::Error> {
Ok(BlockSums {
utxo_sum: Commitment::read(reader)?,
kernel_sum: Commitment::read(reader)?,
})
}
}
impl Default for BlockSums {
    /// Sums for the empty chain state: both start from the commitment to zero.
    fn default() -> BlockSums {
        let zero_commit = secp_static::commit_to_zero_value();
        BlockSums {
            utxo_sum: zero_commit.clone(),
            // Last use of zero_commit — move it rather than clone (avoids a
            // redundant clone flagged by clippy's redundant_clone lint).
            kernel_sum: zero_commit,
        }
    }
}
/// Accumulator: a (previous BlockSums, new block) pair that is itself Committed.
/// It's a tuple but we can verify the "full" kernel sums on it.
/// This means we can take a previous block_sums, apply a new block to it
/// and verify the full kernel sums (full UTXO and kernel sets).
impl<'a> Committed for (BlockSums, &'a Committed) {
fn inputs_committed(&self) -> Vec<Commitment> {
// Inputs come solely from the new block (self.1).
self.1.inputs_committed()
}
fn outputs_committed(&self) -> Vec<Commitment> {
// Seed with the running utxo_sum, then append the new block's outputs.
let mut outputs = vec![self.0.utxo_sum];
outputs.extend(&self.1.outputs_committed());
outputs
}
fn kernels_committed(&self) -> Vec<Commitment> {
// Seed with the running kernel_sum, then append the new block's kernels.
let mut kernels = vec![self.0.kernel_sum];
kernels.extend(&self.1.kernels_committed());
kernels
}
}

View file

@ -15,6 +15,7 @@
//! Core types
pub mod block;
pub mod block_sums;
pub mod committed;
pub mod compact_block;
pub mod compact_transaction;
@ -30,6 +31,7 @@ use consensus::GRIN_BASE;
use util::secp::pedersen::Commitment;
pub use self::block::*;
pub use self::block_sums::*;
pub use self::committed::Committed;
pub use self::compact_block::*;
pub use self::compact_transaction::*;