[testnet2] Store output sum (#1043)

* block sums and reworked block validation
read and write block_sums
refactor validate() on both the block and the txhashset
write block_sums on fast sync
we store the kernel_sum (and need to account for the kernel offset)

* block_sums

* rustfmt

* cleanup
Antioch Peverell 2018-05-07 09:21:41 -04:00 committed by GitHub
parent b42b2a4f77
commit 4dd94ff39e
13 changed files with 426 additions and 141 deletions
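
The core identity this commit starts caching per block: if the sums were correct after the parent block, then adding this block's net output commitments on one side, and its kernel excesses plus kernel offset on the other, must still balance. A minimal sketch of that check, mirroring Block::verify_sums in the diff below (same types and calls as the diff; the zero-commit filtering the real code does is omitted here for brevity):

    fn check_block_sums(
        b: &Block,
        prev_output_sum: Commitment,
        prev_kernel_sum: Commitment,
    ) -> Result<(Commitment, Commitment), Error> {
        let secp = static_secp_instance();
        let secp = secp.lock().unwrap();

        // output side: previous utxo sum plus this block's outputs minus inputs
        // (fee/reward overage is handled inside sum_commitments)
        let io_sum = b.sum_commitments()?;
        let output_sum = secp.commit_sum(vec![io_sum, prev_output_sum], vec![])?;

        // kernel side: previous excess sum plus this block's kernel excesses
        let mut excesses: Vec<Commitment> = b.kernels.iter().map(|k| k.excess).collect();
        excesses.push(prev_kernel_sum);
        let kernel_sum = secp.commit_sum(excesses, vec![])?;

        // fold in the header's accumulated kernel offset as commit(0, offset)
        let key = b.header.total_kernel_offset.secret_key(&secp)?;
        let offset_commit = secp.commit(0, key)?;
        let kernel_sum_plus_offset = secp.commit_sum(vec![kernel_sum, offset_commit], vec![])?;

        // both sides must commit to the same point
        if output_sum != kernel_sum_plus_offset {
            return Err(Error::KernelSumMismatch);
        }

        // these two sums are what gets persisted as BlockSums for the next block
        Ok((output_sum, kernel_sum))
    }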


@ -187,9 +187,32 @@ impl Chain {
);
extension.rewind(&header)?;
extension.validate_roots(&header)?;
// now check we have the "block sums" for the block in question
// if we have no sums (migrating an existing node) we need to go
// back to the txhashset and sum the outputs and kernels
if header.height > 0 && store.get_block_sums(&header.hash()).is_err() {
debug!(
LOGGER,
"chain: init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
);
let (output_sum, kernel_sum) = extension.validate_sums(&header)?;
store.save_block_sums(
&header.hash(),
&BlockSums {
output_sum,
kernel_sum,
},
)?;
}
Ok(())
});
if res.is_ok() {
break;
} else {
@ -453,7 +476,8 @@ impl Chain {
// consistent.
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header)?;
extension.validate(&header, skip_rproofs)
extension.validate(&header, skip_rproofs)?;
Ok(())
})
}
@ -574,7 +598,8 @@ impl Chain {
// Note: we are validating against a writeable extension.
txhashset::extending(&mut txhashset, |extension| {
extension.rewind(&header)?;
extension.validate(&header, false)?;
let (output_sum, kernel_sum) = extension.validate(&header, false)?;
extension.save_latest_block_sums(&header, output_sum, kernel_sum)?;
extension.rebuild_index()?;
Ok(())
})?;
@ -647,6 +672,7 @@ impl Chain {
Ok(b) => {
self.store.delete_block(&b.hash())?;
self.store.delete_block_marker(&b.hash())?;
self.store.delete_block_sums(&b.hash())?;
}
Err(NotFoundErr) => {
break;
@ -764,6 +790,13 @@ impl Chain {
.map_err(|e| Error::StoreErr(e, "chain get block marker".to_owned()))
}
/// Get the block sums for the specified block hash.
pub fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, Error> {
self.store
.get_block_sums(bh)
.map_err(|e| Error::StoreErr(e, "chain get block sums".to_owned()))
}
/// Gets the block header at the provided height
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
self.store


@ -64,7 +64,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
validate_header(&b.header, &mut ctx)?;
// valid header, now check we actually have the previous block in the store
// now check we actually have the previous block in the store
// not just the header but the block itself
// short circuit the test first both for performance (in-mem vs db access)
// but also for the specific case of the first fast sync full block
@ -83,8 +83,12 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
}
}
// valid header and we have a previous block, time to take the lock on the sum
// trees
// validate the block itself
// we can do this now before interacting with the txhashset
validate_block(b, &mut ctx)?;
// header and block both valid, and we have a previous block
// so take the lock on the txhashset
let local_txhashset = ctx.txhashset.clone();
let mut txhashset = local_txhashset.write().unwrap();
@ -96,7 +100,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
// start a chain extension unit of work dependent on the success of the
// internal validation and saving operations
let result = txhashset::extending(&mut txhashset, |mut extension| {
validate_block(b, &mut ctx, &mut extension)?;
validate_block_via_txhashset(b, &mut ctx, &mut extension)?;
trace!(
LOGGER,
"pipe: process_block: {} at {} is valid, save and append.",
@ -295,18 +299,38 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
Ok(())
}
/// Fully validate the block content.
fn validate_block(
fn validate_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// If this is the first block then we have no previous block sums stored.
let block_sums = if b.header.height == 1 {
BlockSums::default()
} else {
ctx.store.get_block_sums(&b.header.previous)?
};
let (new_output_sum, new_kernel_sum) =
b.validate(&block_sums.output_sum, &block_sums.kernel_sum)
.map_err(&Error::InvalidBlockProof)?;
ctx.store.save_block_sums(
&b.hash(),
&BlockSums {
output_sum: new_output_sum,
kernel_sum: new_kernel_sum,
},
)?;
Ok(())
}
/// Fully validate the block by applying it to the txhashset extension
/// and checking the roots.
/// Rewind and reapply forked blocks if necessary to put the txhashset extension
/// in the correct state to accept the block.
fn validate_block_via_txhashset(
b: &Block,
ctx: &mut BlockContext,
ext: &mut txhashset::Extension,
) -> Result<(), Error> {
let prev_header = ctx.store.get_block_header(&b.header.previous)?;
// main isolated block validation
// checks all commitment sums and sigs
b.validate(&prev_header).map_err(&Error::InvalidBlockProof)?;
if b.header.previous != ctx.head.last_block_h {
rewind_and_apply_fork(b, ctx.store.clone(), ext)?;
}
@ -322,17 +346,21 @@ fn validate_block(
debug!(
LOGGER,
"validate_block: output roots - {:?}, {:?}", roots.output_root, b.header.output_root,
"validate_block_via_txhashset: output roots - {:?}, {:?}",
roots.output_root,
b.header.output_root,
);
debug!(
LOGGER,
"validate_block: rproof roots - {:?}, {:?}",
"validate_block_via_txhashset: rproof roots - {:?}, {:?}",
roots.rproof_root,
b.header.range_proof_root,
);
debug!(
LOGGER,
"validate_block: kernel roots - {:?}, {:?}", roots.kernel_root, b.header.kernel_root,
"validate_block_via_txhashset: kernel roots - {:?}, {:?}",
roots.kernel_root,
b.header.kernel_root,
);
return Err(Error::InvalidRoot);
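
Condensing the control flow above: the txhashset lock is now taken only after the block has fully validated against the stored sums of its parent. A hypothetical condensation (not a function in the diff) of the new ordering:

    // sketch of process_block's new ordering
    validate_header(&b.header, &mut ctx)?;  // cheap, stateless header checks
    validate_block(b, &mut ctx)?;           // full block check against stored BlockSums,
                                            // no txhashset access required
    let local_txhashset = ctx.txhashset.clone();
    let mut txhashset = local_txhashset.write().unwrap();
    txhashset::extending(&mut txhashset, |mut extension| {
        // only roots checking and fork rewind/reapply remain inside the lock
        validate_block_via_txhashset(b, &mut ctx, &mut extension)
    })?;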


@ -37,6 +37,7 @@ const SYNC_HEAD_PREFIX: u8 = 's' as u8;
const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
const COMMIT_POS_PREFIX: u8 = 'c' as u8;
const BLOCK_MARKER_PREFIX: u8 = 'm' as u8;
const BLOCK_SUMS_PREFIX: u8 = 'M' as u8;
/// An implementation of the ChainStore trait backed by a simple key-value
/// store.
@ -238,6 +239,22 @@ impl ChainStore for ChainKVStore {
.delete(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec()))
}
fn save_block_sums(&self, bh: &Hash, sums: &BlockSums) -> Result<(), Error> {
self.db
.put_ser(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())[..], &sums)
}
fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, Error> {
option_to_not_found(
self.db
.get_ser(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())),
)
}
fn delete_block_sums(&self, bh: &Hash) -> Result<(), Error> {
self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec()))
}
/// Maintain consistency of the "header_by_height" index by traversing back
/// through the current chain and updating "header_by_height" until we reach
/// a block_header that is consistent with its height (everything prior to


@ -35,9 +35,8 @@ use core::ser::{PMMRIndexHashable, PMMRable};
use grin_store;
use grin_store::pmmr::PMMRBackend;
use grin_store::types::prune_noop;
use keychain::BlindingFactor;
use types::{BlockMarker, ChainStore, Error, TxHashSetRoots};
use util::{zip, LOGGER};
use types::{BlockMarker, BlockSums, ChainStore, Error, TxHashSetRoots};
use util::{secp_static, zip, LOGGER};
const TXHASHSET_SUBDIR: &'static str = "txhashset";
const OUTPUT_SUBDIR: &'static str = "output";
@ -607,8 +606,9 @@ impl<'a> Extension<'a> {
Ok(())
}
/// Validate the txhashset state against the provided block header.
pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
fn validate_mmrs(&self) -> Result<(), Error> {
let now = Instant::now();
// validate all hashes and sums within the trees
if let Err(e) = self.output_pmmr.validate() {
return Err(Error::InvalidTxHashSet(e));
@ -620,40 +620,106 @@ impl<'a> Extension<'a> {
return Err(Error::InvalidTxHashSet(e));
}
debug!(
LOGGER,
"txhashset: validated the output|rproof|kernel mmrs, took {}s",
now.elapsed().as_secs(),
);
Ok(())
}
/// The real magicking: the sum of all kernel excess should equal the sum
/// of all output commitments, minus the total supply.
pub fn validate_sums(&self, header: &BlockHeader) -> Result<((Commitment, Commitment)), Error> {
let now = Instant::now();
// supply is the sum of the coinbase outputs from each block header
let supply_commit = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let supply = header.height * REWARD;
secp.commit_value(supply)?
};
let output_sum = self.sum_outputs(supply_commit)?;
let kernel_sum = self.sum_kernels()?;
let zero_commit = secp_static::commit_to_zero_value();
let offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let key = header.total_kernel_offset.secret_key(&secp)?;
secp.commit(0, key)?
};
let mut excesses = vec![kernel_sum, offset];
excesses.retain(|x| *x != zero_commit);
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_sum(excesses, vec![])?
};
if output_sum != kernel_sum_plus_offset {
return Err(Error::InvalidTxHashSet(
"Differing Output commitment and kernel excess sums.".to_owned(),
));
}
debug!(
LOGGER,
"txhashset: validated sums, took (total) {}s",
now.elapsed().as_secs(),
);
Ok((output_sum, kernel_sum))
}
/// Validate the txhashset state against the provided block header.
pub fn validate(
&mut self,
header: &BlockHeader,
skip_rproofs: bool,
) -> Result<((Commitment, Commitment)), Error> {
self.validate_mmrs()?;
self.validate_roots(header)?;
if header.height == 0 {
return Ok(());
let zero_commit = secp_static::commit_to_zero_value();
return Ok((zero_commit.clone(), zero_commit.clone()));
}
// the real magicking: the sum of all kernel excess should equal the sum
// of all Output commitments, minus the total supply
let kernel_offset = self.sum_kernel_offsets(&header)?;
let kernel_sum = self.sum_kernels(kernel_offset)?;
let output_sum = self.sum_outputs()?;
let (output_sum, kernel_sum) = self.validate_sums(header)?;
// supply is the sum of the coinbase outputs from all the block headers
let supply = header.height * REWARD;
// this is a relatively expensive verification step
self.verify_kernel_signatures()?;
{
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let over_commit = secp.commit_value(supply)?;
let adjusted_sum_output = secp.commit_sum(vec![output_sum], vec![over_commit])?;
if adjusted_sum_output != kernel_sum {
return Err(Error::InvalidTxHashSet(
"Differing Output commitment and kernel excess sums.".to_owned(),
));
}
}
// now verify the rangeproof for each output in the sum above
// verify the rangeproof for each output in the sum above
// this is an expensive operation (only verified if requested)
if !skip_rproofs {
self.verify_rangeproofs()?;
}
Ok((output_sum, kernel_sum))
}
/// Save block sums (the output_sum and kernel_sum) for the given block
/// header.
pub fn save_latest_block_sums(
&self,
header: &BlockHeader,
output_sum: Commitment,
kernel_sum: Commitment,
) -> Result<(), Error> {
self.commit_index.save_block_sums(
&header.hash(),
&BlockSums {
output_sum,
kernel_sum,
},
)?;
Ok(())
}
@ -709,53 +775,59 @@ impl<'a> Extension<'a> {
)
}
// We maintain the total accumulated kernel offset in each block header.
// So "summing" is just a case of taking the total kernel offset
// directly from the current block header.
fn sum_kernel_offsets(&self, header: &BlockHeader) -> Result<Option<Commitment>, Error> {
let offset = if header.total_kernel_offset == BlindingFactor::zero() {
None
} else {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let skey = header.total_kernel_offset.secret_key(&secp)?;
Some(secp.commit(0, skey)?)
};
Ok(offset)
}
/// Sums the excess of all our kernels, validating their signatures on the
/// way
fn sum_kernels(&self, kernel_offset: Option<Commitment>) -> Result<Commitment, Error> {
/// Sums the excess of all our kernels.
fn sum_kernels(&self) -> Result<Commitment, Error> {
let now = Instant::now();
let mut commitments = vec![];
if let Some(offset) = kernel_offset {
commitments.push(offset);
}
for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
if pmmr::is_leaf(n) {
if let Some(kernel) = self.kernel_pmmr.get_data(n) {
kernel.verify()?;
commitments.push(kernel.excess);
}
}
}
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let kern_count = commitments.len();
let sum_kernel = secp.commit_sum(commitments, vec![])?;
let kernel_sum = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_sum(commitments, vec![])?
};
debug!(
LOGGER,
"Validated, summed (and offset) {} kernels, pmmr size {}, took {}s",
"txhashset: summed {} kernels, pmmr size {}, took {}s",
kern_count,
self.kernel_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(sum_kernel)
Ok(kernel_sum)
}
fn verify_kernel_signatures(&self) -> Result<(), Error> {
let now = Instant::now();
let mut kern_count = 0;
for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
if pmmr::is_leaf(n) {
if let Some(kernel) = self.kernel_pmmr.get_data(n) {
kernel.verify()?;
kern_count += 1;
}
}
}
debug!(
LOGGER,
"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
kern_count,
self.kernel_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(())
}
fn verify_rangeproofs(&self) -> Result<(), Error> {
@ -784,7 +856,7 @@ impl<'a> Extension<'a> {
}
debug!(
LOGGER,
"Verified {} Rangeproofs, pmmr size {}, took {}s",
"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
proof_count,
self.rproof_pmmr.unpruned_size(),
now.elapsed().as_secs(),
@ -792,8 +864,8 @@ impl<'a> Extension<'a> {
Ok(())
}
/// Sums all our Output commitments, checking range proofs at the same time
fn sum_outputs(&self) -> Result<Commitment, Error> {
/// Sums all our unspent output commitments.
fn sum_outputs(&self, supply_commit: Commitment) -> Result<Commitment, Error> {
let now = Instant::now();
let mut commitments = vec![];
@ -805,20 +877,23 @@ impl<'a> Extension<'a> {
}
}
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let commit_count = commitments.len();
let sum_output = secp.commit_sum(commitments, vec![])?;
let output_sum = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_sum(commitments, vec![supply_commit])?
};
debug!(
LOGGER,
"Summed {} Outputs, pmmr size {}, took {}s",
"txhashset: summed {} outputs, pmmr size {}, took {}s",
commit_count,
self.output_pmmr.unpruned_size(),
now.elapsed().as_secs(),
);
Ok(sum_output)
Ok(output_sum)
}
}
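
Written out, the full-chain invariant that validate_sums checks above (with G the blinding generator, H the value generator, and supply = height * REWARD as in the code):

    sum(unspent output commitments) - commit_value(supply)
        == sum(kernel excesses) + commit(0, total_kernel_offset)

Every output commits to v*H + r*G, so subtracting the total coinbase supply cancels all legitimately created value; what remains on the left is pure blinding factor, which must equal the accumulated kernel excesses plus the header's total kernel offset on the right.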


@ -17,6 +17,7 @@
use std::{error, fmt, io};
use util::secp;
use util::secp_static;
use util::secp::pedersen::Commitment;
use core::core::hash::{Hash, Hashed};
@ -328,6 +329,15 @@ pub trait ChainStore: Send + Sync {
/// Deletes a block marker associated with the provided hash
fn delete_block_marker(&self, bh: &Hash) -> Result<(), store::Error>;
/// Save block sums for the given block hash.
fn save_block_sums(&self, bh: &Hash, sums: &BlockSums) -> Result<(), store::Error>;
/// Get block sums for the given block hash.
fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, store::Error>;
/// Delete block sums for the given block hash.
fn delete_block_sums(&self, bh: &Hash) -> Result<(), store::Error>;
/// Saves the provided block header at the corresponding height. Also check
/// the consistency of the height chain in store by assuring previous
/// headers are also at their respective heights.
@ -389,3 +399,42 @@ impl Default for BlockMarker {
}
}
}
/// The output_sum and kernel_sum for a given block.
/// This is used to validate the next block being processed by applying
/// the inputs, outputs, kernels and kernel_offset from the new block
/// and checking everything sums correctly.
#[derive(Debug, Clone)]
pub struct BlockSums {
/// The total output sum so far.
pub output_sum: Commitment,
/// The total kernel sum so far.
pub kernel_sum: Commitment,
}
impl Writeable for BlockSums {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_fixed_bytes(&self.output_sum)?;
writer.write_fixed_bytes(&self.kernel_sum)?;
Ok(())
}
}
impl Readable for BlockSums {
fn read(reader: &mut Reader) -> Result<BlockSums, ser::Error> {
Ok(BlockSums {
output_sum: Commitment::read(reader)?,
kernel_sum: Commitment::read(reader)?,
})
}
}
impl Default for BlockSums {
fn default() -> BlockSums {
let zero_commit = secp_static::commit_to_zero_value();
BlockSums {
output_sum: zero_commit.clone(),
kernel_sum: zero_commit.clone(),
}
}
}
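
Because BlockSums implements Writeable and Readable, it round-trips through the standard ser helpers the k/v store relies on. A quick sketch (ser_vec appears in the core imports later in this diff; ser::deserialize is assumed to be the matching reader-side helper):

    // a BlockSums is just two fixed-width commitments back to back
    let sums = BlockSums::default(); // both sums start at the zero commitment
    let bytes = ser::ser_vec(&sums).unwrap();
    let restored: BlockSums = ser::deserialize(&mut &bytes[..]).unwrap();
    assert_eq!(restored.output_sum, sums.output_sum);
    assert_eq!(restored.kernel_sum, sums.kernel_sum);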


@ -86,7 +86,6 @@ fn data_files() {
global::sizeshift(),
).unwrap();
// let prev_bhash = b.header.previous;
let bhash = b.hash();
chain
.process_block(b.clone(), chain::Options::MINE)


@ -18,8 +18,8 @@ use time;
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use core::{Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, ProofMessageElements,
ShortId, Transaction, TxKernel};
use core::{Commitment, Committed, Input, KernelFeatures, Output, OutputFeatures, Proof,
ProofMessageElements, ShortId, Transaction, TxKernel};
use consensus;
use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD};
use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
@ -32,7 +32,7 @@ use keychain;
use keychain::BlindingFactor;
use util::kernel_sig_msg;
use util::LOGGER;
use util::{secp, static_secp_instance};
use util::{secp, secp_static, static_secp_instance};
/// Errors thrown by Block validation
#[derive(Debug, Clone, PartialEq)]
@ -643,13 +643,19 @@ impl Block {
/// Validates all the elements in a block that can be checked without
/// additional data. Includes commitment sums and kernels, Merkle
/// trees, reward, etc.
pub fn validate(&self, prev: &BlockHeader) -> Result<(), Error> {
pub fn validate(
&self,
prev_output_sum: &Commitment,
prev_kernel_sum: &Commitment,
) -> Result<((Commitment, Commitment)), Error> {
self.verify_weight()?;
self.verify_sorted()?;
self.verify_coinbase()?;
self.verify_inputs()?;
self.verify_kernels(prev)?;
Ok(())
self.verify_kernel_lock_heights()?;
let (new_output_sum, new_kernel_sum) = self.verify_sums(prev_output_sum, prev_kernel_sum)?;
Ok((new_output_sum, new_kernel_sum))
}
fn verify_weight(&self) -> Result<(), Error> {
@ -686,9 +692,7 @@ impl Block {
Ok(())
}
/// Verifies the sum of input/output commitments match the sum in kernels
/// and that all kernel signatures are valid.
fn verify_kernels(&self, prev: &BlockHeader) -> Result<(), Error> {
fn verify_kernel_lock_heights(&self) -> Result<(), Error> {
for k in &self.kernels {
// check we have no kernels with lock_heights greater than current height
// no tx can be included in a block earlier than its lock_height
@ -696,45 +700,74 @@ impl Block {
return Err(Error::KernelLockHeight(k.lock_height));
}
}
Ok(())
}
// sum all inputs and outs commitments
fn verify_sums(
&self,
prev_output_sum: &Commitment,
prev_kernel_sum: &Commitment,
) -> Result<((Commitment, Commitment)), Error> {
// Note: we check the rangeproofs in here (expensive)
let io_sum = self.sum_commitments()?;
// sum all kernels commitments
let kernel_sum = {
let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();
// Note: we check the kernel signatures in here (expensive)
let kernel_sum = self.sum_kernel_excesses(prev_kernel_sum)?;
let mut output_commits = vec![io_sum, prev_output_sum.clone()];
// We do not (yet) know how to sum a "zero commit" so remove them
let zero_commit = secp_static::commit_to_zero_value();
output_commits.retain(|x| *x != zero_commit);
let output_sum = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
// given the total_kernel_offset of this block and the previous block
// we can account for the kernel_offset of this particular block
if self.header.total_kernel_offset != BlindingFactor::zero() {
let skey = self.header.total_kernel_offset.secret_key(&secp)?;
kernel_commits.push(secp.commit(0, skey)?);
}
let mut prev_offset_commits = vec![];
if prev.total_kernel_offset != BlindingFactor::zero() {
let skey = prev.total_kernel_offset.secret_key(&secp)?;
prev_offset_commits.push(secp.commit(0, skey)?);
}
secp.commit_sum(kernel_commits, prev_offset_commits)?
secp.commit_sum(output_commits, vec![])?
};
// sum of kernel commitments (including kernel_offset) must match
// the sum of input/output commitments (minus fee)
if kernel_sum != io_sum {
let offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let key = self.header.total_kernel_offset.secret_key(&secp)?;
secp.commit(0, key)?
};
let mut excesses = vec![kernel_sum, offset];
excesses.retain(|x| *x != zero_commit);
let kernel_sum_plus_offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_sum(excesses, vec![])?
};
if output_sum != kernel_sum_plus_offset {
return Err(Error::KernelSumMismatch);
}
// verify all signatures with the commitment as pk
Ok((output_sum, kernel_sum))
}
fn sum_kernel_excesses(&self, prev_excess: &Commitment) -> Result<Commitment, Error> {
for kernel in &self.kernels {
kernel.verify()?;
}
Ok(())
let mut excesses = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();
excesses.push(prev_excess.clone());
// we do not (yet) know how to sum a "zero commit" so remove them
let zero_commit = secp_static::commit_to_zero_value();
excesses.retain(|x| *x != zero_commit);
let sum = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_sum(excesses, vec![])?
};
Ok(sum)
}
// Validate the coinbase outputs generated by miners. Entails 2 main checks:
@ -869,6 +902,8 @@ mod test {
let keychain = Keychain::from_random_seed().unwrap();
let max_out = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT;
let zero_commit = secp_static::commit_to_zero_value();
let mut pks = vec![];
for n in 0..(max_out + 1) {
pks.push(keychain.derive_key_id(n as u32).unwrap());
@ -886,7 +921,7 @@ mod test {
let prev = BlockHeader::default();
let b = new_block(vec![&mut tx], &keychain, &prev);
assert!(b.validate(&prev).is_err());
assert!(b.validate(&zero_commit, &zero_commit).is_err());
}
#[test]
@ -914,6 +949,8 @@ mod test {
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let mut btx1 = tx2i1o();
let mut btx2 = build::transaction(
vec![input(7, key_id1), output(5, key_id2.clone()), with_fee(2)],
@ -928,7 +965,7 @@ mod test {
// block should have been automatically compacted (including reward
// output) and should still be valid
b.validate(&prev).unwrap();
b.validate(&zero_commit, &zero_commit).unwrap();
assert_eq!(b.inputs.len(), 3);
assert_eq!(b.outputs.len(), 3);
}
@ -936,6 +973,7 @@ mod test {
#[test]
fn empty_block_with_coinbase_is_valid() {
let keychain = Keychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let b = new_block(vec![], &keychain, &prev);
@ -959,7 +997,7 @@ mod test {
// the block should be valid here (single coinbase output with corresponding
// txn kernel)
assert_eq!(b.validate(&prev), Ok(()));
assert!(b.validate(&zero_commit, &zero_commit).is_ok());
}
#[test]
@ -968,6 +1006,7 @@ mod test {
// additionally verifying the merkle_inputs_outputs also fails
fn remove_coinbase_output_flag() {
let keychain = Keychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let mut b = new_block(vec![], &keychain, &prev);
@ -981,9 +1020,11 @@ mod test {
.remove(OutputFeatures::COINBASE_OUTPUT);
assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch));
assert_eq!(b.verify_kernels(&prev), Ok(()));
assert_eq!(b.validate(&prev), Err(Error::CoinbaseSumMismatch));
assert!(b.verify_sums(&zero_commit, &zero_commit).is_ok());
assert_eq!(
b.validate(&zero_commit, &zero_commit),
Err(Error::CoinbaseSumMismatch)
);
}
#[test]
@ -991,6 +1032,7 @@ mod test {
// invalidates the block and specifically it causes verify_coinbase to fail
fn remove_coinbase_kernel_flag() {
let keychain = Keychain::from_random_seed().unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let prev = BlockHeader::default();
let mut b = new_block(vec![], &keychain, &prev);
@ -1009,7 +1051,7 @@ mod test {
);
assert_eq!(
b.validate(&prev),
b.validate(&zero_commit, &zero_commit),
Err(Error::Secp(secp::Error::IncorrectCommitSum))
);
}


@ -235,6 +235,7 @@ mod test {
use ser;
use keychain;
use keychain::Keychain;
use util::secp_static;
#[test]
pub fn test_amount_to_hr() {
@ -617,6 +618,8 @@ mod test {
let keychain = keychain::Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let previous_header = BlockHeader::default();
let b = Block::new(
@ -626,7 +629,9 @@ mod test {
&key_id,
Difficulty::one(),
).unwrap();
b.cut_through().validate(&previous_header).unwrap();
b.cut_through()
.validate(&zero_commit, &zero_commit)
.unwrap();
}
#[test]
@ -634,6 +639,8 @@ mod test {
let keychain = keychain::Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let mut tx1 = tx2i1o();
tx1.validate().unwrap();
@ -646,7 +653,10 @@ mod test {
&key_id,
Difficulty::one(),
).unwrap();
block.cut_through().validate(&previous_header).unwrap();
block
.cut_through()
.validate(&zero_commit, &zero_commit)
.unwrap();
}
#[test]
@ -654,6 +664,8 @@ mod test {
let keychain = keychain::Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
let mut tx1 = tx2i1o();
let mut tx2 = tx1i1o();
@ -666,7 +678,7 @@ mod test {
&key_id,
Difficulty::one(),
).unwrap();
b.validate(&previous_header).unwrap();
b.validate(&zero_commit, &zero_commit).unwrap();
}
#[test]
@ -677,6 +689,8 @@ mod test {
let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap();
let zero_commit = secp_static::commit_to_zero_value();
// first check we can add a timelocked tx where lock height matches current
// block height and that the resulting block is valid
let tx1 = build::transaction(
@ -698,7 +712,7 @@ mod test {
&key_id3.clone(),
Difficulty::one(),
).unwrap();
b.validate(&previous_header).unwrap();
b.validate(&zero_commit, &zero_commit).unwrap();
// now try adding a timelocked tx where lock height is greater than current
// block height
@ -721,7 +735,7 @@ mod test {
&key_id3.clone(),
Difficulty::one(),
).unwrap();
match b.validate(&previous_header) {
match b.validate(&zero_commit, &zero_commit) {
Err(KernelLockHeight(height)) => {
assert_eq!(height, 2);
}


@ -35,6 +35,7 @@ use ser::{self, read_and_verify_sorted, ser_vec, PMMRable, Readable, Reader, Wri
WriteableSorted, Writer};
use util;
use util::LOGGER;
use util::secp_static;
bitflags! {
/// Options for a kernel's structure or use
@ -394,16 +395,21 @@ impl Transaction {
let kernel_sum = {
let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();
let offset = {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let key = self.offset.secret_key(&secp)?;
secp.commit(0, key)?
};
kernel_commits.push(offset);
// We cannot sum zero commits so remove them here
let zero_commit = secp_static::commit_to_zero_value();
kernel_commits.retain(|x| *x != zero_commit);
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
// add the offset in as necessary (unless offset is zero)
if self.offset != BlindingFactor::zero() {
let skey = self.offset.secret_key(&secp)?;
let offset_commit = secp.commit(0, skey)?;
kernel_commits.push(offset_commit);
}
secp.commit_sum(kernel_commits, vec![])?
};
@ -413,7 +419,6 @@ impl Transaction {
return Err(Error::KernelSumMismatch);
}
// verify all signatures with the commitment as pk
for kernel in &self.kernels {
kernel.verify()?;
}


@ -152,8 +152,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.upgrade()
.expect("failed to upgrade weak ref to chain");
if let Ok(prev_header) = chain.get_block_header(&cb.header.previous) {
if let Ok(()) = block.validate(&prev_header) {
if let Ok(sums) = chain.get_block_sums(&cb.header.previous) {
if block.validate(&sums.output_sum, &sums.kernel_sum).is_ok() {
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr)
} else {
@ -351,7 +351,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}
impl NetToChainAdapter {
/// Construct a new NetToChainAdapter instance
pub fn new(
currently_syncing: Arc<AtomicBool>,
@ -599,7 +598,6 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
}
impl ChainToPoolAndNetAdapter {
/// Construct a ChainToPoolAndNetAdapter instance.
pub fn new(
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,


@ -24,10 +24,12 @@ use itertools::Itertools;
use core::ser::AsFixedBytes;
use chain;
use chain::types::BlockSums;
use pool;
use core::consensus;
use core::core;
use core::core::Transaction;
use core::core::hash::Hashed;
use core::ser;
use keychain::{Identifier, Keychain};
use wallet;
@ -137,7 +139,14 @@ fn build_block(
wallet_listener_url: Option<String>,
) -> Result<(core::Block, BlockFees), Error> {
// prepare the block header timestamp
let head = chain.head_header().unwrap();
let head = chain.head_header()?;
let prev_sums = if head.height == 0 {
BlockSums::default()
} else {
chain.get_block_sums(&head.hash())?
};
let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec;
if now_sec <= head_sec {
@ -168,7 +177,7 @@ fn build_block(
let mut b = core::Block::with_reward(&head, txs, output, kernel, difficulty.clone())?;
// making sure we're not spending time mining a useless block
b.validate(&head)?;
b.validate(&prev_sums.output_sum, &prev_sums.kernel_sum)?;
let mut rng = rand::OsRng::new().unwrap();
b.header.nonce = rng.gen();


@ -32,3 +32,11 @@ pub fn static_secp_instance() -> Arc<Mutex<secp::Secp256k1>> {
secp_inst.randomize(&mut thread_rng());
SECP256K1.clone()
}
// TODO - Can we generate this once and memoize it for subsequent use?
// Even if we clone it each time, it will likely be faster than this.
pub fn commit_to_zero_value() -> secp::pedersen::Commitment {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
secp.commit_value(0).unwrap()
}
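
One possible answer to the TODO above, assuming the lazy_static crate this module already uses for SECP256K1: compute the commitment once on first use and clone it on every call afterwards.

    lazy_static! {
        // built once, cloned on every subsequent call
        static ref ZERO_COMMIT: secp::pedersen::Commitment = {
            let secp = static_secp_instance();
            let secp = secp.lock().unwrap();
            secp.commit_value(0).unwrap()
        };
    }

    pub fn commit_to_zero_value() -> secp::pedersen::Commitment {
        ZERO_COMMIT.clone()
    }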


@ -88,6 +88,14 @@ pub fn issue_send_tx(
.aggsig_create_context(&tx_id, skey)
.context(ErrorKind::Keychain)?;
// let kernel_key = kernel_blind
// .secret_key(keychain.secp())
// .context(ErrorKind::Keychain)?;
// let kernel_offset = keychain
// .secp()
// .commit(0, kernel_key)
// .context(ErrorKind::Keychain)?;
let partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, None, tx);
// Closure to acquire wallet lock and lock the coins being spent