Switch commitments (#179)

* Adding switch commit to grin outputs
* logging output fix
* adding switch commitment hash to sum tree node
* added hash_with to the Hashed trait, allowing a hash to include another Writeable element
* adding hash_with as a method on the Hashed trait
Yeastplume 2017-10-16 22:23:10 +01:00 committed by Ignotus Peverell
parent c84a136e48
commit 8f76746e84
18 changed files with 312 additions and 115 deletions
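
Before the file-by-file diff, here is a condensed sketch of the two pieces the commit message describes: the SwitchCommitHash stored on each Output, and hashing an element together with extra data (the hash_with addition to the Hashed trait, used when pushing outputs into the sum tree). This is a simplified, stand-alone illustration rather than the committed code: `Commitment` is a stand-in type and the `ser::Writeable`/`HashWriter` machinery is replaced by plain byte slices; the real definitions are in the transaction and hash module hunks below.

```rust
// Simplified sketch only. Cargo.toml: blake2-rfc = "0.2"
use blake2_rfc::blake2b::blake2b;

/// Stored size of the blake2b hash of a switch commitment (from the diff below).
pub const SWITCH_COMMIT_HASH_SIZE: usize = 20;

/// Stand-in for secp::pedersen::Commitment (33 bytes in secp256k1-zkp);
/// a hypothetical simplification, not the real type.
pub struct Commitment(pub [u8; 33]);

/// 160-bit blake2b hash of the switch commitment (blind*J), stored on each Output.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SwitchCommitHash {
    hash: [u8; SWITCH_COMMIT_HASH_SIZE],
}

impl SwitchCommitHash {
    /// Mirrors SwitchCommitHash::from_switch_commit in the transaction module
    /// diff below: blake2b the 33-byte commitment down to 20 bytes.
    pub fn from_switch_commit(switch_commit: &Commitment) -> SwitchCommitHash {
        let digest = blake2b(SWITCH_COMMIT_HASH_SIZE, &[], &switch_commit.0);
        let mut hash = [0u8; SWITCH_COMMIT_HASH_SIZE];
        hash.copy_from_slice(digest.as_bytes());
        SwitchCommitHash { hash }
    }
}

/// Simplified analogue of the new Hashed::hash_with: hash an element together
/// with an optional extra serialized value. In the real code both values are
/// written through ser::Writeable into a blake2b HashWriter; here they are
/// plain byte slices.
pub fn hash_with(element: &[u8], extra: Option<&[u8]>) -> [u8; 32] {
    let mut data = element.to_vec();
    if let Some(extra) = extra {
        data.extend_from_slice(extra);
    }
    let digest = blake2b(32, &[], &data);
    let mut out = [0u8; 32];
    out.copy_from_slice(digest.as_bytes());
    out
}

fn main() {
    // Toy values only: a real switch commitment comes from keychain.switch_commit(key_id).
    let switch_commit = Commitment([3u8; 33]);
    let sch = SwitchCommitHash::from_switch_commit(&switch_commit);
    // The sum tree leaf for an output now hashes the output together with its
    // switch commitment hash (see the output_pmmr.push(..., Some(...)) call below).
    let leaf_hash = hash_with(&[1, 2, 3], Some(&sch.hash));
    println!("{:?} {:?}", sch, &leaf_hash[..4]);
}
```
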


@ -145,7 +145,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
if header.timestamp >
time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64)) {
time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
{
// refuse blocks more than 12 blocks intervals in future (as in bitcoin)
// TODO add warning in p2p code if local time is too different from peers
return Err(Error::InvalidBlockTime);
@ -164,9 +165,9 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
// first I/O cost, better as late as possible
let prev = try!(ctx.store
.get_block_header(&header.previous)
.map_err(&Error::StoreErr));
let prev = try!(ctx.store.get_block_header(&header.previous).map_err(
&Error::StoreErr,
));
if header.height != prev.height + 1 {
return Err(Error::InvalidBlockHeight);
@ -185,8 +186,9 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
let difficulty = consensus::next_difficulty(diff_iter)
.map_err(|e| Error::Other(e.to_string()))?;
let difficulty = consensus::next_difficulty(diff_iter).map_err(|e| {
Error::Other(e.to_string())
})?;
if header.difficulty < difficulty {
return Err(Error::DifficultyTooLow);
}
@ -196,10 +198,11 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
/// Fully validate the block content.
fn validate_block(b: &Block,
ctx: &mut BlockContext,
ext: &mut sumtree::Extension)
-> Result<(), Error> {
fn validate_block(
b: &Block,
ctx: &mut BlockContext,
ext: &mut sumtree::Extension,
) -> Result<(), Error> {
if b.header.height > ctx.head.height + 1 {
return Err(Error::Orphan);
}
@ -248,7 +251,11 @@ fn validate_block(b: &Block,
if forked_block.header.height > 0 {
let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
ext.rewind(forked_block.header.height, last_output, last_kernel)?;
ext.rewind(
forked_block.header.height,
last_output,
last_kernel,
)?;
}
// apply all forked blocks, including this new one
@ -261,7 +268,8 @@ fn validate_block(b: &Block,
let (utxo_root, rproof_root, kernel_root) = ext.roots();
if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
kernel_root.hash != b.header.kernel_root {
kernel_root.hash != b.header.kernel_root
{
ext.dump();
return Err(Error::InvalidRoot);
@ -272,8 +280,10 @@ fn validate_block(b: &Block,
if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
if output.features.contains(transaction::COINBASE_OUTPUT) {
if let Ok(output_header) =
ctx.store
.get_block_header_by_output_commit(&input.commitment()) {
ctx.store.get_block_header_by_output_commit(
&input.commitment(),
)
{
// TODO - make sure we are not off-by-1 here vs. the equivalent transaction
// validation rule


@ -141,8 +141,10 @@ impl ChainStore for ChainKVStore {
// in this index.
//
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
let block_hash = self.db
.get_ser(&to_key(HEADER_BY_OUTPUT_PREFIX, &mut commit.as_ref().to_vec()))?;
let block_hash = self.db.get_ser(&to_key(
HEADER_BY_OUTPUT_PREFIX,
&mut commit.as_ref().to_vec(),
))?;
match block_hash {
Some(hash) => {
@ -211,8 +213,10 @@ impl ChainStore for ChainKVStore {
/// that is consistent with its height (everything prior to this will be
/// consistent)
fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
self.db
.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
self.db.put_ser(
&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
bh,
)?;
if bh.height == 0 {
return Ok(());
}


@ -36,14 +36,16 @@ const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";
struct PMMRHandle<T>
where T: Summable + Clone
where
T: Summable + Clone,
{
backend: PMMRBackend<T>,
last_pos: u64,
}
impl<T> PMMRHandle<T>
where T: Summable + Clone
where
T: Summable + Clone,
{
fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
@ -105,7 +107,8 @@ impl SumTrees {
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error>
where F: FnOnce(&mut Extension) -> Result<T, Error>
where
F: FnOnce(&mut Extension) -> Result<T, Error>,
{
let sizes: (u64, u64, u64);
@ -220,15 +223,16 @@ impl<'a> Extension<'a> {
.push(SumCommit {
commit: out.commitment(),
secp: secp.clone(),
})
},
Some(out.switch_commit_hash()))
.map_err(&Error::SumTreeErr)?;
self.new_output_commits.insert(out.commitment(), pos);
// push range proofs in their MMR
self.rproof_pmmr
.push(NoSum(out.proof))
.map_err(&Error::SumTreeErr)?;
self.rproof_pmmr.push(NoSum(out.proof), None::<RangeProof>).map_err(
&Error::SumTreeErr,
)?;
}
for kernel in &b.kernels {
@ -236,9 +240,9 @@ impl<'a> Extension<'a> {
return Err(Error::DuplicateKernel(kernel.excess.clone()));
}
// push kernels in their MMR
let pos = self.kernel_pmmr
.push(NoSum(kernel.clone()))
.map_err(&Error::SumTreeErr)?;
let pos = self.kernel_pmmr.push(NoSum(kernel.clone()),None::<RangeProof>).map_err(
&Error::SumTreeErr,
)?;
self.new_kernel_excesses.insert(kernel.excess, pos);
}
Ok(())
@ -274,9 +278,14 @@ impl<'a> Extension<'a> {
/// Current root hashes and sums (if applicable) for the UTXO, range proof
/// and kernel sum trees.
pub fn roots(&self)
-> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
(self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root())
pub fn roots(
&self,
) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
(
self.output_pmmr.root(),
self.rproof_pmmr.root(),
self.kernel_pmmr.root(),
)
}
/// Force the rollback of this extension, no matter the result
@ -296,8 +305,10 @@ impl<'a> Extension<'a> {
// Sizes of the sum trees, used by `extending` on rollback.
fn sizes(&self) -> (u64, u64, u64) {
(self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size())
(
self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size(),
)
}
}


@ -8,7 +8,7 @@ workspace = ".."
bitflags = "~0.7.0"
blake2-rfc = "~0.2.17"
byteorder = "^0.5"
log = "~0.3"
slog = { version = "^2.0.12", features = ["max_level_trace", "release_max_level_trace"] }
num-bigint = "^0.1.35"
rand = "^0.3"
serde = "~1.0.8"


@ -19,11 +19,13 @@ use secp::{self, Secp256k1};
use std::collections::HashSet;
use core::Committed;
use core::{Input, Output, Proof, TxKernel, Transaction, COINBASE_KERNEL, COINBASE_OUTPUT};
use core::{Input, Output, SwitchCommitHash, Proof, TxKernel, Transaction, COINBASE_KERNEL,
COINBASE_OUTPUT};
use consensus::{MINIMUM_DIFFICULTY, REWARD, reward, exceeds_weight};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::target::Difficulty;
use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted};
use util::LOGGER;
use global;
use keychain;
@ -500,13 +502,26 @@ impl Block {
let secp = keychain.secp();
let commit = keychain.commit(reward(fees), key_id)?;
// let switch_commit = keychain.switch_commit(key_id)?;
let switch_commit = keychain.switch_commit(key_id)?;
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
trace!(
LOGGER,
"Block reward - Pedersen Commit is: {:?}, Switch Commit is: {:?}",
commit,
switch_commit
);
trace!(
LOGGER,
"Block reward - Switch Commit Hash is: {:?}",
switch_commit_hash
);
let msg = secp::pedersen::ProofMessage::empty();
let rproof = keychain.range_proof(reward(fees), key_id, commit, msg)?;
let output = Output {
features: COINBASE_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: rproof,
};
@ -549,9 +564,16 @@ mod test {
// utility producing a transaction that spends an output with the provided
// value and blinding key
fn txspend1i1o(v: u64, keychain: &Keychain, key_id1: Identifier, key_id2: Identifier) -> Transaction {
build::transaction(vec![input(v, key_id1), output(3, key_id2), with_fee(2)], &keychain)
.map(|(tx, _)| tx)
fn txspend1i1o(
v: u64,
keychain: &Keychain,
key_id1: Identifier,
key_id2: Identifier,
) -> Transaction {
build::transaction(
vec![input(v, key_id1), output(3, key_id2), with_fee(2)],
&keychain,
).map(|(tx, _)| tx)
.unwrap()
}


@ -27,8 +27,9 @@
use secp;
use core::{Transaction, Input, Output, DEFAULT_OUTPUT};
use core::{Transaction, Input, Output, SwitchCommitHash, DEFAULT_OUTPUT};
use core::transaction::kernel_sig_msg;
use util::LOGGER;
use keychain;
use keychain::{Keychain, BlindSum, BlindingFactor, Identifier};
@ -55,6 +56,19 @@ pub fn input(value: u64, key_id: Identifier) -> Box<Append> {
pub fn output(value: u64, key_id: Identifier) -> Box<Append> {
Box::new(move |build, (tx, sum)| -> (Transaction, BlindSum) {
let commit = build.keychain.commit(value, &key_id).unwrap();
let switch_commit = build.keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
trace!(
LOGGER,
"Builder - Pedersen Commit is: {:?}, Switch Commit is: {:?}",
commit,
switch_commit
);
trace!(
LOGGER,
"Builder - Switch Commit Hash is: {:?}",
switch_commit_hash
);
let msg = secp::pedersen::ProofMessage::empty();
let rproof = build
.keychain
@ -65,6 +79,7 @@ pub fn output(value: u64, key_id: Identifier) -> Box<Append> {
tx.with_output(Output {
features: DEFAULT_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: rproof,
}),
sum.add_key_id(key_id.clone()),
@ -141,7 +156,12 @@ mod test {
let key_id3 = keychain.derive_key_id(3).unwrap();
let (tx, _) = transaction(
vec![input(10, key_id1), input(11, key_id2), output(20, key_id3), with_fee(1)],
vec![
input(10, key_id1),
input(11, key_id2),
output(20, key_id3),
with_fee(1),
],
&keychain,
).unwrap();
@ -154,8 +174,10 @@ mod test {
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
let (tx, _) = transaction(vec![input(6, key_id1), output(2, key_id2), with_fee(4)], &keychain)
.unwrap();
let (tx, _) = transaction(
vec![input(6, key_id1), output(2, key_id2), with_fee(4)],
&keychain,
).unwrap();
tx.verify_sig(&keychain.secp()).unwrap();
}


@ -25,6 +25,7 @@ use blake2::blake2b::Blake2b;
use consensus::VerifySortOrder;
use ser::{self, Reader, Readable, Writer, Writeable, Error, AsFixedBytes};
use util::LOGGER;
/// A hash consisting of all zeroes, used as a sentinel. No known preimage.
pub const ZERO_HASH: Hash = Hash([0; 32]);
@ -171,37 +172,37 @@ impl ser::Writer for HashWriter {
pub trait Hashed {
/// Obtain the hash of the object
fn hash(&self) -> Hash;
/// Hash the object together with another writeable object
fn hash_with<T: Writeable>(&self, other:T) -> Hash;
}
impl<W: ser::Writeable> Hashed for W {
fn hash(&self) -> Hash {
let mut hasher = HashWriter::default();
ser::Writeable::write(self, &mut hasher).unwrap();
let mut ret = [0; 32];
hasher.finalize(&mut ret);
Hash(ret)
}
fn hash_with<T: Writeable>(&self, other:T) -> Hash{
let mut hasher = HashWriter::default();
ser::Writeable::write(self, &mut hasher).unwrap();
trace!(LOGGER, "Hashing with additional data");
ser::Writeable::write(&other, &mut hasher).unwrap();
let mut ret = [0; 32];
hasher.finalize(&mut ret);
Hash(ret)
}
}
// Convenience for when we need to hash of an empty array.
impl Hashed for [u8; 0] {
fn hash(&self) -> Hash {
let hasher = HashWriter::default();
let mut ret = [0; 32];
hasher.finalize(&mut ret);
Hash(ret)
}
}
impl<T: Hashed> VerifySortOrder<T> for Vec<T> {
impl<T: Writeable> VerifySortOrder<T> for Vec<T> {
fn verify_sort_order(&self) -> Result<(), ser::Error> {
match self
.iter()
match self.iter()
.map(|item| item.hash())
.collect::<Vec<_>>()
.windows(2)
.any(|pair| pair[0] > pair[1])
{
.any(|pair| pair[0] > pair[1]) {
true => Err(ser::Error::BadlySorted),
false => Ok(()),
}


@ -31,7 +31,7 @@ use secp::pedersen::*;
pub use self::block::*;
pub use self::transaction::*;
use self::hash::Hashed;
use self::hash::{Hashed};
use ser::{Writeable, Writer, Reader, Readable, Error};
use global;
// use keychain;
@ -191,6 +191,7 @@ mod test {
use ser;
use keychain;
use keychain::{Keychain, BlindingFactor};
use blake2::blake2b::blake2b;
#[test]
#[should_panic(expected = "InvalidSecretKey")]
@ -200,7 +201,11 @@ mod test {
// blinding should fail as signing with a zero r*G shouldn't work
build::transaction(
vec![input(10, key_id1.clone()), output(9, key_id1.clone()), with_fee(1)],
vec![
input(10, key_id1.clone()),
output(9, key_id1.clone()),
with_fee(1),
],
&keychain,
).unwrap();
}
@ -210,8 +215,8 @@ mod test {
let tx = tx2i1o();
let mut vec = Vec::new();
ser::serialize(&mut vec, &tx).expect("serialized failed");
assert!(vec.len() > 5340);
assert!(vec.len() < 5360);
assert!(vec.len() > 5360);
assert!(vec.len() < 5380);
}
#[test]
@ -310,7 +315,8 @@ mod test {
// Alice builds her transaction, with change, which also produces the sum
// of blinding factors before they're obscured.
let (tx, sum) =
build::transaction(vec![in1, in2, output(1, key_id3), with_fee(2)], &keychain).unwrap();
build::transaction(vec![in1, in2, output(1, key_id3), with_fee(2)], &keychain)
.unwrap();
tx_alice = tx;
blind_sum = sum;
}
@ -319,7 +325,11 @@ mod test {
// blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast.
let (tx_final, _) = build::transaction(
vec![initial_tx(tx_alice), with_excess(blind_sum), output(4, key_id4)],
vec![
initial_tx(tx_alice),
with_excess(blind_sum),
output(4, key_id4),
],
&keychain,
).unwrap();
@ -375,9 +385,15 @@ mod test {
// first check we can add a timelocked tx where lock height matches current block height
// and that the resulting block is valid
let tx1 = build::transaction(
vec![input(5, key_id1.clone()), output(3, key_id2.clone()), with_fee(2), with_lock_height(1)],
vec![
input(5, key_id1.clone()),
output(3, key_id2.clone()),
with_fee(2),
with_lock_height(1),
],
&keychain,
).map(|(tx, _)| tx).unwrap();
).map(|(tx, _)| tx)
.unwrap();
let b = Block::new(
&BlockHeader::default(),
@ -389,9 +405,15 @@ mod test {
// now try adding a timelocked tx where lock height is greater than current block height
let tx1 = build::transaction(
vec![input(5, key_id1.clone()), output(3, key_id2.clone()), with_fee(2), with_lock_height(2)],
vec![
input(5, key_id1.clone()),
output(3, key_id2.clone()),
with_fee(2),
with_lock_height(2),
],
&keychain,
).map(|(tx, _)| tx).unwrap();
).map(|(tx, _)| tx)
.unwrap();
let b = Block::new(
&BlockHeader::default(),
@ -400,9 +422,9 @@ mod test {
&key_id3.clone(),
).unwrap();
match b.validate(keychain.secp()) {
Err(KernelLockHeight{ lock_height: height}) => {
Err(KernelLockHeight { lock_height: height }) => {
assert_eq!(height, 2);
},
}
_ => panic!("expecting KernelLockHeight error here"),
}
}
@ -429,7 +451,12 @@ mod test {
let key_id3 = keychain.derive_key_id(3).unwrap();
build::transaction(
vec![input(10, key_id1), input(11, key_id2), output(19, key_id3), with_fee(2)],
vec![
input(10, key_id1),
input(11, key_id2),
output(19, key_id3),
with_fee(2),
],
&keychain,
).map(|(tx, _)| tx)
.unwrap()
@ -441,8 +468,10 @@ mod test {
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
build::transaction(vec![input(5, key_id1), output(3, key_id2), with_fee(2)], &keychain)
.map(|(tx, _)| tx)
build::transaction(
vec![input(5, key_id1), output(3, key_id2), with_fee(2)],
&keychain,
).map(|(tx, _)| tx)
.unwrap()
}
}


@ -118,8 +118,11 @@ where
T: Summable + Hashed,
{
/// Create a hash sum from a summable
pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
let hash = elmt.hash();
pub fn from_summable<W: Writeable>(idx: u64, elmt: &T, hash_with:Option<W>) -> HashSum<T> {
let hash = match hash_with {
Some(h) => elmt.hash_with(h),
None => elmt.hash(),
};
let sum = elmt.sum();
let node_hash = (idx, &sum, hash).hash();
HashSum {
@ -255,9 +258,9 @@ where
/// Push a new Summable element in the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push(&mut self, elmt: T) -> Result<u64, String> {
pub fn push<W: Writeable>(&mut self, elmt: T, hash_with:Option<W>) -> Result<u64, String> {
let elmt_pos = self.last_pos + 1;
let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt);
let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt, hash_with);
let mut to_append = vec![current_hashsum.clone()];
let mut height = 0;
let mut pos = elmt_pos;
@ -831,7 +834,7 @@ mod test {
let mut pmmr = PMMR::new(&mut ba);
// one element
pmmr.push(elems[0]).unwrap();
pmmr.push(elems[0], None::<TestElem>).unwrap();
let hash = Hashed::hash(&elems[0]);
let sum = elems[0].sum();
let node_hash = (1 as u64, &sum, hash).hash();
@ -845,54 +848,54 @@ mod test {
assert_eq!(pmmr.unpruned_size(), 1);
// two elements
pmmr.push(elems[1]).unwrap();
let sum2 = HashSum::from_summable(1, &elems[0]) + HashSum::from_summable(2, &elems[1]);
pmmr.push(elems[1], None::<TestElem>).unwrap();
let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
assert_eq!(pmmr.root(), sum2);
assert_eq!(pmmr.unpruned_size(), 3);
// three elements
pmmr.push(elems[2]).unwrap();
let sum3 = sum2.clone() + HashSum::from_summable(4, &elems[2]);
pmmr.push(elems[2], None::<TestElem>).unwrap();
let sum3 = sum2.clone() + HashSum::from_summable(4, &elems[2], None::<TestElem>);
assert_eq!(pmmr.root(), sum3);
assert_eq!(pmmr.unpruned_size(), 4);
// four elements
pmmr.push(elems[3]).unwrap();
pmmr.push(elems[3], None::<TestElem>).unwrap();
let sum4 = sum2 +
(HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
(HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
assert_eq!(pmmr.root(), sum4);
assert_eq!(pmmr.unpruned_size(), 7);
// five elements
pmmr.push(elems[4]).unwrap();
let sum5 = sum4.clone() + HashSum::from_summable(8, &elems[4]);
pmmr.push(elems[4], None::<TestElem>).unwrap();
let sum5 = sum4.clone() + HashSum::from_summable(8, &elems[4], None::<TestElem>);
assert_eq!(pmmr.root(), sum5);
assert_eq!(pmmr.unpruned_size(), 8);
// six elements
pmmr.push(elems[5]).unwrap();
pmmr.push(elems[5], None::<TestElem>).unwrap();
let sum6 = sum4.clone() +
(HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5]));
(HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>));
assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10);
// seven elements
pmmr.push(elems[6]).unwrap();
let sum7 = sum6 + HashSum::from_summable(11, &elems[6]);
pmmr.push(elems[6], None::<TestElem>).unwrap();
let sum7 = sum6 + HashSum::from_summable(11, &elems[6], None::<TestElem>);
assert_eq!(pmmr.root(), sum7);
assert_eq!(pmmr.unpruned_size(), 11);
// eight elements
pmmr.push(elems[7]).unwrap();
pmmr.push(elems[7], None::<TestElem>).unwrap();
let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) +
(HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));
assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15);
// nine elements
pmmr.push(elems[8]).unwrap();
let sum9 = sum8 + HashSum::from_summable(16, &elems[8]);
pmmr.push(elems[8], None::<TestElem>).unwrap();
let sum9 = sum8 + HashSum::from_summable(16, &elems[8], None::<TestElem>);
assert_eq!(pmmr.root(), sum9);
assert_eq!(pmmr.unpruned_size(), 16);
}
@ -918,7 +921,7 @@ mod test {
{
let mut pmmr = PMMR::new(&mut ba);
for elem in &elems[..] {
pmmr.push(*elem).unwrap();
pmmr.push(*elem, None::<TestElem>).unwrap();
}
orig_root = pmmr.root();
sz = pmmr.unpruned_size();


@ -15,6 +15,7 @@
//! Transactions
use byteorder::{ByteOrder, BigEndian};
use blake2::blake2b::blake2b;
use secp::{self, Secp256k1, Message, Signature};
use secp::pedersen::{RangeProof, Commitment};
use std::ops;
@ -23,7 +24,10 @@ use core::Committed;
use core::pmmr::Summable;
use keychain::{Identifier, Keychain};
use ser::{self, Reader, Writer, Readable, Writeable, WriteableSorted, read_and_verify_sorted};
use util::LOGGER;
/// The size to use for the stored blake2 hash of a switch_commitment
pub const SWITCH_COMMIT_HASH_SIZE: usize = 20;
bitflags! {
/// Options for a kernel's structure or use
@ -299,6 +303,7 @@ impl Transaction {
lock_height: self.lock_height,
};
debug!(
LOGGER,
"tx verify_sig: fee - {}, lock_height - {}",
kernel.fee,
kernel.lock_height
@ -362,19 +367,71 @@ bitflags! {
}
}
/// Definition of the switch commitment hash
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct SwitchCommitHash {
hash: [u8; SWITCH_COMMIT_HASH_SIZE],
}
/// Implementation of Writeable for a switch commitment hash
impl Writeable for SwitchCommitHash {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_fixed_bytes(&self.hash)?;
Ok(())
}
}
/// Implementation of Readable for a switch commitment hash
/// an Output from a binary stream.
impl Readable for SwitchCommitHash {
fn read(reader: &mut Reader) -> Result<SwitchCommitHash, ser::Error> {
let a = try!(reader.read_fixed_bytes(SWITCH_COMMIT_HASH_SIZE));
let mut c = [0; SWITCH_COMMIT_HASH_SIZE];
for i in 0..SWITCH_COMMIT_HASH_SIZE {
c[i] = a[i];
}
Ok(SwitchCommitHash { hash: c })
}
}
// As Ref for AsFixedBytes
impl AsRef<[u8]> for SwitchCommitHash {
fn as_ref(&self) -> &[u8] {
&self.hash
}
}
impl SwitchCommitHash {
/// Builds a switch commitment hash from a switch commit using blake2
pub fn from_switch_commit(switch_commit: Commitment) -> SwitchCommitHash {
let switch_commit_hash = blake2b(SWITCH_COMMIT_HASH_SIZE, &[], &switch_commit.0);
let switch_commit_hash = switch_commit_hash.as_bytes();
let mut h = [0; SWITCH_COMMIT_HASH_SIZE];
for i in 0..SWITCH_COMMIT_HASH_SIZE {
h[i] = switch_commit_hash[i];
}
SwitchCommitHash { hash: h }
}
}
/// Output for a transaction, defining the new ownership of coins that are being
/// transferred. The commitment is a blinded value for the output while the
/// range proof guarantees the commitment includes a positive value without
/// overflow and the ownership of the private key.
/// overflow and the ownership of the private key. The switch commitment hash
/// provides future-proofing against quantum-based attacks, as well as provides
/// wallet implementations with a way to identify their outputs for wallet
/// reconstruction
///
/// The hash of an output only covers its features, lock_height and commitment.
/// The range proof is expected to have its own hash and is stored and committed to separately.
/// The hash of an output only covers its features, lock_height, commitment,
/// and switch commitment. The range proof is expected to have its own hash
/// and is stored and committed to separately.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct Output {
/// Options for an output's structure or use
pub features: OutputFeatures,
/// The homomorphic commitment representing the output's amount
pub commit: Commitment,
/// The switch commitment hash, a 160 bit length blake2 hash of blind*J
pub switch_commit_hash: SwitchCommitHash,
/// A proof that the commitment is in the right range
pub proof: RangeProof,
}
@ -385,6 +442,7 @@ impl Writeable for Output {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u8(self.features.bits())?;
writer.write_fixed_bytes(&self.commit)?;
writer.write_fixed_bytes(&self.switch_commit_hash)?;
// The hash of an output doesn't include the range proof
if writer.serialization_mode() == ser::SerializationMode::Full {
@ -405,6 +463,7 @@ impl Readable for Output {
Ok(Output {
features: features,
commit: Commitment::read(reader)?,
switch_commit_hash: SwitchCommitHash::read(reader)?,
proof: RangeProof::read(reader)?,
})
}
@ -416,6 +475,11 @@ impl Output {
self.commit
}
/// Switch commitment hash for the output
pub fn switch_commit_hash(&self) -> SwitchCommitHash {
self.switch_commit_hash
}
/// Range proof for the output
pub fn proof(&self) -> RangeProof {
self.proof
@ -469,15 +533,18 @@ impl Summable for SumCommit {
impl Writeable for SumCommit {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.commit.write(writer)
self.commit.write(writer)?;
Ok(())
}
}
impl Readable for SumCommit {
fn read(reader: &mut Reader) -> Result<SumCommit, ser::Error> {
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let commit = Commitment::read(reader)?;
Ok(SumCommit {
commit: Commitment::read(reader)?,
commit: commit,
secp: secp,
})
}
@ -557,12 +624,15 @@ mod test {
let keychain = Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let commit = keychain.commit(5, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(5, &key_id, commit, msg).unwrap();
let out = Output {
features: DEFAULT_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: proof,
};
@ -581,12 +651,15 @@ mod test {
let key_id = keychain.derive_key_id(1).unwrap();
let commit = keychain.commit(1003, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(1003, &key_id, commit, msg).unwrap();
let output = Output {
features: DEFAULT_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: proof,
};


@ -18,7 +18,6 @@ use time;
use core;
use consensus::MINIMUM_DIFFICULTY;
use core::hash::Hashed;
use core::target::Difficulty;
use global;
@ -26,7 +25,7 @@ use global;
/// fees and a height of zero.
pub fn genesis() -> core::Block {
let proof_size = global::proofsize();
let empty_hash = [].hash();
let empty_hash = core::hash::Hash([0x0; 32]);
core::Block {
header: core::BlockHeader {
version: 1,


@ -26,7 +26,7 @@ extern crate bitflags;
extern crate blake2_rfc as blake2;
extern crate byteorder;
#[macro_use]
extern crate log;
extern crate slog;
extern crate num_bigint as bigint;
extern crate rand;
extern crate secp256k1zkp as secp;


@ -25,6 +25,7 @@ use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
use keychain::{Identifier, IDENTIFIER_SIZE};
use core::hash::Hashed;
use consensus::VerifySortOrder;
use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash};
use secp::pedersen::Commitment;
use secp::pedersen::RangeProof;
use secp::constants::{MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE};
@ -198,7 +199,7 @@ pub trait WriteableSorted {
/// A consensus rule requires everything is sorted lexicographically to avoid
/// leaking any information through specific ordering of items.
pub fn read_and_verify_sorted<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
where T: Readable + Hashed
where T: Readable + Hashed + Writeable
{
let result: Vec<T> = try!((0..count).map(|_| T::read(reader)).collect());
result.verify_sort_order()?;
@ -527,7 +528,11 @@ impl AsFixedBytes for [u8; 8] {
return 8;
}
}
impl AsFixedBytes for [u8; 32] {
impl AsFixedBytes for [u8; 20] {
fn len(&self) -> usize {
return 20;
}
}impl AsFixedBytes for [u8; 32] {
fn len(&self) -> usize {
return 32;
}
@ -562,6 +567,11 @@ impl AsFixedBytes for ::secp::pedersen::Commitment {
return PEDERSEN_COMMITMENT_SIZE;
}
}
impl AsFixedBytes for SwitchCommitHash {
fn len(&self) -> usize {
return SWITCH_COMMIT_HASH_SIZE;
}
}
impl AsFixedBytes for ::keychain::Identifier {
fn len(&self) -> usize {
return IDENTIFIER_SIZE;


@ -48,7 +48,7 @@ features=["no-plugin-build"]
This may help when building on 32 bit systems or non x86 architectures. You can still use the internal miner to mine by setting:
```
use_cuckoo_miner = true
use_cuckoo_miner = false
```
In `grin.toml`


@ -250,6 +250,7 @@ mod tests {
use secp;
use keychain::Keychain;
use rand;
use core::core::SwitchCommitHash;
#[test]
fn test_add_entry() {
@ -259,6 +260,8 @@ mod tests {
let key_id3 = keychain.derive_key_id(3).unwrap();
let output_commit = keychain.commit(70, &key_id1).unwrap();
let switch_commit = keychain.switch_commit(&key_id1).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
let inputs = vec![
core::transaction::Input(keychain.commit(50, &key_id2).unwrap()),
core::transaction::Input(keychain.commit(25, &key_id3).unwrap()),
@ -268,6 +271,7 @@ mod tests {
core::transaction::Output {
features: core::transaction::DEFAULT_OUTPUT,
commit: output_commit,
switch_commit_hash: switch_commit_hash,
proof: keychain.range_proof(100, &key_id1, output_commit, msg).unwrap(),
},
];


@ -604,6 +604,7 @@ mod tests {
use std::sync::{Arc, RwLock};
use blake2;
use core::global::MiningParameterMode;
use core::core::SwitchCommitHash;
macro_rules! expect_output_parent {
($pool:expr, $expected:pat, $( $output:expr ),+ ) => {
@ -1178,12 +1179,15 @@ mod tests {
let keychain = keychain_for_tests();
let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, msg).unwrap();
transaction::Output {
features: transaction::DEFAULT_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: proof,
}
}
@ -1193,12 +1197,15 @@ mod tests {
let keychain = keychain_for_tests();
let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(switch_commit);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, msg).unwrap();
transaction::Output {
features: transaction::COINBASE_OUTPUT,
commit: commit,
switch_commit_hash: switch_commit_hash,
proof: proof,
}
}


@ -42,7 +42,7 @@ use config::GlobalConfig;
use wallet::WalletConfig;
use core::global;
use keychain::Keychain;
use util::{LOGGER, init_logger};
use util::{LoggingConfig, LOGGER, init_logger};
fn start_from_config_file(mut global_config: GlobalConfig) {
info!(
@ -107,6 +107,8 @@ fn main() {
.mining_parameter_mode
.unwrap(),
);
} else {
init_logger(Some(LoggingConfig::default()));
}
let args = App::new("Grin")


@ -48,12 +48,12 @@ fn sumtree_append() {
})
);
let sum2 = HashSum::from_summable(1, &elems[0]) + HashSum::from_summable(2, &elems[1]);
let sum4 = sum2 + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
let sum4 = sum2 + (HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) +
(HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
let sum9 = sum8 + HashSum::from_summable(16, &elems[8]);
((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));
let sum9 = sum8 + HashSum::from_summable(16, &elems[8], None::<TestElem>);
{
let pmmr = PMMR::at(&mut backend, mmr_size);
@ -226,7 +226,7 @@ fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<
let mut pmmr = PMMR::at(backend, pos);
for elem in elems {
pmmr.push(elem.clone()).unwrap();
pmmr.push(elem.clone(), None::<TestElem>).unwrap();
}
pmmr.unpruned_size()
}