Consolidate validation in Block and Transaction ()

* Consolidate validation in Block and Transaction

Introduce TransactionBody which is included into block and tx.
Fixes 
This commit is contained in:
hashmap 2018-08-15 23:14:48 +02:00 committed by GitHub
parent 37fa413329
commit 99a66c1960
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
20 changed files with 570 additions and 476 deletions

View file

@ -134,7 +134,7 @@ impl OutputHandler {
// in the period between accepting the block and refreshing the wallet
if let Ok(block) = w(&self.chain).get_block(&header.hash()) {
let outputs = block
.outputs
.outputs()
.iter()
.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
.map(|output| {
@ -226,7 +226,8 @@ impl OutputHandler {
impl Handler for OutputHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
match req.uri()
match req
.uri()
.path()
.trim_right_matches("/")
.rsplit("/")
@ -362,7 +363,8 @@ impl Handler for TxHashSetHandler {
}
}
}
match req.uri()
match req
.uri()
.path()
.trim_right()
.trim_right_matches("/")
@ -418,7 +420,8 @@ pub struct PeerHandler {
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
if let Ok(addr) = req.uri()
if let Ok(addr) = req
.uri()
.path()
.trim_right_matches("/")
.rsplit("/")
@ -604,7 +607,8 @@ fn check_block_param(input: &String) -> Result<(), Error> {
impl Handler for BlockHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let el = req.uri()
let el = req
.uri()
.path()
.trim_right_matches("/")
.rsplit("/")
@ -649,7 +653,8 @@ impl Handler for BlockHandler {
impl Handler for HeaderHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let el = req.uri()
let el = req
.uri()
.path()
.trim_right_matches("/")
.rsplit("/")
@ -736,8 +741,8 @@ where
info!(
LOGGER,
"Pushing transaction with {} inputs and {} outputs to pool.",
tx.inputs.len(),
tx.outputs.len()
tx.inputs().len(),
tx.outputs().len()
);
// Push to tx pool.

View file

@ -292,7 +292,8 @@ impl OutputPrintable {
}
pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
let proof_str = self.proof
let proof_str = self
.proof
.clone()
.ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))
.unwrap();
@ -531,12 +532,12 @@ impl BlockPrintable {
include_proof: bool,
) -> BlockPrintable {
let inputs = block
.inputs
.inputs()
.iter()
.map(|x| util::to_hex(x.commitment().0.to_vec()))
.collect();
let outputs = block
.outputs
.outputs()
.iter()
.map(|output| {
OutputPrintable::from_output(
@ -548,7 +549,7 @@ impl BlockPrintable {
})
.collect();
let kernels = block
.kernels
.kernels()
.iter()
.map(|kernel| TxKernelPrintable::from_txkernel(kernel))
.collect();
@ -581,11 +582,13 @@ impl CompactBlockPrintable {
chain: Arc<chain::Chain>,
) -> CompactBlockPrintable {
let block = chain.get_block(&cb.hash()).unwrap();
let out_full = cb.out_full
let out_full = cb
.out_full
.iter()
.map(|x| OutputPrintable::from_output(x, chain.clone(), Some(&block.header), false))
.collect();
let kern_full = cb.kern_full
let kern_full = cb
.kern_full
.iter()
.map(|x| TxKernelPrintable::from_txkernel(x))
.collect();

View file

@ -421,7 +421,7 @@ impl Chain {
let height = self.next_block_height()?;
let mut txhashset = self.txhashset.write().unwrap();
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.verify_coinbase_maturity(&tx.inputs, height)?;
extension.verify_coinbase_maturity(&tx.inputs(), height)?;
Ok(())
})
}

View file

@ -66,9 +66,9 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
b.hash(),
b.header.height,
b.inputs.len(),
b.outputs.len(),
b.kernels.len(),
b.inputs().len(),
b.outputs().len(),
b.kernels().len(),
);
check_known(b.hash(), ctx)?;
@ -328,7 +328,7 @@ fn validate_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
fn validate_block_via_txhashset(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
// First check we are not attempting to spend any coinbase outputs
// before they have matured sufficiently.
ext.verify_coinbase_maturity(&b.inputs, b.header.height)?;
ext.verify_coinbase_maturity(&b.inputs(), b.header.height)?;
// apply the new block to the MMR trees and check the new root hashes
ext.apply_block(&b)?;

View file

@ -157,7 +157,7 @@ impl ChainStore {
pub fn build_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
let bitmap = block
.inputs
.inputs()
.iter()
.filter_map(|x| self.get_output_pos(&x.commitment()).ok())
.map(|x| x as u32)

View file

@ -474,27 +474,27 @@ impl<'a> Extension<'a> {
// Build bitmap of output pos spent (as inputs) by this tx for rewind.
let rewind_rm_pos = tx
.inputs
.inputs()
.iter()
.filter_map(|x| self.get_output_pos(&x.commitment()).ok())
.map(|x| x as u32)
.collect();
for ref output in &tx.outputs {
for ref output in tx.outputs() {
if let Err(e) = self.apply_output(output) {
self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
return Err(e);
}
}
for ref input in &tx.inputs {
for ref input in tx.inputs() {
if let Err(e) = self.apply_input(input) {
self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
return Err(e);
}
}
for ref kernel in &tx.kernels {
for ref kernel in tx.kernels() {
if let Err(e) = self.apply_kernel(kernel) {
self.rewind_raw_tx(output_pos, kernel_pos, &rewind_rm_pos)?;
return Err(e);
@ -591,15 +591,15 @@ impl<'a> Extension<'a> {
// A block is not valid if it has not been fully cut-through.
// So we can safely apply outputs first (we will not spend these in the same
// block).
for out in &b.outputs {
for out in b.outputs() {
self.apply_output(out)?;
}
for input in &b.inputs {
for input in b.inputs() {
self.apply_input(input)?;
}
for kernel in &b.kernels {
for kernel in b.kernels() {
self.apply_kernel(kernel)?;
}

View file

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate chrono;
extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_keychain as keychain;
@ -19,14 +20,13 @@ extern crate grin_store as store;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
extern crate chrono;
use chrono::Duration;
use std::fs;
use std::sync::Arc;
use chrono::Duration;
use chain::Chain;
use chain::types::NoopAdapter;
use chain::Chain;
use core::core::hash::Hashed;
use core::core::target::Difficulty;
use core::core::{Block, BlockHeader, OutputFeatures, OutputIdentifier, Transaction};
@ -74,12 +74,7 @@ fn mine_empty_chain() {
global::min_sizeshift()
};
b.header.pow.cuckoo_sizeshift = sizeshift;
pow::pow_size(
&mut b.header,
difficulty,
global::proofsize(),
sizeshift,
).unwrap();
pow::pow_size(&mut b.header, difficulty, global::proofsize(), sizeshift).unwrap();
b.header.pow.cuckoo_sizeshift = sizeshift;
let bhash = b.hash();
@ -99,7 +94,7 @@ fn mine_empty_chain() {
let block = chain.get_block(&header.hash()).unwrap();
assert_eq!(block.header.height, n);
assert_eq!(block.hash(), bhash);
assert_eq!(block.outputs.len(), 1);
assert_eq!(block.outputs().len(), 1);
// now check the block height index
let header_by_height = chain.get_header_by_height(n).unwrap();
@ -248,7 +243,7 @@ fn spend_in_fork_and_compact() {
// so we can spend the coinbase later
let b = prepare_block(&kc, &fork_head, &chain, 2);
let block_hash = b.hash();
let out_id = OutputIdentifier::from_output(&b.outputs[0]);
let out_id = OutputIdentifier::from_output(&b.outputs()[0]);
assert!(out_id.features.contains(OutputFeatures::COINBASE_OUTPUT));
fork_head = b.header.clone();
chain
@ -269,10 +264,7 @@ fn spend_in_fork_and_compact() {
let tx1 = build::transaction(
vec![
build::coinbase_input(
consensus::REWARD,
kc.derive_key_id(2).unwrap(),
),
build::coinbase_input(consensus::REWARD, kc.derive_key_id(2).unwrap()),
build::output(consensus::REWARD - 20000, kc.derive_key_id(30).unwrap()),
build::with_fee(20000),
],
@ -321,12 +313,12 @@ fn spend_in_fork_and_compact() {
assert_eq!(head.hash(), prev_main.hash());
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
.is_ok()
);
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
.is_err()
);
@ -344,12 +336,12 @@ fn spend_in_fork_and_compact() {
assert_eq!(head.hash(), prev_fork.hash());
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs()[0]))
.is_ok()
);
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs()[0]))
.is_err()
);

View file

@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate chrono;
extern crate env_logger;
extern crate grin_chain as chain;
extern crate grin_core as core;
@ -19,11 +20,10 @@ extern crate grin_keychain as keychain;
extern crate grin_store as store;
extern crate grin_wallet as wallet;
extern crate rand;
extern crate chrono;
use chrono::Duration;
use std::fs;
use std::sync::Arc;
use chrono::Duration;
use chain::types::NoopAdapter;
use chain::{Error, ErrorKind};
@ -78,8 +78,8 @@ fn test_coinbase_maturity() {
global::min_sizeshift(),
).unwrap();
assert_eq!(block.outputs.len(), 1);
let coinbase_output = block.outputs[0];
assert_eq!(block.outputs().len(), 1);
let coinbase_output = block.outputs()[0];
assert!(
coinbase_output
.features

View file

@ -21,14 +21,14 @@ use std::collections::HashSet;
use std::fmt;
use std::iter::FromIterator;
use consensus::{self, reward, VerifySortOrder, REWARD};
use consensus::{self, reward, REWARD};
use core::committed::{self, Committed};
use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable;
use core::target::Difficulty;
use core::{
transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Proof, ShortId,
Transaction, TxKernel,
Transaction, TransactionBody, TxKernel,
};
use global;
use keychain::{self, BlindingFactor};
@ -350,13 +350,8 @@ impl Readable for CompactBlock {
pub struct Block {
/// The header with metadata and commitments to the rest of the data
pub header: BlockHeader,
/// List of transaction inputs
pub inputs: Vec<Input>,
/// List of transaction outputs
pub outputs: Vec<Output>,
/// List of kernels with associated proofs (note these are offset from
/// tx_kernels)
pub kernels: Vec<TxKernel>,
/// The body - inputs/outputs/kernels
body: TransactionBody,
}
/// Implementation of Writeable for a block, defines how to write the block to a
@ -367,22 +362,7 @@ impl Writeable for Block {
self.header.write(writer)?;
if writer.serialization_mode() != ser::SerializationMode::Hash {
ser_multiwrite!(
writer,
[write_u64, self.inputs.len() as u64],
[write_u64, self.outputs.len() as u64],
[write_u64, self.kernels.len() as u64]
);
let mut inputs = self.inputs.clone();
let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone();
// Consensus rule that everything is sorted in lexicographical order on the
// wire.
inputs.write_sorted(writer)?;
outputs.write_sorted(writer)?;
kernels.write_sorted(writer)?;
self.body.write(writer)?;
}
Ok(())
}
@ -394,25 +374,11 @@ impl Readable for Block {
fn read(reader: &mut Reader) -> Result<Block, ser::Error> {
let header = BlockHeader::read(reader)?;
let (input_len, output_len, kernel_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64);
let inputs = read_and_verify_sorted(reader, input_len)?;
let outputs = read_and_verify_sorted(reader, output_len)?;
let kernels = read_and_verify_sorted(reader, kernel_len)?;
// TODO - we do not verify the input|output|kernel counts here.
// I think should call block.validate() as part of a call to read()
// but block.validate() as it stands currently requires the previous sums etc.
// So there is no easy way to do this in isolation.
// Maybe we need two variations of validate() where one handles the validation
// rules that *can* be done in isolation.
let body = TransactionBody::read(reader)?;
body.validate(true).map_err(|_| ser::Error::CorruptedData)?;
Ok(Block {
header: header,
inputs: inputs,
outputs: outputs,
kernels: kernels,
body: body,
})
}
}
@ -421,15 +387,15 @@ impl Readable for Block {
/// Pedersen commitment.
impl Committed for Block {
fn inputs_committed(&self) -> Vec<Commitment> {
self.inputs.iter().map(|x| x.commitment()).collect()
self.body.inputs_committed()
}
fn outputs_committed(&self) -> Vec<Commitment> {
self.outputs.iter().map(|x| x.commitment()).collect()
self.body.outputs_committed()
}
fn kernels_committed(&self) -> Vec<Commitment> {
self.kernels.iter().map(|x| x.excess()).collect()
self.body.kernels_committed()
}
}
@ -438,9 +404,7 @@ impl Default for Block {
fn default() -> Block {
Block {
header: Default::default(),
inputs: vec![],
outputs: vec![],
kernels: vec![],
body: Default::default(),
}
}
}
@ -488,9 +452,10 @@ impl Block {
// collect all the inputs, outputs and kernels from the txs
for tx in txs {
all_inputs.extend(tx.inputs);
all_outputs.extend(tx.outputs);
all_kernels.extend(tx.kernels);
let tb: TransactionBody = tx.into();
all_inputs.extend(tb.inputs);
all_outputs.extend(tb.outputs);
all_kernels.extend(tb.kernels);
}
// include the coinbase output(s) and kernel(s) from the compact_block
@ -512,9 +477,7 @@ impl Block {
// leave it to the caller to actually validate the block
Block {
header: cb.header,
inputs: all_inputs,
outputs: all_outputs,
kernels: all_kernels,
body: TransactionBody::new(all_inputs, all_outputs, all_kernels),
}.cut_through()
}
@ -524,6 +487,7 @@ impl Block {
let nonce = thread_rng().next_u64();
let mut out_full = self
.body
.outputs
.iter()
.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
@ -533,7 +497,7 @@ impl Block {
let mut kern_full = vec![];
let mut kern_ids = vec![];
for k in &self.kernels {
for k in self.kernels() {
if k.features.contains(KernelFeatures::COINBASE_KERNEL) {
kern_full.push(k.clone());
} else {
@ -555,6 +519,14 @@ impl Block {
}
}
/// Build a new empty block from a specified header
pub fn with_header(header: BlockHeader) -> Block {
Block {
header: header,
..Default::default()
}
}
/// Builds a new block ready to mine from the header of the previous block,
/// a vector of transactions and the reward information. Checks
/// that all transactions are valid and calculates the Merkle tree.
@ -577,7 +549,7 @@ impl Block {
let zero_commit = secp_static::commit_to_zero_value();
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
let mut excesses = map_vec!(agg_tx.kernels, |x| x.excess());
let mut excesses = map_vec!(agg_tx.kernels(), |x| x.excess());
excesses.push(prev.total_kernel_sum);
excesses.retain(|x| *x != zero_commit);
secp.commit_sum(excesses, vec![])?
@ -593,12 +565,40 @@ impl Block {
total_kernel_sum,
..Default::default()
},
inputs: agg_tx.inputs,
outputs: agg_tx.outputs,
kernels: agg_tx.kernels,
body: agg_tx.into(),
}.cut_through())
}
/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
&self.body.inputs
}
/// Get inputs mutable
pub fn inputs_mut(&mut self) -> &mut Vec<Input> {
&mut self.body.inputs
}
/// Get outputs
pub fn outputs(&self) -> &Vec<Output> {
&self.body.outputs
}
/// Get outputs mutable
pub fn outputs_mut(&mut self) -> &mut Vec<Output> {
&mut self.body.outputs
}
/// Get kernels
pub fn kernels(&self) -> &Vec<TxKernel> {
&self.body.kernels
}
/// Get kernels mut
pub fn kernels_mut(&mut self) -> &mut Vec<TxKernel> {
&mut self.body.kernels
}
/// Blockhash, computed using only the POW
pub fn hash(&self) -> Hash {
self.header.hash()
@ -606,7 +606,7 @@ impl Block {
/// Sum of all fees (inputs less outputs) in the block
pub fn total_fees(&self) -> u64 {
self.kernels.iter().map(|p| p.fee).sum()
self.body.kernels.iter().map(|p| p.fee).sum()
}
/// Matches any output with a potential spending input, eliminating them
@ -621,12 +621,14 @@ impl Block {
///
pub fn cut_through(self) -> Block {
let in_set = self
.body
.inputs
.iter()
.map(|inp| inp.commitment())
.collect::<HashSet<_>>();
let out_set = self
.body
.outputs
.iter()
.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
@ -636,12 +638,14 @@ impl Block {
let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
let new_inputs = self
.body
.inputs
.into_iter()
.filter(|inp| !to_cut_through.contains(&inp.commitment()))
.collect::<Vec<_>>();
let new_outputs = self
.body
.outputs
.into_iter()
.filter(|out| !to_cut_through.contains(&out.commitment()))
@ -653,9 +657,7 @@ impl Block {
total_difficulty: self.header.total_difficulty,
..self.header
},
inputs: new_inputs,
outputs: new_outputs,
kernels: self.kernels,
body: TransactionBody::new(new_inputs, new_outputs, self.body.kernels),
}
}
@ -667,17 +669,13 @@ impl Block {
prev_kernel_offset: &BlindingFactor,
prev_kernel_sum: &Commitment,
) -> Result<(Commitment), Error> {
// Verify we do not exceed the max number of inputs|outputs|kernels
// and that the "weight" based on these does not exceed the max permitted weight.
self.verify_weight()?;
self.body.validate(true)?;
self.verify_sorted()?;
self.verify_cut_through()?;
self.verify_coinbase()?;
self.verify_kernel_lock_heights()?;
self.verify_coinbase()?;
// take the kernel offset for this block (block offset minus previous) and
// verify outputs and kernel sums
// verify outputs and kernel sums
let block_kernel_offset = if self.header.total_kernel_offset() == prev_kernel_offset.clone()
{
// special case when the sum hasn't changed (typically an empty block),
@ -698,85 +696,22 @@ impl Block {
return Err(Error::InvalidTotalKernelSum);
}
self.verify_rangeproofs()?;
self.verify_kernel_signatures()?;
Ok(kernel_sum)
}
// Verify the block is not too big in terms of number of inputs|outputs|kernels.
fn verify_weight(&self) -> Result<(), Error> {
let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+ self.outputs.len() * consensus::BLOCK_OUTPUT_WEIGHT
+ self.kernels.len() * consensus::BLOCK_KERNEL_WEIGHT;
if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
return Err(Error::TooHeavy);
}
Ok(())
}
// Verify that inputs|outputs|kernels are all sorted in lexicographical order.
fn verify_sorted(&self) -> Result<(), Error> {
self.inputs.verify_sort_order()?;
self.outputs.verify_sort_order()?;
self.kernels.verify_sort_order()?;
Ok(())
}
// Verify that no input is spending an output from the same block.
fn verify_cut_through(&self) -> Result<(), Error> {
for inp in &self.inputs {
if self
.outputs
.iter()
.any(|out| out.commitment() == inp.commitment())
{
return Err(Error::CutThrough);
}
}
Ok(())
}
fn verify_kernel_lock_heights(&self) -> Result<(), Error> {
for k in &self.kernels {
// check we have no kernels with lock_heights greater than current height
// no tx can be included in a block earlier than its lock_height
if k.lock_height > self.header.height {
return Err(Error::KernelLockHeight(k.lock_height));
}
}
Ok(())
}
/// Verify the kernel signatures.
/// Note: this is expensive.
fn verify_kernel_signatures(&self) -> Result<(), Error> {
for x in &self.kernels {
x.verify()?;
}
Ok(())
}
/// Verify all the output rangeproofs.
/// Note: this is expensive.
fn verify_rangeproofs(&self) -> Result<(), Error> {
for x in &self.outputs {
x.verify_proof()?;
}
Ok(())
}
/// Validate the coinbase outputs generated by miners.
/// Validate the coinbase outputs generated by miners.
/// Check the sum of coinbase-marked outputs match
/// the sum of coinbase-marked kernels accounting for fees.
pub fn verify_coinbase(&self) -> Result<(), Error> {
let cb_outs = self
.body
.outputs
.iter()
.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
.collect::<Vec<&Output>>();
let cb_kerns = self
.body
.kernels
.iter()
.filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
@ -802,4 +737,15 @@ impl Block {
Ok(())
}
fn verify_kernel_lock_heights(&self) -> Result<(), Error> {
for k in &self.body.kernels {
// check we have no kernels with lock_heights greater than current height
// no tx can be included in a block earlier than its lock_height
if k.lock_height > self.header.height {
return Err(Error::KernelLockHeight(k.lock_height));
}
}
Ok(())
}
}

View file

@ -43,7 +43,7 @@ bitflags! {
}
}
/// Errors thrown by Block validation
/// Errors thrown by Transaction validation
#[derive(Clone, Eq, Debug, PartialEq)]
pub enum Error {
/// Underlying Secp256k1 error (signature validation or invalid public key
@ -240,35 +240,28 @@ impl PMMRable for TxKernel {
}
}
/// A transaction
/// TransactionBody is a common abstraction for transaction and block
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Transaction {
pub struct TransactionBody {
/// List of inputs spent by the transaction.
pub inputs: Vec<Input>,
/// List of outputs the transaction produces.
pub outputs: Vec<Output>,
/// List of kernels that make up this transaction (usually a single kernel).
pub kernels: Vec<TxKernel>,
/// The kernel "offset" k2
/// excess is k1G after splitting the key k = k1 + k2
pub offset: BlindingFactor,
}
/// PartialEq
impl PartialEq for Transaction {
fn eq(&self, tx: &Transaction) -> bool {
self.inputs == tx.inputs
&& self.outputs == tx.outputs
&& self.kernels == tx.kernels
&& self.offset == tx.offset
impl PartialEq for TransactionBody {
fn eq(&self, l: &TransactionBody) -> bool {
self.inputs == l.inputs && self.outputs == l.outputs && self.kernels == l.kernels
}
}
/// Implementation of Writeable for a fully blinded transaction, defines how to
/// write the transaction as binary.
impl Writeable for Transaction {
/// Implementation of Writeable for a body, defines how to
/// write the body as binary.
impl Writeable for TransactionBody {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.offset.write(writer)?;
ser_multiwrite!(
writer,
[write_u64, self.inputs.len() as u64],
@ -290,12 +283,10 @@ impl Writeable for Transaction {
}
}
/// Implementation of Readable for a transaction, defines how to read a full
/// transaction from a binary stream.
impl Readable for Transaction {
fn read(reader: &mut Reader) -> Result<Transaction, ser::Error> {
let offset = BlindingFactor::read(reader)?;
/// Implementation of Readable for a body, defines how to read a
/// body from a binary stream.
impl Readable for TransactionBody {
fn read(reader: &mut Reader) -> Result<TransactionBody, ser::Error> {
let (input_len, output_len, kernel_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64);
@ -303,24 +294,17 @@ impl Readable for Transaction {
let outputs = read_and_verify_sorted(reader, output_len)?;
let kernels = read_and_verify_sorted(reader, kernel_len)?;
let tx = Transaction {
offset,
let body = TransactionBody {
inputs,
outputs,
kernels,
};
// Now validate the tx.
// Treat any validation issues as data corruption.
// An example of this would be reading a tx
// that exceeded the allowed number of inputs.
tx.validate(false).map_err(|_| ser::Error::CorruptedData)?;
Ok(tx)
Ok(body)
}
}
impl Committed for Transaction {
impl Committed for TransactionBody {
fn inputs_committed(&self) -> Vec<Commitment> {
self.inputs.iter().map(|x| x.commitment()).collect()
}
@ -334,17 +318,16 @@ impl Committed for Transaction {
}
}
impl Default for Transaction {
fn default() -> Transaction {
Transaction::empty()
impl Default for TransactionBody {
fn default() -> TransactionBody {
TransactionBody::empty()
}
}
impl Transaction {
impl TransactionBody {
/// Creates a new empty transaction (no inputs or outputs, zero fee).
pub fn empty() -> Transaction {
Transaction {
offset: BlindingFactor::zero(),
pub fn empty() -> TransactionBody {
TransactionBody {
inputs: vec![],
outputs: vec![],
kernels: vec![],
@ -353,49 +336,43 @@ impl Transaction {
/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
Transaction {
offset: BlindingFactor::zero(),
pub fn new(
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
) -> TransactionBody {
TransactionBody {
inputs: inputs,
outputs: outputs,
kernels: kernels,
}
}
/// Creates a new transaction using this transaction as a template
/// and with the specified offset.
pub fn with_offset(self, offset: BlindingFactor) -> Transaction {
Transaction {
offset: offset,
..self
}
}
/// Builds a new transaction with the provided inputs added. Existing
/// Builds a new body with the provided inputs added. Existing
/// inputs, if any, are kept intact.
pub fn with_input(self, input: Input) -> Transaction {
pub fn with_input(self, input: Input) -> TransactionBody {
let mut new_ins = self.inputs;
new_ins.push(input);
new_ins.sort();
Transaction {
TransactionBody {
inputs: new_ins,
..self
}
}
/// Builds a new transaction with the provided output added. Existing
/// Builds a new TransactionBody with the provided output added. Existing
/// outputs, if any, are kept intact.
pub fn with_output(self, output: Output) -> Transaction {
pub fn with_output(self, output: Output) -> TransactionBody {
let mut new_outs = self.outputs;
new_outs.push(output);
new_outs.sort();
Transaction {
TransactionBody {
outputs: new_outs,
..self
}
}
/// Total fee for a transaction is the sum of fees of all kernels.
/// Total fee for a TransactionBody is the sum of fees of all kernels.
pub fn fee(&self) -> u64 {
self.kernels
.iter()
@ -406,7 +383,21 @@ impl Transaction {
self.fee() as i64
}
/// Lock height of a transaction is the max lock height of the kernels.
/// Calculate transaction weight
pub fn body_weight(&self) -> u32 {
TransactionBody::weight(self.inputs.len(), self.outputs.len())
}
/// Calculate transaction weight from transaction details
pub fn weight(input_len: usize, output_len: usize) -> u32 {
let mut body_weight = -1 * (input_len as i32) + (4 * output_len as i32) + 1;
if body_weight < 1 {
body_weight = 1;
}
body_weight as u32
}
/// Lock height of a body is the max lock height of the kernels.
pub fn lock_height(&self) -> u64 {
self.kernels
.iter()
@ -431,13 +422,14 @@ impl Transaction {
Ok(())
}
// Verify the tx is not too big in terms of number of inputs|outputs|kernels.
fn verify_weight(&self) -> Result<(), Error> {
// check the tx as if it was a block, with an additional output and
// Verify the body is not too big in terms of number of inputs|outputs|kernels.
fn verify_weight(&self, with_reward: bool) -> Result<(), Error> {
// if with_reward is not set, check the body as if it was a block, with an additional output and
// kernel for reward
let reserve = if with_reward { 0 } else { 1 };
let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+ (self.outputs.len() + 1) * consensus::BLOCK_OUTPUT_WEIGHT
+ (self.kernels.len() + 1) * consensus::BLOCK_KERNEL_WEIGHT;
+ (self.outputs.len() + reserve) * consensus::BLOCK_OUTPUT_WEIGHT
+ (self.kernels.len() + reserve) * consensus::BLOCK_KERNEL_WEIGHT;
if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
return Err(Error::TooHeavy);
@ -445,36 +437,6 @@ impl Transaction {
Ok(())
}
/// Validates all relevant parts of a fully built transaction. Checks the
/// excess value against the signature as well as range proofs for each
/// output.
pub fn validate(&self, as_block: bool) -> Result<(), Error> {
if !as_block {
self.verify_features()?;
self.verify_weight()?;
self.verify_kernel_sums(self.overage(), self.offset)?;
}
self.verify_sorted()?;
self.verify_cut_through()?;
self.verify_rangeproofs()?;
self.verify_kernel_signatures()?;
Ok(())
}
/// Calculate transaction weight
pub fn tx_weight(&self) -> u32 {
Transaction::weight(self.inputs.len(), self.outputs.len())
}
/// Calculate transaction weight from transaction details
pub fn weight(input_len: usize, output_len: usize) -> u32 {
let mut tx_weight = -1 * (input_len as i32) + (4 * output_len as i32) + 1;
if tx_weight < 1 {
tx_weight = 1;
}
tx_weight as u32
}
// Verify that inputs|outputs|kernels are all sorted in lexicographical order.
fn verify_sorted(&self) -> Result<(), Error> {
self.inputs.verify_sort_order()?;
@ -497,10 +459,10 @@ impl Transaction {
Ok(())
}
// Verify we have no invalid outputs or kernels in the transaction
// due to invalid features.
// Specifically, a transaction cannot contain a coinbase output or a coinbase kernel.
fn verify_features(&self) -> Result<(), Error> {
/// Verify we have no invalid outputs or kernels in the transaction
/// due to invalid features.
/// Specifically, a transaction cannot contain a coinbase output or a coinbase kernel.
pub fn verify_features(&self) -> Result<(), Error> {
self.verify_output_features()?;
self.verify_kernel_features()?;
Ok(())
@ -529,6 +491,202 @@ impl Transaction {
}
Ok(())
}
/// Validates all relevant parts of a transaction body. Checks the
/// excess value against the signature as well as range proofs for each
/// output.
pub fn validate(&self, with_reward: bool) -> Result<(), Error> {
self.verify_weight(with_reward)?;
self.verify_sorted()?;
self.verify_cut_through()?;
self.verify_rangeproofs()?;
self.verify_kernel_signatures()?;
Ok(())
}
}
/// A transaction
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Transaction {
/// The kernel "offset" k2
/// excess is k1G after splitting the key k = k1 + k2
pub offset: BlindingFactor,
/// The transaction body - inputs/outputs/kernels
body: TransactionBody,
}
/// PartialEq
impl PartialEq for Transaction {
fn eq(&self, tx: &Transaction) -> bool {
self.body == tx.body && self.offset == tx.offset
}
}
impl Into<TransactionBody> for Transaction {
fn into(self) -> TransactionBody {
self.body
}
}
/// Implementation of Writeable for a fully blinded transaction, defines how to
/// write the transaction as binary.
impl Writeable for Transaction {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.offset.write(writer)?;
self.body.write(writer)?;
Ok(())
}
}
/// Implementation of Readable for a transaction, defines how to read a full
/// transaction from a binary stream.
impl Readable for Transaction {
fn read(reader: &mut Reader) -> Result<Transaction, ser::Error> {
let offset = BlindingFactor::read(reader)?;
let body = TransactionBody::read(reader)?;
let tx = Transaction { offset, body };
// Now validate the tx.
// Treat any validation issues as data corruption.
// An example of this would be reading a tx
// that exceeded the allowed number of inputs.
tx.validate(false).map_err(|_| ser::Error::CorruptedData)?;
Ok(tx)
}
}
impl Committed for Transaction {
fn inputs_committed(&self) -> Vec<Commitment> {
self.body.inputs_committed()
}
fn outputs_committed(&self) -> Vec<Commitment> {
self.body.outputs_committed()
}
fn kernels_committed(&self) -> Vec<Commitment> {
self.body.kernels_committed()
}
}
impl Default for Transaction {
fn default() -> Transaction {
Transaction::empty()
}
}
impl Transaction {
	/// Creates a new empty transaction (no inputs or outputs, zero fee).
	pub fn empty() -> Transaction {
		Transaction {
			offset: BlindingFactor::zero(),
			body: Default::default(),
		}
	}

	/// Creates a new transaction initialized with
	/// the provided inputs, outputs and kernels.
	pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
		Transaction {
			offset: BlindingFactor::zero(),
			body: TransactionBody::new(inputs, outputs, kernels),
		}
	}

	/// Creates a new transaction using this transaction as a template
	/// and with the specified offset.
	pub fn with_offset(self, offset: BlindingFactor) -> Transaction {
		Transaction { offset, ..self }
	}

	/// Builds a new transaction with the provided inputs added. Existing
	/// inputs, if any, are kept intact.
	pub fn with_input(self, input: Input) -> Transaction {
		Transaction {
			body: self.body.with_input(input),
			..self
		}
	}

	/// Builds a new transaction with the provided output added. Existing
	/// outputs, if any, are kept intact.
	pub fn with_output(self, output: Output) -> Transaction {
		Transaction {
			body: self.body.with_output(output),
			..self
		}
	}

	/// Get inputs
	pub fn inputs(&self) -> &Vec<Input> {
		&self.body.inputs
	}

	/// Get inputs mutable
	pub fn inputs_mut(&mut self) -> &mut Vec<Input> {
		&mut self.body.inputs
	}

	/// Get outputs
	pub fn outputs(&self) -> &Vec<Output> {
		&self.body.outputs
	}

	/// Get outputs mutable
	pub fn outputs_mut(&mut self) -> &mut Vec<Output> {
		&mut self.body.outputs
	}

	/// Get kernels
	pub fn kernels(&self) -> &Vec<TxKernel> {
		&self.body.kernels
	}

	/// Get kernels mutable
	pub fn kernels_mut(&mut self) -> &mut Vec<TxKernel> {
		&mut self.body.kernels
	}

	/// Total fee for a transaction is the sum of fees of all kernels.
	pub fn fee(&self) -> u64 {
		self.body.fee()
	}

	// Overage of the tx, as computed by the underlying body.
	fn overage(&self) -> i64 {
		self.body.overage()
	}

	/// Lock height of a transaction is the max lock height of the kernels.
	pub fn lock_height(&self) -> u64 {
		self.body.lock_height()
	}

	/// Validates all relevant parts of a fully built transaction. Checks the
	/// excess value against the signature as well as range proofs for each
	/// output.
	pub fn validate(&self, with_reward: bool) -> Result<(), Error> {
		self.body.validate(with_reward)?;
		// A regular (non-reward) tx must additionally pass the feature checks
		// and have kernel sums that balance against the overage and offset.
		if !with_reward {
			self.body.verify_features()?;
			self.verify_kernel_sums(self.overage(), self.offset)?;
		}
		Ok(())
	}

	/// Calculate transaction weight
	pub fn tx_weight(&self) -> u32 {
		self.body.body_weight()
	}

	/// Calculate transaction weight from transaction details
	pub fn weight(input_len: usize, output_len: usize) -> u32 {
		TransactionBody::weight(input_len, output_len)
	}
}
/// Aggregate a vec of transactions into a multi-kernel transaction with
@ -550,11 +708,11 @@ pub fn aggregate(
// we will sum these later to give a single aggregate offset
kernel_offsets.push(transaction.offset);
inputs.append(&mut transaction.inputs);
outputs.append(&mut transaction.outputs);
kernels.append(&mut transaction.kernels);
inputs.append(&mut transaction.body.inputs);
outputs.append(&mut transaction.body.outputs);
kernels.append(&mut transaction.body.kernels);
}
let as_block = reward.is_some();
let with_reward = reward.is_some();
if let Some((out, kernel)) = reward {
outputs.push(out);
kernels.push(kernel);
@ -604,7 +762,7 @@ pub fn aggregate(
// The resulting tx could be invalid for a variety of reasons -
// * tx too large (too many inputs|outputs|kernels)
// * cut-through may have invalidated the sums
tx.validate(as_block)?;
tx.validate(with_reward)?;
Ok(tx)
}
@ -622,18 +780,18 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
let tx = aggregate(txs, None)?;
for mk_input in mk_tx.inputs {
if !tx.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
for mk_input in mk_tx.body.inputs {
if !tx.body.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
inputs.push(mk_input);
}
}
for mk_output in mk_tx.outputs {
if !tx.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
for mk_output in mk_tx.body.outputs {
if !tx.body.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
outputs.push(mk_output);
}
}
for mk_kernel in mk_tx.kernels {
if !tx.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
for mk_kernel in mk_tx.body.kernels {
if !tx.body.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
kernels.push(mk_kernel);
}
}

View file

@ -25,109 +25,83 @@ use global;
/// is small enough to mine it on the fly, so it does not contain its own
/// proof of work solution. Can also be easily mutated for different tests.
pub fn genesis_dev() -> core::Block {
	// Genesis is built as a header-only block: no inputs, outputs or kernels.
	core::Block::with_header(core::BlockHeader {
		height: 0,
		previous: core::hash::Hash([0xff; 32]),
		timestamp: Utc.ymd(1997, 8, 4).and_hms(0, 0, 0),
		nonce: global::get_genesis_nonce(),
		..Default::default()
	})
}
/// First testnet genesis block, still subject to change (especially the date,
/// will hopefully come before Christmas).
pub fn genesis_testnet1() -> core::Block {
	// Genesis is built as a header-only block: no inputs, outputs or kernels.
	core::Block::with_header(core::BlockHeader {
		height: 0,
		previous: core::hash::Hash([0xff; 32]),
		timestamp: Utc.ymd(2017, 11, 16).and_hms(20, 0, 0),
		nonce: 28205,
		// Fixed proof-of-work solution mined for this genesis block.
		pow: core::Proof::new(vec![
			0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, 0x203a, 0x214b, 0x293b, 0x2b74, 0x2bfa,
			0x2c26, 0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x4cc3, 0x55af, 0x5a70, 0x5b14,
			0x5e1c, 0x5f76, 0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb, 0x649b, 0x64e5, 0x65a1,
			0x6b69, 0x70f8, 0x71c7, 0x71cd, 0x7492, 0x7b11, 0x7db8, 0x7f29, 0x7ff8,
		]),
		..Default::default()
	})
}
/// Second testnet genesis block (cuckoo30).
pub fn genesis_testnet2() -> core::Block {
	// Genesis is built as a header-only block: no inputs, outputs or kernels.
	core::Block::with_header(core::BlockHeader {
		height: 0,
		previous: core::hash::Hash([0xff; 32]),
		timestamp: Utc.ymd(2018, 3, 26).and_hms(16, 0, 0),
		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
		nonce: 1060,
		// Fixed proof-of-work solution mined for this genesis block.
		pow: core::Proof::new(vec![
			0x1940730, 0x333b9d0, 0x4739d6f, 0x4c6cfb1, 0x6e3d6c3, 0x74408a3, 0x7ba2bd2, 0x83e2024,
			0x8ca22b5, 0x9d39ab8, 0xb6646dd, 0xc6698b6, 0xc6f78fe, 0xc99b662, 0xcf2ae8c, 0xcf41eed,
			0xdd073e6, 0xded6af8, 0xf08d1a5, 0x1156a144, 0x11d1160a, 0x131bb0a5, 0x137ad703,
			0x13b0831f, 0x1421683f, 0x147e3c1f, 0x1496fda0, 0x150ba22b, 0x15cc5bc6, 0x16edf697,
			0x17ced40c, 0x17d84f9e, 0x18a515c1, 0x19320d9c, 0x19da4f6d, 0x1b50bcb1, 0x1b8bc72f,
			0x1c7b6964, 0x1d07b3a9, 0x1d189d4d, 0x1d1f9a15, 0x1dafcd41,
		]),
		..Default::default()
	})
}
/// Second testnet genesis block (cuckoo30). Temporary values for now.
pub fn genesis_testnet3() -> core::Block {
	// Genesis is built as a header-only block: no inputs, outputs or kernels.
	core::Block::with_header(core::BlockHeader {
		height: 0,
		previous: core::hash::Hash([0xff; 32]),
		timestamp: Utc.ymd(2018, 7, 8).and_hms(18, 0, 0),
		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
		nonce: 4956988373127691,
		// Fixed proof-of-work solution mined for this genesis block.
		pow: core::Proof::new(vec![
			0xa420dc, 0xc8ffee, 0x10e433e, 0x1de9428, 0x2ed4cea, 0x52d907b, 0x5af0e3f, 0x6b8fcae,
			0x8319b53, 0x845ca8c, 0x8d2a13e, 0x8d6e4cc, 0x9349e8d, 0xa7a33c5, 0xaeac3cb, 0xb193e23,
			0xb502e19, 0xb5d9804, 0xc9ac184, 0xd4f4de3, 0xd7a23b8, 0xf1d8660, 0xf443756,
			0x10b833d2, 0x11418fc5, 0x11b8aeaf, 0x131836ec, 0x132ab818, 0x13a46a55, 0x13df89fe,
			0x145d65b5, 0x166f9c3a, 0x166fe0ef, 0x178cb36f, 0x185baf68, 0x1bbfe563, 0x1bd637b4,
			0x1cfc8382, 0x1d1ed012, 0x1e391ca5, 0x1e999b4c, 0x1f7c6d21,
		]),
		..Default::default()
	})
}
/// Placeholder for mainnet genesis block, will definitely change before
/// release so no use trying to pre-mine it.
pub fn genesis_main() -> core::Block {
	// Genesis is built as a header-only block: no inputs, outputs or kernels.
	// The proof of work is a zeroed placeholder until mainnet genesis is mined.
	core::Block::with_header(core::BlockHeader {
		height: 0,
		previous: core::hash::Hash([0xff; 32]),
		timestamp: Utc.ymd(2018, 8, 14).and_hms(0, 0, 0),
		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
		nonce: global::get_genesis_nonce(),
		pow: core::Proof::zero(consensus::PROOFSIZE),
		..Default::default()
	})
}

View file

@ -12,25 +12,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate chrono;
extern crate grin_core;
extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate chrono;
pub mod common;
use chrono::Duration;
use common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use grin_core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
use grin_core::core::Committed;
use grin_core::core::block::Error;
use grin_core::core::hash::Hashed;
use grin_core::core::id::{ShortId, ShortIdentifiable};
use grin_core::core::Committed;
use grin_core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures};
use grin_core::{global, ser};
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use std::time::Instant;
use chrono::Duration;
use util::{secp, secp_static};
use wallet::libtx::build::{self, input, output, with_fee};
@ -68,12 +68,7 @@ fn too_large_block() {
// block with no inputs/outputs/kernels
// no fees, no reward, no coinbase
fn very_empty_block() {
let b = Block {
header: BlockHeader::default(),
inputs: vec![],
outputs: vec![],
kernels: vec![],
};
let b = Block::with_header(BlockHeader::default());
assert_eq!(
b.verify_coinbase(),
@ -113,8 +108,8 @@ fn block_with_cut_through() {
// output) and should still be valid
println!("3");
b.validate(&BlindingFactor::zero(), &zero_commit).unwrap();
assert_eq!(b.inputs.len(), 3);
assert_eq!(b.outputs.len(), 3);
assert_eq!(b.inputs().len(), 3);
assert_eq!(b.outputs().len(), 3);
println!("4");
}
@ -126,18 +121,20 @@ fn empty_block_with_coinbase_is_valid() {
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![], &keychain, &prev, &key_id);
assert_eq!(b.inputs.len(), 0);
assert_eq!(b.outputs.len(), 1);
assert_eq!(b.kernels.len(), 1);
assert_eq!(b.inputs().len(), 0);
assert_eq!(b.outputs().len(), 1);
assert_eq!(b.kernels().len(), 1);
let coinbase_outputs = b.outputs
let coinbase_outputs = b
.outputs()
.iter()
.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
.map(|o| o.clone())
.collect::<Vec<_>>();
assert_eq!(coinbase_outputs.len(), 1);
let coinbase_kernels = b.kernels
let coinbase_kernels = b
.kernels()
.iter()
.filter(|out| out.features.contains(KernelFeatures::COINBASE_KERNEL))
.map(|o| o.clone())
@ -161,11 +158,11 @@ fn remove_coinbase_output_flag() {
let mut b = new_block(vec![], &keychain, &prev, &key_id);
assert!(
b.outputs[0]
b.outputs()[0]
.features
.contains(OutputFeatures::COINBASE_OUTPUT)
);
b.outputs[0]
b.outputs_mut()[0]
.features
.remove(OutputFeatures::COINBASE_OUTPUT);
@ -191,11 +188,11 @@ fn remove_coinbase_kernel_flag() {
let mut b = new_block(vec![], &keychain, &prev, &key_id);
assert!(
b.kernels[0]
b.kernels()[0]
.features
.contains(KernelFeatures::COINBASE_KERNEL)
);
b.kernels[0]
b.kernels_mut()[0]
.features
.remove(KernelFeatures::COINBASE_KERNEL);
@ -224,12 +221,13 @@ fn serialize_deserialize_block() {
// After header serialization, timestamp will lose 'nanos' info, that's the designed behavior.
// To suppress 'nanos' difference caused assertion fail, we force b.header also lose 'nanos'.
let origin_ts = b.header.timestamp;
b.header.timestamp = origin_ts - Duration::nanoseconds(origin_ts.timestamp_subsec_nanos() as i64);
b.header.timestamp =
origin_ts - Duration::nanoseconds(origin_ts.timestamp_subsec_nanos() as i64);
assert_eq!(b.header, b2.header);
assert_eq!(b.inputs, b2.inputs);
assert_eq!(b.outputs, b2.outputs);
assert_eq!(b.kernels, b2.kernels);
assert_eq!(b.inputs(), b2.inputs());
assert_eq!(b.outputs(), b2.outputs());
assert_eq!(b.kernels(), b2.kernels());
}
#[test]
@ -341,11 +339,11 @@ fn compact_block_hash_with_nonce() {
// correctly in both of the compact_blocks
assert_eq!(
cb1.kern_ids[0],
tx.kernels[0].short_id(&cb1.hash(), cb1.nonce)
tx.kernels()[0].short_id(&cb1.hash(), cb1.nonce)
);
assert_eq!(
cb2.kern_ids[0],
tx.kernels[0].short_id(&cb2.hash(), cb2.nonce)
tx.kernels()[0].short_id(&cb2.hash(), cb2.nonce)
);
}
@ -364,7 +362,7 @@ fn convert_block_to_compact_block() {
assert_eq!(
cb.kern_ids[0],
b.kernels
b.kernels()
.iter()
.find(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL))
.unwrap()
@ -381,8 +379,8 @@ fn hydrate_empty_compact_block() {
let cb = b.as_compact_block();
let hb = Block::hydrate_from(cb, vec![]);
assert_eq!(hb.header, b.header);
assert_eq!(hb.outputs, b.outputs);
assert_eq!(hb.kernels, b.kernels);
assert_eq!(hb.outputs(), b.outputs());
assert_eq!(hb.kernels(), b.kernels());
}
#[test]

View file

@ -28,8 +28,9 @@ use grin_core::core::{aggregate, deaggregate, KernelFeatures, Output, Transactio
use grin_core::ser;
use keychain::{BlindingFactor, ExtKeychain, Keychain};
use util::{secp_static, static_secp_instance};
use wallet::libtx::build::{self, initial_tx, input, output, with_excess, with_fee,
with_lock_height};
use wallet::libtx::build::{
self, initial_tx, input, output, with_excess, with_fee, with_lock_height,
};
#[test]
fn simple_tx_ser() {
@ -47,8 +48,8 @@ fn simple_tx_ser_deser() {
ser::serialize(&mut vec, &tx).expect("serialization failed");
let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap();
assert_eq!(dtx.fee(), 2);
assert_eq!(dtx.inputs.len(), 2);
assert_eq!(dtx.outputs.len(), 1);
assert_eq!(dtx.inputs().len(), 2);
assert_eq!(dtx.outputs().len(), 1);
assert_eq!(tx.hash(), dtx.hash());
}
@ -108,8 +109,8 @@ fn build_tx_kernel() {
tx.validate(false).unwrap();
// check the kernel is also itself valid
assert_eq!(tx.kernels.len(), 1);
let kern = &tx.kernels[0];
assert_eq!(tx.kernels().len(), 1);
let kern = &tx.kernels()[0];
kern.verify().unwrap();
assert_eq!(kern.features, KernelFeatures::DEFAULT_KERNEL);
@ -145,7 +146,10 @@ fn multi_kernel_transaction_deaggregation() {
assert!(tx3.validate(false).is_ok());
assert!(tx4.validate(false).is_ok());
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()], None).unwrap();
let tx1234 = aggregate(
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
None,
).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
@ -220,13 +224,16 @@ fn multi_kernel_transaction_deaggregation_4() {
assert!(tx4.validate(false).is_ok());
assert!(tx5.validate(false).is_ok());
let tx12345 = aggregate(vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
], None).unwrap();
let tx12345 = aggregate(
vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
],
None,
).unwrap();
assert!(tx12345.validate(false).is_ok());
let deaggregated_tx5 = deaggregate(
@ -251,13 +258,16 @@ fn multi_kernel_transaction_deaggregation_5() {
assert!(tx4.validate(false).is_ok());
assert!(tx5.validate(false).is_ok());
let tx12345 = aggregate(vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
], None).unwrap();
let tx12345 = aggregate(
vec![
tx1.clone(),
tx2.clone(),
tx3.clone(),
tx4.clone(),
tx5.clone(),
],
None,
).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()], None).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()], None).unwrap();
@ -309,9 +319,9 @@ fn hash_output() {
],
&keychain,
).unwrap();
let h = tx.outputs[0].hash();
let h = tx.outputs()[0].hash();
assert!(h != ZERO_HASH);
let h2 = tx.outputs[1].hash();
let h2 = tx.outputs()[1].hash();
assert!(h != h2);
}
@ -325,7 +335,7 @@ fn blind_tx() {
// with a bullet proof causes painful errors
// checks that the range proof on our blind output is sufficiently hiding
let Output { proof, .. } = btx.outputs[0];
let Output { proof, .. } = btx.outputs()[0];
let secp = static_secp_instance();
let secp = secp.lock().unwrap();

View file

@ -53,7 +53,7 @@ where
let mut txs = vec![];
for x in &self.entries {
for kernel in &x.tx.kernels {
for kernel in x.tx.kernels() {
// rehash each kernel to calculate the block specific short_id
let short_id = kernel.short_id(&cb.hash(), cb.nonce);
@ -97,7 +97,8 @@ where
to_state: PoolEntryState,
extra_tx: Option<Transaction>,
) -> Result<Vec<Transaction>, PoolError> {
let entries = &mut self.entries
let entries = &mut self
.entries
.iter_mut()
.filter(|x| x.state == from_state)
.collect::<Vec<_>>();
@ -189,8 +190,8 @@ where
fn remaining_transactions(&self, block: &Block) -> Vec<Transaction> {
self.entries
.iter()
.filter(|x| !x.tx.kernels.iter().any(|y| block.kernels.contains(y)))
.filter(|x| !x.tx.inputs.iter().any(|y| block.inputs.contains(y)))
.filter(|x| !x.tx.kernels().iter().any(|y| block.kernels().contains(y)))
.filter(|x| !x.tx.inputs().iter().any(|y| block.inputs().contains(y)))
.map(|x| x.tx.clone())
.collect()
}
@ -205,7 +206,7 @@ where
// Check each transaction in the pool
for entry in &self.entries {
let entry_kernel_set = entry.tx.kernels.iter().cloned().collect::<HashSet<_>>();
let entry_kernel_set = entry.tx.kernels().iter().cloned().collect::<HashSet<_>>();
if entry_kernel_set.is_subset(&kernel_set) {
found_txs.push(entry.tx.clone());
}

View file

@ -17,8 +17,8 @@
//! resulting tx pool can be added to the current chain state to produce a
//! valid chain state.
use chrono::prelude::Utc;
use std::sync::Arc;
use chrono::prelude::{Utc};
use core::core::hash::Hashed;
use core::core::{transaction, Block, CompactBlock, Transaction};
@ -69,9 +69,10 @@ where
fn add_to_txpool(&mut self, mut entry: PoolEntry) -> Result<(), PoolError> {
// First deaggregate the tx based on current txpool txs.
if entry.tx.kernels.len() > 1 {
let txs = self.txpool
.find_matching_transactions(entry.tx.kernels.clone());
if entry.tx.kernels().len() > 1 {
let txs = self
.txpool
.find_matching_transactions(entry.tx.kernels().clone());
if !txs.is_empty() {
entry.tx = transaction::deaggregate(entry.tx, txs)?;
entry.src.debug_name = "deagg".to_string();
@ -100,7 +101,7 @@ where
LOGGER,
"pool: add_to_pool: {:?}, kernels - {}, stem? {}",
tx.hash(),
tx.kernels.len(),
tx.kernels().len(),
stem,
);

View file

@ -20,8 +20,8 @@ extern crate grin_pool as pool;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
extern crate chrono;
extern crate rand;
pub mod common;
@ -29,8 +29,10 @@ use std::sync::{Arc, RwLock};
use chain::types::Tip;
use chain::{txhashset, ChainStore};
use common::{clean_output_dir, test_setup, test_source, test_transaction,
test_transaction_spending_coinbase, ChainAdapter};
use common::{
clean_output_dir, test_setup, test_source, test_transaction,
test_transaction_spending_coinbase, ChainAdapter,
};
use core::core::target::Difficulty;
use core::core::{transaction, Block, BlockHeader};
use keychain::{ExtKeychain, Keychain};
@ -171,7 +173,7 @@ fn test_the_transaction_pool() {
.aggregate_transaction()
.unwrap()
.unwrap();
assert_eq!(agg_tx.kernels.len(), 2);
assert_eq!(agg_tx.kernels().len(), 2);
write_pool
.add_to_pool(test_source(), agg_tx, false)
.unwrap();
@ -194,7 +196,7 @@ fn test_the_transaction_pool() {
.unwrap();
assert_eq!(write_pool.total_size(), 3);
let entry = write_pool.txpool.entries.last().unwrap();
assert_eq!(entry.tx.kernels.len(), 1);
assert_eq!(entry.tx.kernels().len(), 1);
assert_eq!(entry.src.debug_name, "deagg");
}

View file

@ -15,12 +15,12 @@
//! Build a block to mine: gathers transactions from the pool, assembles
//! them into a block and returns it.
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use itertools::Itertools;
use rand::{self, Rng};
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use chain;
use common::adapters::PoolToChainAdapter;
@ -178,8 +178,8 @@ fn build_block(
debug!(
LOGGER,
"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
b.inputs.len(),
b.outputs.len(),
b.inputs().len(),
b.outputs().len(),
b_difficulty,
b.header.clone().total_difficulty.to_num(),
);

View file

@ -170,8 +170,8 @@ pub fn initial_tx<K>(mut tx: Transaction) -> Box<Append<K>>
where
K: Keychain,
{
assert_eq!(tx.kernels.len(), 1);
let kern = tx.kernels.remove(0);
assert_eq!(tx.kernels().len(), 1);
let kern = tx.kernels_mut().remove(0);
Box::new(
move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) {
(tx.clone(), kern.clone(), sum)
@ -204,8 +204,8 @@ where
let blind_sum = ctx.keychain.blind_sum(&sum)?;
// we only support building a tx with a single kernel via build::transaction()
assert!(tx.kernels.is_empty());
tx.kernels.push(kern);
assert!(tx.kernels().is_empty());
tx.kernels_mut().push(kern);
Ok((tx, blind_sum))
}
@ -219,16 +219,16 @@ where
K: Keychain,
{
let (mut tx, blind_sum) = partial_transaction(elems, keychain)?;
assert_eq!(tx.kernels.len(), 1);
assert_eq!(tx.kernels().len(), 1);
let mut kern = tx.kernels.remove(0);
let mut kern = tx.kernels_mut().remove(0);
let msg = secp::Message::from_slice(&kernel_sig_msg(kern.fee, kern.lock_height))?;
let skey = blind_sum.secret_key(&keychain.secp())?;
kern.excess = keychain.secp().commit(0, skey)?;
kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &blind_sum).unwrap();
tx.kernels.push(kern);
tx.kernels_mut().push(kern);
Ok(tx)
}
@ -264,8 +264,8 @@ where
// commitments will sum correctly when including the offset
tx.offset = k2.clone();
assert!(tx.kernels.is_empty());
tx.kernels.push(kern);
assert!(tx.kernels().is_empty());
tx.kernels_mut().push(kern);
Ok(tx)
}

View file

@ -113,7 +113,7 @@ impl Slate {
K: Keychain,
{
// Append to the exiting transaction
if self.tx.kernels.len() != 0 {
if self.tx.kernels().len() != 0 {
elems.insert(0, build::initial_tx(self.tx.clone()));
}
let (tx, blind) = build::partial_transaction(elems, keychain)?;
@ -179,7 +179,8 @@ impl Slate {
/// Return the sum of public nonces
fn pub_nonce_sum(&self, secp: &secp::Secp256k1) -> Result<PublicKey, Error> {
let pub_nonces = self.participant_data
let pub_nonces = self
.participant_data
.iter()
.map(|p| &p.public_nonce)
.collect();
@ -191,7 +192,8 @@ impl Slate {
/// Return the sum of public blinding factors
fn pub_blind_sum(&self, secp: &secp::Secp256k1) -> Result<PublicKey, Error> {
let pub_blinds = self.participant_data
let pub_blinds = self
.participant_data
.iter()
.map(|p| &p.public_blind_excess)
.collect();
@ -250,9 +252,11 @@ impl Slate {
// the aggsig context with the "split" key
self.tx.offset =
BlindingFactor::from_secret_key(SecretKey::new(&keychain.secp(), &mut thread_rng()));
let blind_offset = keychain.blind_sum(&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(sec_key.clone()))
.sub_blinding_factor(self.tx.offset))?;
let blind_offset = keychain.blind_sum(
&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(sec_key.clone()))
.sub_blinding_factor(self.tx.offset),
)?;
*sec_key = blind_offset.secret_key(&keychain.secp())?;
Ok(())
}
@ -262,7 +266,7 @@ impl Slate {
// double check the fee amount included in the partial tx
// we don't necessarily want to just trust the sender
// we could just overwrite the fee here (but we won't) due to the sig
let fee = tx_fee(self.tx.inputs.len(), self.tx.outputs.len(), None);
let fee = tx_fee(self.tx.inputs().len(), self.tx.outputs().len(), None);
if fee > self.tx.fee() {
return Err(ErrorKind::Fee(
format!("Fee Dispute Error: {}, {}", self.tx.fee(), fee,).to_string(),
@ -361,7 +365,7 @@ impl Slate {
// build the final excess based on final tx and offset
let final_excess = {
// TODO - do we need to verify rangeproofs here?
for x in &final_tx.outputs {
for x in final_tx.outputs() {
x.verify_proof()?;
}
@ -379,13 +383,13 @@ impl Slate {
};
// update the tx kernel to reflect the offset excess and sig
assert_eq!(final_tx.kernels.len(), 1);
final_tx.kernels[0].excess = final_excess.clone();
final_tx.kernels[0].excess_sig = final_sig.clone();
assert_eq!(final_tx.kernels().len(), 1);
final_tx.kernels_mut()[0].excess = final_excess.clone();
final_tx.kernels_mut()[0].excess_sig = final_sig.clone();
// confirm the kernel verifies successfully before proceeding
debug!(LOGGER, "Validating final transaction");
final_tx.kernels[0].verify()?;
final_tx.kernels()[0].verify()?;
// confirm the overall transaction is valid (including the updated kernel)
let _ = final_tx.validate(false)?;

View file

@ -217,7 +217,7 @@ mod test {
let tx1 = build::transaction(vec![build::output(105, key_id1.clone())], &keychain).unwrap();
let tx2 = build::transaction(vec![build::input(105, key_id1.clone())], &keychain).unwrap();
assert_eq!(tx1.outputs[0].features, tx2.inputs[0].features);
assert_eq!(tx1.outputs[0].commitment(), tx2.inputs[0].commitment());
assert_eq!(tx1.outputs()[0].features, tx2.inputs()[0].features);
assert_eq!(tx1.outputs()[0].commitment(), tx2.inputs()[0].commitment());
}
}