introduce Inputs enum variants for future commit-only support (#3406)

This commit is contained in:
Antioch Peverell 2020-07-28 21:21:57 +01:00 committed by GitHub
parent b43666af80
commit 83b269961a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 203 additions and 120 deletions

View file

@ -629,11 +629,8 @@ impl BlockPrintable {
include_proof: bool,
include_merkle_proof: bool,
) -> Result<BlockPrintable, chain::Error> {
let inputs = block
.inputs()
.iter()
.map(|x| x.commitment().to_hex())
.collect();
let inputs: Vec<_> = block.inputs().into();
let inputs = inputs.iter().map(|x| x.commitment().to_hex()).collect();
let outputs = block
.outputs()
.iter()

View file

@ -1052,7 +1052,8 @@ impl<'a> Extension<'a> {
// Add spent_pos to affected_pos to update the accumulator later on.
// Remove the spent output from the output_pos index.
let mut spent = vec![];
for input in b.inputs() {
let inputs: Vec<_> = b.inputs().into();
for input in &inputs {
let pos = self.apply_input(input, batch)?;
affected_pos.push(pos.pos);
batch.delete_output_pos_height(&input.commitment())?;
@ -1331,7 +1332,8 @@ impl<'a> Extension<'a> {
// reused output commitment. For example an output at pos 1, spent, reused at pos 2.
// The output_pos index should be updated to reflect the old pos 1 when unspent.
if let Ok(spent) = spent {
for (x, y) in block.inputs().iter().zip(spent) {
let inputs: Vec<_> = block.inputs().into();
for (x, y) in inputs.iter().zip(spent) {
batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?;
}
}

View file

@ -16,7 +16,7 @@
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::pmmr::{self, ReadonlyPMMR};
use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, Transaction};
use crate::core::core::{Block, BlockHeader, Input, Inputs, Output, OutputIdentifier, Transaction};
use crate::core::global;
use crate::error::{Error, ErrorKind};
use crate::store::Batch;
@ -52,7 +52,8 @@ impl<'a> UTXOView<'a> {
self.validate_output(output, batch)?;
}
for input in block.inputs() {
let inputs: Vec<_> = block.inputs().into();
for input in &inputs {
self.validate_input(input, batch)?;
}
Ok(())
@ -66,7 +67,8 @@ impl<'a> UTXOView<'a> {
self.validate_output(output, batch)?;
}
for input in tx.inputs() {
let inputs: Vec<_> = tx.inputs().into();
for input in &inputs {
self.validate_input(input, batch)?;
}
Ok(())
@ -113,12 +115,13 @@ impl<'a> UTXOView<'a> {
/// that have not sufficiently matured.
pub fn verify_coinbase_maturity(
&self,
inputs: &[Input],
inputs: &Inputs,
height: u64,
batch: &Batch<'_>,
) -> Result<(), Error> {
// Find the greatest output pos of any coinbase
// outputs we are attempting to spend.
let inputs: Vec<_> = inputs.into();
let pos = inputs
.iter()
.filter(|x| x.is_coinbase())

View file

@ -16,12 +16,12 @@
use crate::consensus::{self, reward, REWARD};
use crate::core::committed::{self, Committed};
use crate::core::compact_block::{CompactBlock, CompactBlockBody};
use crate::core::compact_block::CompactBlock;
use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH};
use crate::core::verifier_cache::VerifierCache;
use crate::core::{
transaction, Commitment, Input, KernelFeatures, Output, Transaction, TransactionBody, TxKernel,
Weighting,
transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody,
TxKernel, Weighting,
};
use crate::global;
use crate::pow::{verify_size, Difficulty, Proof, ProofOfWork};
@ -32,10 +32,8 @@ use chrono::naive::{MAX_DATE, MIN_DATE};
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use chrono::Duration;
use keychain::{self, BlindingFactor};
use std::collections::HashSet;
use std::convert::TryInto;
use std::fmt;
use std::iter::FromIterator;
use std::sync::Arc;
use util::from_hex;
use util::RwLock;
@ -562,36 +560,35 @@ impl Block {
let header = cb.header.clone();
let mut all_inputs = HashSet::new();
let mut all_outputs = HashSet::new();
let mut all_kernels = HashSet::new();
let mut inputs = vec![];
let mut outputs = vec![];
let mut kernels = vec![];
// collect all the inputs, outputs and kernels from the txs
for tx in txs {
all_inputs.extend(tx.inputs());
all_outputs.extend(tx.outputs());
all_kernels.extend(tx.kernels());
let tx_inputs: Vec<_> = tx.inputs().into();
inputs.extend_from_slice(tx_inputs.as_slice());
outputs.extend_from_slice(tx.outputs());
kernels.extend_from_slice(tx.kernels());
}
// include the coinbase output(s) and kernel(s) from the compact_block
{
let body: CompactBlockBody = cb.into();
all_outputs.extend(body.out_full);
all_kernels.extend(body.kern_full);
}
// apply cut-through to our tx inputs and outputs
let (inputs, outputs) = transaction::cut_through(&mut inputs, &mut outputs)?;
// convert the sets to vecs
let all_inputs = Vec::from_iter(all_inputs);
let all_outputs = Vec::from_iter(all_outputs);
let all_kernels = Vec::from_iter(all_kernels);
let mut outputs = outputs.to_vec();
let mut kernels = kernels.to_vec();
// include the full outputs and kernels from the compact block
outputs.extend_from_slice(cb.out_full());
kernels.extend_from_slice(cb.kern_full());
// Initialize a tx body and sort everything.
let body = TransactionBody::init(&all_inputs, &all_outputs, &all_kernels, false)?;
let body = TransactionBody::init(inputs, &outputs, &kernels, false)?;
// Finally return the full block.
// Note: we have not actually validated the block here,
// caller must validate the block.
Block { header, body }.cut_through()
Ok(Block { header, body })
}
/// Build a new empty block from a specified header
@ -635,7 +632,7 @@ impl Block {
// Now build the block with all the above information.
// Note: We have not validated the block here.
// Caller must validate the block as necessary.
Block {
let block = Block {
header: BlockHeader {
version,
height,
@ -649,8 +646,8 @@ impl Block {
..Default::default()
},
body: agg_tx.into(),
}
.cut_through()
};
Ok(block)
}
/// Consumes this block and returns a new block with the coinbase output
@ -662,18 +659,18 @@ impl Block {
}
/// Get inputs
pub fn inputs(&self) -> &[Input] {
&self.body.inputs
pub fn inputs(&self) -> Inputs {
self.body.inputs()
}
/// Get outputs
pub fn outputs(&self) -> &[Output] {
&self.body.outputs
&self.body.outputs()
}
/// Get kernels
pub fn kernels(&self) -> &[TxKernel] {
&self.body.kernels
&self.body.kernels()
}
/// Sum of all fees (inputs less outputs) in the block
@ -681,24 +678,6 @@ impl Block {
self.body.fee()
}
/// Matches any output with a potential spending input, eliminating them
/// from the block. Provides a simple way to cut-through the block. The
/// elimination is stable with respect to the order of inputs and outputs.
/// Method consumes the block.
pub fn cut_through(self) -> Result<Block, Error> {
let mut inputs = self.inputs().to_vec();
let mut outputs = self.outputs().to_vec();
let (inputs, outputs) = transaction::cut_through(&mut inputs, &mut outputs)?;
// Initialize tx body and sort everything.
let body = TransactionBody::init(inputs, outputs, self.kernels(), false)?;
Ok(Block {
header: self.header,
body,
})
}
/// "Lightweight" validation that we can perform quickly during read/deserialization.
/// Subset of full validation that skips expensive verification steps, specifically -
/// * rangeproof verification (on the body)

View file

@ -375,9 +375,6 @@ pub enum Error {
InvalidProofMessage,
/// Error when verifying kernel sums via committed trait.
Committed(committed::Error),
/// Error when sums do not verify correctly during tx aggregation.
/// Likely a "double spend" across two unconfirmed txs.
AggregationError,
/// Validation error relating to cut-through (tx is spending its own
/// output).
CutThrough,
@ -650,23 +647,16 @@ pub enum Weighting {
}
/// TransactionBody is a common abstraction for transaction and block
#[derive(Serialize, Deserialize, Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct TransactionBody {
/// List of inputs spent by the transaction.
pub inputs: Vec<Input>,
pub inputs: Inputs,
/// List of outputs the transaction produces.
pub outputs: Vec<Output>,
/// List of kernels that make up this transaction (usually a single kernel).
pub kernels: Vec<TxKernel>,
}
/// PartialEq
impl PartialEq for TransactionBody {
fn eq(&self, l: &TransactionBody) -> bool {
self.inputs == l.inputs && self.outputs == l.outputs && self.kernels == l.kernels
}
}
/// Implementation of Writeable for a body, defines how to
/// write the body as binary.
impl Writeable for TransactionBody {
@ -719,15 +709,16 @@ impl Readable for TransactionBody {
impl Committed for TransactionBody {
fn inputs_committed(&self) -> Vec<Commitment> {
self.inputs.iter().map(|x| x.commitment()).collect()
let inputs: Vec<_> = self.inputs().into();
inputs.iter().map(|x| x.commitment()).collect()
}
fn outputs_committed(&self) -> Vec<Commitment> {
self.outputs.iter().map(|x| x.commitment()).collect()
self.outputs().iter().map(|x| x.commitment()).collect()
}
fn kernels_committed(&self) -> Vec<Commitment> {
self.kernels.iter().map(|x| x.excess()).collect()
self.kernels().iter().map(|x| x.excess()).collect()
}
}
@ -747,7 +738,7 @@ impl TransactionBody {
/// Creates a new empty transaction (no inputs or outputs, zero fee).
pub fn empty() -> TransactionBody {
TransactionBody {
inputs: vec![],
inputs: Inputs::default(),
outputs: vec![],
kernels: vec![],
}
@ -770,7 +761,7 @@ impl TransactionBody {
verify_sorted: bool,
) -> Result<TransactionBody, Error> {
let mut body = TransactionBody {
inputs: inputs.to_vec(),
inputs: inputs.into(),
outputs: outputs.to_vec(),
kernels: kernels.to_vec(),
};
@ -786,13 +777,30 @@ impl TransactionBody {
Ok(body)
}
/// Transaction inputs.
pub fn inputs(&self) -> Inputs {
self.inputs.clone()
}
/// Transaction outputs.
pub fn outputs(&self) -> &[Output] {
&self.outputs
}
/// Transaction kernels.
pub fn kernels(&self) -> &[TxKernel] {
&self.kernels
}
/// Builds a new body with the provided inputs added. Existing
/// inputs, if any, are kept intact.
/// Sort order is maintained.
pub fn with_input(mut self, input: Input) -> TransactionBody {
if let Err(e) = self.inputs.binary_search(&input) {
self.inputs.insert(e, input)
let mut inputs: Vec<_> = self.inputs.into();
if let Err(e) = inputs.binary_search(&input) {
inputs.insert(e, input)
};
self.inputs = inputs.into();
self
}
@ -957,7 +965,8 @@ impl TransactionBody {
// Verify that no input is spending an output from the same block.
// Assumes inputs and outputs are sorted
fn verify_cut_through(&self) -> Result<(), Error> {
let mut inputs = self.inputs.iter().map(|x| x.hash()).peekable();
let inputs: Vec<_> = self.inputs().into();
let mut inputs = inputs.iter().map(|x| x.hash()).peekable();
let mut outputs = self.outputs.iter().map(|x| x.hash()).peekable();
while let (Some(ih), Some(oh)) = (inputs.peek(), outputs.peek()) {
match ih.cmp(oh) {
@ -1195,18 +1204,18 @@ impl Transaction {
}
/// Get inputs
pub fn inputs(&self) -> &[Input] {
&self.body.inputs
pub fn inputs(&self) -> Inputs {
self.body.inputs()
}
/// Get outputs
pub fn outputs(&self) -> &[Output] {
&self.body.outputs
&self.body.outputs()
}
/// Get kernels
pub fn kernels(&self) -> &[TxKernel] {
&self.body.kernels
&self.body.kernels()
}
/// Total fee for a transaction is the sum of fees of all kernels.
@ -1273,23 +1282,23 @@ impl Transaction {
/// Matches any output with a potential spending input, eliminating them
/// from the Vec. Provides a simple way to cut-through a block or aggregated
/// transaction. The elimination is stable with respect to the order of inputs
/// and outputs.
/// transaction.
pub fn cut_through<'a>(
inputs: &'a mut [Input],
outputs: &'a mut [Output],
) -> Result<(&'a [Input], &'a [Output]), Error> {
// assemble output commitments set, checking they're all unique
outputs.sort_unstable();
if outputs.windows(2).any(|pair| pair[0] == pair[1]) {
return Err(Error::AggregationError);
}
inputs.sort_unstable();
// Make sure inputs and outputs are sorted consistently by commitment.
inputs.sort_unstable_by_key(|x| x.commitment());
outputs.sort_unstable_by_key(|x| x.commitment());
let mut inputs_idx = 0;
let mut outputs_idx = 0;
let mut ncut = 0;
while inputs_idx < inputs.len() && outputs_idx < outputs.len() {
match inputs[inputs_idx].hash().cmp(&outputs[outputs_idx].hash()) {
match inputs[inputs_idx]
.partial_cmp(&outputs[outputs_idx])
.expect("compare input to output")
{
Ordering::Less => {
inputs.swap(inputs_idx - ncut, inputs_idx);
inputs_idx += 1;
@ -1318,10 +1327,21 @@ pub fn cut_through<'a>(
outputs_idx += 1;
}
Ok((
&inputs[..inputs.len() - ncut],
&outputs[..outputs.len() - ncut],
))
// Take new shorter slices with cut-through elements removed.
let inputs = &inputs[..inputs.len() - ncut];
let outputs = &outputs[..outputs.len() - ncut];
// Check we have no duplicate inputs after cut-through.
if inputs.windows(2).any(|pair| pair[0] == pair[1]) {
return Err(Error::CutThrough);
}
// Check we have no duplicate outputs after cut-through.
if outputs.windows(2).any(|pair| pair[0] == pair[1]) {
return Err(Error::CutThrough);
}
Ok((inputs, outputs))
}
/// Aggregate a vec of txs into a multi-kernel tx with cut_through.
@ -1352,17 +1372,14 @@ pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
// we will sum these later to give a single aggregate offset
kernel_offsets.push(tx.offset.clone());
inputs.extend_from_slice(tx.inputs());
let tx_inputs: Vec<_> = tx.inputs().into();
inputs.extend_from_slice(&tx_inputs);
outputs.extend_from_slice(tx.outputs());
kernels.extend_from_slice(tx.kernels());
}
// Sort inputs and outputs during cut_through.
let (inputs, outputs) = cut_through(&mut inputs, &mut outputs)?;
// Now sort kernels.
kernels.sort_unstable();
// now sum the kernel_offsets up to give us an aggregate offset for the
// transaction
let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
@ -1372,6 +1389,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
// * cut-through outputs
// * full set of tx kernels
// * sum of all kernel offsets
// Note: We sort input/outputs/kernels when building the transaction body internally.
let tx = Transaction::new(inputs, outputs, &kernels).with_offset(total_kernel_offset);
Ok(tx)
@ -1390,9 +1408,11 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result<Transactio
let tx = aggregate(txs)?;
for mk_input in mk_tx.inputs() {
if !tx.inputs().contains(&mk_input) && !inputs.contains(mk_input) {
inputs.push(*mk_input);
let mk_inputs: Vec<_> = mk_tx.inputs().into();
for mk_input in mk_inputs {
let tx_inputs: Vec<_> = tx.inputs().into();
if !tx_inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
inputs.push(mk_input);
}
}
for mk_output in mk_tx.outputs() {
@ -1460,6 +1480,20 @@ pub struct Input {
impl DefaultHashable for Input {}
hashable_ord!(Input);
// Inputs can be compared to outputs.
impl PartialEq<Output> for Input {
    /// Equality is based solely on the commitment; the output's features and
    /// rangeproof are ignored, so an input "equals" the output it spends.
    fn eq(&self, other: &Output) -> bool {
        self.commitment() == other.commitment()
    }
}
// Inputs can be compared to outputs.
impl PartialOrd<Output> for Input {
    /// Ordering is by commitment only, consistent with the commitment-based
    /// `PartialEq<Output>` impl. Always `Some(_)` as commitments are totally
    /// ordered by their byte representation.
    fn partial_cmp(&self, other: &Output) -> Option<Ordering> {
        Some(self.commitment().cmp(&other.commitment()))
    }
}
impl ::std::hash::Hash for Input {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new();
@ -1517,6 +1551,79 @@ impl Input {
self.features.is_plain()
}
}
/// Wrapper around a vec of inputs.
///
/// Modeled as an enum so a future "commit only" representation (commitments
/// without features) can be added as a second variant without changing the
/// public API — see the commented-out variant below.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum Inputs {
    /// Vec of inputs, each carrying both features and commitment.
    FeaturesAndCommit(Vec<Input>),
    // Vec of commitments.
    // CommitOnly(Vec<Commitment>),
}
impl From<Inputs> for Vec<Input> {
    /// Unwrap the owned vec of inputs out of the enum wrapper (no copy).
    fn from(inputs: Inputs) -> Self {
        // Single-variant enum: destructure via an irrefutable let pattern.
        let Inputs::FeaturesAndCommit(inner) = inputs;
        inner
    }
}
impl From<&Inputs> for Vec<Input> {
    /// Clone the underlying inputs out of a borrowed wrapper.
    fn from(inputs: &Inputs) -> Self {
        let Inputs::FeaturesAndCommit(inner) = inputs;
        inner.clone()
    }
}
impl From<&[Input]> for Inputs {
    /// Build an `Inputs` wrapper by copying a slice of inputs.
    fn from(inputs: &[Input]) -> Self {
        Inputs::FeaturesAndCommit(Vec::from(inputs))
    }
}
impl From<Vec<Input>> for Inputs {
fn from(inputs: Vec<Input>) -> Self {
Inputs::FeaturesAndCommit(inputs)
}
}
impl Default for Inputs {
fn default() -> Self {
Inputs::FeaturesAndCommit(vec![])
}
}
impl Writeable for Inputs {
    /// Serialize the inputs by delegating to the inner vec's serialization.
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        // Exhaustive match (no catch-all) so a future variant is forced to
        // define its own wire format explicitly.
        match self {
            Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?,
        }
        Ok(())
    }
}
impl Inputs {
    /// Number of inputs.
    pub fn len(&self) -> usize {
        match self {
            Inputs::FeaturesAndCommit(inputs) => inputs.len(),
        }
    }

    /// Whether there are no inputs.
    /// Pairs with `len()` to satisfy clippy's `len_without_is_empty` lint.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Verify inputs are sorted and unique.
    fn verify_sorted_and_unique(&self) -> Result<(), ser::Error> {
        match self {
            Inputs::FeaturesAndCommit(inputs) => inputs.verify_sorted_and_unique(),
        }
    }

    /// Sort the inputs in place (unstable sort; elements are expected to be
    /// unique per `verify_sorted_and_unique`, so stability is irrelevant).
    fn sort_unstable(&mut self) {
        match self {
            Inputs::FeaturesAndCommit(inputs) => inputs.sort_unstable(),
        }
    }
}
// Enum of various supported kernel "features".
enum_from_primitive! {

View file

@ -676,15 +676,14 @@ fn same_amount_outputs_copy_range_proof() {
// now we reconstruct the transaction, swapping the rangeproofs so they
// have the wrong privkey
let ins = tx.inputs();
let ins: Vec<_> = tx.inputs().into();
let mut outs = tx.outputs().to_vec();
let kernels = tx.kernels();
outs[0].proof = outs[1].proof;
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
&[Transaction::new(ins, &outs, kernels)],
&[Transaction::new(&ins, &outs, tx.kernels())],
&keychain,
&builder,
&prev,
@ -729,16 +728,15 @@ fn wrong_amount_range_proof() {
.unwrap();
// we take the range proofs from tx2 into tx1 and rebuild the transaction
let ins = tx1.inputs();
let ins: Vec<_> = tx1.inputs().into();
let mut outs = tx1.outputs().to_vec();
let kernels = tx1.kernels();
outs[0].proof = tx2.outputs()[0].proof;
outs[1].proof = tx2.outputs()[1].proof;
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
&[Transaction::new(ins, &outs, kernels)],
&[Transaction::new(&ins, &outs, tx1.kernels())],
&keychain,
&builder,
&prev,

View file

@ -557,9 +557,7 @@ fn reward_empty_block() {
let b = new_block(&[], &keychain, &builder, &previous_header, &key_id);
b.cut_through()
.unwrap()
.validate(&BlindingFactor::zero(), verifier_cache())
b.validate(&BlindingFactor::zero(), verifier_cache())
.unwrap();
}
@ -578,11 +576,7 @@ fn reward_with_tx_block() {
let previous_header = BlockHeader::default();
let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id);
block
.cut_through()
.unwrap()
.validate(&BlindingFactor::zero(), vc.clone())
.unwrap();
block.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
}
#[test]

View file

@ -354,7 +354,8 @@ where
let mut insert_pos = None;
let mut is_rejected = false;
for input in entry.tx.inputs() {
let tx_inputs: Vec<_> = entry.tx.inputs().into();
for input in tx_inputs {
if rejected.contains(&input.commitment()) {
// Depends on a rejected tx, so reject this one.
is_rejected = true;
@ -464,9 +465,11 @@ where
// Reject any txs where we see a matching tx kernel in the block.
// Also reject any txs where we see a conflicting tx,
// where an input is spent in a different tx.
let block_inputs: Vec<_> = block.inputs().into();
self.entries.retain(|x| {
let tx_inputs: Vec<_> = x.tx.inputs().into();
!x.tx.kernels().iter().any(|y| block.kernels().contains(y))
&& !x.tx.inputs().iter().any(|y| block.inputs().contains(y))
&& !tx_inputs.iter().any(|y| block_inputs.contains(y))
});
}