fix txpool double spend edge case
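
Replace the consensus::VerifySortOrder trait (and consensus::Error::SortError)
with a new ser::VerifySortedAndUnique trait. Inputs, outputs, and kernels must
now be unique as well as sorted, so a transaction containing duplicate inputs
(e.g. after aggregation with a pool tx already spending the same output) now
fails validation instead of slipping past the sort-order check.
Adds a txpool test that attempts to spend the original coinbase output twice.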

antiochp 2018-12-05 10:27:46 +00:00
parent 919fd84b78
commit bd771ecbc5
7 changed files with 75 additions and 79 deletions

core/src/consensus.rs

@@ -19,7 +19,6 @@
 //! here.
 use std::cmp::{max, min};
-use std::fmt;
 use global;
 use pow::Difficulty;
@@ -216,19 +215,6 @@ pub const UNIT_DIFFICULTY: u64 =
 /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
 pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
-/// Consensus errors
-#[derive(Clone, Debug, Eq, PartialEq, Fail)]
-pub enum Error {
-	/// Inputs/outputs/kernels must be sorted lexicographically.
-	SortError,
-}
-impl fmt::Display for Error {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "Sort Error")
-	}
-}
 /// Minimal header information required for the Difficulty calculation to
 /// take place
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -386,12 +372,6 @@ pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
 	max(MIN_DIFFICULTY, min(scale, MAX_SECONDARY_SCALING)) as u32
 }
-/// Consensus rule that collections of items are sorted lexicographically.
-pub trait VerifySortOrder<T> {
-	/// Verify a collection of items is sorted as required.
-	fn verify_sort_order(&self) -> Result<(), Error>;
-}
 #[cfg(test)]
 mod test {
 	use super::*;

core/src/core/block.rs

@@ -22,7 +22,7 @@ use std::iter::FromIterator;
 use std::sync::Arc;
 use util::RwLock;
-use consensus::{self, reward, REWARD};
+use consensus::{reward, REWARD};
 use core::committed::{self, Committed};
 use core::compact_block::{CompactBlock, CompactBlockBody};
 use core::hash::{Hash, Hashed, ZERO_HASH};
@@ -60,8 +58,6 @@ pub enum Error {
 	Secp(secp::Error),
 	/// Underlying keychain related error
 	Keychain(keychain::Error),
-	/// Underlying consensus error (sort order currently)
-	Consensus(consensus::Error),
 	/// Underlying Merkle proof error
 	MerkleProof,
 	/// Error when verifying kernel sums via committed trait.
@@ -69,6 +67,8 @@ pub enum Error {
 	/// Validation error relating to cut-through.
 	/// Specifically the tx is spending its own output, which is not valid.
 	CutThrough,
+	/// Underlying serialization error.
+	Serialization(ser::Error),
 	/// Other unspecified error condition
 	Other(String),
 }
@@ -85,6 +85,12 @@ impl From<transaction::Error> for Error {
 	}
 }
+impl From<ser::Error> for Error {
+	fn from(e: ser::Error) -> Error {
+		Error::Serialization(e)
+	}
+}
 impl From<secp::Error> for Error {
 	fn from(e: secp::Error) -> Error {
 		Error::Secp(e)
@@ -97,12 +103,6 @@ impl From<keychain::Error> for Error {
 	}
 }
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::Consensus(e)
-	}
-}
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		write!(f, "Block Error (display needs implementation")

core/src/core/compact_block.rs

@@ -16,12 +16,11 @@
 use rand::{thread_rng, Rng};
-use consensus::VerifySortOrder;
 use core::block::{Block, BlockHeader, Error};
 use core::hash::Hashed;
 use core::id::ShortIdentifiable;
 use core::{KernelFeatures, Output, OutputFeatures, ShortId, TxKernel};
-use ser::{self, read_multi, Readable, Reader, Writeable, Writer};
+use ser::{self, read_multi, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
 /// Container for full (full) outputs and kernels and kern_ids for a compact block.
 #[derive(Debug, Clone)]
@@ -74,11 +73,11 @@ impl CompactBlockBody {
 		Ok(())
 	}
-	// Verify everything is sorted in lexicographical order.
+	// Verify everything is sorted in lexicographical order and no duplicates present.
 	fn verify_sorted(&self) -> Result<(), Error> {
-		self.out_full.verify_sort_order()?;
-		self.kern_full.verify_sort_order()?;
-		self.kern_ids.verify_sort_order()?;
+		self.out_full.verify_sorted_and_unique()?;
+		self.kern_full.verify_sorted_and_unique()?;
+		self.kern_ids.verify_sorted_and_unique()?;
 		Ok(())
 	}
 }

core/src/core/hash.rs

@@ -25,7 +25,6 @@ use std::{fmt, ops};
 use blake2::blake2b::Blake2b;
-use consensus;
 use ser::{self, AsFixedBytes, Error, FixedLength, Readable, Reader, Writeable, Writer};
 use util;
@@ -230,19 +229,3 @@ impl<W: ser::Writeable> Hashed for W {
 		Hash(ret)
 	}
 }
-impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
-	fn verify_sort_order(&self) -> Result<(), consensus::Error> {
-		if self
-			.iter()
-			.map(|item| item.hash())
-			.collect::<Vec<_>>()
-			.windows(2)
-			.any(|pair| pair[0] > pair[1])
-		{
-			Err(consensus::Error::SortError)
-		} else {
-			Ok(())
-		}
-	}
-}
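Note: the removed impl folds the whole check into a single .any() over adjacent hash pairs, so it can only ever report SortError. The replacement in ser.rs (below) walks the same windows(2) pairs but distinguishes an out-of-order pair (SortError) from an equal pair (DuplicateError), which is what makes duplicate detection possible.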

core/src/core/transaction.rs

@@ -23,12 +23,12 @@ use util::RwLock;
 use byteorder::{BigEndian, ByteOrder};
-use consensus::{self, VerifySortOrder};
+use consensus;
 use core::hash::Hashed;
 use core::verifier_cache::VerifierCache;
 use core::{committed, Committed};
 use keychain::{self, BlindingFactor};
-use ser::{self, read_multi, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
+use ser::{self, read_multi, FixedLength, PMMRable, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
 use util;
 use util::secp;
 use util::secp::pedersen::{Commitment, RangeProof};
@@ -58,8 +58,6 @@ pub enum Error {
 	KernelSumMismatch,
 	/// Restrict tx total weight.
 	TooHeavy,
-	/// Underlying consensus error (currently for sort order)
-	ConsensusError(consensus::Error),
 	/// Error originating from an invalid lock-height
 	LockHeight(u64),
 	/// Range proof validation error
@@ -85,6 +83,8 @@ pub enum Error {
 	InvalidKernelFeatures,
 	/// Signature verification error.
 	IncorrectSignature,
+	/// Underlying serialization error.
+	Serialization(ser::Error),
 }
 impl error::Error for Error {
@@ -103,15 +103,15 @@ impl fmt::Display for Error {
 	}
 }
-impl From<secp::Error> for Error {
-	fn from(e: secp::Error) -> Error {
-		Error::Secp(e)
+impl From<ser::Error> for Error {
+	fn from(e: ser::Error) -> Error {
+		Error::Serialization(e)
 	}
 }
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::ConsensusError(e)
+impl From<secp::Error> for Error {
+	fn from(e: secp::Error) -> Error {
+		Error::Secp(e)
 	}
 }
@@ -561,11 +561,12 @@ impl TransactionBody {
 		Ok(())
 	}
-	// Verify that inputs|outputs|kernels are all sorted in lexicographical order.
+	// Verify that inputs|outputs|kernels are sorted in lexicographical order
+	// and that there are no duplicates (they are all unique within this transaction).
 	fn verify_sorted(&self) -> Result<(), Error> {
-		self.inputs.verify_sort_order()?;
-		self.outputs.verify_sort_order()?;
-		self.kernels.verify_sort_order()?;
+		self.inputs.verify_sorted_and_unique()?;
+		self.outputs.verify_sorted_and_unique()?;
+		self.kernels.verify_sorted_and_unique()?;
 		Ok(())
 	}

core/src/ser.rs

@@ -22,7 +22,6 @@
 use std::time::Duration;
 use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
-use consensus;
 use core::hash::{Hash, Hashed};
 use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
 use std::fmt::Debug;
@@ -54,10 +53,12 @@ pub enum Error {
 	CountError,
 	/// When asked to read too much data
 	TooLargeReadErr,
-	/// Consensus rule failure (currently sort order)
-	ConsensusError(consensus::Error),
 	/// Error from from_hex deserialization
 	HexError(String),
+	/// Inputs/outputs/kernels must be sorted lexicographically.
+	SortError,
+	/// Inputs/outputs/kernels must be unique.
+	DuplicateError,
 }
 impl From<io::Error> for Error {
@@ -66,12 +67,6 @@ impl From<io::Error> for Error {
 	}
 }
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::ConsensusError(e)
-	}
-}
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
@@ -82,8 +77,9 @@ impl fmt::Display for Error {
 			} => write!(f, "expected {:?}, got {:?}", e, r),
 			Error::CorruptedData => f.write_str("corrupted data"),
 			Error::CountError => f.write_str("count error"),
+			Error::SortError => f.write_str("sort order"),
+			Error::DuplicateError => f.write_str("duplicate"),
 			Error::TooLargeReadErr => f.write_str("too large read"),
-			Error::ConsensusError(ref e) => write!(f, "consensus error {:?}", e),
 			Error::HexError(ref e) => write!(f, "hex error {:?}", e),
 		}
 	}
@@ -103,8 +99,9 @@ impl error::Error for Error {
 			Error::UnexpectedData { .. } => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::CountError => "count error",
+			Error::SortError => "sort order",
+			Error::DuplicateError => "duplicate error",
 			Error::TooLargeReadErr => "too large read",
-			Error::ConsensusError(_) => "consensus error (sort order)",
 			Error::HexError(_) => "hex error",
 		}
 	}
@@ -539,6 +536,30 @@ impl FixedLength for Signature {
 	const LEN: usize = AGG_SIGNATURE_SIZE;
 }
+/// Collections of items must be sorted lexicographically and all unique.
+pub trait VerifySortedAndUnique<T> {
+	/// Verify a collection of items is sorted and all unique.
+	fn verify_sorted_and_unique(&self) -> Result<(), Error>;
+}
+impl<T: Hashed> VerifySortedAndUnique<T> for Vec<T> {
+	fn verify_sorted_and_unique(&self) -> Result<(), Error> {
+		let hashes = self
+			.iter()
+			.map(|item| item.hash())
+			.collect::<Vec<_>>();
+		let pairs = hashes.windows(2);
+		for pair in pairs {
+			if pair[0] > pair[1] {
+				return Err(Error::SortError);
+			} else if pair[0] == pair[1] {
+				return Err(Error::DuplicateError);
+			}
+		}
+		Ok(())
+	}
+}
 /// Utility wrapper for an underlying byte Writer. Defines higher level methods
 /// to write numbers, byte vectors, hashes, etc.
 pub struct BinWriter<'a> {
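For reference, a minimal standalone sketch of what the new check does, with hypothetical u64 values standing in for the item hashes the real impl compares via core::hash::Hashed:

// Sketch only: u64 stands in for core::hash::Hash; the real impl hashes
// each item first, then applies exactly this adjacent-pair scan.
#[derive(Debug, PartialEq)]
enum CheckError {
	Sort,
	Duplicate,
}

fn verify_sorted_and_unique(hashes: &[u64]) -> Result<(), CheckError> {
	// Walk adjacent pairs: any out-of-order pair is a sort error,
	// any equal pair is a duplicate.
	for pair in hashes.windows(2) {
		if pair[0] > pair[1] {
			return Err(CheckError::Sort);
		} else if pair[0] == pair[1] {
			return Err(CheckError::Duplicate);
		}
	}
	Ok(())
}

fn main() {
	assert_eq!(verify_sorted_and_unique(&[1, 2, 3]), Ok(()));
	assert_eq!(verify_sorted_and_unique(&[2, 1, 3]), Err(CheckError::Sort));
	assert_eq!(verify_sorted_and_unique(&[1, 1, 2]), Err(CheckError::Duplicate));
}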

pool/tests/transaction_pool.rs

@@ -80,6 +80,18 @@ fn test_the_transaction_pool() {
 		assert_eq!(write_pool.total_size(), 1);
 	}
+	// Test adding a tx that "double spends" an output currently spent by a tx
+	// already in the txpool. In this case we attempt to spend the original coinbase twice.
+	{
+		let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
+		let mut write_pool = pool.write();
+		assert!(
+			write_pool
+				.add_to_pool(test_source(), tx, true, &header)
+				.is_err()
+		);
+	}
 	// tx1 spends some outputs from the initial test tx.
 	let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
 	// tx2 spends some outputs from both tx1 and the initial test tx.
@@ -119,8 +131,8 @@
 		);
 	}
-	// Test adding a duplicate tx with the same input and outputs (not the *same*
-	// tx).
+	// Test adding a duplicate tx with the same input and outputs.
+	// Note: not the *same* tx, just same underlying inputs/outputs.
 	{
 		let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
 		let mut write_pool = pool.write();
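Note: the new test exercises the pool-level path. When the incoming tx is validated against the pool, it is presumably aggregated with the pool tx that already spends the coinbase output, so the combined tx contains that input twice and now fails with DuplicateError, making add_to_pool return an error instead of accepting a second spend of the same output.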