mirror of https://github.com/mimblewimble/grin.git
synced 2025-01-21 03:21:08 +03:00

fix txpool double spend edge case

parent 919fd84b78
commit bd771ecbc5
7 changed files with 75 additions and 79 deletions
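In summary, the commit removes the consensus-level VerifySortOrder trait and replaces it with a ser-level VerifySortedAndUnique trait, so inputs, outputs, kernels and kern_ids must now be both lexicographically sorted and free of duplicates. The new pool test further below adds a transaction that spends the same coinbase output as a transaction already in the pool and expects add_to_pool to fail. A rough standalone sketch of why a uniqueness check on sorted items catches that kind of double spend; the Commitment alias and helper are illustrative, not grin APIs:

// Illustrative sketch only: once inputs must be sorted *and* unique, spending
// the same output from two aggregated transactions shows up as two identical
// neighbouring entries after sorting.

type Commitment = [u8; 33]; // stand-in for a Pedersen commitment

fn has_duplicate(sorted_inputs: &[Commitment]) -> bool {
    sorted_inputs.windows(2).any(|pair| pair[0] == pair[1])
}

fn main() {
    let coinbase: Commitment = [7u8; 33]; // the output both txs try to spend
    let other: Commitment = [1u8; 33];

    // Inputs of the pool tx and the new candidate tx, aggregated together.
    let mut combined = vec![coinbase, other, coinbase];
    combined.sort();

    // The double spend is now a pair of equal neighbours.
    assert!(has_duplicate(&combined));
    println!("double spend detected: {}", has_duplicate(&combined));
}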
@@ -19,7 +19,6 @@
 //! here.
 
 use std::cmp::{max, min};
-use std::fmt;
 
 use global;
 use pow::Difficulty;
@@ -216,19 +215,6 @@ pub const UNIT_DIFFICULTY: u64 =
 /// ethereum GPUs (assuming 1GPU can solve a block at diff 1 in one block interval)
 pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
 
-/// Consensus errors
-#[derive(Clone, Debug, Eq, PartialEq, Fail)]
-pub enum Error {
-	/// Inputs/outputs/kernels must be sorted lexicographically.
-	SortError,
-}
-
-impl fmt::Display for Error {
-	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "Sort Error")
-	}
-}
-
 /// Minimal header information required for the Difficulty calculation to
 /// take place
 #[derive(Clone, Debug, Eq, PartialEq)]
@@ -386,12 +372,6 @@ pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
 	max(MIN_DIFFICULTY, min(scale, MAX_SECONDARY_SCALING)) as u32
 }
 
-/// Consensus rule that collections of items are sorted lexicographically.
-pub trait VerifySortOrder<T> {
-	/// Verify a collection of items is sorted as required.
-	fn verify_sort_order(&self) -> Result<(), Error>;
-}
-
 #[cfg(test)]
 mod test {
 	use super::*;

@@ -22,7 +22,7 @@ use std::iter::FromIterator;
 use std::sync::Arc;
 use util::RwLock;
 
-use consensus::{self, reward, REWARD};
+use consensus::{reward, REWARD};
 use core::committed::{self, Committed};
 use core::compact_block::{CompactBlock, CompactBlockBody};
 use core::hash::{Hash, Hashed, ZERO_HASH};
@@ -60,8 +60,6 @@ pub enum Error {
 	Secp(secp::Error),
 	/// Underlying keychain related error
 	Keychain(keychain::Error),
-	/// Underlying consensus error (sort order currently)
-	Consensus(consensus::Error),
 	/// Underlying Merkle proof error
 	MerkleProof,
 	/// Error when verifying kernel sums via committed trait.
@@ -69,6 +67,8 @@ pub enum Error {
 	/// Validation error relating to cut-through.
 	/// Specifically the tx is spending its own output, which is not valid.
 	CutThrough,
+	/// Underlying serialization error.
+	Serialization(ser::Error),
 	/// Other unspecified error condition
 	Other(String),
 }
@@ -85,6 +85,12 @@ impl From<transaction::Error> for Error {
 	}
 }
 
+impl From<ser::Error> for Error {
+	fn from(e: ser::Error) -> Error {
+		Error::Serialization(e)
+	}
+}
+
 impl From<secp::Error> for Error {
 	fn from(e: secp::Error) -> Error {
 		Error::Secp(e)
@@ -97,12 +103,6 @@ impl From<keychain::Error> for Error {
 	}
 }
 
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::Consensus(e)
-	}
-}
-
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		write!(f, "Block Error (display needs implementation")

@@ -16,12 +16,11 @@
 
 use rand::{thread_rng, Rng};
 
-use consensus::VerifySortOrder;
 use core::block::{Block, BlockHeader, Error};
 use core::hash::Hashed;
 use core::id::ShortIdentifiable;
 use core::{KernelFeatures, Output, OutputFeatures, ShortId, TxKernel};
-use ser::{self, read_multi, Readable, Reader, Writeable, Writer};
+use ser::{self, read_multi, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
 
 /// Container for full (full) outputs and kernels and kern_ids for a compact block.
 #[derive(Debug, Clone)]
@@ -74,11 +73,11 @@ impl CompactBlockBody {
 		Ok(())
 	}
 
-	// Verify everything is sorted in lexicographical order.
+	// Verify everything is sorted in lexicographical order and no duplicates present.
 	fn verify_sorted(&self) -> Result<(), Error> {
-		self.out_full.verify_sort_order()?;
-		self.kern_full.verify_sort_order()?;
-		self.kern_ids.verify_sort_order()?;
+		self.out_full.verify_sorted_and_unique()?;
+		self.kern_full.verify_sorted_and_unique()?;
+		self.kern_ids.verify_sorted_and_unique()?;
 		Ok(())
 	}
 }

@@ -25,7 +25,6 @@ use std::{fmt, ops};
 
 use blake2::blake2b::Blake2b;
 
-use consensus;
 use ser::{self, AsFixedBytes, Error, FixedLength, Readable, Reader, Writeable, Writer};
 use util;
 
@@ -230,19 +229,3 @@ impl<W: ser::Writeable> Hashed for W {
 		Hash(ret)
 	}
 }
-
-impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
-	fn verify_sort_order(&self) -> Result<(), consensus::Error> {
-		if self
-			.iter()
-			.map(|item| item.hash())
-			.collect::<Vec<_>>()
-			.windows(2)
-			.any(|pair| pair[0] > pair[1])
-		{
-			Err(consensus::Error::SortError)
-		} else {
-			Ok(())
-		}
-	}
-}

@@ -23,12 +23,12 @@ use util::RwLock;
 
 use byteorder::{BigEndian, ByteOrder};
 
-use consensus::{self, VerifySortOrder};
+use consensus;
 use core::hash::Hashed;
 use core::verifier_cache::VerifierCache;
 use core::{committed, Committed};
 use keychain::{self, BlindingFactor};
-use ser::{self, read_multi, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
+use ser::{self, read_multi, FixedLength, PMMRable, Readable, Reader, VerifySortedAndUnique, Writeable, Writer};
 use util;
 use util::secp;
 use util::secp::pedersen::{Commitment, RangeProof};
@@ -58,8 +58,6 @@ pub enum Error {
 	KernelSumMismatch,
 	/// Restrict tx total weight.
 	TooHeavy,
-	/// Underlying consensus error (currently for sort order)
-	ConsensusError(consensus::Error),
 	/// Error originating from an invalid lock-height
 	LockHeight(u64),
 	/// Range proof validation error
@@ -85,6 +83,8 @@ pub enum Error {
 	InvalidKernelFeatures,
 	/// Signature verification error.
 	IncorrectSignature,
+	/// Underlying serialization error.
+	Serialization(ser::Error),
 }
 
 impl error::Error for Error {
@@ -103,15 +103,15 @@ impl fmt::Display for Error {
 	}
 }
 
-impl From<secp::Error> for Error {
-	fn from(e: secp::Error) -> Error {
-		Error::Secp(e)
+impl From<ser::Error> for Error {
+	fn from(e: ser::Error) -> Error {
+		Error::Serialization(e)
 	}
 }
 
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::ConsensusError(e)
+impl From<secp::Error> for Error {
+	fn from(e: secp::Error) -> Error {
+		Error::Secp(e)
 	}
 }
 
@@ -561,11 +561,12 @@ impl TransactionBody {
 		Ok(())
 	}
 
-	// Verify that inputs|outputs|kernels are all sorted in lexicographical order.
+	// Verify that inputs|outputs|kernels are sorted in lexicographical order
+	// and that there are no duplicates (they are all unique within this transaction).
 	fn verify_sorted(&self) -> Result<(), Error> {
-		self.inputs.verify_sort_order()?;
-		self.outputs.verify_sort_order()?;
-		self.kernels.verify_sort_order()?;
+		self.inputs.verify_sorted_and_unique()?;
+		self.outputs.verify_sorted_and_unique()?;
+		self.kernels.verify_sorted_and_unique()?;
 		Ok(())
 	}
 
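With the consensus error variants removed above, sort and duplicate failures now originate as ser::Error values and bubble up through the new Serialization variants via From<ser::Error>. A cut-down standalone model of that plumbing; the enums below mirror names from the diff but keep only the relevant variants, so this is a sketch rather than the actual grin types:

// Sketch of the error plumbing in this commit: sort/duplicate failures are
// ser-level errors which the transaction (and block) error types wrap via a
// `Serialization` variant and `From<ser::Error>`.

#[derive(Debug, PartialEq)]
enum SerError {
    SortError,
    DuplicateError,
}

#[derive(Debug, PartialEq)]
enum TxError {
    Serialization(SerError),
}

impl From<SerError> for TxError {
    fn from(e: SerError) -> TxError {
        TxError::Serialization(e)
    }
}

// Mirrors the windows(2) walk over item hashes in the new trait impl.
fn verify_sorted_and_unique(items: &[u64]) -> Result<(), SerError> {
    for pair in items.windows(2) {
        if pair[0] > pair[1] {
            return Err(SerError::SortError);
        } else if pair[0] == pair[1] {
            return Err(SerError::DuplicateError);
        }
    }
    Ok(())
}

// The `?` operator converts SerError into TxError::Serialization automatically.
fn validate_body(inputs: &[u64]) -> Result<(), TxError> {
    verify_sorted_and_unique(inputs)?;
    Ok(())
}

fn main() {
    assert_eq!(validate_body(&[1, 2, 3]), Ok(()));
    assert_eq!(
        validate_body(&[1, 1, 2]),
        Err(TxError::Serialization(SerError::DuplicateError))
    );
    assert_eq!(
        validate_body(&[2, 1]),
        Err(TxError::Serialization(SerError::SortError))
    );
}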
@@ -22,7 +22,6 @@
 use std::time::Duration;
 
 use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
-use consensus;
 use core::hash::{Hash, Hashed};
 use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
 use std::fmt::Debug;
@@ -54,10 +53,12 @@ pub enum Error {
 	CountError,
 	/// When asked to read too much data
 	TooLargeReadErr,
-	/// Consensus rule failure (currently sort order)
-	ConsensusError(consensus::Error),
 	/// Error from from_hex deserialization
 	HexError(String),
+	/// Inputs/outputs/kernels must be sorted lexicographically.
+	SortError,
+	/// Inputs/outputs/kernels must be unique.
+	DuplicateError,
 }
 
 impl From<io::Error> for Error {
@@ -66,12 +67,6 @@ impl From<io::Error> for Error {
 	}
 }
 
-impl From<consensus::Error> for Error {
-	fn from(e: consensus::Error) -> Error {
-		Error::ConsensusError(e)
-	}
-}
-
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
@@ -82,8 +77,9 @@ impl fmt::Display for Error {
 			} => write!(f, "expected {:?}, got {:?}", e, r),
 			Error::CorruptedData => f.write_str("corrupted data"),
 			Error::CountError => f.write_str("count error"),
+			Error::SortError => f.write_str("sort order"),
+			Error::DuplicateError => f.write_str("duplicate"),
 			Error::TooLargeReadErr => f.write_str("too large read"),
-			Error::ConsensusError(ref e) => write!(f, "consensus error {:?}", e),
 			Error::HexError(ref e) => write!(f, "hex error {:?}", e),
 		}
 	}
@@ -103,8 +99,9 @@ impl error::Error for Error {
 			Error::UnexpectedData { .. } => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::CountError => "count error",
+			Error::SortError => "sort order",
+			Error::DuplicateError => "duplicate error",
 			Error::TooLargeReadErr => "too large read",
-			Error::ConsensusError(_) => "consensus error (sort order)",
 			Error::HexError(_) => "hex error",
 		}
 	}
@@ -539,6 +536,30 @@ impl FixedLength for Signature {
 	const LEN: usize = AGG_SIGNATURE_SIZE;
 }
 
+/// Collections of items must be sorted lexicographically and all unique.
+pub trait VerifySortedAndUnique<T> {
+	/// Verify a collection of items is sorted and all unique.
+	fn verify_sorted_and_unique(&self) -> Result<(), Error>;
+}
+
+impl<T: Hashed> VerifySortedAndUnique<T> for Vec<T> {
+	fn verify_sorted_and_unique(&self) -> Result<(), Error> {
+		let hashes = self
+			.iter()
+			.map(|item| item.hash())
+			.collect::<Vec<_>>();
+		let pairs = hashes.windows(2);
+		for pair in pairs {
+			if pair[0] > pair[1] {
+				return Err(Error::SortError);
+			} else if pair[0] == pair[1] {
+				return Err(Error::DuplicateError);
+			}
+		}
+		Ok(())
+	}
+}
+
 /// Utility wrapper for an underlying byte Writer. Defines higher level methods
 /// to write numbers, byte vectors, hashes, etc.
 pub struct BinWriter<'a> {
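A note on the new trait impl above: hashing each item and then walking adjacent pairs with windows(2) performs both checks in a single pass. Because the collection is required to be sorted by hash, a duplicate necessarily lands next to its twin, so comparing neighbours is enough to detect it, while pair[0] > pair[1] flags an ordering violation.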
@@ -80,6 +80,18 @@ fn test_the_transaction_pool() {
 		assert_eq!(write_pool.total_size(), 1);
 	}
 
+	// Test adding a tx that "double spends" an output currently spent by a tx
+	// already in the txpool. In this case we attempt to spend the original coinbase twice.
+	{
+		let tx = test_transaction_spending_coinbase(&keychain, &header, vec![501]);
+		let mut write_pool = pool.write();
+		assert!(
+			write_pool
+				.add_to_pool(test_source(), tx, true, &header)
+				.is_err()
+		);
+	}
+
 	// tx1 spends some outputs from the initial test tx.
 	let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
 	// tx2 spends some outputs from both tx1 and the initial test tx.
@@ -119,8 +131,8 @@ fn test_the_transaction_pool() {
 		);
 	}
 
-	// Test adding a duplicate tx with the same input and outputs (not the *same*
-	// tx).
+	// Test adding a duplicate tx with the same input and outputs.
+	// Note: not the *same* tx, just same underlying inputs/outputs.
 	{
 		let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
 		let mut write_pool = pool.write();
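The two pool cases above exercise both sides of the fix: a second transaction spending the already-spent coinbase output, and a later re-submission of a transaction with the same inputs and outputs. In the first case the test asserts only that add_to_pool returns an error, without matching on a specific error variant.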