fix - sort tx inputs/outputs (#533) (#542)

* added debug logging to see what we're doing in save_pos_index

* add counts to logging

* make sure inputs/outputs in tx are sorted
tweak debug logging

* DRY up sorting of inputs/outputs/kernels via a simple sort() in ser/deser

* add explicit validation of sort order during tx and block validation

* sort order consensus rule for inputs/outputs/kernels in blocks (see the sketch right after this list)

* we want a max of 10*num_peers blocks during sync, not a min

* fix test imports
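
Taken together, the changes implement one pattern: inputs, outputs and kernels are sorted into a canonical order whenever a transaction or block is built or serialized, and that order is then re-checked as a consensus rule during validation (see the diffs below). A minimal standalone sketch of the pattern, not the grin code itself: std's DefaultHasher stands in for grin's blake2b item hash, and all names here are illustrative.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Debug, PartialEq)]
enum ConsensusError {
    SortError, // inputs/outputs/kernels must be sorted lexicographically
}

// Stand-in for grin's `item.hash()` (blake2b of the serialized item).
fn digest<T: Hash>(item: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    item.hash(&mut hasher);
    hasher.finish()
}

// Building/serializing side: sort so the collection is written deterministically.
fn sort_for_wire<T: Hash>(items: &mut Vec<T>) {
    items.sort_by_key(|item| digest(item));
}

// Validating side: every adjacent pair of item hashes must be non-decreasing.
fn verify_sort_order<T: Hash>(items: &[T]) -> Result<(), ConsensusError> {
    let hashes: Vec<u64> = items.iter().map(|item| digest(item)).collect();
    match hashes.windows(2).any(|pair| pair[0] > pair[1]) {
        true => Err(ConsensusError::SortError),
        false => Ok(()),
    }
}

fn main() {
    let mut outputs = vec!["output_c", "output_a", "output_b"];
    sort_for_wire(&mut outputs);
    assert_eq!(verify_sort_order(&outputs), Ok(()));
}

In the commit itself the check hangs off a VerifySortOrder trait implemented for Vec<T>, so blocks and transactions can call it on their inputs, outputs and kernels during validation.
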
Author: AntiochP, 2017-12-22 12:15:44 -05:00, committed by GitHub
parent 9ddb1489a5
commit 4ba453694d
7 changed files with 85 additions and 22 deletions


@@ -323,12 +323,28 @@ impl<'a> Extension<'a> {
 	}
 	fn save_pos_index(&self) -> Result<(), Error> {
+		debug!(
+			LOGGER,
+			"sumtree: save_pos_index: outputs: {}, {:?}",
+			self.new_output_commits.len(),
+			self.new_output_commits.values().collect::<Vec<_>>(),
+		);
 		for (commit, pos) in &self.new_output_commits {
 			self.commit_index.save_output_pos(commit, *pos)?;
 		}
+		debug!(
+			LOGGER,
+			"sumtree: save_pos_index: kernels: {}, {:?}",
+			self.new_kernel_excesses.len(),
+			self.new_kernel_excesses.values().collect::<Vec<_>>(),
+		);
 		for (excess, pos) in &self.new_kernel_excesses {
 			self.commit_index.save_kernel_pos(excess, *pos)?;
 		}
 		Ok(())
 	}


@@ -22,7 +22,6 @@
 use std::fmt;
 use std::cmp::max;
-use ser;
 use core::target::Difficulty;
 /// A grin is divisible to 10^9, following the SI prefixes

@@ -137,6 +136,13 @@ pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 4 / 3;
 /// Minimum size time window used for difficulty adjustments
 pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 5 / 6;
+/// Consensus errors
+#[derive(Clone, Debug, PartialEq)]
+pub enum Error {
+	/// Inputs/outputs/kernels must be sorted lexicographically.
+	SortError,
+}
 /// Error when computing the next difficulty adjustment.
 #[derive(Debug, Clone)]
 pub struct TargetError(pub String);

@@ -221,10 +227,10 @@ where
 	Ok(max(difficulty, Difficulty::minimum()))
 }
-/// Consensus rule that collections of items are sorted lexicographically over the wire.
+/// Consensus rule that collections of items are sorted lexicographically.
 pub trait VerifySortOrder<T> {
 	/// Verify a collection of items is sorted as required.
-	fn verify_sort_order(&self) -> Result<(), ser::Error>;
+	fn verify_sort_order(&self) -> Result<(), Error>;
 }
 #[cfg(test)]


@@ -21,8 +21,9 @@ use std::collections::HashSet;
 use core::Committed;
 use core::{Input, Output, Proof, SwitchCommitHash, Transaction, TxKernel, COINBASE_KERNEL,
 	COINBASE_OUTPUT};
-use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD};
+use consensus;
+use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD, VerifySortOrder};
 use core::hash::{Hash, Hashed, ZERO_HASH};
 use core::target::Difficulty;
 use core::transaction;

@@ -54,6 +55,8 @@ pub enum Error {
 	Secp(secp::Error),
 	/// Underlying keychain related error
 	Keychain(keychain::Error),
+	/// Underlying consensus error (sort order currently)
+	Consensus(consensus::Error),
 }
 impl From<transaction::Error> for Error {

@@ -74,6 +77,12 @@ impl From<keychain::Error> for Error {
 	}
 }
+impl From<consensus::Error> for Error {
+	fn from(e: consensus::Error) -> Error {
+		Error::Consensus(e)
+	}
+}
 /// Block header, fairly standard compared to other blockchains.
 #[derive(Clone, Debug, PartialEq)]
 pub struct BlockHeader {

@@ -326,7 +335,7 @@ impl Block {
 		kernels.push(reward_kern);
 		outputs.push(reward_out);
-		// now sort everything to the block is built deterministically
+		// now sort everything so the block is built deterministically
 		inputs.sort();
 		outputs.sort();
 		kernels.sort();

@@ -449,11 +458,19 @@ impl Block {
 		if exceeds_weight(self.inputs.len(), self.outputs.len(), self.kernels.len()) {
 			return Err(Error::WeightExceeded);
 		}
+		self.verify_sorted()?;
 		self.verify_coinbase()?;
 		self.verify_kernels(false)?;
 		Ok(())
 	}
+	fn verify_sorted(&self) -> Result<(), Error> {
+		self.inputs.verify_sort_order()?;
+		self.outputs.verify_sort_order()?;
+		self.kernels.verify_sort_order()?;
+		Ok(())
+	}
 	/// Verifies the sum of input/output commitments match the sum in kernels
 	/// and that all kernel signatures are valid.
 	/// TODO - when would we skip_sig? Is this needed or used anywhere?

@@ -590,7 +607,7 @@ mod test {
 	use core::build::{self, input, output, with_fee};
 	use core::test::tx2i1o;
 	use keychain::{Identifier, Keychain};
-	use consensus::*;
+	use consensus::{MAX_BLOCK_WEIGHT, BLOCK_OUTPUT_WEIGHT};
 	use std::time::Instant;
 	use util::secp;


@@ -23,7 +23,7 @@ use std::convert::AsRef;
 use blake2::blake2b::Blake2b;
-use consensus::VerifySortOrder;
+use consensus;
 use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
 use util::LOGGER;

@@ -196,15 +196,15 @@ impl<W: ser::Writeable> Hashed for W {
 	}
 }
-impl<T: Writeable> VerifySortOrder<T> for Vec<T> {
-	fn verify_sort_order(&self) -> Result<(), ser::Error> {
+impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
+	fn verify_sort_order(&self) -> Result<(), consensus::Error> {
 		match self.iter()
 			.map(|item| item.hash())
 			.collect::<Vec<_>>()
 			.windows(2)
 			.any(|pair| pair[0] > pair[1]) {
-			true => Err(ser::Error::BadlySorted),
+			true => Err(consensus::Error::SortError),
 			false => Ok(()),
 		}
 	}
 }
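
One property of the windows(2) idiom above worth noting: empty and single-element collections yield no windows, so they pass trivially, and equal adjacent hashes are allowed. A tiny standalone check of just that idiom, with plain integers standing in for item hashes (illustrative only):

fn is_sorted(hashes: &[u64]) -> bool {
    !hashes.windows(2).any(|pair| pair[0] > pair[1])
}

fn main() {
    assert!(is_sorted(&[]));           // no adjacent pairs, trivially sorted
    assert!(is_sorted(&[42]));         // single element, trivially sorted
    assert!(is_sorted(&[1, 2, 2, 3])); // non-decreasing, duplicates allowed
    assert!(!is_sorted(&[2, 1, 3]));   // one out-of-order pair fails the check
}
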


@@ -23,6 +23,7 @@ use std::cmp::Ordering;
 use std::ops;
 use consensus;
+use consensus::VerifySortOrder;
 use core::Committed;
 use core::hash::Hashed;
 use core::pmmr::Summable;

@@ -75,6 +76,8 @@ pub enum Error {
 	Secp(secp::Error),
 	/// Restrict number of incoming inputs
 	TooManyInputs,
+	/// Underlying consensus error (currently for sort order)
+	ConsensusError(consensus::Error),
 }
 impl From<secp::Error> for Error {

@@ -83,6 +86,12 @@ impl From<secp::Error> for Error {
 	}
 }
+impl From<consensus::Error> for Error {
+	fn from(e: consensus::Error) -> Error {
+		Error::ConsensusError(e)
+	}
+}
 /// Construct msg bytes from tx fee and lock_height
 pub fn kernel_sig_msg(fee: u64, lock_height: u64) -> [u8; 32] {
 	let mut bytes = [0; 32];

@@ -275,6 +284,7 @@ impl Transaction {
 	pub fn with_input(self, input: Input) -> Transaction {
 		let mut new_ins = self.inputs;
 		new_ins.push(input);
+		new_ins.sort();
 		Transaction {
 			inputs: new_ins,
 			..self

@@ -286,6 +296,7 @@ impl Transaction {
 	pub fn with_output(self, output: Output) -> Transaction {
 		let mut new_outs = self.outputs;
 		new_outs.push(output);
+		new_outs.sort();
 		Transaction {
 			outputs: new_outs,
 			..self

@@ -353,12 +364,19 @@ impl Transaction {
 		if self.inputs.len() > consensus::MAX_BLOCK_INPUTS {
 			return Err(Error::TooManyInputs);
 		}
+		self.verify_sorted()?;
 		for out in &self.outputs {
 			out.verify_proof()?;
 		}
 		let excess = self.verify_sig()?;
 		Ok(excess)
 	}
+	fn verify_sorted(&self) -> Result<(), Error> {
+		self.inputs.verify_sort_order()?;
+		self.outputs.verify_sort_order()?;
+		Ok(())
+	}
 }
 /// A transaction input, mostly a reference to an output being spent by the
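
The builder methods above keep the sorted invariant by re-sorting after every push, which is simple and cheap at transaction sizes. For comparison only (this is not what the commit does), the same invariant can be maintained with a sorted insert via binary_search; a standalone sketch:

fn insert_sorted<T: Ord>(items: &mut Vec<T>, item: T) {
    // binary_search returns Err(pos) with the insertion point when the item
    // is absent, and Ok(pos) of an equal element when it is present;
    // inserting at either position keeps the Vec sorted.
    let pos = items.binary_search(&item).unwrap_or_else(|pos| pos);
    items.insert(pos, item);
}

fn main() {
    let mut inputs = vec![1u32, 4, 9];
    insert_sorted(&mut inputs, 5);
    assert_eq!(inputs, vec![1, 4, 5, 9]);
}
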


@@ -23,8 +23,9 @@ use std::{cmp, error, fmt};
 use std::io::{self, Read, Write};
 use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
 use keychain::{Identifier, IDENTIFIER_SIZE};
-use core::hash::Hashed;
+use consensus;
 use consensus::VerifySortOrder;
+use core::hash::Hashed;
 use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash};
 use util::secp::pedersen::Commitment;
 use util::secp::pedersen::RangeProof;

@@ -46,8 +47,8 @@ pub enum Error {
 	CorruptedData,
 	/// When asked to read too much data
 	TooLargeReadErr,
-	/// Something was not sorted when consensus rules requires it to be sorted
-	BadlySorted,
+	/// Consensus rule failure (currently sort order)
+	ConsensusError(consensus::Error),
 }
 impl From<io::Error> for Error {

@@ -56,6 +57,12 @@ impl From<io::Error> for Error {
 	}
 }
+impl From<consensus::Error> for Error {
+	fn from(e: consensus::Error) -> Error {
+		Error::ConsensusError(e)
+	}
+}
 impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {

@@ -66,7 +73,7 @@ impl fmt::Display for Error {
 			} => write!(f, "expected {:?}, got {:?}", e, r),
 			Error::CorruptedData => f.write_str("corrupted data"),
 			Error::TooLargeReadErr => f.write_str("too large read"),
-			Error::BadlySorted => f.write_str("badly sorted data"),
+			Error::ConsensusError(ref e) => write!(f, "consensus error {:?}", e),
 		}
 	}
 }

@@ -88,7 +95,7 @@ impl error::Error for Error {
 			} => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::TooLargeReadErr => "too large read",
-			Error::BadlySorted => "badly sorted data",
+			Error::ConsensusError(_) => "consensus error (sort order)",
 		}
 	}
 }

@@ -420,10 +427,10 @@ where
 impl<T> WriteableSorted for Vec<T>
 where
-	T: Writeable + Hashed,
+	T: Writeable + Ord,
 {
 	fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error> {
-		self.sort_by_key(|elmt| elmt.hash());
+		self.sort();
 		for elmt in self {
 			elmt.write(writer)?;
 		}
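
After this change write_sorted needs only T: Ord and sorts the collection itself before writing, instead of sorting by each element's hash. A standalone sketch of that shape, with a toy writer over a byte buffer standing in for grin's ser::Writer (names illustrative):

trait ToyWriter {
    fn write_bytes(&mut self, bytes: &[u8]);
}

impl ToyWriter for Vec<u8> {
    fn write_bytes(&mut self, bytes: &[u8]) {
        self.extend_from_slice(bytes);
    }
}

// Sort via Ord, then write each element: serialization order no longer
// depends on hashing the elements first.
fn write_sorted<W: ToyWriter>(items: &mut Vec<u32>, writer: &mut W) {
    items.sort();
    for item in items.iter() {
        writer.write_bytes(&item.to_be_bytes());
    }
}

fn main() {
    let mut buf: Vec<u8> = Vec::new();
    let mut kernels = vec![3u32, 1, 2];
    write_sorted(&mut kernels, &mut buf);
    assert_eq!(buf, [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3]);
}
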


@@ -112,10 +112,9 @@ fn body_sync(peers: Peers, chain: Arc<chain::Chain>) {
 	// if we have 5 most_work_peers then ask for 50 blocks total (peer_count * 10)
 	// max will be 80 if all 8 peers are advertising most_work
-	let peer_count = cmp::max(peers.most_work_peers().len(), 10);
+	let peer_count = cmp::min(peers.most_work_peers().len(), 10);
 	let block_count = peer_count * 10;
 	let hashes_to_get = hashes
 		.iter()
 		.filter(|x| {
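
The max to min swap above turns 10 into a cap on the number of peers considered rather than a floor, so a node with few peers no longer over-asks (with 8 peers the total tops out at 80 blocks, as the comment says). A quick standalone check of the arithmetic with illustrative numbers:

use std::cmp;

fn main() {
    let most_work_peers = 5;
    let old_peer_count = cmp::max(most_work_peers, 10); // floor of 10 peers
    let new_peer_count = cmp::min(most_work_peers, 10); // capped at 10 peers
    assert_eq!(old_peer_count * 10, 100); // old code asked for 100 blocks with only 5 peers
    assert_eq!(new_peer_count * 10, 50);  // new code asks for 50, i.e. 10 per peer
}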