fix - sort tx inputs/outputs (#533) (#542)

* added debug logging to see what we're doing in save_pos_index

* add counts to logging

* make sure inputs/outputs in tx are sorted
tweak debug logging

* DRY up sorting of inputs/outputs/kernels via a simple sort() in ser/deser

* add explicit validation of sort order during tx and block validation

* sort order consensus rule for inputs/outputs/kernels in blocks

* we want a max of 10*num_peers blocks during sync...
not min

* fix test imports
This commit is contained in:
AntiochP 2017-12-22 12:15:44 -05:00 committed by GitHub
parent 9ddb1489a5
commit 4ba453694d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 85 additions and 22 deletions

View file

@ -323,12 +323,28 @@ impl<'a> Extension<'a> {
}
/// Persists the pending output/kernel position indexes to the store.
/// Each batch is logged (count plus the positions themselves) before it is
/// written so index saves can be traced in the debug log.
fn save_pos_index(&self) -> Result<(), Error> {
// Log how many new output commitments are being indexed, and at which positions.
debug!(
LOGGER,
"sumtree: save_pos_index: outputs: {}, {:?}",
self.new_output_commits.len(),
self.new_output_commits.values().collect::<Vec<_>>(),
);
// Persist each output commitment -> position mapping.
for (commit, pos) in &self.new_output_commits {
self.commit_index.save_output_pos(commit, *pos)?;
}
// Log how many new kernel excesses are being indexed, and at which positions.
debug!(
LOGGER,
"sumtree: save_pos_index: kernels: {}, {:?}",
self.new_kernel_excesses.len(),
self.new_kernel_excesses.values().collect::<Vec<_>>(),
);
// Persist each kernel excess -> position mapping.
for (excess, pos) in &self.new_kernel_excesses {
self.commit_index.save_kernel_pos(excess, *pos)?;
}
Ok(())
}

View file

@ -22,7 +22,6 @@
use std::fmt;
use std::cmp::max;
use ser;
use core::target::Difficulty;
/// A grin is divisible to 10^9, following the SI prefixes
@ -137,6 +136,13 @@ pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 4 / 3;
/// Minimum size time window used for difficulty adjustments
pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 5 / 6;
/// Consensus errors
#[derive(Clone, Debug, PartialEq)]
pub enum Error {
/// Inputs/outputs/kernels must be sorted lexicographically.
/// Returned by `VerifySortOrder::verify_sort_order` when a collection
/// is found out of order.
SortError,
}
/// Error when computing the next difficulty adjustment.
#[derive(Debug, Clone)]
pub struct TargetError(pub String);
@ -221,10 +227,10 @@ where
Ok(max(difficulty, Difficulty::minimum()))
}
-/// Consensus rule that collections of items are sorted lexicographically over the wire.
+/// Consensus rule that collections of items are sorted lexicographically.
pub trait VerifySortOrder<T> {
/// Verify a collection of items is sorted as required.
-fn verify_sort_order(&self) -> Result<(), ser::Error>;
+fn verify_sort_order(&self) -> Result<(), Error>;
}
#[cfg(test)]

View file

@ -21,8 +21,9 @@ use std::collections::HashSet;
use core::Committed;
use core::{Input, Output, Proof, SwitchCommitHash, Transaction, TxKernel, COINBASE_KERNEL,
COINBASE_OUTPUT};
use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD};
COINBASE_OUTPUT};
use consensus;
use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD, VerifySortOrder};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::target::Difficulty;
use core::transaction;
@ -54,6 +55,8 @@ pub enum Error {
Secp(secp::Error),
/// Underlying keychain related error
Keychain(keychain::Error),
/// Underlying consensus error (sort order currently)
Consensus(consensus::Error),
}
impl From<transaction::Error> for Error {
@ -74,6 +77,12 @@ impl From<keychain::Error> for Error {
}
}
// Wrap consensus errors (currently only sort-order violations) in the
// block-level Error type so they can be propagated with `?`.
impl From<consensus::Error> for Error {
fn from(e: consensus::Error) -> Error {
Error::Consensus(e)
}
}
/// Block header, fairly standard compared to other blockchains.
#[derive(Clone, Debug, PartialEq)]
pub struct BlockHeader {
@ -326,7 +335,7 @@ impl Block {
kernels.push(reward_kern);
outputs.push(reward_out);
-// now sort everything to the block is built deterministically
+// now sort everything so the block is built deterministically
inputs.sort();
outputs.sort();
kernels.sort();
@ -449,11 +458,19 @@ impl Block {
if exceeds_weight(self.inputs.len(), self.outputs.len(), self.kernels.len()) {
return Err(Error::WeightExceeded);
}
self.verify_sorted()?;
self.verify_coinbase()?;
self.verify_kernels(false)?;
Ok(())
}
// Consensus rule: a block's inputs, outputs and kernels must each be
// sorted lexicographically. A consensus::Error::SortError from any
// collection is converted into Error::Consensus via the From impl.
fn verify_sorted(&self) -> Result<(), Error> {
self.inputs.verify_sort_order()?;
self.outputs.verify_sort_order()?;
self.kernels.verify_sort_order()?;
Ok(())
}
/// Verifies the sum of input/output commitments match the sum in kernels
/// and that all kernel signatures are valid.
/// TODO - when would we skip_sig? Is this needed or used anywhere?
@ -590,7 +607,7 @@ mod test {
use core::build::{self, input, output, with_fee};
use core::test::tx2i1o;
use keychain::{Identifier, Keychain};
-use consensus::*;
+use consensus::{MAX_BLOCK_WEIGHT, BLOCK_OUTPUT_WEIGHT};
use std::time::Instant;
use util::secp;

View file

@ -23,7 +23,7 @@ use std::convert::AsRef;
use blake2::blake2b::Blake2b;
-use consensus::VerifySortOrder;
+use consensus;
use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
use util::LOGGER;
@ -196,15 +196,15 @@ impl<W: ser::Writeable> Hashed for W {
}
}
-impl<T: Writeable> VerifySortOrder<T> for Vec<T> {
-fn verify_sort_order(&self) -> Result<(), ser::Error> {
+impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
+fn verify_sort_order(&self) -> Result<(), consensus::Error> {
match self.iter()
.map(|item| item.hash())
.collect::<Vec<_>>()
.windows(2)
.any(|pair| pair[0] > pair[1]) {
-true => Err(ser::Error::BadlySorted),
-false => Ok(()),
-}
+true => Err(consensus::Error::SortError),
+false => Ok(()),
+}
}
}

View file

@ -23,6 +23,7 @@ use std::cmp::Ordering;
use std::ops;
use consensus;
use consensus::VerifySortOrder;
use core::Committed;
use core::hash::Hashed;
use core::pmmr::Summable;
@ -75,6 +76,8 @@ pub enum Error {
Secp(secp::Error),
/// Restrict number of incoming inputs
TooManyInputs,
/// Underlying consensus error (currently for sort order)
ConsensusError(consensus::Error),
}
impl From<secp::Error> for Error {
@ -83,6 +86,12 @@ impl From<secp::Error> for Error {
}
}
// Wrap consensus errors (currently only sort-order violations) in the
// transaction-level Error type so they can be propagated with `?`.
impl From<consensus::Error> for Error {
fn from(e: consensus::Error) -> Error {
Error::ConsensusError(e)
}
}
/// Construct msg bytes from tx fee and lock_height
pub fn kernel_sig_msg(fee: u64, lock_height: u64) -> [u8; 32] {
let mut bytes = [0; 32];
@ -275,6 +284,7 @@ impl Transaction {
pub fn with_input(self, input: Input) -> Transaction {
let mut new_ins = self.inputs;
new_ins.push(input);
new_ins.sort();
Transaction {
inputs: new_ins,
..self
@ -286,6 +296,7 @@ impl Transaction {
pub fn with_output(self, output: Output) -> Transaction {
let mut new_outs = self.outputs;
new_outs.push(output);
new_outs.sort();
Transaction {
outputs: new_outs,
..self
@ -353,12 +364,19 @@ impl Transaction {
if self.inputs.len() > consensus::MAX_BLOCK_INPUTS {
return Err(Error::TooManyInputs);
}
self.verify_sorted()?;
for out in &self.outputs {
out.verify_proof()?;
}
let excess = self.verify_sig()?;
Ok(excess)
}
// Consensus rule: a transaction's inputs and outputs must each be sorted
// lexicographically (kernels are validated separately at the block level).
// Sort-order failures convert to Error::ConsensusError via the From impl.
fn verify_sorted(&self) -> Result<(), Error> {
self.inputs.verify_sort_order()?;
self.outputs.verify_sort_order()?;
Ok(())
}
}
/// A transaction input, mostly a reference to an output being spent by the

View file

@ -23,8 +23,9 @@ use std::{cmp, error, fmt};
use std::io::{self, Read, Write};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{Identifier, IDENTIFIER_SIZE};
use core::hash::Hashed;
use consensus;
use consensus::VerifySortOrder;
use core::hash::Hashed;
use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash};
use util::secp::pedersen::Commitment;
use util::secp::pedersen::RangeProof;
@ -46,8 +47,8 @@ pub enum Error {
CorruptedData,
/// When asked to read too much data
TooLargeReadErr,
-/// Something was not sorted when consensus rules requires it to be sorted
-BadlySorted,
+/// Consensus rule failure (currently sort order)
+ConsensusError(consensus::Error),
}
impl From<io::Error> for Error {
@ -56,6 +57,12 @@ impl From<io::Error> for Error {
}
}
// Wrap consensus errors (currently only sort-order violations) in the
// serialization Error type so they can be propagated with `?`.
impl From<consensus::Error> for Error {
fn from(e: consensus::Error) -> Error {
Error::ConsensusError(e)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
@ -66,7 +73,7 @@ impl fmt::Display for Error {
} => write!(f, "expected {:?}, got {:?}", e, r),
Error::CorruptedData => f.write_str("corrupted data"),
Error::TooLargeReadErr => f.write_str("too large read"),
-Error::BadlySorted => f.write_str("badly sorted data"),
+Error::ConsensusError(ref e) => write!(f, "consensus error {:?}", e),
}
}
}
@ -88,7 +95,7 @@ impl error::Error for Error {
} => "unexpected data",
Error::CorruptedData => "corrupted data",
Error::TooLargeReadErr => "too large read",
-Error::BadlySorted => "badly sorted data",
+Error::ConsensusError(_) => "consensus error (sort order)",
}
}
}
@ -420,10 +427,10 @@ where
impl<T> WriteableSorted for Vec<T>
where
-T: Writeable + Hashed,
+T: Writeable + Ord,
{
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error> {
-self.sort_by_key(|elmt| elmt.hash());
+self.sort();
for elmt in self {
elmt.write(writer)?;
}

View file

@ -112,10 +112,9 @@ fn body_sync(peers: Peers, chain: Arc<chain::Chain>) {
// if we have 5 most_work_peers then ask for 50 blocks total (peer_count * 10)
// max will be 80 if all 8 peers are advertising most_work
-let peer_count = cmp::max(peers.most_work_peers().len(), 10);
+let peer_count = cmp::min(peers.most_work_peers().len(), 10);
let block_count = peer_count * 10;
let hashes_to_get = hashes
.iter()
.filter(|x| {