Cleanup max sizes (#1345)

* Transaction max weight expressed in terms of block weight
* Cleaned up max block sizes, so everything is in terms of weight
* Cleanup block max constants
* Rename verify_size -> verify_weight
Ignotus Peverell 2018-08-12 13:02:30 -07:00 committed by GitHub
parent 0d94d35a9d
commit 4be97abbbb
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 56 additions and 110 deletions


@@ -72,10 +72,6 @@ pub const EASINESS: u32 = 50;
 /// easier to reason about.
 pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);
-/// The maximum size we're willing to accept for any message. Enforced by the
-/// peer-to-peer networking layer only for DoS protection.
-pub const MAX_MSG_LEN: u64 = 20_000_000;
 /// Weight of an input when counted against the max block weight capacity
 pub const BLOCK_INPUT_WEIGHT: usize = 1;
@@ -85,35 +81,21 @@ pub const BLOCK_OUTPUT_WEIGHT: usize = 10;
 /// Weight of a kernel when counted against the max block weight capacity
 pub const BLOCK_KERNEL_WEIGHT: usize = 2;
-/// Total maximum block weight
+/// Total maximum block weight. At current sizes, this means a maximum
+/// theoretical size of:
+/// * `(674 + 33 + 1) * 8_000 = 5_664_000` for a block with only outputs
+/// * `(1 + 8 + 8 + 33 + 64) * 40_000 = 4_560_000` for a block with only kernels
+/// * `(1 + 33) * 80_000 = 2_720_000` for a block with only inputs
+///
+/// Given that a block needs to have at least one kernel for the coinbase,
+/// and one kernel for the transaction, practical maximum size is 5_663_520,
+/// (ignoring the edge case of a miner producing a block with all coinbase
+/// outputs and a single kernel).
+///
+/// A more "standard" block, filled with transactions of 2 inputs, 2 outputs
+/// and one kernel, should be around 5_326_667 bytes.
 pub const MAX_BLOCK_WEIGHT: usize = 80_000;
-/// Reused consistently for various max lengths below.
-/// Max transaction is effectively a full block of data.
-/// Soft fork down when too high.
-/// Likely we will need to tweak these all individually, but using a single constant for now.
-const MAX_INP_OUT_KERN_LEN: usize = 300_000;
-/// Maximum inputs for a block.
-pub const MAX_BLOCK_INPUTS: usize = MAX_INP_OUT_KERN_LEN;
-/// Maximum outputs for a block (max tx + single output for coinbase).
-/// This is just a starting point - need to discuss this further.
-pub const MAX_BLOCK_OUTPUTS: usize = MAX_INP_OUT_KERN_LEN + 1;
-/// Maximum kernels for a block (max tx + single output for coinbase).
-/// This is just a starting point - need to discuss this further.
-pub const MAX_BLOCK_KERNELS: usize = MAX_INP_OUT_KERN_LEN + 1;
-/// Maximum inputs in a transaction.
-pub const MAX_TX_INPUTS: usize = MAX_INP_OUT_KERN_LEN;
-/// Maximum outputs in a transaction.
-pub const MAX_TX_OUTPUTS: usize = MAX_INP_OUT_KERN_LEN;
-/// Maximum kernels in a transaction.
-pub const MAX_TX_KERNELS: usize = MAX_INP_OUT_KERN_LEN;
 /// Fork every 250,000 blocks for first 2 years, simple number and just a
 /// little less than 6 months.
 pub const HARD_FORK_INTERVAL: u64 = 250_000;
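
The doc comment above packs in a fair amount of arithmetic, so here is a minimal standalone sketch (not part of the diff) that re-derives the three theoretical maxima from the weights introduced in this file. The per-element serialized sizes are read straight off the comment: an output is 1 + 33 + 674 = 708 bytes, a kernel 1 + 8 + 8 + 33 + 64 = 114 bytes, an input 1 + 33 = 34 bytes.

const MAX_BLOCK_WEIGHT: usize = 80_000;
const BLOCK_INPUT_WEIGHT: usize = 1;
const BLOCK_OUTPUT_WEIGHT: usize = 10;
const BLOCK_KERNEL_WEIGHT: usize = 2;

fn main() {
    // Only outputs: 80_000 / 10 = 8_000 outputs of 708 bytes each.
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT * 708, 5_664_000);
    // Only kernels: 80_000 / 2 = 40_000 kernels of 114 bytes each.
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_KERNEL_WEIGHT * 114, 4_560_000);
    // Only inputs: 80_000 / 1 = 80_000 inputs of 34 bytes each.
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_INPUT_WEIGHT * 34, 2_720_000);
}

The "standard" figure falls out the same way: a 2-input, 2-output, 1-kernel transaction weighs 2*1 + 2*10 + 1*2 = 24 and serializes to roughly 2*34 + 2*708 + 114 = 1_598 bytes, so (80_000 / 24) * 1_598 ≈ 5_326_667.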


@@ -45,12 +45,8 @@ pub enum Error {
 	InvalidTotalKernelSum,
 	/// Same as above but for the coinbase part of a block, including reward
 	CoinbaseSumMismatch,
-	/// Restrict number of block inputs.
-	TooManyInputs,
-	/// Restrict number of block outputs.
-	TooManyOutputs,
-	/// Retrict number of block kernels.
-	TooManyKernels,
+	/// Restrict block total weight.
+	TooHeavy,
 	/// Block weight (based on inputs|outputs|kernels) exceeded.
 	WeightExceeded,
 	/// Kernel not valid due to lock_height exceeding block header height
@@ -527,8 +523,7 @@ impl Block {
 		let header = self.header.clone();
 		let nonce = thread_rng().next_u64();
-		let mut out_full = self
-			.outputs
+		let mut out_full = self.outputs
 			.iter()
 			.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.cloned()
@@ -660,14 +655,12 @@ impl Block {
 	/// we do not want to cut-through (all coinbase must be preserved)
 	///
 	pub fn cut_through(self) -> Block {
-		let in_set = self
-			.inputs
+		let in_set = self.inputs
 			.iter()
 			.map(|inp| inp.commitment())
 			.collect::<HashSet<_>>();
-		let out_set = self
-			.outputs
+		let out_set = self.outputs
 			.iter()
 			.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.map(|out| out.commitment())
@@ -675,14 +668,12 @@ impl Block {
 		let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
-		let new_inputs = self
-			.inputs
+		let new_inputs = self.inputs
 			.into_iter()
 			.filter(|inp| !to_cut_through.contains(&inp.commitment()))
 			.collect::<Vec<_>>();
-		let new_outputs = self
-			.outputs
+		let new_outputs = self.outputs
 			.into_iter()
 			.filter(|out| !to_cut_through.contains(&out.commitment()))
 			.collect::<Vec<_>>();
@@ -709,7 +700,6 @@ impl Block {
 	) -> Result<(Commitment), Error> {
 		// Verify we do not exceed the max number of inputs|outputs|kernels
 		// and that the "weight" based on these does not exceed the max permitted weight.
-		self.verify_size()?;
 		self.verify_weight()?;
 		self.verify_sorted()?;
@@ -744,29 +734,14 @@ impl Block {
 		Ok(kernel_sum)
 	}
-	// Verify the tx is not too big in terms of
-	// number of inputs|outputs|kernels.
-	fn verify_size(&self) -> Result<(), Error> {
-		if self.inputs.len() > consensus::MAX_BLOCK_INPUTS {
-			return Err(Error::TooManyInputs);
-		}
-		if self.outputs.len() > consensus::MAX_BLOCK_OUTPUTS {
-			return Err(Error::TooManyOutputs);
-		}
-		if self.kernels.len() > consensus::MAX_BLOCK_KERNELS {
-			return Err(Error::TooManyKernels);
-		}
-		Ok(())
-	}
+	// Verify the block is not too big in terms of number of inputs|outputs|kernels.
 	fn verify_weight(&self) -> Result<(), Error> {
-		let weight =
-			self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT +
-			self.outputs.len() * consensus::BLOCK_OUTPUT_WEIGHT +
-			self.kernels.len() * consensus::BLOCK_KERNEL_WEIGHT;
-		if weight > consensus::MAX_BLOCK_WEIGHT {
-			return Err(Error::WeightExceeded);
+		let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+			+ self.outputs.len() * consensus::BLOCK_OUTPUT_WEIGHT
+			+ self.kernels.len() * consensus::BLOCK_KERNEL_WEIGHT;
+		if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
+			return Err(Error::TooHeavy);
 		}
 		Ok(())
 	}
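
A standalone sketch (not part of the diff): with verify_size() gone, the single weight cap is the only block-level limit, and the effective per-type maxima it implies are much tighter than the old MAX_BLOCK_INPUTS/OUTPUTS/KERNELS counts of roughly 300_000. The constants are the consensus values above; the block shapes in main() are made-up examples.

const BLOCK_INPUT_WEIGHT: usize = 1;
const BLOCK_OUTPUT_WEIGHT: usize = 10;
const BLOCK_KERNEL_WEIGHT: usize = 2;
const MAX_BLOCK_WEIGHT: usize = 80_000;

// Same formula as the new Block::verify_weight(), lifted out of the impl.
fn block_weight(inputs: usize, outputs: usize, kernels: usize) -> usize {
    inputs * BLOCK_INPUT_WEIGHT + outputs * BLOCK_OUTPUT_WEIGHT + kernels * BLOCK_KERNEL_WEIGHT
}

fn main() {
    // Effective maxima implied by the weight cap, replacing the old count limits:
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_INPUT_WEIGHT, 80_000); // inputs only
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT, 8_000); // outputs only
    assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_KERNEL_WEIGHT, 40_000); // kernels only

    // A block of exactly 8_000 outputs sits at the cap; a single extra kernel
    // on top of that would now be rejected as Error::TooHeavy.
    assert_eq!(block_weight(0, 8_000, 0), MAX_BLOCK_WEIGHT);
    assert!(block_weight(0, 8_000, 1) > MAX_BLOCK_WEIGHT);
}
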
@@ -782,8 +757,7 @@ impl Block {
 	// Verify that no input is spending an output from the same block.
 	fn verify_cut_through(&self) -> Result<(), Error> {
 		for inp in &self.inputs {
-			if self
-				.outputs
+			if self.outputs
 				.iter()
 				.any(|out| out.commitment() == inp.commitment())
 			{
@@ -826,14 +800,12 @@ impl Block {
 	/// Check the sum of coinbase-marked outputs match
 	/// the sum of coinbase-marked kernels accounting for fees.
 	pub fn verify_coinbase(&self) -> Result<(), Error> {
-		let cb_outs = self
-			.outputs
+		let cb_outs = self.outputs
 			.iter()
 			.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
 			.collect::<Vec<&Output>>();
-		let cb_kerns = self
-			.kernels
+		let cb_kerns = self.kernels
 			.iter()
 			.filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
 			.collect::<Vec<&TxKernel>>();


@@ -54,12 +54,8 @@ pub enum Error {
 	/// The sum of output minus input commitments does not
 	/// match the sum of kernel commitments
 	KernelSumMismatch,
-	/// Restrict number of tx inputs.
-	TooManyInputs,
-	/// Restrict number of tx outputs.
-	TooManyOutputs,
-	/// Retrict number of tx kernels.
-	TooManyKernels,
+	/// Restrict tx total weight.
+	TooHeavy,
 	/// Underlying consensus error (currently for sort order)
 	ConsensusError(consensus::Error),
 	/// Error originating from an invalid lock-height
@@ -433,17 +429,16 @@ impl Transaction {
 		Ok(())
 	}
-	// Verify the tx is not too big in terms of
-	// number of inputs|outputs|kernels.
-	fn verify_size(&self) -> Result<(), Error> {
-		if self.inputs.len() > consensus::MAX_TX_INPUTS {
-			return Err(Error::TooManyInputs);
-		}
-		if self.outputs.len() > consensus::MAX_TX_OUTPUTS {
-			return Err(Error::TooManyOutputs);
-		}
-		if self.kernels.len() > consensus::MAX_TX_KERNELS {
-			return Err(Error::TooManyKernels);
+	// Verify the tx is not too big in terms of number of inputs|outputs|kernels.
+	fn verify_weight(&self) -> Result<(), Error> {
+		// check the tx as if it was a block, with an additional output and
+		// kernel for reward
+		let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+			+ (self.outputs.len() + 1) * consensus::BLOCK_OUTPUT_WEIGHT
+			+ (self.kernels.len() + 1) * consensus::BLOCK_KERNEL_WEIGHT;
+		if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
+			return Err(Error::TooHeavy);
 		}
 		Ok(())
 	}
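
The transaction check mirrors the block check but reserves room for the coinbase: the tx is weighed as if it were already sitting in a block with one extra output and one extra kernel for the reward. A standalone sketch (not part of the diff; the input/output/kernel counts in main() are made-up examples):

const BLOCK_INPUT_WEIGHT: usize = 1;
const BLOCK_OUTPUT_WEIGHT: usize = 10;
const BLOCK_KERNEL_WEIGHT: usize = 2;
const MAX_BLOCK_WEIGHT: usize = 80_000;

// Same formula as the new Transaction::verify_weight(): note the +1 output
// and +1 kernel reserved for the block reward.
fn tx_block_weight(inputs: usize, outputs: usize, kernels: usize) -> usize {
    inputs * BLOCK_INPUT_WEIGHT
        + (outputs + 1) * BLOCK_OUTPUT_WEIGHT
        + (kernels + 1) * BLOCK_KERNEL_WEIGHT
}

fn main() {
    // 1 input, 7_998 outputs, 1 kernel: 1 + 7_999 * 10 + 2 * 2 = 79_995, fits.
    assert!(tx_block_weight(1, 7_998, 1) <= MAX_BLOCK_WEIGHT);
    // One more output: 1 + 8_000 * 10 + 2 * 2 = 80_005, rejected as TooHeavy.
    assert!(tx_block_weight(1, 7_999, 1) > MAX_BLOCK_WEIGHT);
}
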
@@ -453,7 +448,7 @@ impl Transaction {
 	/// output.
 	pub fn validate(&self) -> Result<(), Error> {
 		self.verify_features()?;
-		self.verify_size()?;
+		self.verify_weight()?;
 		self.verify_sorted()?;
 		self.verify_cut_through()?;
 		self.verify_kernel_sums(self.overage(), self.offset)?;
@@ -487,8 +482,7 @@ impl Transaction {
 	// Verify that no input is spending an output from the same block.
 	fn verify_cut_through(&self) -> Result<(), Error> {
 		for inp in &self.inputs {
-			if self
-				.outputs
+			if self.outputs
 				.iter()
 				.any(|out| out.commitment() == inp.commitment())
 			{
@@ -509,8 +503,7 @@ impl Transaction {
 	// Verify we have no outputs tagged as COINBASE_OUTPUT.
 	fn verify_output_features(&self) -> Result<(), Error> {
-		if self
-			.outputs
+		if self.outputs
 			.iter()
 			.any(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
 		{
@@ -521,8 +514,7 @@ impl Transaction {
 	// Verify we have no kernels tagged as COINBASE_KERNEL.
 	fn verify_kernel_features(&self) -> Result<(), Error> {
-		if self
-			.kernels
+		if self.kernels
 			.iter()
 			.any(|x| x.features.contains(KernelFeatures::COINBASE_KERNEL))
 		{


@@ -19,10 +19,10 @@ use std::io::{self, Read, Write};
 use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream};
 use std::{thread, time};
-use core::consensus::{MAX_MSG_LEN, MAX_TX_INPUTS, MAX_TX_KERNELS, MAX_TX_OUTPUTS};
-use core::core::BlockHeader;
+use core::consensus;
 use core::core::hash::Hash;
 use core::core::target::Difficulty;
+use core::core::BlockHeader;
 use core::ser::{self, Readable, Reader, Writeable, Writer};
 use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
@@ -40,6 +40,10 @@ const MAGIC: [u8; 2] = [0x1e, 0xc5];
 /// Size in bytes of a message header
 pub const HEADER_LEN: u64 = 11;
+/// Max theoretical size of a block filled with outputs.
+const MAX_BLOCK_SIZE: u64 =
+	(consensus::MAX_BLOCK_WEIGHT / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64;
 /// Types of messages.
 /// Note: Values here are *important* so we should only add new values at the
 /// end.
@@ -82,11 +86,11 @@ fn max_msg_size(msg_type: Type) -> u64 {
 		Type::Header => 365,
 		Type::Headers => 2 + 365 * MAX_BLOCK_HEADERS as u64,
 		Type::GetBlock => 32,
-		Type::Block => MAX_MSG_LEN,
+		Type::Block => MAX_BLOCK_SIZE,
 		Type::GetCompactBlock => 32,
-		Type::CompactBlock => MAX_MSG_LEN / 10,
-		Type::StemTransaction => (1000 * MAX_TX_INPUTS + 710 * MAX_TX_OUTPUTS + 114 * MAX_TX_KERNELS) as u64,
-		Type::Transaction => (1000 * MAX_TX_INPUTS + 710 * MAX_TX_OUTPUTS + 114 * MAX_TX_KERNELS) as u64,
+		Type::CompactBlock => MAX_BLOCK_SIZE / 10,
+		Type::StemTransaction => MAX_BLOCK_SIZE,
+		Type::Transaction => MAX_BLOCK_SIZE,
 		Type::TxHashSetRequest => 40,
 		Type::TxHashSetArchive => 64,
 		Type::BanReason => 64,
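
For context (not part of the diff): the p2p bound is now derived from the block weight instead of the old flat MAX_MSG_LEN of 20_000_000 bytes. A quick standalone check of the numbers, using the 708-byte output size assumed by MAX_BLOCK_SIZE:

const MAX_BLOCK_WEIGHT: u64 = 80_000;
const BLOCK_OUTPUT_WEIGHT: u64 = 10;
// Same expression as the new MAX_BLOCK_SIZE in this file.
const MAX_BLOCK_SIZE: u64 = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT * 708;

fn main() {
    // Block and (stem) transaction messages are now capped at ~5.7 MB.
    assert_eq!(MAX_BLOCK_SIZE, 5_664_000);
    // Compact blocks keep the 1/10 ratio used previously with MAX_MSG_LEN.
    assert_eq!(MAX_BLOCK_SIZE / 10, 566_400);
}

For comparison, the old transaction bound of 1000 * MAX_TX_INPUTS + 710 * MAX_TX_OUTPUTS + 114 * MAX_TX_KERNELS worked out to (1000 + 710 + 114) * 300_000 = 547_200_000 bytes, so the new shared cap is a substantial tightening.
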
@@ -729,11 +733,7 @@ pub struct TxHashSetArchive {
 impl Writeable for TxHashSetArchive {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		self.hash.write(writer)?;
-		ser_multiwrite!(
-			writer,
-			[write_u64, self.height],
-			[write_u64, self.bytes]
-		);
+		ser_multiwrite!(writer, [write_u64, self.height], [write_u64, self.bytes]);
 		Ok(())
 	}
 }