Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-21 03:21:08 +03:00)
Cleanup max sizes (#1345)
* Transaction max weight expressed in terms of block weight
* Cleaned up max block sizes, so everything is in terms of weight
* Cleanup block max constants
* Rename verify_size -> verify_weight
This commit is contained in:
parent 0d94d35a9d
commit 4be97abbbb
4 changed files with 56 additions and 110 deletions

@@ -72,10 +72,6 @@ pub const EASINESS: u32 = 50;
/// easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);

/// The maximum size we're willing to accept for any message. Enforced by the
/// peer-to-peer networking layer only for DoS protection.
pub const MAX_MSG_LEN: u64 = 20_000_000;

/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: usize = 1;

@@ -85,35 +81,21 @@ pub const BLOCK_OUTPUT_WEIGHT: usize = 10;
/// Weight of a kernel when counted against the max block weight capacity
pub const BLOCK_KERNEL_WEIGHT: usize = 2;

/// Total maximum block weight
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * 8_000 = 5_664_000` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * 40_000 = 4_560_000` for a block with only kernels
/// * `(1 + 33) * 80_000 = 2_720_000` for a block with only inputs
///
/// Given that a block needs to have at least one kernel for the coinbase,
/// and one kernel for the transaction, practical maximum size is 5_663_520,
/// (ignoring the edge case of a miner producing a block with all coinbase
/// outputs and a single kernel).
///
/// A more "standard" block, filled with transactions of 2 inputs, 2 outputs
/// and one kernel, should be around 5_326_667 bytes.
pub const MAX_BLOCK_WEIGHT: usize = 80_000;

/// Reused consistently for various max lengths below.
/// Max transaction is effectively a full block of data.
/// Soft fork down when too high.
/// Likely we will need to tweak these all individually, but using a single constant for now.
const MAX_INP_OUT_KERN_LEN: usize = 300_000;

/// Maximum inputs for a block.
pub const MAX_BLOCK_INPUTS: usize = MAX_INP_OUT_KERN_LEN;

/// Maximum outputs for a block (max tx + single output for coinbase).
/// This is just a starting point - need to discuss this further.
pub const MAX_BLOCK_OUTPUTS: usize = MAX_INP_OUT_KERN_LEN + 1;

/// Maximum kernels for a block (max tx + single output for coinbase).
/// This is just a starting point - need to discuss this further.
pub const MAX_BLOCK_KERNELS: usize = MAX_INP_OUT_KERN_LEN + 1;

/// Maximum inputs in a transaction.
pub const MAX_TX_INPUTS: usize = MAX_INP_OUT_KERN_LEN;

/// Maximum outputs in a transaction.
pub const MAX_TX_OUTPUTS: usize = MAX_INP_OUT_KERN_LEN;

/// Maximum kernels in a transaction.
pub const MAX_TX_KERNELS: usize = MAX_INP_OUT_KERN_LEN;

/// Fork every 250,000 blocks for first 2 years, simple number and just a
/// little less than 6 months.
pub const HARD_FORK_INTERVAL: u64 = 250_000;
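
The byte figures in the new MAX_BLOCK_WEIGHT comment follow from the per-entity weights above together with assumed serialized sizes of 34 bytes per input (1 + 33), 708 bytes per output (674 + 33 + 1) and 114 bytes per kernel (1 + 8 + 8 + 33 + 64). A minimal standalone sketch reproducing that arithmetic; the constants are copied from this hunk (presumably consensus.rs), while the byte sizes are assumptions taken from the comment itself, not from grin's serializer:

    // Sketch: checks the doc-comment arithmetic for MAX_BLOCK_WEIGHT.
    const MAX_BLOCK_WEIGHT: usize = 80_000;
    const BLOCK_INPUT_WEIGHT: usize = 1;
    const BLOCK_OUTPUT_WEIGHT: usize = 10;
    const BLOCK_KERNEL_WEIGHT: usize = 2;

    const INPUT_BYTES: usize = 1 + 33; // 34
    const OUTPUT_BYTES: usize = 674 + 33 + 1; // 708
    const KERNEL_BYTES: usize = 1 + 8 + 8 + 33 + 64; // 114

    fn main() {
        // A block of nothing but outputs: 80_000 / 10 = 8_000 of them.
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT * OUTPUT_BYTES, 5_664_000);
        // Only kernels: 80_000 / 2 = 40_000 of them.
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_KERNEL_WEIGHT * KERNEL_BYTES, 4_560_000);
        // Only inputs: 80_000 / 1 = 80_000 of them.
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_INPUT_WEIGHT * INPUT_BYTES, 2_720_000);
        // A block of "standard" 2-in/2-out/1-kernel transactions: weight 24 and
        // 1_598 bytes each, so roughly 80_000 / 24 of them, ~5_326_667 bytes.
        let tx_weight = 2 * BLOCK_INPUT_WEIGHT + 2 * BLOCK_OUTPUT_WEIGHT + BLOCK_KERNEL_WEIGHT;
        let tx_bytes = 2 * INPUT_BYTES + 2 * OUTPUT_BYTES + KERNEL_BYTES;
        assert_eq!(MAX_BLOCK_WEIGHT * tx_bytes / tx_weight, 5_326_666);
    }
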
@@ -45,12 +45,8 @@ pub enum Error {
InvalidTotalKernelSum,
/// Same as above but for the coinbase part of a block, including reward
CoinbaseSumMismatch,
/// Restrict number of block inputs.
TooManyInputs,
/// Restrict number of block outputs.
TooManyOutputs,
/// Restrict number of block kernels.
TooManyKernels,
/// Restrict block total weight.
TooHeavy,
/// Block weight (based on inputs|outputs|kernels) exceeded.
WeightExceeded,
/// Kernel not valid due to lock_height exceeding block header height

@@ -527,8 +523,7 @@ impl Block {
let header = self.header.clone();
let nonce = thread_rng().next_u64();

let mut out_full = self
.outputs
let mut out_full = self.outputs
.iter()
.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
.cloned()

@@ -660,14 +655,12 @@ impl Block {
/// we do not want to cut-through (all coinbase must be preserved)
///
pub fn cut_through(self) -> Block {
let in_set = self
.inputs
let in_set = self.inputs
.iter()
.map(|inp| inp.commitment())
.collect::<HashSet<_>>();

let out_set = self
.outputs
let out_set = self.outputs
.iter()
.filter(|out| !out.features.contains(OutputFeatures::COINBASE_OUTPUT))
.map(|out| out.commitment())

@@ -675,14 +668,12 @@ impl Block {

let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();

let new_inputs = self
.inputs
let new_inputs = self.inputs
.into_iter()
.filter(|inp| !to_cut_through.contains(&inp.commitment()))
.collect::<Vec<_>>();

let new_outputs = self
.outputs
let new_outputs = self.outputs
.into_iter()
.filter(|out| !to_cut_through.contains(&out.commitment()))
.collect::<Vec<_>>();

@@ -709,7 +700,6 @@ impl Block {
) -> Result<(Commitment), Error> {
// Verify we do not exceed the max number of inputs|outputs|kernels
// and that the "weight" based on these does not exceed the max permitted weight.
self.verify_size()?;
self.verify_weight()?;

self.verify_sorted()?;

@@ -744,29 +734,14 @@ impl Block {
Ok(kernel_sum)
}

// Verify the tx is not too big in terms of
// number of inputs|outputs|kernels.
fn verify_size(&self) -> Result<(), Error> {
if self.inputs.len() > consensus::MAX_BLOCK_INPUTS {
return Err(Error::TooManyInputs);
}
if self.outputs.len() > consensus::MAX_BLOCK_OUTPUTS {
return Err(Error::TooManyOutputs);
}
if self.kernels.len() > consensus::MAX_BLOCK_KERNELS {
return Err(Error::TooManyKernels);
}
Ok(())
}

// Verify the block is not too big in terms of number of inputs|outputs|kernels.
fn verify_weight(&self) -> Result<(), Error> {
let weight =
self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT +
self.outputs.len() * consensus::BLOCK_OUTPUT_WEIGHT +
self.kernels.len() * consensus::BLOCK_KERNEL_WEIGHT;
let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+ self.outputs.len() * consensus::BLOCK_OUTPUT_WEIGHT
+ self.kernels.len() * consensus::BLOCK_KERNEL_WEIGHT;

if weight > consensus::MAX_BLOCK_WEIGHT {
return Err(Error::WeightExceeded);
if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
return Err(Error::TooHeavy);
}
Ok(())
}
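
With the count-based verify_size folded into the renamed verify_weight, a block is now rejected purely on weight (Error::TooHeavy) rather than on raw input/output/kernel counts. A rough sketch, not grin code, of where that limit bites for a block of standard 2-input/2-output/1-kernel transactions plus a single coinbase output and kernel; the weights are copied from the consensus constants and the helper block_weight is hypothetical:

    const MAX_BLOCK_WEIGHT: usize = 80_000;

    // Weight of a block holding `n_txs` standard 2-in/2-out/1-kernel transactions
    // plus one coinbase output and one coinbase kernel, as verify_weight counts it.
    fn block_weight(n_txs: usize) -> usize {
        let coinbase = 10 + 2; // one output, one kernel
        let per_tx = 2 + 2 * 10 + 2; // 2 inputs, 2 outputs, 1 kernel
        coinbase + n_txs * per_tx
    }

    fn main() {
        assert!(block_weight(3_332) <= MAX_BLOCK_WEIGHT); // 79_980: accepted
        assert!(block_weight(3_333) > MAX_BLOCK_WEIGHT); // 80_004: Error::TooHeavy
    }
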
@@ -782,8 +757,7 @@ impl Block {
// Verify that no input is spending an output from the same block.
fn verify_cut_through(&self) -> Result<(), Error> {
for inp in &self.inputs {
if self
.outputs
if self.outputs
.iter()
.any(|out| out.commitment() == inp.commitment())
{

@@ -826,14 +800,12 @@ impl Block {
/// Check the sum of coinbase-marked outputs matches
/// the sum of coinbase-marked kernels accounting for fees.
pub fn verify_coinbase(&self) -> Result<(), Error> {
let cb_outs = self
.outputs
let cb_outs = self.outputs
.iter()
.filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
.collect::<Vec<&Output>>();

let cb_kerns = self
.kernels
let cb_kerns = self.kernels
.iter()
.filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
.collect::<Vec<&TxKernel>>();

@@ -54,12 +54,8 @@ pub enum Error {
/// The sum of output minus input commitments does not
/// match the sum of kernel commitments
KernelSumMismatch,
/// Restrict number of tx inputs.
TooManyInputs,
/// Restrict number of tx outputs.
TooManyOutputs,
/// Restrict number of tx kernels.
TooManyKernels,
/// Restrict tx total weight.
TooHeavy,
/// Underlying consensus error (currently for sort order)
ConsensusError(consensus::Error),
/// Error originating from an invalid lock-height

@@ -433,17 +429,16 @@ impl Transaction {
Ok(())
}

// Verify the tx is not too big in terms of
// number of inputs|outputs|kernels.
fn verify_size(&self) -> Result<(), Error> {
if self.inputs.len() > consensus::MAX_TX_INPUTS {
return Err(Error::TooManyInputs);
}
if self.outputs.len() > consensus::MAX_TX_OUTPUTS {
return Err(Error::TooManyOutputs);
}
if self.kernels.len() > consensus::MAX_TX_KERNELS {
return Err(Error::TooManyKernels);
// Verify the tx is not too big in terms of number of inputs|outputs|kernels.
fn verify_weight(&self) -> Result<(), Error> {
// check the tx as if it was a block, with an additional output and
// kernel for reward
let tx_block_weight = self.inputs.len() * consensus::BLOCK_INPUT_WEIGHT
+ (self.outputs.len() + 1) * consensus::BLOCK_OUTPUT_WEIGHT
+ (self.kernels.len() + 1) * consensus::BLOCK_KERNEL_WEIGHT;

if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
return Err(Error::TooHeavy);
}
Ok(())
}
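
The new Transaction::verify_weight counts the transaction against the block capacity while reserving one extra output and one extra kernel for the coinbase reward, so a transaction accepted into the pool always leaves room for the reward once it is mined. A hedged sketch of the same arithmetic; the weights are copied from the consensus constants and the helper tx_weight_as_block is hypothetical:

    const MAX_BLOCK_WEIGHT: usize = 80_000;

    // A transaction's weight counted "as a block": one extra output and one extra
    // kernel are reserved for the coinbase reward, mirroring the check above.
    fn tx_weight_as_block(inputs: usize, outputs: usize, kernels: usize) -> usize {
        inputs + (outputs + 1) * 10 + (kernels + 1) * 2
    }

    fn main() {
        // A typical 2-in/2-out/1-kernel transaction weighs 2 + 30 + 4 = 36
        // against the 80_000 budget, so it passes easily.
        assert_eq!(tx_weight_as_block(2, 2, 1), 36);
        // 8_000 outputs would weigh 80_014 counted as a block and be rejected
        // with Error::TooHeavy.
        assert!(tx_weight_as_block(0, 8_000, 1) > MAX_BLOCK_WEIGHT);
    }
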
@@ -453,7 +448,7 @@ impl Transaction {
/// output.
pub fn validate(&self) -> Result<(), Error> {
self.verify_features()?;
self.verify_size()?;
self.verify_weight()?;
self.verify_sorted()?;
self.verify_cut_through()?;
self.verify_kernel_sums(self.overage(), self.offset)?;

@@ -487,8 +482,7 @@ impl Transaction {
// Verify that no input is spending an output from the same block.
fn verify_cut_through(&self) -> Result<(), Error> {
for inp in &self.inputs {
if self
.outputs
if self.outputs
.iter()
.any(|out| out.commitment() == inp.commitment())
{

@@ -509,8 +503,7 @@ impl Transaction {

// Verify we have no outputs tagged as COINBASE_OUTPUT.
fn verify_output_features(&self) -> Result<(), Error> {
if self
.outputs
if self.outputs
.iter()
.any(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
{

@@ -521,8 +514,7 @@ impl Transaction {

// Verify we have no kernels tagged as COINBASE_KERNEL.
fn verify_kernel_features(&self) -> Result<(), Error> {
if self
.kernels
if self.kernels
.iter()
.any(|x| x.features.contains(KernelFeatures::COINBASE_KERNEL))
{

@@ -19,10 +19,10 @@ use std::io::{self, Read, Write};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream};
use std::{thread, time};

use core::consensus::{MAX_MSG_LEN, MAX_TX_INPUTS, MAX_TX_KERNELS, MAX_TX_OUTPUTS};
use core::core::BlockHeader;
use core::consensus;
use core::core::hash::Hash;
use core::core::target::Difficulty;
use core::core::BlockHeader;
use core::ser::{self, Readable, Reader, Writeable, Writer};

use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};

@@ -40,6 +40,10 @@ const MAGIC: [u8; 2] = [0x1e, 0xc5];
/// Size in bytes of a message header
pub const HEADER_LEN: u64 = 11;

/// Max theoretical size of a block filled with outputs.
const MAX_BLOCK_SIZE: u64 =
(consensus::MAX_BLOCK_WEIGHT / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64;

/// Types of messages.
/// Note: Values here are *important* so we should only add new values at the
/// end.

@@ -82,11 +86,11 @@ fn max_msg_size(msg_type: Type) -> u64 {
Type::Header => 365,
Type::Headers => 2 + 365 * MAX_BLOCK_HEADERS as u64,
Type::GetBlock => 32,
Type::Block => MAX_MSG_LEN,
Type::Block => MAX_BLOCK_SIZE,
Type::GetCompactBlock => 32,
Type::CompactBlock => MAX_MSG_LEN / 10,
Type::StemTransaction => (1000 * MAX_TX_INPUTS + 710 * MAX_TX_OUTPUTS + 114 * MAX_TX_KERNELS) as u64,
Type::Transaction => (1000 * MAX_TX_INPUTS + 710 * MAX_TX_OUTPUTS + 114 * MAX_TX_KERNELS) as u64,
Type::CompactBlock => MAX_BLOCK_SIZE / 10,
Type::StemTransaction => MAX_BLOCK_SIZE,
Type::Transaction => MAX_BLOCK_SIZE,
Type::TxHashSetRequest => 40,
Type::TxHashSetArchive => 64,
Type::BanReason => 64,
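
The per-type caps previously derived from MAX_TX_INPUTS/MAX_TX_OUTPUTS/MAX_TX_KERNELS are replaced by MAX_BLOCK_SIZE, the theoretical serialized size of a block made entirely of outputs. A small sketch of what the new caps evaluate to; the 708 bytes per serialized output is the assumption already baked into MAX_BLOCK_SIZE above:

    const MAX_BLOCK_WEIGHT: u64 = 80_000;
    const BLOCK_OUTPUT_WEIGHT: u64 = 10;
    const MAX_BLOCK_SIZE: u64 = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT * 708;

    fn main() {
        assert_eq!(MAX_BLOCK_SIZE, 5_664_000); // Block, StemTransaction, Transaction
        assert_eq!(MAX_BLOCK_SIZE / 10, 566_400); // CompactBlock
        // Both are well below the old blanket MAX_MSG_LEN of 20_000_000.
        assert!(MAX_BLOCK_SIZE < 20_000_000);
    }
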
@@ -729,11 +733,7 @@ pub struct TxHashSetArchive {
impl Writeable for TxHashSetArchive {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.hash.write(writer)?;
ser_multiwrite!(
writer,
[write_u64, self.height],
[write_u64, self.bytes]
);
ser_multiwrite!(writer, [write_u64, self.height], [write_u64, self.bytes]);
Ok(())
}
}