mirror of
https://github.com/mimblewimble/grin.git
synced 2025-05-10 19:21:16 +03:00
Use mmr sizes in header to validate header weight (#3395)
* use mmr sizes in header to validate block weight given header only * add global weight validation to headers when reading them untrusted off the network * fixup tests, one pending test * add test for deserializing untrusted block header exceeding weight limits * validate header mmr sizes, must grow for each block * remove redundant height check * fix tests after rebase block specific TooHeavy error
This commit is contained in:
parent
78e3ec3df0
commit
1cff387f61
12 changed files with 239 additions and 45 deletions
chain
core
pool/src
|
@ -131,6 +131,9 @@ pub enum ErrorKind {
|
|||
/// Error from underlying tx handling
|
||||
#[fail(display = "Transaction Validation Error: {:?}", _0)]
|
||||
Transaction(transaction::Error),
|
||||
/// Error from underlying block handling
|
||||
#[fail(display = "Block Validation Error: {:?}", _0)]
|
||||
Block(block::Error),
|
||||
/// Anything else
|
||||
#[fail(display = "Other Error: {}", _0)]
|
||||
Other(String),
|
||||
|
|
|
@ -18,7 +18,8 @@ use crate::core::consensus;
|
|||
use crate::core::core::hash::Hashed;
|
||||
use crate::core::core::verifier_cache::VerifierCache;
|
||||
use crate::core::core::Committed;
|
||||
use crate::core::core::{Block, BlockHeader, BlockSums, OutputIdentifier};
|
||||
use crate::core::core::{block, Block, BlockHeader, BlockSums, OutputIdentifier, TransactionBody};
|
||||
use crate::core::global;
|
||||
use crate::core::pow;
|
||||
use crate::error::{Error, ErrorKind};
|
||||
use crate::store;
|
||||
|
@ -336,6 +337,26 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(
|
|||
return Err(ErrorKind::InvalidBlockTime.into());
|
||||
}
|
||||
|
||||
// We can determine output and kernel counts for this block based on mmr sizes from previous header.
|
||||
// Assume 0 inputs and estimate a lower bound on the full block weight.
|
||||
let num_outputs = header
|
||||
.output_mmr_count()
|
||||
.saturating_sub(prev.output_mmr_count());
|
||||
let num_kernels = header
|
||||
.kernel_mmr_count()
|
||||
.saturating_sub(prev.kernel_mmr_count());
|
||||
|
||||
// Each block must contain at least 1 kernel and 1 output for the block reward.
|
||||
if num_outputs == 0 || num_kernels == 0 {
|
||||
return Err(ErrorKind::InvalidMMRSize.into());
|
||||
}
|
||||
|
||||
// Block header is invalid (and block is invalid) if this lower bound is too heavy for a full block.
|
||||
let weight = TransactionBody::weight_as_block(0, num_outputs, num_kernels);
|
||||
if weight > global::max_block_weight() {
|
||||
return Err(ErrorKind::Block(block::Error::TooHeavy).into());
|
||||
}
|
||||
|
||||
// verify the proof of work and related parameters
|
||||
// at this point we have a previous block header
|
||||
// we know the height increased by one
|
||||
|
|
|
@ -22,7 +22,7 @@ use grin_util as util;
|
|||
use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain};
|
||||
use crate::chain::{pipe, Chain, Options};
|
||||
use crate::core::core::verifier_cache::LruVerifierCache;
|
||||
use crate::core::core::{block, transaction};
|
||||
use crate::core::core::{block, pmmr, transaction};
|
||||
use crate::core::core::{Block, KernelFeatures, Transaction, Weighting};
|
||||
use crate::core::libtx::{build, reward, ProofBuilder};
|
||||
use crate::core::{consensus, global, pow};
|
||||
|
@ -57,6 +57,10 @@ where
|
|||
// This allows us to build a header for an "invalid" block by ignoring outputs and kernels.
|
||||
if skip_roots {
|
||||
chain.set_prev_root_only(&mut block.header)?;
|
||||
|
||||
// Manually set the mmr sizes for a "valid" block (increment prev output and kernel counts).
|
||||
block.header.output_mmr_size = pmmr::insertion_to_pmmr_index(prev.output_mmr_count() + 1);
|
||||
block.header.kernel_mmr_size = pmmr::insertion_to_pmmr_index(prev.kernel_mmr_count() + 1);
|
||||
} else {
|
||||
chain.set_txhashset_roots(&mut block)?;
|
||||
}
|
||||
|
|
82
chain/tests/test_header_weight_validation.rs
Normal file
82
chain/tests/test_header_weight_validation.rs
Normal file
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2020 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
|
||||
mod chain_test_helper;
|
||||
|
||||
use self::chain_test_helper::{clean_output_dir, mine_chain};
|
||||
use crate::chain::{Chain, ErrorKind, Options};
|
||||
use crate::core::{
|
||||
consensus,
|
||||
core::{block, Block},
|
||||
global,
|
||||
libtx::{reward, ProofBuilder},
|
||||
pow,
|
||||
};
|
||||
use crate::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
|
||||
use chrono::Duration;
|
||||
|
||||
fn build_block(chain: &Chain) -> Block {
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let pk = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
|
||||
let prev = chain.head_header().unwrap();
|
||||
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
|
||||
let reward = reward::output(&keychain, &ProofBuilder::new(&keychain), &pk, 0, false).unwrap();
|
||||
let mut block = Block::new(&prev, &[], next_header_info.clone().difficulty, reward).unwrap();
|
||||
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block).unwrap();
|
||||
|
||||
let edge_bits = global::min_edge_bits();
|
||||
block.header.pow.proof.edge_bits = edge_bits;
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
edge_bits,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
block
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_header_weight_validation() {
|
||||
let chain_dir = ".grin.header_weight";
|
||||
clean_output_dir(chain_dir);
|
||||
let chain = mine_chain(chain_dir, 5);
|
||||
assert_eq!(chain.head().unwrap().height, 4);
|
||||
|
||||
let block = build_block(&chain);
|
||||
let mut header = block.header;
|
||||
|
||||
// Artificially set the output_mmr_size too large for a valid block.
|
||||
// Note: We will validate this even if just processing the header.
|
||||
header.output_mmr_size = 1_000;
|
||||
|
||||
let res = chain
|
||||
.process_block_header(&header, Options::NONE)
|
||||
.map_err(|e| e.kind());
|
||||
|
||||
// Weight validation is done via transaction body and results in a slightly counter-intuitive tx error.
|
||||
assert_eq!(res, Err(ErrorKind::Block(block::Error::TooHeavy)));
|
||||
|
||||
clean_output_dir(chain_dir);
|
||||
}
|
|
@ -102,13 +102,13 @@ pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
|
|||
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
|
||||
|
||||
/// Weight of an input when counted against the max block weight capacity
|
||||
pub const BLOCK_INPUT_WEIGHT: usize = 1;
|
||||
pub const BLOCK_INPUT_WEIGHT: u64 = 1;
|
||||
|
||||
/// Weight of an output when counted against the max block weight capacity
|
||||
pub const BLOCK_OUTPUT_WEIGHT: usize = 21;
|
||||
pub const BLOCK_OUTPUT_WEIGHT: u64 = 21;
|
||||
|
||||
/// Weight of a kernel when counted against the max block weight capacity
|
||||
pub const BLOCK_KERNEL_WEIGHT: usize = 3;
|
||||
pub const BLOCK_KERNEL_WEIGHT: u64 = 3;
|
||||
|
||||
/// Total maximum block weight. At current sizes, this means a maximum
|
||||
/// theoretical size of:
|
||||
|
@ -122,7 +122,7 @@ pub const BLOCK_KERNEL_WEIGHT: usize = 3;
|
|||
/// `(1 * 2) + (21 * 2) + (3 * 1) = 47` (weight per tx)
|
||||
/// `40_000 / 47 = 851` (txs per block)
|
||||
///
|
||||
pub const MAX_BLOCK_WEIGHT: usize = 40_000;
|
||||
pub const MAX_BLOCK_WEIGHT: u64 = 40_000;
|
||||
|
||||
/// Fork every 6 months.
|
||||
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
|
||||
|
|
|
@ -20,7 +20,7 @@ use crate::core::compact_block::CompactBlock;
|
|||
use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH};
|
||||
use crate::core::verifier_cache::VerifierCache;
|
||||
use crate::core::{
|
||||
transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody,
|
||||
pmmr, transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody,
|
||||
TxKernel, Weighting,
|
||||
};
|
||||
use crate::global;
|
||||
|
@ -51,8 +51,6 @@ pub enum Error {
|
|||
CoinbaseSumMismatch,
|
||||
/// Restrict block total weight.
|
||||
TooHeavy,
|
||||
/// Block weight (based on inputs|outputs|kernels) exceeded.
|
||||
WeightExceeded,
|
||||
/// Block version is invalid for a given block height
|
||||
InvalidBlockVersion(HeaderVersion),
|
||||
/// Block time is invalid
|
||||
|
@ -375,6 +373,22 @@ impl BlockHeader {
|
|||
Ok(deserialize_default(&mut &header_bytes[..])?)
|
||||
}
|
||||
|
||||
/// Total number of outputs (spent and unspent) based on output MMR size committed to in this block.
|
||||
/// Note: *Not* the number of outputs in this block but total up to and including this block.
|
||||
/// The MMR size is the total number of hashes contained in the full MMR structure.
|
||||
/// We want the corresponding number of leaves in the MMR given the size.
|
||||
pub fn output_mmr_count(&self) -> u64 {
|
||||
pmmr::n_leaves(self.output_mmr_size)
|
||||
}
|
||||
|
||||
/// Total number of kernels based on kernel MMR size committed to in this block.
|
||||
/// Note: *Not* the number of kernels in this block but total up to and including this block.
|
||||
/// The MMR size is the total number of hashes contained in the full MMR structure.
|
||||
/// We want the corresponding number of leaves in the MMR given the size.
|
||||
pub fn kernel_mmr_count(&self) -> u64 {
|
||||
pmmr::n_leaves(self.kernel_mmr_size)
|
||||
}
|
||||
|
||||
/// Total difficulty accumulated by the proof of work on this header
|
||||
pub fn total_difficulty(&self) -> Difficulty {
|
||||
self.pow.total_difficulty
|
||||
|
@ -411,6 +425,7 @@ impl From<UntrustedBlockHeader> for BlockHeader {
|
|||
|
||||
/// Block header which does lightweight validation as part of deserialization,
|
||||
/// it supposed to be used when we can't trust the channel (eg network)
|
||||
#[derive(Debug)]
|
||||
pub struct UntrustedBlockHeader(BlockHeader);
|
||||
|
||||
/// Deserialization of an untrusted block header
|
||||
|
@ -452,6 +467,17 @@ impl Readable for UntrustedBlockHeader {
|
|||
);
|
||||
return Err(ser::Error::CorruptedData);
|
||||
}
|
||||
|
||||
// Validate global output and kernel MMR sizes against upper bounds based on block height.
|
||||
let global_weight = TransactionBody::weight_as_block(
|
||||
0,
|
||||
header.output_mmr_count(),
|
||||
header.kernel_mmr_count(),
|
||||
);
|
||||
if global_weight > global::max_block_weight() * (header.height + 1) {
|
||||
return Err(ser::Error::CorruptedData);
|
||||
}
|
||||
|
||||
Ok(UntrustedBlockHeader(header))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -639,7 +639,7 @@ pub enum Weighting {
|
|||
AsTransaction,
|
||||
/// Tx representing a tx with artificially limited max_weight.
|
||||
/// This is used when selecting mineable txs from the pool.
|
||||
AsLimitedTransaction(usize),
|
||||
AsLimitedTransaction(u64),
|
||||
/// Tx represents a block (max block weight).
|
||||
AsBlock,
|
||||
/// No max weight limit (skip the weight check).
|
||||
|
@ -680,24 +680,20 @@ impl Writeable for TransactionBody {
|
|||
/// body from a binary stream.
|
||||
impl Readable for TransactionBody {
|
||||
fn read<R: Reader>(reader: &mut R) -> Result<TransactionBody, ser::Error> {
|
||||
let (input_len, output_len, kernel_len) =
|
||||
let (num_inputs, num_outputs, num_kernels) =
|
||||
ser_multiread!(reader, read_u64, read_u64, read_u64);
|
||||
|
||||
// Quick block weight check before proceeding.
|
||||
// Note: We use weight_as_block here (inputs have weight).
|
||||
let tx_block_weight = TransactionBody::weight_as_block(
|
||||
input_len as usize,
|
||||
output_len as usize,
|
||||
kernel_len as usize,
|
||||
);
|
||||
|
||||
let tx_block_weight =
|
||||
TransactionBody::weight_as_block(num_inputs, num_outputs, num_kernels);
|
||||
if tx_block_weight > global::max_block_weight() {
|
||||
return Err(ser::Error::TooLargeReadErr);
|
||||
}
|
||||
|
||||
let inputs = read_multi(reader, input_len)?;
|
||||
let outputs = read_multi(reader, output_len)?;
|
||||
let kernels = read_multi(reader, kernel_len)?;
|
||||
let inputs = read_multi(reader, num_inputs)?;
|
||||
let outputs = read_multi(reader, num_outputs)?;
|
||||
let kernels = read_multi(reader, num_kernels)?;
|
||||
|
||||
// Initialize tx body and verify everything is sorted.
|
||||
let body = TransactionBody::init(&inputs, &outputs, &kernels, true)
|
||||
|
@ -861,33 +857,41 @@ impl TransactionBody {
|
|||
}
|
||||
|
||||
/// Calculate transaction weight
|
||||
pub fn body_weight(&self) -> usize {
|
||||
TransactionBody::weight(self.inputs.len(), self.outputs.len(), self.kernels.len())
|
||||
pub fn body_weight(&self) -> u64 {
|
||||
TransactionBody::weight(
|
||||
self.inputs.len() as u64,
|
||||
self.outputs.len() as u64,
|
||||
self.kernels.len() as u64,
|
||||
)
|
||||
}
|
||||
|
||||
/// Calculate weight of transaction using block weighing
|
||||
pub fn body_weight_as_block(&self) -> usize {
|
||||
TransactionBody::weight_as_block(self.inputs.len(), self.outputs.len(), self.kernels.len())
|
||||
pub fn body_weight_as_block(&self) -> u64 {
|
||||
TransactionBody::weight_as_block(
|
||||
self.inputs.len() as u64,
|
||||
self.outputs.len() as u64,
|
||||
self.kernels.len() as u64,
|
||||
)
|
||||
}
|
||||
|
||||
/// Calculate transaction weight from transaction details. This is non
|
||||
/// consensus critical and compared to block weight, incentivizes spending
|
||||
/// more outputs (to lower the fee).
|
||||
pub fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> usize {
|
||||
let body_weight = output_len
|
||||
pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
|
||||
let body_weight = num_outputs
|
||||
.saturating_mul(4)
|
||||
.saturating_add(kernel_len)
|
||||
.saturating_sub(input_len);
|
||||
.saturating_add(num_kernels)
|
||||
.saturating_sub(num_inputs);
|
||||
max(body_weight, 1)
|
||||
}
|
||||
|
||||
/// Calculate transaction weight using block weighing from transaction
|
||||
/// details. Consensus critical and uses consensus weight values.
|
||||
pub fn weight_as_block(input_len: usize, output_len: usize, kernel_len: usize) -> usize {
|
||||
input_len
|
||||
.saturating_mul(consensus::BLOCK_INPUT_WEIGHT)
|
||||
.saturating_add(output_len.saturating_mul(consensus::BLOCK_OUTPUT_WEIGHT))
|
||||
.saturating_add(kernel_len.saturating_mul(consensus::BLOCK_KERNEL_WEIGHT))
|
||||
pub fn weight_as_block(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
|
||||
num_inputs
|
||||
.saturating_mul(consensus::BLOCK_INPUT_WEIGHT as u64)
|
||||
.saturating_add(num_outputs.saturating_mul(consensus::BLOCK_OUTPUT_WEIGHT as u64))
|
||||
.saturating_add(num_kernels.saturating_mul(consensus::BLOCK_KERNEL_WEIGHT as u64))
|
||||
}
|
||||
|
||||
/// Lock height of a body is the max lock height of the kernels.
|
||||
|
@ -1278,18 +1282,18 @@ impl Transaction {
|
|||
}
|
||||
|
||||
/// Calculate transaction weight
|
||||
pub fn tx_weight(&self) -> usize {
|
||||
pub fn tx_weight(&self) -> u64 {
|
||||
self.body.body_weight()
|
||||
}
|
||||
|
||||
/// Calculate transaction weight as a block
|
||||
pub fn tx_weight_as_block(&self) -> usize {
|
||||
pub fn tx_weight_as_block(&self) -> u64 {
|
||||
self.body.body_weight_as_block()
|
||||
}
|
||||
|
||||
/// Calculate transaction weight from transaction details
|
||||
pub fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> usize {
|
||||
TransactionBody::weight(input_len, output_len, kernel_len)
|
||||
pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
|
||||
TransactionBody::weight(num_inputs, num_outputs, num_kernels)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1;
|
|||
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
|
||||
|
||||
/// Testing max_block_weight (artifically low, just enough to support a few txs).
|
||||
pub const TESTING_MAX_BLOCK_WEIGHT: usize = 250;
|
||||
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
|
||||
|
||||
/// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
|
||||
/// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
|
||||
|
@ -311,7 +311,7 @@ pub fn initial_graph_weight() -> u32 {
|
|||
}
|
||||
|
||||
/// Maximum allowed block weight.
|
||||
pub fn max_block_weight() -> usize {
|
||||
pub fn max_block_weight() -> u64 {
|
||||
match get_chain_type() {
|
||||
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
|
||||
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
|
||||
|
|
|
@ -48,5 +48,5 @@ pub fn tx_fee(
|
|||
None => DEFAULT_BASE_FEE,
|
||||
};
|
||||
|
||||
(Transaction::weight(input_len, output_len, kernel_len) as u64) * use_base_fee
|
||||
Transaction::weight(input_len as u64, output_len as u64, kernel_len as u64) * use_base_fee
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
mod common;
|
||||
use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
|
||||
use crate::core::consensus::{self, BLOCK_OUTPUT_WEIGHT, TESTING_THIRD_HARD_FORK};
|
||||
use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion};
|
||||
use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion, UntrustedBlockHeader};
|
||||
use crate::core::core::hash::Hashed;
|
||||
use crate::core::core::id::ShortIdentifiable;
|
||||
use crate::core::core::transaction::{
|
||||
|
@ -25,7 +25,7 @@ use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
|
|||
use crate::core::core::{Committed, CompactBlock};
|
||||
use crate::core::libtx::build::{self, input, output};
|
||||
use crate::core::libtx::ProofBuilder;
|
||||
use crate::core::{global, ser};
|
||||
use crate::core::{global, pow, ser};
|
||||
use chrono::Duration;
|
||||
use grin_core as core;
|
||||
use keychain::{BlindingFactor, ExtKeychain, Keychain};
|
||||
|
@ -34,6 +34,7 @@ use util::{secp, RwLock, ToHex};
|
|||
|
||||
// Setup test with AutomatedTesting chain_type;
|
||||
fn test_setup() {
|
||||
util::init_test_logger();
|
||||
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
|
||||
}
|
||||
|
||||
|
@ -423,6 +424,59 @@ fn serialize_deserialize_block_header() {
|
|||
assert_eq!(header1, header2);
|
||||
}
|
||||
|
||||
fn set_pow(header: &mut BlockHeader) {
|
||||
// Set valid pow on the block as we will test deserialization of this "untrusted" from the network.
|
||||
let edge_bits = global::min_edge_bits();
|
||||
header.pow.proof.edge_bits = edge_bits;
|
||||
pow::pow_size(
|
||||
header,
|
||||
pow::Difficulty::min(),
|
||||
global::proofsize(),
|
||||
edge_bits,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deserialize_untrusted_header_weight() {
|
||||
test_setup();
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let prev = BlockHeader::default();
|
||||
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
|
||||
let mut b = new_block(&[], &keychain, &builder, &prev, &key_id);
|
||||
|
||||
// Set excessively large output mmr size on the header.
|
||||
b.header.output_mmr_size = 10_000;
|
||||
b.header.kernel_mmr_size = 0;
|
||||
set_pow(&mut b.header);
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize_default(&mut vec, &b.header).expect("serialization failed");
|
||||
let res: Result<UntrustedBlockHeader, _> = ser::deserialize_default(&mut &vec[..]);
|
||||
assert_eq!(res.err(), Some(ser::Error::CorruptedData));
|
||||
|
||||
// Set excessively large kernel mmr size on the header.
|
||||
b.header.output_mmr_size = 0;
|
||||
b.header.kernel_mmr_size = 10_000;
|
||||
set_pow(&mut b.header);
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize_default(&mut vec, &b.header).expect("serialization failed");
|
||||
let res: Result<UntrustedBlockHeader, _> = ser::deserialize_default(&mut &vec[..]);
|
||||
assert_eq!(res.err(), Some(ser::Error::CorruptedData));
|
||||
|
||||
// Set reasonable mmr sizes on the header to confirm the header can now be read "untrusted".
|
||||
b.header.output_mmr_size = 1;
|
||||
b.header.kernel_mmr_size = 1;
|
||||
set_pow(&mut b.header);
|
||||
|
||||
let mut vec = Vec::new();
|
||||
ser::serialize_default(&mut vec, &b.header).expect("serialization failed");
|
||||
let res: Result<UntrustedBlockHeader, _> = ser::deserialize_default(&mut &vec[..]);
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_deserialize_block() {
|
||||
test_setup();
|
||||
|
|
|
@ -140,7 +140,7 @@ where
|
|||
/// does not exceed the provided max_weight (miner defined block weight).
|
||||
pub fn prepare_mineable_transactions(
|
||||
&self,
|
||||
max_weight: usize,
|
||||
max_weight: u64,
|
||||
) -> Result<Vec<Transaction>, PoolError> {
|
||||
let weighting = Weighting::AsLimitedTransaction(max_weight);
|
||||
|
||||
|
|
|
@ -119,7 +119,7 @@ pub struct PoolConfig {
|
|||
/// block from. Allows miners to restrict the maximum weight of their
|
||||
/// blocks.
|
||||
#[serde(default = "default_mineable_max_weight")]
|
||||
pub mineable_max_weight: usize,
|
||||
pub mineable_max_weight: u64,
|
||||
}
|
||||
|
||||
impl Default for PoolConfig {
|
||||
|
@ -142,7 +142,7 @@ fn default_max_pool_size() -> usize {
|
|||
fn default_max_stempool_size() -> usize {
|
||||
50_000
|
||||
}
|
||||
fn default_mineable_max_weight() -> usize {
|
||||
fn default_mineable_max_weight() -> u64 {
|
||||
consensus::MAX_BLOCK_WEIGHT
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue