Rework "bucket transactions" logic (buckets are now weight limited) ()

* rework bucket txs

* introduce max_block_weight to global (based on chain_type)
not yet in use anywhere...

* now using global::max_block_weight() everywhere

* testing max_block_weight needs to be big enough to cover existing chain tests...

* add some test coverage for prepare mineable transactions at the block weight limit
introduce AsLimitedTx{max_weight} so we can build artificially small blocks (per mining config)

* cleanup

* default_mineable_max_weight is just the default max_block_weight
we do not need to account for coinbase reward here (tx vs block)

* 75 change outputs in a test are no longer valid now that we have a low block weight limit...
Antioch Peverell authored on 2019-02-01 10:44:04 +00:00 (committed via GitHub)
parent 062d41766e
commit a82e2a0126
8 changed files with 290 additions and 82 deletions


@@ -14,7 +14,6 @@
 //! Transactions

-use crate::consensus;
 use crate::core::hash::Hashed;
 use crate::core::verifier_cache::VerifierCache;
 use crate::core::{committed, Committed};
@@ -28,9 +27,10 @@ use crate::util::secp;
 use crate::util::secp::pedersen::{Commitment, RangeProof};
 use crate::util::static_secp_instance;
 use crate::util::RwLock;
+use crate::{consensus, global};
 use enum_primitive::FromPrimitive;
-use std::cmp::max;
 use std::cmp::Ordering;
+use std::cmp::{max, min};
 use std::collections::HashSet;
 use std::sync::Arc;
 use std::{error, fmt};
@@ -375,6 +375,12 @@ impl FixedLength for TxKernelEntry {
 pub enum Weighting {
 	/// Tx represents a tx (max block weight, accounting for additional coinbase reward).
 	AsTransaction,
+	/// Tx representing a tx with artificially limited max_weight.
+	/// This is used when selecting mineable txs from the pool.
+	AsLimitedTransaction {
+		/// The maximum (block) weight that we will allow.
+		max_weight: usize,
+	},
 	/// Tx represents a block (max block weight).
 	AsBlock,
 	/// No max weight limit (skip the weight check).
@@ -433,7 +439,7 @@ impl Readable for TransactionBody {
 			kernel_len as usize,
 		);

-		if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
+		if tx_block_weight > global::max_block_weight() {
 			return Err(ser::Error::TooLargeReadErr);
 		}
@@ -606,40 +612,31 @@ impl TransactionBody {
 	/// Verify the body is not too big in terms of number of inputs|outputs|kernels.
 	/// Weight rules vary depending on the "weight type" (block or tx or pool).
 	fn verify_weight(&self, weighting: Weighting) -> Result<(), Error> {
-		// If "tx" body then remember to reduce the max_block_weight for block requirements.
+		// A coinbase reward is a single output and a single kernel (for now).
+		// We need to account for this when verifying max tx weights.
+		let coinbase_weight = consensus::BLOCK_OUTPUT_WEIGHT + consensus::BLOCK_KERNEL_WEIGHT;
+
+		// If "tx" body then remember to reduce the max_block_weight by the weight of a kernel.
+		// If "limited tx" then compare against the provided max_weight.
 		// If "block" body then verify weight based on full set of inputs|outputs|kernels.
 		// If "pool" body then skip weight verification (pool can be larger than single block).
 		//
 		// Note: Taking a max tx and building a block from it we need to allow room
-		// for the additional coinbase reward (output + kernel).
+		// for the additional coinbase reward (1 output + 1 kernel).
 		//
-		let reserve = match weighting {
-			Weighting::AsTransaction => 1,
-			Weighting::AsBlock => 0,
+		let max_weight = match weighting {
+			Weighting::AsTransaction => global::max_block_weight().saturating_sub(coinbase_weight),
+			Weighting::AsLimitedTransaction { max_weight } => {
+				min(global::max_block_weight(), max_weight).saturating_sub(coinbase_weight)
+			}
+			Weighting::AsBlock => global::max_block_weight(),
 			Weighting::NoLimit => {
 				// We do not verify "tx as pool" weight so we are done here.
 				return Ok(());
 			}
 		};

-		// Note: If we add a reserve then we are reducing the number of outputs and kernels
-		// allowed in the body itself.
-		// A block is allowed to be slightly weightier than a tx to account for
-		// the additional coinbase reward output and kernel.
-		// i.e. We need to reserve space for coinbase reward when verifying tx weight.
-		//
-		// In effect we are verifying the tx _can_ be included in a block without exceeding
-		// MAX_BLOCK_WEIGHT.
-		//
-		// max_block = max_tx + coinbase
-		//
-		let tx_block_weight = TransactionBody::weight_as_block(
-			self.inputs.len(),
-			self.outputs.len() + reserve,
-			self.kernels.len() + reserve,
-		);
-		if tx_block_weight > consensus::MAX_BLOCK_WEIGHT {
+		if self.body_weight_as_block() > max_weight {
 			return Err(Error::TooHeavy);
 		}
 		Ok(())
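To make the new budgets concrete, here is a self-contained sketch of the max_weight arithmetic above. The output/kernel weights of 21 and 3 are assumptions, consistent with the tx_weight_as_block assertions in the new test further down; the constants are hard-coded rather than imported from grin.

use std::cmp::min;

// Assumed values for illustration only.
const BLOCK_OUTPUT_WEIGHT: usize = 21;
const BLOCK_KERNEL_WEIGHT: usize = 3;
const TESTING_MAX_BLOCK_WEIGHT: usize = 150;

// Effective budget for a tx body: None models Weighting::AsTransaction,
// Some(w) models Weighting::AsLimitedTransaction { max_weight: w }.
fn effective_max_weight(limit: Option<usize>) -> usize {
    let coinbase_weight = BLOCK_OUTPUT_WEIGHT + BLOCK_KERNEL_WEIGHT; // 24
    let cap = match limit {
        Some(max_weight) => min(TESTING_MAX_BLOCK_WEIGHT, max_weight),
        None => TESTING_MAX_BLOCK_WEIGHT,
    };
    cap.saturating_sub(coinbase_weight)
}

fn main() {
    assert_eq!(effective_max_weight(None), 126); // 150 - 24
    assert_eq!(effective_max_weight(Some(70)), 46); // min(150, 70) - 24
}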


@@ -19,8 +19,8 @@
 use crate::consensus::HeaderInfo;
 use crate::consensus::{
 	graph_weight, BASE_EDGE_BITS, BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON,
-	DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, PROOFSIZE,
-	SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
+	DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY,
+	MAX_BLOCK_WEIGHT, PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
 };
 use crate::pow::{self, new_cuckaroo_ctx, new_cuckatoo_ctx, EdgeType, PoWContext};
 /// An enum collecting sets of parameters used throughout the
@@ -62,6 +62,9 @@ pub const TESTING_INITIAL_GRAPH_WEIGHT: u32 = 1;
 /// Testing initial block difficulty
 pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;

+/// Testing max_block_weight (artificially low, just enough to support a few txs).
+pub const TESTING_MAX_BLOCK_WEIGHT: usize = 150;
+
 /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
 /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
 pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
@@ -227,6 +230,17 @@ pub fn initial_graph_weight() -> u32 {
 	}
 }

+/// Maximum allowed block weight.
+pub fn max_block_weight() -> usize {
+	let param_ref = CHAIN_TYPE.read();
+	match *param_ref {
+		ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
+		ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
+		ChainTypes::Floonet => MAX_BLOCK_WEIGHT,
+		ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
+	}
+}
+
 /// Horizon at which we can cut-through and do full local pruning
 pub fn cut_through_horizon() -> u32 {
 	let param_ref = CHAIN_TYPE.read();
@@ -275,6 +289,12 @@ pub fn is_floonet() -> bool {
 	ChainTypes::Floonet == *param_ref
 }

+/// Are we for real?
+pub fn is_mainnet() -> bool {
+	let param_ref = CHAIN_TYPE.read();
+	ChainTypes::Mainnet == *param_ref
+}
+
 /// Helper function to get a nonce known to create a valid POW on
 /// the genesis block, to prevent it taking ages. Should be fine for now
 /// as the genesis block POW solution turns out to be the same for every new
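A minimal sketch of how a test picks up the new chain-specific limit, using the same global API the new pool test below uses:

use grin_core::global;

fn main() {
    // Select the testing chain type, then read its weight limit.
    global::set_mining_mode(global::ChainTypes::AutomatedTesting);
    assert_eq!(global::max_block_weight(), 150); // TESTING_MAX_BLOCK_WEIGHT
}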


@@ -14,7 +14,7 @@
 pub mod common;

 use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
-use crate::core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
+use crate::core::consensus::BLOCK_OUTPUT_WEIGHT;
 use crate::core::core::block::Error;
 use crate::core::core::hash::Hashed;
 use crate::core::core::id::ShortIdentifiable;
@@ -43,7 +43,7 @@ fn verifier_cache() -> Arc<RwLock<dyn VerifierCache>> {
 #[allow(dead_code)]
 fn too_large_block() {
 	let keychain = ExtKeychain::from_random_seed(false).unwrap();
-	let max_out = MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT;
+	let max_out = global::max_block_weight() / BLOCK_OUTPUT_WEIGHT;
 	let mut pks = vec![];
 	for n in 0..(max_out + 1) {


@@ -40,10 +40,6 @@ const OTHER_MAGIC: [u8; 2] = [73, 43];
 const FLOONET_MAGIC: [u8; 2] = [83, 59];
 const MAINNET_MAGIC: [u8; 2] = [97, 61];

-/// Max theoretical size of a block filled with outputs.
-const MAX_BLOCK_SIZE: u64 =
-	(consensus::MAX_BLOCK_WEIGHT / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64;
-
 /// Types of messages.
 /// Note: Values here are *important* so we should only add new values at the
 /// end.
@@ -74,6 +70,11 @@ enum_from_primitive! {
 	}
 }

+/// Max theoretical size of a block filled with outputs.
+fn max_block_size() -> u64 {
+	(global::max_block_weight() / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64
+}
+
 // Max msg size for each msg type.
 fn max_msg_size(msg_type: Type) -> u64 {
 	match msg_type {
@@ -88,11 +89,11 @@ fn max_msg_size(msg_type: Type) -> u64 {
 		Type::Header => 365,
 		Type::Headers => 2 + 365 * MAX_BLOCK_HEADERS as u64,
 		Type::GetBlock => 32,
-		Type::Block => MAX_BLOCK_SIZE,
+		Type::Block => max_block_size(),
 		Type::GetCompactBlock => 32,
-		Type::CompactBlock => MAX_BLOCK_SIZE / 10,
-		Type::StemTransaction => MAX_BLOCK_SIZE,
-		Type::Transaction => MAX_BLOCK_SIZE,
+		Type::CompactBlock => max_block_size() / 10,
+		Type::StemTransaction => max_block_size(),
+		Type::Transaction => max_block_size(),
 		Type::TxHashSetRequest => 40,
 		Type::TxHashSetArchive => 64,
 		Type::BanReason => 64,
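A worked example of the sizing arithmetic under the testing limit, assuming an output weight of 21 (consistent with the weight assertions in the new pool test below) and the 708-byte serialized output size from the formula above:

fn main() {
    // max_block_size() = max_block_weight / BLOCK_OUTPUT_WEIGHT * 708.
    // On a testing chain: 150 / 21 = 7 outputs, 7 * 708 = 4956 bytes.
    assert_eq!(150 / 21 * 708, 4956);
    // CompactBlock messages are capped at a tenth of that.
    assert_eq!(4956 / 10, 495);
}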


@@ -123,35 +123,27 @@ impl Pool {
 		max_weight: usize,
 	) -> Result<Vec<Transaction>, PoolError> {
 		let header = self.blockchain.chain_head()?;

-		let tx_buckets = self.bucket_transactions();
+		let mut tx_buckets = self.bucket_transactions(max_weight);

-		// flatten buckets using aggregate (with cut-through)
-		let mut flat_txs: Vec<Transaction> = tx_buckets
-			.into_iter()
-			.filter_map(|bucket| transaction::aggregate(bucket).ok())
-			.filter(|x| {
-				// Here we validate the tx, subject to regular tx weight limits.
-				x.validate(Weighting::AsTransaction, self.verifier_cache.clone())
-					.is_ok()
-			})
-			.collect();
-
-		// At this point we know that all "buckets" are valid and that
-		// there are no dependencies between them.
-		// This allows us to arbitrarily sort them and filter them safely.
-
-		// sort by fees over weight, multiplying by 1000 to keep some precision
-		// don't think we'll ever see a >max_u64/1000 fee transaction
-		flat_txs.sort_unstable_by_key(|tx| tx.fee() * 1000 / tx.tx_weight() as u64);
-
-		// accumulate as long as we're not above the block weight
-		let mut weight = 0;
-		flat_txs.retain(|tx| {
-			weight += tx.tx_weight_as_block() as usize;
-			weight < max_weight
-		});
+		// Sort them by fees over weight, multiplying by 1000 to keep some precision
+		// don't think we'll ever see a >max_u64/1000 fee transaction.
+		// We want to select the txs with highest fee per unit of weight first.
+		tx_buckets.sort_unstable_by_key(|tx| tx.fee() * 1000 / tx.tx_weight() as u64);

 		// Iteratively apply the txs to the current chain state,
 		// rejecting any that do not result in a valid state.
+		// Verify these txs produce an aggregated tx below max tx weight.
 		// Return a vec of all the valid txs.
-		let txs = self.validate_raw_txs(flat_txs, None, &header, Weighting::AsTransaction)?;
+		let txs = self.validate_raw_txs(
+			tx_buckets,
+			None,
+			&header,
+			Weighting::AsLimitedTransaction { max_weight },
+		)?;
 		Ok(txs)
 	}
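The fee-over-weight sort key, shown standalone with hypothetical (fee, weight) pairs; std::cmp::Reverse is used in this sketch to get the highest-fee-per-weight-first order the comment describes:

use std::cmp::Reverse;

fn main() {
    // (fee, tx_weight) for three pre-aggregated buckets (made-up numbers).
    let mut buckets: Vec<(u64, u64)> = vec![(8, 70), (4, 25), (10, 95)];
    // Fee per unit of weight, x1000 to keep some precision: 114, 160, 105.
    buckets.sort_unstable_by_key(|&(fee, weight)| Reverse(fee * 1000 / weight));
    assert_eq!(buckets, vec![(4, 25), (8, 70), (10, 95)]);
}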
@@ -345,36 +337,91 @@ impl Pool {
 		Ok(())
 	}

-	// Group dependent transactions in buckets (vectors), each bucket
-	// is therefore independent from the others. Relies on the entries
-	// Vec having parent transactions first (should always be the case)
-	fn bucket_transactions(&self) -> Vec<Vec<Transaction>> {
+	// Group dependent transactions in buckets (aggregated txs).
+	// Each bucket is independent from the others. Relies on the entries
+	// vector having parent transactions first (should always be the case).
+	fn bucket_transactions(&self, max_weight: usize) -> Vec<Transaction> {
 		let mut tx_buckets = vec![];
 		let mut output_commits = HashMap::new();
+		let mut rejected = HashSet::new();

 		for entry in &self.entries {
-			// check the commits index to find parents and their position
-			// picking the last one for bucket (so all parents come first)
-			let mut insert_pos: i32 = -1;
+			// if single parent then we are good, we can bucket it with its parent
+			// if multiple parents then we need to combine buckets, but for now simply reject it (rare case)
+			let mut insert_pos = None;
+			let mut is_rejected = false;
+
 			for input in entry.tx.inputs() {
-				if let Some(pos) = output_commits.get(&input.commitment()) {
-					if *pos > insert_pos {
-						insert_pos = *pos;
+				if rejected.contains(&input.commitment()) {
+					// Depends on a rejected tx, so reject this one.
+					is_rejected = true;
+					continue;
+				} else if let Some(pos) = output_commits.get(&input.commitment()) {
+					if insert_pos.is_some() {
+						// Multiple dependencies so reject this tx (pick it up in next block).
+						is_rejected = true;
+						continue;
+					} else {
+						// Track the pos of the bucket we fall into.
+						insert_pos = Some(*pos);
					}
 				}
 			}

-			if insert_pos == -1 {
-				// no parent, just add to the end in its own bucket
-				insert_pos = tx_buckets.len() as i32;
-				tx_buckets.push(vec![entry.tx.clone()]);
-			} else {
-				// parent found, add to its bucket
-				tx_buckets[insert_pos as usize].push(entry.tx.clone());
+			// If this tx is rejected then store all output commitments in our rejected set.
+			if is_rejected {
+				for out in entry.tx.outputs() {
+					rejected.insert(out.commitment());
+				}
+
+				// Done with this entry (rejected), continue to next entry.
+				continue;
 			}

-			// update the commits index
-			for out in entry.tx.outputs() {
-				output_commits.insert(out.commitment(), insert_pos);
+			match insert_pos {
+				None => {
+					// No parent tx, just add to the end in its own bucket.
+					// This is the common case for non 0-conf txs in the txpool.
+					// We assume the tx is valid here as we validated it on the way into the txpool.
+					insert_pos = Some(tx_buckets.len());
+					tx_buckets.push(entry.tx.clone());
+				}
+				Some(pos) => {
+					// We found a single parent tx, so aggregate in the bucket
+					// if the aggregate tx is a valid tx.
+					// Otherwise discard and let the next block pick this tx up.
+					let current = tx_buckets[pos].clone();
+					if let Ok(agg_tx) = transaction::aggregate(vec![current, entry.tx.clone()]) {
+						if agg_tx
+							.validate(
+								Weighting::AsLimitedTransaction { max_weight },
+								self.verifier_cache.clone(),
+							)
+							.is_ok()
+						{
+							tx_buckets[pos] = agg_tx;
+						} else {
+							// Aggregated tx is not valid so discard this new tx.
+							is_rejected = true;
+						}
+					} else {
+						// Aggregation failed so discard this new tx.
+						is_rejected = true;
+					}
+				}
+			}
+
+			if is_rejected {
+				for out in entry.tx.outputs() {
+					rejected.insert(out.commitment());
+				}
+			} else if let Some(insert_pos) = insert_pos {
+				// We successfully added this tx to our set of buckets.
+				// Update commits index for subsequent txs.
+				for out in entry.tx.outputs() {
+					output_commits.insert(out.commitment(), insert_pos);
+				}
 			}
 		}

 		tx_buckets
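A toy model of the dependency bucketing above (plain output ids stand in for commitments; multi-parent rejection and the weight-limited aggregate validation are left out). It reproduces the grouping the new max-weight test below relies on:

use std::collections::HashMap;

// Each tx is (spends, creates); a dependent tx joins its parent's bucket.
fn bucket(txs: &[(Vec<u64>, Vec<u64>)]) -> Vec<Vec<usize>> {
    let mut buckets: Vec<Vec<usize>> = vec![];
    let mut output_commits: HashMap<u64, usize> = HashMap::new();
    for (i, (spends, creates)) in txs.iter().enumerate() {
        // Single parent: join its bucket. No parent: open a new bucket.
        let pos = spends
            .iter()
            .find_map(|s| output_commits.get(s).copied())
            .unwrap_or_else(|| {
                buckets.push(vec![]);
                buckets.len() - 1
            });
        buckets[pos].push(i);
        for c in creates {
            output_commits.insert(*c, pos);
        }
    }
    buckets
}

fn main() {
    // The five txs from the test: 100->[90,1], 90->[80,2], 200->[199],
    // 300->[290,3], 290->[280,4].
    let txs = vec![
        (vec![100], vec![90, 1]),
        (vec![90], vec![80, 2]),
        (vec![200], vec![199]),
        (vec![300], vec![290, 3]),
        (vec![290], vec![280, 4]),
    ];
    assert_eq!(bucket(&txs), vec![vec![0, 1], vec![2], vec![3, 4]]);
}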


@@ -17,12 +17,12 @@
 use chrono::prelude::{DateTime, Utc};

-use self::core::consensus;
 use self::core::core::block;
 use self::core::core::committed;
 use self::core::core::hash::Hash;
 use self::core::core::transaction::{self, Transaction};
 use self::core::core::{BlockHeader, BlockSums};
+use self::core::{consensus, global};
 use grin_core as core;
 use grin_keychain as keychain;
@@ -130,7 +130,7 @@ fn default_max_stempool_size() -> usize {
 	50_000
 }

 fn default_mineable_max_weight() -> usize {
-	consensus::MAX_BLOCK_WEIGHT - consensus::BLOCK_OUTPUT_WEIGHT - consensus::BLOCK_KERNEL_WEIGHT
+	global::max_block_weight()
 }

 /// Represents a single entry in the pool.


@@ -0,0 +1,143 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test coverage for block building at the limit of max_block_weight.
pub mod common;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{Block, BlockHeader, Transaction};
use self::core::global;
use self::core::libtx;
use self::core::pow::Difficulty;
use self::keychain::{ExtKeychain, Keychain};
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_util as util;
use std::sync::Arc;
#[test]
fn test_block_building_max_weight() {
util::init_test_logger();
global::set_mining_mode(global::ChainTypes::AutomatedTesting);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = ".grin_block_building_max_weight".to_string();
clean_output_dir(db_root.clone());
let mut chain = ChainAdapter::init(db_root.clone()).unwrap();
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Convenient way to add a new block to the chain.
let add_block = |prev_header: BlockHeader, txs: Vec<Transaction>, chain: &mut ChainAdapter| {
let height = prev_header.height + 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fee).unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block
};
// Initialize the chain/txhashset with an initial block
// so we have a non-empty UTXO set.
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![100, 200, 300]);
// Mine that initial tx so we can spend it with multiple txs
let block = add_block(header, vec![initial_tx], &mut chain);
let header = block.header;
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
// Build some dependent txs to add to the txpool.
// We will build a block from a subset of these.
let txs = vec![
test_transaction(&keychain, vec![100], vec![90, 1]),
test_transaction(&keychain, vec![90], vec![80, 2]),
test_transaction(&keychain, vec![200], vec![199]),
test_transaction(&keychain, vec![300], vec![290, 3]),
test_transaction(&keychain, vec![290], vec![280, 4]),
];
// Populate our txpool with the txs.
{
let mut write_pool = pool.write();
for tx in txs {
write_pool
.add_to_pool(test_source(), tx, false, &header)
.unwrap();
}
}
// Check we added them all to the txpool successfully.
assert_eq!(pool.read().total_size(), 5);
// Prepare some "mineable txs" from the txpool.
// Note: We cannot fit all the txs from the txpool into a block.
let txs = pool.read().prepare_mineable_transactions().unwrap();
// Check resulting tx aggregation is what we expect.
// We expect to produce 2 aggregated txs based on txpool contents.
assert_eq!(txs.len(), 2);
// Check the tx we built is the aggregation of the correct set of underlying txs.
// We included 4 out of the 5 txs here.
assert_eq!(txs[0].kernels().len(), 1);
assert_eq!(txs[1].kernels().len(), 2);
// Check our weights after aggregation.
assert_eq!(txs[0].inputs().len(), 1);
assert_eq!(txs[0].outputs().len(), 1);
assert_eq!(txs[0].kernels().len(), 1);
assert_eq!(txs[0].tx_weight_as_block(), 25);
assert_eq!(txs[1].inputs().len(), 1);
assert_eq!(txs[1].outputs().len(), 3);
assert_eq!(txs[1].kernels().len(), 2);
assert_eq!(txs[1].tx_weight_as_block(), 70);
let block = add_block(header, txs, &mut chain);
// Check contents of the block itself (including coinbase reward).
assert_eq!(block.inputs().len(), 2);
assert_eq!(block.outputs().len(), 5);
assert_eq!(block.kernels().len(), 4);
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.
{
let mut write_pool = pool.write();
write_pool.reconcile_block(&block).unwrap();
// We should still have 2 tx in the pool after accepting the new block.
// This one exceeded the max block weight when building the block so
// remained in the txpool.
assert_eq!(write_pool.total_size(), 2);
}
}
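The weight arithmetic behind these assertions, as a standalone check. The per-item weights of 1/21/3 (input/output/kernel) are assumptions implied by the tx_weight_as_block values above, with the 150 testing limit:

fn main() {
    let (input_w, output_w, kernel_w) = (1, 21, 3);
    // txs[0]: 1 input, 1 output, 1 kernel.
    assert_eq!(1 * input_w + 1 * output_w + 1 * kernel_w, 25);
    // txs[1]: 1 input, 3 outputs, 2 kernels.
    assert_eq!(1 * input_w + 3 * output_w + 2 * kernel_w, 70);
    // Whole block, coinbase included: 2 inputs, 5 outputs, 4 kernels.
    let block_weight = 2 * input_w + 5 * output_w + 4 * kernel_w;
    assert_eq!(block_weight, 119); // fits under the 150 limit
    // The remaining two-tx bucket weighs another 70 as a block, which
    // would overflow the limit; hence 2 txs stay in the pool.
    assert!(block_weight + 70 > 150);
}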


@@ -432,7 +432,7 @@ mod wallet_tests {
 			"-g",
 			"Self love",
 			"-o",
-			"75",
+			"3",
 			"-s",
 			"smallest",
 			"10",