* add FeeFields type

* use FeeFields with ::zero and try_into().unwrap()

* fixed tests

* avoid 0 accept_base_fee

* add aggregate_fee_fields method for transaction

* implement std::fmt::Display trait for FeeFields

* make base_fee argument non-optional in libtx::mod::tx_fee

* add global and thread local accept_fee_base; use to simplify tests

* set unusually high fee base for a change

* revert to optional base fee argument; default coming from either grin-{server,wallet}.toml

* remove optional base fee argument; can be set with global::set_local_accept_fee_base instead

* define constant global::DEFAULT_ACCEPT_FEE_BASE and set global value

* add Transaction::accept_fee() method and use

* Manual (de)ser impl on FeeFields

* fix comment bug

* Serialize FeeFields as int in tx

* allow feefields: u32::into() for tests

* try adding height args everywhere

* make FeeFields shift/fee methods height dependent

* prior to hf4 feefield testing

* rename selected fee_fields back to fee for serialization compatibility

* fix test_fee_fields test, merge conflict, and doctest use of obsolete fee_fields

* make accept_fee height dependent

* Accept any u64 in FeeFields deser

Co-authored-by: Jasper van der Maarel <j@sper.dev>
John Tromp 2020-11-26 18:03:06 +01:00 committed by GitHub
parent 14f4683ca1
commit 48efb693e2
35 changed files with 866 additions and 369 deletions
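The core of the change is the FeeFields type from the fix-fees RFC, which packs a 4-bit fee_shift and a 40-bit fee into one u64 and reserves the top 20 bits for future use. A minimal standalone sketch of that packing (values are illustrative; the constants mirror FeeFields::FEE_BITS, FEE_MASK and FEE_SHIFT_MASK introduced below):

// { future_use: 20, fee_shift: 4, fee: 40 } packed into a u64
const FEE_BITS: u32 = 40;
const FEE_MASK: u64 = (1u64 << FEE_BITS) - 1;
const FEE_SHIFT_MASK: u64 = (1u64 << 4) - 1;

fn pack(fee_shift: u64, fee: u64) -> u64 {
    // valid inputs: 0 < fee <= FEE_MASK and fee_shift <= FEE_SHIFT_MASK
    (fee_shift << FEE_BITS) | fee
}

fn unpack(fee_fields: u64) -> (u8, u64) {
    let fee_shift = ((fee_fields >> FEE_BITS) & FEE_SHIFT_MASK) as u8;
    let fee = fee_fields & FEE_MASK;
    (fee_shift, fee)
}

fn main() {
    let packed = pack(2, 23_500_000); // fee in nanogrin, priority shift of 2
    assert_eq!(unpack(packed), (2, 23_500_000));
}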


@ -13,9 +13,10 @@
// limitations under the License.
use crate::chain;
use crate::core::consensus::YEAR_HEIGHT;
use crate::core::core::hash::Hashed;
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::{KernelFeatures, TxKernel};
use crate::core::core::{FeeFields, KernelFeatures, TxKernel};
use crate::core::{core, ser};
use crate::p2p;
use crate::util::secp::pedersen;
@ -499,6 +500,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TxKernelPrintable {
pub features: String,
pub fee_shift: u8,
pub fee: u64,
pub lock_height: u64,
pub excess: String,
@ -508,17 +510,21 @@ pub struct TxKernelPrintable {
impl TxKernelPrintable {
pub fn from_txkernel(k: &core::TxKernel) -> TxKernelPrintable {
let features = k.features.as_string();
let (fee, lock_height) = match k.features {
let (fee_fields, lock_height) = match k.features {
KernelFeatures::Plain { fee } => (fee, 0),
KernelFeatures::Coinbase => (0, 0),
KernelFeatures::Coinbase => (FeeFields::zero(), 0),
KernelFeatures::HeightLocked { fee, lock_height } => (fee, lock_height),
KernelFeatures::NoRecentDuplicate {
fee,
relative_height,
} => (fee, relative_height.into()),
};
let height = 2 * YEAR_HEIGHT; // print as if post-HF4
let fee = fee_fields.fee(height);
let fee_shift: u8 = fee_fields.fee_shift(height);
TxKernelPrintable {
features,
fee_shift,
fee,
lock_height,
excess: k.excess.to_hex(),


@ -355,7 +355,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(
}
// Block header is invalid (and block is invalid) if this lower bound is too heavy for a full block.
let weight = TransactionBody::weight_as_block(0, num_outputs, num_kernels);
let weight = TransactionBody::weight_by_iok(0, num_outputs, num_kernels);
if weight > global::max_block_weight() {
return Err(ErrorKind::Block(block::Error::TooHeavy).into());
}


@ -33,7 +33,7 @@ where
{
let prev = chain.head_header().unwrap();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
let fee = txs.iter().map(|x| x.fee()).sum();
let fee = txs.iter().map(|x| x.fee(prev.height + 1)).sum();
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap();
@ -84,7 +84,7 @@ fn mine_block_with_nrd_kernel_and_nrd_feature_enabled() {
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 20000,
fee: 20000.into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
&[
@ -131,7 +131,7 @@ fn mine_invalid_block_with_nrd_kernel_and_nrd_feature_enabled_before_hf() {
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 20000,
fee: 20000.into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
&[


@ -569,7 +569,7 @@ fn spend_rewind_spend() {
let key_id30 = ExtKeychainPath::new(1, 30, 0, 0, 0).to_identifier();
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
KernelFeatures::Plain { fee: 20000.into() },
&[
build::coinbase_input(consensus::REWARD, key_id_coinbase.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
@ -642,7 +642,7 @@ fn spend_in_fork_and_compact() {
let key_id31 = ExtKeychainPath::new(1, 31, 0, 0, 0).to_identifier();
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
KernelFeatures::Plain { fee: 20000.into() },
&[
build::coinbase_input(consensus::REWARD, key_id2.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
@ -660,7 +660,7 @@ fn spend_in_fork_and_compact() {
chain.validate(false).unwrap();
let tx2 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
KernelFeatures::Plain { fee: 20000.into() },
&[
build::input(consensus::REWARD - 20000, key_id30.clone()),
build::output(consensus::REWARD - 40000, key_id31.clone()),
@ -885,7 +885,8 @@ where
let proof_size = global::proofsize();
let key_id = ExtKeychainPath::new(1, key_idx, 0, 0, 0).to_identifier();
let fees = txs.iter().map(|tx| tx.fee()).sum();
let height = prev.height + 1;
let fees = txs.iter().map(|tx| tx.fee(height)).sum();
let reward =
libtx::reward::output(kc, &libtx::ProofBuilder::new(kc), &key_id, fees, false).unwrap();
let mut b = match core::core::Block::new(prev, txs, Difficulty::from_num(diff), reward) {


@ -54,7 +54,7 @@ where
{
let next_header_info =
consensus::next_difficulty(prev.height, chain.difficulty_iter().unwrap());
let fee = txs.iter().map(|x| x.fee()).sum();
let fee = txs.iter().map(|x| x.fee(prev.height + 1)).sum();
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap();
@ -100,7 +100,7 @@ fn process_block_nrd_validation() -> Result<(), Error> {
assert_eq!(chain.head()?.height, 8);
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 20000,
fee: 20000.into(),
relative_height: NRDRelativeHeight::new(2)?,
});
@ -216,7 +216,7 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> {
assert_eq!(chain.head()?.height, 8);
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 20000,
fee: 20000.into(),
relative_height: NRDRelativeHeight::new(1)?,
});
@ -315,7 +315,7 @@ fn process_block_nrd_validation_fork() -> Result<(), Error> {
assert_eq!(header_8.height, 8);
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 20000,
fee: 20000.into(),
relative_height: NRDRelativeHeight::new(2)?,
});


@ -23,7 +23,7 @@ use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain};
use crate::chain::{pipe, Chain, Options};
use crate::core::core::verifier_cache::LruVerifierCache;
use crate::core::core::{block, pmmr, transaction};
use crate::core::core::{Block, KernelFeatures, Transaction, Weighting};
use crate::core::core::{Block, FeeFields, KernelFeatures, Transaction, Weighting};
use crate::core::libtx::{build, reward, ProofBuilder};
use crate::core::{consensus, global, pow};
use crate::keychain::{ExtKeychain, ExtKeychainPath, Keychain, SwitchCommitmentType};
@ -43,7 +43,7 @@ where
let prev = chain.head_header().unwrap();
let next_height = prev.height + 1;
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter()?);
let fee = txs.iter().map(|x| x.fee()).sum();
let fee = txs.iter().map(|x| x.fee(next_height)).sum();
let key_id = ExtKeychainPath::new(1, next_height as u32, 0, 0, 0).to_identifier();
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), &key_id, fee, false).unwrap();
@ -104,7 +104,9 @@ fn process_block_cut_through() -> Result<(), chain::Error> {
// Note: We reuse key_ids resulting in an input and an output sharing the same commitment.
// The input is coinbase and the output is plain.
let tx = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::coinbase_input(consensus::REWARD, key_id2.clone()),
@ -129,8 +131,9 @@ fn process_block_cut_through() -> Result<(), chain::Error> {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction is invalid due to cut-through.
let height = 7;
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone()),
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
Err(transaction::Error::CutThrough),
);


@ -100,7 +100,7 @@ fn test_coinbase_maturity() {
// here we build a tx that attempts to spend the earlier coinbase output
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
@ -111,7 +111,7 @@ fn test_coinbase_maturity() {
.unwrap();
let txs = &[coinbase_txn.clone()];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum();
let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
let next_header_info =
consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap());
@ -186,7 +186,7 @@ fn test_coinbase_maturity() {
// here we build a tx that attempts to spend the earlier coinbase output
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
@ -197,7 +197,7 @@ fn test_coinbase_maturity() {
.unwrap();
let txs = &[coinbase_txn.clone()];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum();
let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
let next_header_info =
consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap());
@ -266,7 +266,7 @@ fn test_coinbase_maturity() {
.unwrap();
let txs = &[coinbase_txn];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let fees = txs.iter().map(|tx| tx.fee(prev.height + 1)).sum();
let next_header_info =
consensus::next_difficulty(prev.height + 1, chain.difficulty_iter().unwrap());
let reward = libtx::reward::output(&keychain, &builder, &key_id4, fees, false).unwrap();


@ -99,13 +99,13 @@ pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
pub const STATE_SYNC_THRESHOLD: u32 = 2 * DAY_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: u64 = 1;
pub const INPUT_WEIGHT: u64 = 1;
/// Weight of an output when counted against the max block weight capacity
pub const BLOCK_OUTPUT_WEIGHT: u64 = 21;
pub const OUTPUT_WEIGHT: u64 = 21;
/// Weight of a kernel when counted against the max block weight capacity
pub const BLOCK_KERNEL_WEIGHT: u64 = 3;
pub const KERNEL_WEIGHT: u64 = 3;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:


@ -469,11 +469,8 @@ impl Readable for UntrustedBlockHeader {
}
// Validate global output and kernel MMR sizes against upper bounds based on block height.
let global_weight = TransactionBody::weight_as_block(
0,
header.output_mmr_count(),
header.kernel_mmr_count(),
);
let global_weight =
TransactionBody::weight_by_iok(0, header.output_mmr_count(), header.kernel_mmr_count());
if global_weight > global::max_block_weight() * (header.height + 1) {
return Err(ser::Error::CorruptedData);
}
@ -700,7 +697,7 @@ impl Block {
/// Sum of all fees (inputs less outputs) in the block
pub fn total_fees(&self) -> u64 {
self.body.fee()
self.body.fee(self.header.height)
}
/// "Lightweight" validation that we can perform quickly during read/deserialization.


@ -14,6 +14,7 @@
//! Transactions
use crate::core::block::HeaderVersion;
use crate::core::hash::{DefaultHashable, Hashed};
use crate::core::verifier_cache::VerifierCache;
use crate::core::{committed, Committed};
@ -25,9 +26,12 @@ use crate::ser::{
use crate::{consensus, global};
use enum_primitive::FromPrimitive;
use keychain::{self, BlindingFactor};
use serde::de;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::Ordering;
use std::cmp::{max, min};
use std::convert::{TryFrom, TryInto};
use std::fmt::Display;
use std::sync::Arc;
use std::{error, fmt};
use util::secp;
@ -36,6 +40,184 @@ use util::static_secp_instance;
use util::RwLock;
use util::ToHex;
/// Fee fields as in fix-fees RFC: { future_use: 20, fee_shift: 4, fee: 40 }
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct FeeFields(u64);
impl DefaultHashable for FeeFields {}
impl Writeable for FeeFields {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u64(self.0)
}
}
impl Readable for FeeFields {
fn read<R: Reader>(reader: &mut R) -> Result<Self, ser::Error> {
let fee_fields = reader.read_u64()?;
Ok(Self(fee_fields))
}
}
impl Display for FeeFields {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl Serialize for FeeFields {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_str(&self.0)
}
}
impl<'de> Deserialize<'de> for FeeFields {
fn deserialize<D>(deserializer: D) -> Result<FeeFields, D::Error>
where
D: Deserializer<'de>,
{
struct FeeFieldsVisitor;
impl<'de> de::Visitor<'de> for FeeFieldsVisitor {
type Value = FeeFields;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("an 64-bit integer")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
let value = value
.parse()
.map_err(|_| E::custom(format!("invalid fee field")))?;
self.visit_u64(value)
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok(FeeFields(value))
}
}
deserializer.deserialize_any(FeeFieldsVisitor)
}
}
/// Conversion from a valid fee to a FeeFields with 0 fee_shift
/// The valid fee range is 1..=FEE_MASK
impl TryFrom<u64> for FeeFields {
type Error = Error;
fn try_from(fee: u64) -> Result<Self, Self::Error> {
if fee == 0 {
Err(Error::InvalidFeeFields)
} else if fee > FeeFields::FEE_MASK {
Err(Error::InvalidFeeFields)
} else {
Ok(Self(fee))
}
}
}
/// Conversion from a 32-bit fee to a FeeFields with 0 fee_shift
/// For use exclusively in tests with constant fees
impl From<u32> for FeeFields {
fn from(fee: u32) -> Self {
Self(fee as u64)
}
}
impl From<FeeFields> for u64 {
fn from(fee_fields: FeeFields) -> Self {
fee_fields.0 as u64
}
}
impl FeeFields {
/// Fees are limited to 40 bits
const FEE_BITS: u32 = 40;
/// Used to extract fee field
const FEE_MASK: u64 = (1u64 << FeeFields::FEE_BITS) - 1;
/// Fee shifts are limited to 4 bits
pub const FEE_SHIFT_BITS: u32 = 4;
/// Used to extract fee_shift field
pub const FEE_SHIFT_MASK: u64 = (1u64 << FeeFields::FEE_SHIFT_BITS) - 1;
/// Create a zero FeeFields with 0 fee and 0 fee_shift
pub fn zero() -> Self {
Self(0)
}
/// Create a new FeeFields from the provided shift and fee
/// Checks both are valid (in range)
pub fn new(fee_shift: u64, fee: u64) -> Result<Self, Error> {
if fee == 0 {
Err(Error::InvalidFeeFields)
} else if fee > FeeFields::FEE_MASK {
Err(Error::InvalidFeeFields)
} else if fee_shift > FeeFields::FEE_SHIFT_MASK {
Err(Error::InvalidFeeFields)
} else {
Ok(Self((fee_shift << FeeFields::FEE_BITS) | fee))
}
}
/// Extract fee_shift field
pub fn fee_shift(&self, height: u64) -> u8 {
if consensus::header_version(height) < HeaderVersion(5) {
0
} else {
((self.0 >> FeeFields::FEE_BITS) & FeeFields::FEE_SHIFT_MASK) as u8
}
}
/// Extract fee field
pub fn fee(&self, height: u64) -> u64 {
if consensus::header_version(height) < HeaderVersion(5) {
self.0
} else {
self.0 & FeeFields::FEE_MASK
}
}
/// Extract the (fee, fee_shift) bitfields as a tuple
/// ignoring the upper 64-FEE_BITS-FEE_SHIFT_BITS bits
pub fn as_tuple(&self) -> (u64, u64) {
let fee = self.0 & FeeFields::FEE_MASK;
let fee_shift = (self.0 >> FeeFields::FEE_BITS) & FeeFields::FEE_SHIFT_MASK;
(fee, fee_shift)
}
/// Turn a zero `FeeFields` into a `None`, any other value into a `Some`.
/// We need this because a zero `FeeFields` cannot be deserialized.
pub fn as_opt(&self) -> Option<Self> {
if self.is_zero() {
None
} else {
Some(*self)
}
}
/// Check if the `FeeFields` is set to zero
pub fn is_zero(&self) -> bool {
self.0 == 0
}
}
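// Note on the height gating above: before HF4 (header version 5) the full u64
// keeps its legacy meaning as a plain fee and fee_shift reads as 0; from HF4 on
// only the low 40 bits are the fee and the next 4 bits are the shift. Illustrative
// sketch, where pre_hf4_height / post_hf4_height stand for any heights on either
// side of the HF4 header-version bump:
//
//     let ff = FeeFields::new(1, 100)?;
//     assert_eq!(ff.fee(pre_hf4_height), (1 << 40) | 100); // raw u64, legacy reading
//     assert_eq!(ff.fee(post_hf4_height), 100);
//     assert_eq!(ff.fee_shift(pre_hf4_height), 0);
//     assert_eq!(ff.fee_shift(post_hf4_height), 1);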
fn fee_fields_as_int<S>(fee_fields: &FeeFields, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_u64(fee_fields.0)
}
/// Relative height field on NRD kernel variant.
/// u16 representing a height between 1 and MAX (consensus::WEEK_HEIGHT).
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
@ -106,21 +288,24 @@ pub enum KernelFeatures {
/// Plain kernel (the default for Grin txs).
Plain {
/// Plain kernels have fees.
fee: u64,
#[serde(serialize_with = "fee_fields_as_int")]
fee: FeeFields,
},
/// A coinbase kernel.
Coinbase,
/// A kernel with an explicit lock height (and fee).
HeightLocked {
/// Height locked kernels have fees.
fee: u64,
#[serde(serialize_with = "fee_fields_as_int")]
fee: FeeFields,
/// Height locked kernels have lock heights.
lock_height: u64,
},
/// "No Recent Duplicate" (NRD) kernels enforcing relative lock height between instances.
NoRecentDuplicate {
/// These have fees.
fee: u64,
#[serde(serialize_with = "fee_fields_as_int")]
fee: FeeFields,
/// Relative lock height.
relative_height: NRDRelativeHeight,
},
@ -153,10 +338,10 @@ impl KernelFeatures {
}
}
/// msg = hash(features) for coinbase kernels
/// hash(features || fee) for plain kernels
/// hash(features || fee || lock_height) for height locked kernels
/// hash(features || fee || relative_height) for NRD kernels
/// msg = hash(features) for coinbase kernels
/// hash(features || fee_fields) for plain kernels
/// hash(features || fee_fields || lock_height) for height locked kernels
/// hash(features || fee_fields || relative_height) for NRD kernels
pub fn kernel_sig_msg(&self) -> Result<secp::Message, Error> {
let x = self.as_u8();
let hash = match self {
@ -174,21 +359,21 @@ impl KernelFeatures {
}
/// Write tx kernel features out in v1 protocol format.
/// Always include the fee and lock_height, writing 0 value if unused.
/// Always include the fee_fields and lock_height, writing 0 value if unused.
fn write_v1<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u8(self.as_u8())?;
match self {
KernelFeatures::Plain { fee } => {
writer.write_u64(*fee)?;
fee.write(writer)?;
// Write "empty" bytes for feature specific data (8 bytes).
writer.write_empty_bytes(8)?;
}
KernelFeatures::Coinbase => {
// Write "empty" bytes for fee (8 bytes) and feature specific data (8 bytes).
// Write "empty" bytes for fee_fields (8 bytes) and feature specific data (8 bytes).
writer.write_empty_bytes(16)?;
}
KernelFeatures::HeightLocked { fee, lock_height } => {
writer.write_u64(*fee)?;
fee.write(writer)?;
// 8 bytes of feature specific data containing the lock height as big-endian u64.
writer.write_u64(*lock_height)?;
}
@ -196,7 +381,7 @@ impl KernelFeatures {
fee,
relative_height,
} => {
writer.write_u64(*fee)?;
fee.write(writer)?;
// 8 bytes of feature specific data. First 6 bytes are empty.
// Last 2 bytes contain the relative lock height as big-endian u16.
@ -211,20 +396,20 @@ impl KernelFeatures {
/// Write tx kernel features out in v2 protocol format.
/// These are variable sized based on feature variant.
/// Only write fee out for feature variants that support it.
/// Only write fee_fields out for feature variants that support it.
/// Only write lock_height out for feature variants that support it.
fn write_v2<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u8(self.as_u8())?;
match self {
KernelFeatures::Plain { fee } => {
// Fee only, no additional data on plain kernels.
writer.write_u64(*fee)?;
fee.write(writer)?;
}
KernelFeatures::Coinbase => {
// No additional data.
}
KernelFeatures::HeightLocked { fee, lock_height } => {
writer.write_u64(*fee)?;
fee.write(writer)?;
// V2 height locked kernels use 8 bytes for the lock height.
writer.write_u64(*lock_height)?;
}
@ -232,7 +417,7 @@ impl KernelFeatures {
fee,
relative_height,
} => {
writer.write_u64(*fee)?;
fee.write(writer)?;
// V2 NRD kernels use 2 bytes for the relative lock height.
relative_height.write(writer)?;
}
@ -240,7 +425,7 @@ impl KernelFeatures {
Ok(())
}
// Always read feature byte, 8 bytes for fee and 8 bytes for additional data
// Always read feature byte, 8 bytes for fee_fields and 8 bytes for additional data
// representing lock height or relative height.
// Fee and additional data may be unused for some kernel variants but we need
// to read these bytes and verify they are 0 if unused.
@ -248,19 +433,19 @@ impl KernelFeatures {
let feature_byte = reader.read_u8()?;
let features = match feature_byte {
KernelFeatures::PLAIN_U8 => {
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
// 8 "empty" bytes as additional data is not used.
reader.read_empty_bytes(8)?;
KernelFeatures::Plain { fee }
}
KernelFeatures::COINBASE_U8 => {
// 8 "empty" bytes as fee is not used.
// 8 "empty" bytes as fee_fields is not used.
// 8 "empty" bytes as additional data is not used.
reader.read_empty_bytes(16)?;
KernelFeatures::Coinbase
}
KernelFeatures::HEIGHT_LOCKED_U8 => {
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
// 8 bytes of feature specific data, lock height as big-endian u64.
let lock_height = reader.read_u64()?;
KernelFeatures::HeightLocked { fee, lock_height }
@ -271,7 +456,7 @@ impl KernelFeatures {
return Err(ser::Error::CorruptedData);
}
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
// 8 bytes of feature specific data.
// The first 6 bytes must be "empty".
@ -295,12 +480,12 @@ impl KernelFeatures {
fn read_v2<R: Reader>(reader: &mut R) -> Result<KernelFeatures, ser::Error> {
let features = match reader.read_u8()? {
KernelFeatures::PLAIN_U8 => {
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
KernelFeatures::Plain { fee }
}
KernelFeatures::COINBASE_U8 => KernelFeatures::Coinbase,
KernelFeatures::HEIGHT_LOCKED_U8 => {
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
let lock_height = reader.read_u64()?;
KernelFeatures::HeightLocked { fee, lock_height }
}
@ -310,7 +495,7 @@ impl KernelFeatures {
return Err(ser::Error::CorruptedData);
}
let fee = reader.read_u64()?;
let fee = FeeFields::read(reader)?;
let relative_height = NRDRelativeHeight::read(reader)?;
KernelFeatures::NoRecentDuplicate {
fee,
@ -384,6 +569,8 @@ pub enum Error {
/// Validation error relating to kernel features.
/// It is invalid for a transaction to contain a coinbase kernel, for example.
InvalidKernelFeatures,
/// fee_shift is limited to 4 bits and fee must be positive and fit in 40 bits.
InvalidFeeFields,
/// NRD kernel relative height is limited to 1 week duration and must be greater than 0.
InvalidNRDRelativeHeight,
/// Signature verification error.
@ -435,7 +622,7 @@ impl From<committed::Error> for Error {
/// A proof that a transaction sums to zero. Includes both the transaction's
/// Pedersen commitment and the signature, that guarantees that the commitments
/// amount to zero.
/// The signature signs the fee and the lock_height, which are retained for
/// The signature signs the fee_fields and the lock_height, which are retained for
/// signature validation.
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub struct TxKernel {
@ -450,7 +637,7 @@ pub struct TxKernel {
)]
pub excess: Commitment,
/// The signature proving the excess is a valid public key, which signs
/// the transaction fee.
/// the transaction fee_fields.
#[serde(with = "secp_ser::sig_serde")]
pub excess_sig: secp::Signature,
}
@ -562,14 +749,14 @@ impl TxKernel {
}
/// The msg signed as part of the tx kernel.
/// Based on kernel features and associated fields (fee and lock_height).
/// Based on kernel features and associated fields (fee_fields and lock_height).
pub fn msg_to_sign(&self) -> Result<secp::Message, Error> {
let msg = self.features.kernel_sig_msg()?;
Ok(msg)
}
/// Verify the transaction proof validity. Entails handling the commitment
/// as a public key and checking the signature verifies with the fee as
/// as a public key and checking the signature verifies with the fee_fields as
/// message.
pub fn verify(&self) -> Result<(), Error> {
let secp = static_secp_instance();
@ -616,7 +803,9 @@ impl TxKernel {
/// Build an empty tx kernel with zero values.
pub fn empty() -> TxKernel {
TxKernel::with_features(KernelFeatures::Plain { fee: 0 })
TxKernel::with_features(KernelFeatures::Plain {
fee: FeeFields::zero(),
})
}
/// Build an empty tx kernel with the provided kernel features.
@ -687,8 +876,7 @@ impl Readable for TransactionBody {
// Quick block weight check before proceeding.
// Note: We use weight_as_block here (inputs have weight).
let tx_block_weight =
TransactionBody::weight_as_block(num_inputs, num_outputs, num_kernels);
let tx_block_weight = TransactionBody::weight_by_iok(num_inputs, num_outputs, num_kernels);
if tx_block_weight > global::max_block_weight() {
return Err(ser::Error::TooLargeReadErr);
}
@ -861,7 +1049,7 @@ impl TransactionBody {
}
/// Total fee for a TransactionBody is the sum of fees of all fee carrying kernels.
pub fn fee(&self) -> u64 {
pub fn fee(&self, height: u64) -> u64 {
self.kernels
.iter()
.filter_map(|k| match k.features {
@ -870,49 +1058,57 @@ impl TransactionBody {
KernelFeatures::HeightLocked { fee, .. } => Some(fee),
KernelFeatures::NoRecentDuplicate { fee, .. } => Some(fee),
})
.fold(0, |acc, fee| acc.saturating_add(fee))
.fold(0, |acc, fee_fields| {
acc.saturating_add(fee_fields.fee(height))
})
}
fn overage(&self) -> i64 {
self.fee() as i64
/// fee_shift for a TransactionBody is the maximum of fee_shifts of all fee carrying kernels.
pub fn fee_shift(&self, height: u64) -> u8 {
self.kernels
.iter()
.filter_map(|k| match k.features {
KernelFeatures::Coinbase => None,
KernelFeatures::Plain { fee } => Some(fee),
KernelFeatures::HeightLocked { fee, .. } => Some(fee),
KernelFeatures::NoRecentDuplicate { fee, .. } => Some(fee),
})
.fold(0, |acc, fee_fields| max(acc, fee_fields.fee_shift(height)))
}
/// Calculate transaction weight
pub fn body_weight(&self) -> u64 {
TransactionBody::weight(
self.inputs.len() as u64,
self.outputs.len() as u64,
self.kernels.len() as u64,
)
/// Shifted fee for a TransactionBody is the sum of fees shifted right by the maximum fee_shift
/// this is used to determine whether a tx can be relayed or accepted in a mempool
/// where transactions can specify a higher block-inclusion priority as a positive shift up to 15
/// but are required to overpay the minimum required fees by a factor of 2^priority
pub fn shifted_fee(&self, height: u64) -> u64 {
self.fee(height) >> self.fee_shift(height)
}
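// Worked example of the priority rule described above (numbers are illustrative):
// a 1-input, 1-output, 1-kernel tx weighs 1*1 + 1*21 + 1*3 = 25, so at the default
// accept fee base of 500_000 its minimum acceptable fee is 12_500_000. To claim
// priority fee_shift = 2 the kernels must overpay by 2^2, because only
// shifted_fee() = fee >> fee_shift is measured against that minimum:
//
//     let min_fee: u64 = 25 * 500_000;
//     let ff = FeeFields::new(2, min_fee << 2)?; // fee = 50_000_000, shift = 2
//     // at any post-HF4 height h: ff.fee(h) >> ff.fee_shift(h) == min_fee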
/// aggregate fee_fields from all appropriate kernels in TransactionBody into one, if possible
pub fn aggregate_fee_fields(&self, height: u64) -> Result<FeeFields, Error> {
FeeFields::new(self.fee_shift(height) as u64, self.fee(height))
}
fn overage(&self, height: u64) -> i64 {
self.fee(height) as i64
}
/// Calculate weight of transaction using block weighing
pub fn body_weight_as_block(&self) -> u64 {
TransactionBody::weight_as_block(
pub fn weight(&self) -> u64 {
TransactionBody::weight_by_iok(
self.inputs.len() as u64,
self.outputs.len() as u64,
self.kernels.len() as u64,
)
}
/// Calculate transaction weight from transaction details. This is non
/// consensus critical and compared to block weight, incentivizes spending
/// more outputs (to lower the fee).
pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
let body_weight = num_outputs
.saturating_mul(4)
.saturating_add(num_kernels)
.saturating_sub(num_inputs);
max(body_weight, 1)
}
/// Calculate transaction weight using block weighing from transaction
/// details. Consensus critical and uses consensus weight values.
pub fn weight_as_block(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
pub fn weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
num_inputs
.saturating_mul(consensus::BLOCK_INPUT_WEIGHT as u64)
.saturating_add(num_outputs.saturating_mul(consensus::BLOCK_OUTPUT_WEIGHT as u64))
.saturating_add(num_kernels.saturating_mul(consensus::BLOCK_KERNEL_WEIGHT as u64))
.saturating_mul(consensus::INPUT_WEIGHT as u64)
.saturating_add(num_outputs.saturating_mul(consensus::OUTPUT_WEIGHT as u64))
.saturating_add(num_kernels.saturating_mul(consensus::KERNEL_WEIGHT as u64))
}
/// Lock height of a body is the max lock height of the kernels.
@ -932,7 +1128,7 @@ impl TransactionBody {
fn verify_weight(&self, weighting: Weighting) -> Result<(), Error> {
// A coinbase reward is a single output and a single kernel (for now).
// We need to account for this when verifying max tx weights.
let coinbase_weight = consensus::BLOCK_OUTPUT_WEIGHT + consensus::BLOCK_KERNEL_WEIGHT;
let coinbase_weight = consensus::OUTPUT_WEIGHT + consensus::KERNEL_WEIGHT;
// If "tx" body then remember to reduce the max_block_weight by the weight of a kernel.
// If "limited tx" then compare against the provided max_weight.
@ -943,7 +1139,7 @@ impl TransactionBody {
// for the additional coinbase reward (1 output + 1 kernel).
//
let max_weight = match weighting {
Weighting::AsTransaction => global::max_block_weight().saturating_sub(coinbase_weight),
Weighting::AsTransaction => global::max_tx_weight(),
Weighting::AsLimitedTransaction(max_weight) => {
min(global::max_block_weight(), max_weight).saturating_sub(coinbase_weight)
}
@ -954,7 +1150,7 @@ impl TransactionBody {
}
};
if self.body_weight_as_block() > max_weight {
if self.weight() > max_weight {
return Err(Error::TooHeavy);
}
Ok(())
@ -1258,13 +1454,23 @@ impl Transaction {
}
/// Total fee for a transaction is the sum of fees of all kernels.
pub fn fee(&self) -> u64 {
self.body.fee()
pub fn fee(&self, height: u64) -> u64 {
self.body.fee(height)
}
/// Shifted fee for a transaction is the sum of fees of all kernels shifted right by the maximum fee shift
pub fn shifted_fee(&self, height: u64) -> u64 {
self.body.shifted_fee(height)
}
/// aggregate fee_fields from all appropriate kernels in transaction into one
pub fn aggregate_fee_fields(&self, height: u64) -> Result<FeeFields, Error> {
self.body.aggregate_fee_fields(height)
}
/// Total overage across all kernels.
pub fn overage(&self) -> i64 {
self.body.overage()
pub fn overage(&self, height: u64) -> i64 {
self.body.overage(height)
}
/// Lock height of a transaction is the max lock height of the kernels.
@ -1290,32 +1496,50 @@ impl Transaction {
&self,
weighting: Weighting,
verifier: Arc<RwLock<dyn VerifierCache>>,
height: u64,
) -> Result<(), Error> {
self.body.verify_features()?;
self.body.validate(weighting, verifier)?;
self.verify_kernel_sums(self.overage(), self.offset.clone())?;
self.verify_kernel_sums(self.overage(height), self.offset.clone())?;
Ok(())
}
/// Can be used to compare txs by their fee/weight ratio.
/// Can be used to compare txs by their fee/weight ratio, aka feerate.
/// Don't use these values for anything else though due to precision multiplier.
pub fn fee_to_weight(&self) -> u64 {
self.fee() * 1_000 / self.tx_weight() as u64
pub fn fee_rate(&self, height: u64) -> u64 {
self.fee(height) / self.weight() as u64
}
/// Calculate transaction weight
pub fn tx_weight(&self) -> u64 {
self.body.body_weight()
pub fn weight(&self) -> u64 {
self.body.weight()
}
/// Calculate transaction weight as a block
pub fn tx_weight_as_block(&self) -> u64 {
self.body.body_weight_as_block()
/// Transaction minimum acceptable fee
pub fn accept_fee(&self, height: u64) -> u64 {
if consensus::header_version(height) < HeaderVersion(5) {
Transaction::old_weight_by_iok(
self.body.inputs.len() as u64,
self.body.outputs.len() as u64,
self.body.kernels.len() as u64,
) * consensus::MILLI_GRIN
} else {
self.weight() * global::get_accept_fee_base()
}
}
/// Old weight definition for pool acceptance
pub fn old_weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
let body_weight = num_outputs
.saturating_mul(4)
.saturating_add(num_kernels)
.saturating_sub(num_inputs);
max(body_weight, 1)
}
/// Calculate transaction weight from transaction details
pub fn weight(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
TransactionBody::weight(num_inputs, num_outputs, num_kernels)
pub fn weight_by_iok(num_inputs: u64, num_outputs: u64, num_kernels: u64) -> u64 {
TransactionBody::weight_by_iok(num_inputs, num_outputs, num_kernels)
}
}
@ -2113,7 +2337,7 @@ mod test {
let sig = secp::Signature::from_raw_data(&[0; 64]).unwrap();
let kernel = TxKernel {
features: KernelFeatures::Plain { fee: 10 },
features: KernelFeatures::Plain { fee: 10.into() },
excess: commit,
excess_sig: sig.clone(),
};
@ -2123,7 +2347,7 @@ mod test {
let mut vec = vec![];
ser::serialize(&mut vec, version, &kernel).expect("serialized failed");
let kernel2: TxKernel = ser::deserialize(&mut &vec[..], version).unwrap();
assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10 });
assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10.into() });
assert_eq!(kernel2.excess, commit);
assert_eq!(kernel2.excess_sig, sig.clone());
}
@ -2132,7 +2356,7 @@ mod test {
let mut vec = vec![];
ser::serialize_default(&mut vec, &kernel).expect("serialized failed");
let kernel2: TxKernel = ser::deserialize_default(&mut &vec[..]).unwrap();
assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10 });
assert_eq!(kernel2.features, KernelFeatures::Plain { fee: 10.into() });
assert_eq!(kernel2.excess, commit);
assert_eq!(kernel2.excess_sig, sig.clone());
}
@ -2151,7 +2375,7 @@ mod test {
// now check a kernel with lock_height serialize/deserialize correctly
let kernel = TxKernel {
features: KernelFeatures::HeightLocked {
fee: 10,
fee: 10.into(),
lock_height: 100,
},
excess: commit,
@ -2193,7 +2417,7 @@ mod test {
// now check an NRD kernel will serialize/deserialize correctly
let kernel = TxKernel {
features: KernelFeatures::NoRecentDuplicate {
fee: 10,
fee: 10.into(),
relative_height: NRDRelativeHeight(100),
},
excess: commit,
@ -2225,7 +2449,7 @@ mod test {
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 10,
fee: 10.into(),
relative_height: NRDRelativeHeight(100),
});
@ -2251,25 +2475,25 @@ mod test {
// Modify the fee and check signature no longer verifies.
kernel.features = KernelFeatures::NoRecentDuplicate {
fee: 9,
fee: 9.into(),
relative_height: NRDRelativeHeight(100),
};
assert_eq!(kernel.verify(), Err(Error::IncorrectSignature));
// Modify the relative_height and check signature no longer verifies.
kernel.features = KernelFeatures::NoRecentDuplicate {
fee: 10,
fee: 10.into(),
relative_height: NRDRelativeHeight(101),
};
assert_eq!(kernel.verify(), Err(Error::IncorrectSignature));
// Swap the features out for something different and check signature no longer verifies.
kernel.features = KernelFeatures::Plain { fee: 10 };
kernel.features = KernelFeatures::Plain { fee: 10.into() };
assert_eq!(kernel.verify(), Err(Error::IncorrectSignature));
// Check signature verifies if we use the original features.
kernel.features = KernelFeatures::NoRecentDuplicate {
fee: 10,
fee: 10.into(),
relative_height: NRDRelativeHeight(100),
};
assert_eq!(kernel.verify(), Ok(()));
@ -2330,7 +2554,7 @@ mod test {
let mut vec = vec![];
ser::serialize_default(&mut vec, &(0u8, 10u64, 0u64))?;
let features: KernelFeatures = ser::deserialize_default(&mut &vec[..])?;
assert_eq!(features, KernelFeatures::Plain { fee: 10 });
assert_eq!(features, KernelFeatures::Plain { fee: 10.into() });
let mut vec = vec![];
ser::serialize_default(&mut vec, &(1u8, 0u64, 0u64))?;
@ -2343,7 +2567,7 @@ mod test {
assert_eq!(
features,
KernelFeatures::HeightLocked {
fee: 10,
fee: 10.into(),
lock_height: 100
}
);
@ -2373,7 +2597,7 @@ mod test {
assert_eq!(
features,
KernelFeatures::NoRecentDuplicate {
fee: 10,
fee: 10.into(),
relative_height: NRDRelativeHeight(100)
}
);
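Taken together, the transaction.rs changes make every fee accessor height-aware, since the interpretation of the fee field flips at HF4. A minimal usage sketch (crate paths assume the same `core` alias as the doctests in this diff; the final assert only restates the relay rule informally, it is not the pool's actual code path):

use core::core::{FeeFields, KernelFeatures, Transaction};
use std::convert::TryInto;

fn describe(tx: &Transaction, height: u64) {
    let fee = tx.fee(height);             // sum of kernel fees at this height
    let shifted = tx.shifted_fee(height); // fee >> max fee_shift across kernels
    let minimum = tx.accept_fee(height);  // weight() * accept fee base after HF4
    println!("fee={} shifted_fee={} accept_fee={}", fee, shifted, minimum);
    assert!(shifted >= minimum);          // informal mempool acceptance check
}

fn plain_features() -> KernelFeatures {
    // try_into() validates 0 < fee <= FEE_MASK with fee_shift = 0;
    // tests use the simpler `2.into()` (u32 -> FeeFields) conversion instead.
    let fee: FeeFields = 7_000_000u64.try_into().expect("fee in range");
    KernelFeatures::Plain { fee }
}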


@ -17,10 +17,10 @@
//! should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderInfo, BASE_EDGE_BITS, BLOCK_KERNEL_WEIGHT,
BLOCK_OUTPUT_WEIGHT, BLOCK_TIME_SEC, C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON,
DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS, DMA_WINDOW, INITIAL_DIFFICULTY, MAX_BLOCK_WEIGHT, PROOFSIZE,
SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
graph_weight, header_version, HeaderInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT,
OUTPUT_WEIGHT, PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::HeaderVersion;
use crate::pow::{
@ -78,6 +78,9 @@ pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artificially low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
@ -142,6 +145,11 @@ lazy_static! {
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global accept fee base that must be initialized once on node startup.
/// This is accessed via get_accept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
@ -157,6 +165,9 @@ thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
@ -196,6 +207,35 @@ pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread local config first. If not set fallback to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
@ -357,7 +397,7 @@ pub fn max_block_weight() -> u64 {
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = BLOCK_OUTPUT_WEIGHT + BLOCK_KERNEL_WEIGHT;
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
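The accept fee base follows the same global/thread-local pattern as the chain type and future time limit: set once at node startup from config, overridable per thread for tests. A small configuration sketch (crate path assumes the `core` alias; values are illustrative):

use core::global;

fn fee_base_setup() {
    // Node startup: one-time global init, e.g. from grin-server.toml or grin-wallet.toml.
    global::init_global_accept_fee_base(global::DEFAULT_ACCEPT_FEE_BASE); // 500_000

    // Tests: per-thread override, avoiding re-initialization of the OneTime global.
    global::set_local_accept_fee_base(1);

    // Minimum acceptable fee for a tx of weight w is w * base; the thread-local
    // value wins once set, so this reads back 25 * 1 here.
    let w: u64 = 25; // 1 input + 1 output + 1 kernel = 1 + 21 + 3
    assert_eq!(w * global::get_accept_fee_base(), 25);
}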


@ -225,6 +225,7 @@ pub fn verify_partial_sig(
/// use core::core::transaction::KernelFeatures;
/// use core::core::{Output, OutputFeatures};
/// use keychain::{Keychain, ExtKeychain, SwitchCommitmentType};
/// use std::convert::TryInto;
///
/// let secp = Secp256k1::with_caps(ContextFlag::Commit);
/// let keychain = ExtKeychain::from_random_seed(false).unwrap();
@ -239,7 +240,7 @@ pub fn verify_partial_sig(
/// let height = 20;
/// let over_commit = secp.commit_value(reward(fees)).unwrap();
/// let out_commit = output.commitment();
/// let features = KernelFeatures::HeightLocked{fee: 0, lock_height: height};
/// let features = KernelFeatures::HeightLocked{fee: 1.into(), lock_height: height};
/// let msg = features.kernel_sig_msg().unwrap();
/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap();
/// let pubkey = excess.to_pubkey(&secp).unwrap();
@ -287,6 +288,7 @@ where
/// use core::core::transaction::KernelFeatures;
/// use core::core::{Output, OutputFeatures};
/// use keychain::{Keychain, ExtKeychain, SwitchCommitmentType};
/// use std::convert::TryInto;
///
/// // Create signature
/// let secp = Secp256k1::with_caps(ContextFlag::Commit);
@ -302,7 +304,7 @@ where
/// let height = 20;
/// let over_commit = secp.commit_value(reward(fees)).unwrap();
/// let out_commit = output.commitment();
/// let features = KernelFeatures::HeightLocked{fee: 0, lock_height: height};
/// let features = KernelFeatures::HeightLocked{fee: 1.into(), lock_height: height};
/// let msg = features.kernel_sig_msg().unwrap();
/// let excess = secp.commit_sum(vec![out_commit], vec![over_commit]).unwrap();
/// let pubkey = excess.to_pubkey(&secp).unwrap();


@ -23,7 +23,7 @@
//!
//! Example:
//! build::transaction(
//! KernelFeatures::Plain{ fee: 2 },
//! KernelFeatures::Plain{ fee: 2.try_into().unwrap() },
//! vec![
//! input_rand(75),
//! output_rand(42),
@ -279,14 +279,16 @@ mod test {
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
.unwrap();
tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
}
#[test]
@ -301,14 +303,16 @@ mod test {
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
.unwrap();
tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
}
#[test]
@ -322,13 +326,15 @@ mod test {
let vc = verifier_cache();
let tx = transaction(
KernelFeatures::Plain { fee: 4 },
KernelFeatures::Plain { fee: 4.into() },
&[input(6, key_id1), output(2, key_id2)],
&keychain,
&builder,
)
.unwrap();
tx.validate(Weighting::AsTransaction, vc.clone()).unwrap();
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, vc.clone(), height)
.unwrap();
}
}


@ -28,25 +28,19 @@ pub mod proof;
pub mod reward;
pub mod secp_ser;
use crate::consensus;
use crate::core::Transaction;
use crate::global::get_accept_fee_base;
pub use self::proof::ProofBuilder;
pub use crate::libtx::error::{Error, ErrorKind};
const DEFAULT_BASE_FEE: u64 = consensus::MILLI_GRIN;
/// Transaction fee calculation
pub fn tx_fee(
input_len: usize,
output_len: usize,
kernel_len: usize,
base_fee: Option<u64>,
) -> u64 {
let use_base_fee = match base_fee {
Some(bf) => bf,
None => DEFAULT_BASE_FEE,
};
Transaction::weight(input_len as u64, output_len as u64, kernel_len as u64) * use_base_fee
/// Transaction fee calculation given numbers of inputs, outputs, and kernels
pub fn tx_fee(input_len: usize, output_len: usize, kernel_len: usize) -> u64 {
Transaction::weight_by_iok(input_len as u64, output_len as u64, kernel_len as u64)
* get_accept_fee_base()
}
/// Transaction fee calculation given transaction
pub fn accept_fee(tx: Transaction, height: u64) -> u64 {
tx.accept_fee(height)
}
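With the optional base-fee argument removed, tx_fee derives the minimum fee directly from the renamed weight constants (INPUT_WEIGHT = 1, OUTPUT_WEIGHT = 21, KERNEL_WEIGHT = 3) and the configured accept fee base. A worked example under the default base of 500_000 nanogrin per weight unit (crate path again assumes the `core` alias):

use core::libtx::tx_fee;

fn wallet_fee_estimate() -> u64 {
    // 2 inputs, 2 outputs, 1 kernel:
    //   weight = 2*1 + 2*21 + 1*3 = 47
    //   fee    = 47 * 500_000    = 23_500_000 nanogrin (about 0.0235 grin)
    tx_fee(2, 2, 1)
}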


@ -14,12 +14,13 @@
mod common;
use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use crate::core::consensus::{self, BLOCK_OUTPUT_WEIGHT, TESTING_HARD_FORK_INTERVAL};
use crate::core::consensus::{self, OUTPUT_WEIGHT, TESTING_HARD_FORK_INTERVAL};
use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion, UntrustedBlockHeader};
use crate::core::core::hash::Hashed;
use crate::core::core::id::ShortIdentifiable;
use crate::core::core::transaction::{
self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier, Transaction,
self, FeeFields, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier,
Transaction,
};
use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use crate::core::core::{Committed, CompactBlock};
@ -47,7 +48,7 @@ fn too_large_block() {
test_setup();
let keychain = ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain);
let max_out = global::max_block_weight() / BLOCK_OUTPUT_WEIGHT;
let max_out = global::max_block_weight() / OUTPUT_WEIGHT;
let mut pks = vec![];
for n in 0..(max_out + 1) {
@ -61,7 +62,7 @@ fn too_large_block() {
parts.append(&mut vec![input(500000, pks.pop().unwrap())]);
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&parts,
&keychain,
&builder,
@ -102,7 +103,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 2,
fee: 2.into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
&[input(7, key_id1), output(5, key_id2)],
@ -187,7 +188,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 2,
fee: 2.into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
&[input(7, key_id1), output(5, key_id2)],
@ -275,7 +276,7 @@ fn block_with_cut_through() {
let btx1 = tx2i1o();
let btx2 = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(7, key_id1), output(5, key_id2.clone())],
&keychain,
&builder,
@ -373,7 +374,9 @@ fn remove_coinbase_kernel_flag() {
let mut b = new_block(&[], &keychain, &builder, &prev, &key_id);
let mut kernel = b.kernels()[0].clone();
kernel.features = KernelFeatures::Plain { fee: 0 };
kernel.features = KernelFeatures::Plain {
fee: FeeFields::zero(),
};
b.body = b.body.replace_kernel(kernel);
// Flipping the coinbase flag results in kernels not summing correctly.
@ -751,7 +754,7 @@ fn same_amount_outputs_copy_range_proof() {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx = build::transaction(
KernelFeatures::Plain { fee: 1 },
KernelFeatures::Plain { fee: 1.into() },
&[input(7, key_id1), output(3, key_id2), output(3, key_id3)],
&keychain,
&builder,
@ -792,7 +795,7 @@ fn wrong_amount_range_proof() {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 1 },
KernelFeatures::Plain { fee: 1.into() },
&[
input(7, key_id1.clone()),
output(3, key_id2.clone()),
@ -803,7 +806,7 @@ fn wrong_amount_range_proof() {
)
.unwrap();
let tx2 = build::transaction(
KernelFeatures::Plain { fee: 1 },
KernelFeatures::Plain { fee: 1.into() },
&[input(7, key_id1), output(2, key_id2), output(4, key_id3)],
&keychain,
&builder,
@ -883,7 +886,9 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
let builder = ProofBuilder::new(&keychain);
let tx = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[
build::input(10, key_id1.clone()),
build::input(10, key_id2.clone()),
@ -947,7 +952,9 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
let builder = ProofBuilder::new(&keychain);
let tx = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::coinbase_input(consensus::REWARD, key_id2.clone()),


@ -37,7 +37,7 @@ pub fn tx2i1o() -> Transaction {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), input(11, key_id2), output(19, key_id3)],
&keychain,
&builder,
@ -56,7 +56,7 @@ pub fn tx1i1o() -> Transaction {
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(5, key_id1), output(3, key_id2)],
&keychain,
&builder,
@ -96,7 +96,7 @@ pub fn tx1i2o() -> Transaction {
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(6, key_id1), output(3, key_id2), output(1, key_id3)],
&keychain,
&builder,
@ -120,7 +120,10 @@ where
K: Keychain,
B: ProofBuild,
{
let fees = txs.iter().map(|tx| tx.fee()).sum();
let fees = txs
.iter()
.map(|tx| tx.fee(previous_header.height + 1))
.sum();
let reward_output = reward::output(keychain, builder, &key_id, fees, false).unwrap();
Block::new(&previous_header, txs, Difficulty::min_dma(), reward_output).unwrap()
}
@ -140,7 +143,7 @@ where
B: ProofBuild,
{
build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(v, key_id1), output(3, key_id2)],
keychain,
builder,


@ -21,8 +21,8 @@ use self::core::core::block::Error::KernelLockHeight;
use self::core::core::hash::{Hashed, ZERO_HASH};
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{
aggregate, deaggregate, KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction,
TxKernel, Weighting,
aggregate, deaggregate, FeeFields, KernelFeatures, Output, OutputFeatures, OutputIdentifier,
Transaction, TxKernel, Weighting,
};
use self::core::libtx::build::{self, initial_tx, input, output, with_excess};
use self::core::libtx::{aggsig, ProofBuilder};
@ -97,7 +97,8 @@ fn simple_tx_ser_deser() {
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &tx).expect("serialization failed");
let dtx: Transaction = ser::deserialize_default(&mut &vec[..]).unwrap();
assert_eq!(dtx.fee(), 2);
let height = 42; // arbitrary
assert_eq!(dtx.fee(height), 2);
assert_eq!(dtx.inputs().len(), 2);
assert_eq!(dtx.outputs().len(), 1);
assert_eq!(tx.hash(), dtx.hash());
@ -130,7 +131,9 @@ fn test_zero_commit_fails() {
// blinding should fail as signing with a zero r*G shouldn't work
let res = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[input(10, key_id1.clone()), output(10, key_id1)],
&keychain,
&builder,
@ -153,7 +156,7 @@ fn build_tx_kernel() {
// first build a valid tx with corresponding blinding factor
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[input(10, key_id1), output(5, key_id2), output(3, key_id3)],
&keychain,
&builder,
@ -161,7 +164,8 @@ fn build_tx_kernel() {
.unwrap();
// check the tx is valid
tx.validate(Weighting::AsTransaction, verifier_cache())
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
// check the kernel is also itself valid
@ -169,8 +173,8 @@ fn build_tx_kernel() {
let kern = &tx.kernels()[0];
kern.verify().unwrap();
assert_eq!(kern.features, KernelFeatures::Plain { fee: 2 });
assert_eq!(2, tx.fee());
assert_eq!(kern.features, KernelFeatures::Plain { fee: 2.into() });
assert_eq!(2, tx.fee(height));
}
// Proof of concept demonstrating we can build two transactions that share
@ -192,7 +196,7 @@ fn build_two_half_kernels() {
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
// build kernel with associated private excess
let mut kernel = TxKernel::with_features(KernelFeatures::Plain { fee: 2 });
let mut kernel = TxKernel::with_features(KernelFeatures::Plain { fee: 2.into() });
// Construct the message to be signed.
let msg = kernel.msg_to_sign().unwrap();
@ -224,13 +228,14 @@ fn build_two_half_kernels() {
)
.unwrap();
let height = 42; // arbitrary
assert_eq!(
tx1.validate(Weighting::AsTransaction, verifier_cache()),
tx1.validate(Weighting::AsTransaction, verifier_cache(), height),
Ok(()),
);
assert_eq!(
tx2.validate(Weighting::AsTransaction, verifier_cache()),
tx2.validate(Weighting::AsTransaction, verifier_cache(), height),
Ok(()),
);
@ -256,11 +261,12 @@ fn transaction_cut_through() {
let tx1 = tx1i2o();
let tx2 = tx2i1o();
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, verifier_cache())
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, verifier_cache())
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
let vc = verifier_cache();
@ -268,7 +274,9 @@ fn transaction_cut_through() {
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
}
// Attempt to deaggregate a multi-kernel transaction in a different way
@ -282,31 +290,44 @@ fn multi_kernel_transaction_deaggregation() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let tx1234 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx1234
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx34
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx34.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx34 = deaggregate(tx1234.clone(), &[tx12.clone()]).unwrap();
assert!(deaggregated_tx34
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx34, deaggregated_tx34);
let deaggregated_tx12 = deaggregate(tx1234, &[tx34]).unwrap();
assert!(deaggregated_tx12
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx12, deaggregated_tx12);
}
@ -320,19 +341,30 @@ fn multi_kernel_transaction_deaggregation_2() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx123
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx12
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let deaggregated_tx3 = deaggregate(tx123, &[tx12]).unwrap();
assert!(deaggregated_tx3
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx3, deaggregated_tx3);
}
@ -346,20 +378,31 @@ fn multi_kernel_transaction_deaggregation_3() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(&[tx1, tx3]).unwrap();
let tx2 = aggregate(&[tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx123
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let deaggregated_tx13 = deaggregate(tx123, &[tx2]).unwrap();
assert!(deaggregated_tx13
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx13, deaggregated_tx13);
}
@ -375,11 +418,22 @@ fn multi_kernel_transaction_deaggregation_4() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let tx12345 = aggregate(&[
tx1.clone(),
@ -390,12 +444,12 @@ fn multi_kernel_transaction_deaggregation_4() {
])
.unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let deaggregated_tx5 = deaggregate(tx12345, &[tx1, tx2, tx3, tx4]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@ -411,11 +465,22 @@ fn multi_kernel_transaction_deaggregation_5() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx4
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx5
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let tx12345 = aggregate(&[
tx1.clone(),
@ -429,12 +494,12 @@ fn multi_kernel_transaction_deaggregation_5() {
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let deaggregated_tx5 = deaggregate(tx12345, &[tx12, tx34]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx5, deaggregated_tx5);
}
@ -448,25 +513,32 @@ fn basic_transaction_deaggregation() {
let vc = verifier_cache();
assert!(tx1.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let height = 42; // arbitrary
assert!(tx1
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert!(tx2
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(&[tx1.clone(), tx2.clone()]).unwrap();
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
let deaggregated_tx1 = deaggregate(tx3.clone(), &[tx2.clone()]).unwrap();
assert!(deaggregated_tx1
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx1, deaggregated_tx1);
let deaggregated_tx2 = deaggregate(tx3, &[tx1]).unwrap();
assert!(deaggregated_tx2
.validate(Weighting::AsTransaction, vc.clone())
.validate(Weighting::AsTransaction, vc.clone(), height)
.is_ok());
assert_eq!(tx2, deaggregated_tx2);
}
@ -480,7 +552,7 @@ fn hash_output() {
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let tx = build::transaction(
KernelFeatures::Plain { fee: 1 },
KernelFeatures::Plain { fee: 1.into() },
&[input(75, key_id1), output(42, key_id2), output(32, key_id3)],
&keychain,
&builder,
@ -496,8 +568,9 @@ fn hash_output() {
#[test]
fn blind_tx() {
let btx = tx2i1o();
let height = 42; // arbitrary
assert!(btx
.validate(Weighting::AsTransaction, verifier_cache())
.validate(Weighting::AsTransaction, verifier_cache(), height)
.is_ok());
// Ignored for bullet proofs, because calling range_proof_info
@ -543,8 +616,9 @@ fn tx_build_exchange() {
// Alice builds her transaction, with change, which also produces the sum
// of blinding factors before they're obscured.
let tx = Transaction::empty()
.with_kernel(TxKernel::with_features(KernelFeatures::Plain { fee: 2 }));
let tx = Transaction::empty().with_kernel(TxKernel::with_features(KernelFeatures::Plain {
fee: 2.into(),
}));
let (tx, sum) =
build::partial_transaction(tx, &[in1, in2, output(1, key_id3)], &keychain, &builder)
.unwrap();
@ -556,7 +630,7 @@ fn tx_build_exchange() {
// blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast.
let tx_final = build::transaction(
KernelFeatures::Plain { fee: 2 },
KernelFeatures::Plain { fee: 2.into() },
&[
initial_tx(tx_alice),
with_excess(blind_sum),
@ -567,8 +641,9 @@ fn tx_build_exchange() {
)
.unwrap();
let height = 42; // arbitrary
tx_final
.validate(Weighting::AsTransaction, verifier_cache())
.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
}
@ -597,9 +672,13 @@ fn reward_with_tx_block() {
let vc = verifier_cache();
let tx1 = tx2i1o();
tx1.validate(Weighting::AsTransaction, vc.clone()).unwrap();
let previous_header = BlockHeader::default();
tx1.validate(
Weighting::AsTransaction,
vc.clone(),
previous_header.height + 1,
)
.unwrap();
let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id);
block.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
@ -638,7 +717,7 @@ fn test_block_with_timelocked_tx() {
// block height and that the resulting block is valid
let tx1 = build::transaction(
KernelFeatures::HeightLocked {
fee: 2,
fee: 2.into(),
lock_height: 1,
},
&[input(5, key_id1.clone()), output(3, key_id2.clone())],
@ -662,7 +741,7 @@ fn test_block_with_timelocked_tx() {
// block height
let tx1 = build::transaction(
KernelFeatures::HeightLocked {
fee: 2,
fee: 2.into(),
lock_height: 2,
},
&[input(5, key_id1), output(3, key_id2)],
@ -686,7 +765,8 @@ fn test_block_with_timelocked_tx() {
pub fn test_verify_1i1o_sig() {
test_setup();
let tx = tx1i1o();
tx.validate(Weighting::AsTransaction, verifier_cache())
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
}
@ -694,6 +774,7 @@ pub fn test_verify_1i1o_sig() {
pub fn test_verify_2i1o_sig() {
test_setup();
let tx = tx2i1o();
tx.validate(Weighting::AsTransaction, verifier_cache())
let height = 42; // arbitrary
tx.validate(Weighting::AsTransaction, verifier_cache(), height)
.unwrap();
}

View file

@ -18,10 +18,12 @@ pub mod common;
use crate::common::tx1i10_v2_compatible;
use crate::core::core::transaction::{self, Error};
use crate::core::core::verifier_cache::LruVerifierCache;
use crate::core::core::{KernelFeatures, Output, OutputFeatures, Transaction, Weighting};
use crate::core::core::{
FeeFields, KernelFeatures, Output, OutputFeatures, Transaction, TxKernel, Weighting,
};
use crate::core::global;
use crate::core::libtx::build;
use crate::core::libtx::proof::{self, ProofBuilder};
use crate::core::libtx::{build, tx_fee};
use crate::core::{consensus, ser};
use grin_core as core;
use keychain::{ExtKeychain, Keychain};
@ -94,7 +96,9 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
let builder = proof::ProofBuilder::new(&keychain);
let mut tx = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[
build::input(10, key_id1.clone()),
build::input(10, key_id2.clone()),
@ -110,8 +114,9 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction should fail validation due to cut-through.
let height = 42; // arbitrary
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone()),
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
Err(Error::CutThrough),
);
@ -129,7 +134,7 @@ fn test_verify_cut_through_plain() -> Result<(), Error> {
.replace_outputs(outputs);
// Transaction validates successfully after applying cut-through.
tx.validate(Weighting::AsTransaction, verifier_cache.clone())?;
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height)?;
// Transaction validates via lightweight "read" validation as well.
tx.validate_read()?;
@ -153,7 +158,9 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
let builder = ProofBuilder::new(&keychain);
let mut tx = build::transaction(
KernelFeatures::Plain { fee: 0 },
KernelFeatures::Plain {
fee: FeeFields::zero(),
},
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::coinbase_input(consensus::REWARD, key_id2.clone()),
@ -169,8 +176,9 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));
// Transaction should fail validation due to cut-through.
let height = 42; // arbitrary
assert_eq!(
tx.validate(Weighting::AsTransaction, verifier_cache.clone()),
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height),
Err(Error::CutThrough),
);
@ -188,10 +196,65 @@ fn test_verify_cut_through_coinbase() -> Result<(), Error> {
.replace_outputs(outputs);
// Transaction validates successfully after applying cut-through.
tx.validate(Weighting::AsTransaction, verifier_cache.clone())?;
tx.validate(Weighting::AsTransaction, verifier_cache.clone(), height)?;
// Transaction validates via lightweight "read" validation as well.
tx.validate_read()?;
Ok(())
}
// Test coverage for FeeFields
#[test]
fn test_fee_fields() -> Result<(), Error> {
global::set_local_chain_type(global::ChainTypes::UserTesting);
global::set_local_accept_fee_base(500_000);
let keychain = ExtKeychain::from_random_seed(false)?;
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let builder = ProofBuilder::new(&keychain);
let mut tx = build::transaction(
KernelFeatures::Plain {
fee: FeeFields::new(1, 42).unwrap(),
},
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(60_000_000_000 - 84 - 42 - 21, key_id1.clone()),
],
&keychain,
&builder,
)
.expect("valid tx");
let hf4_height = 4 * consensus::TESTING_HARD_FORK_INTERVAL;
assert_eq!(
tx.accept_fee(hf4_height),
(1 * 1 + 1 * 21 + 1 * 3) * 500_000
);
assert_eq!(tx.fee(hf4_height), 42);
assert_eq!(tx.fee(hf4_height), 42);
assert_eq!(tx.shifted_fee(hf4_height), 21);
assert_eq!(
tx.accept_fee(hf4_height - 1),
(1 * 4 + 1 * 1 - 1 * 1) * 1_000_000
);
assert_eq!(tx.fee(hf4_height - 1), 42 + (1u64 << 40));
assert_eq!(tx.shifted_fee(hf4_height - 1), 42 + (1u64 << 40));
tx.body.kernels.append(&mut vec![
TxKernel::with_features(KernelFeatures::Plain {
fee: FeeFields::new(2, 84).unwrap(),
}),
TxKernel::with_features(KernelFeatures::Plain { fee: 21.into() }),
]);
assert_eq!(tx.fee(hf4_height), 147);
assert_eq!(tx.shifted_fee(hf4_height), 36);
assert_eq!(tx.aggregate_fee_fields(hf4_height), FeeFields::new(2, 147));
assert_eq!(tx_fee(1, 1, 3), 15_500_000);
Ok(())
}
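The assertions in test_fee_fields above pin down how the new FeeFields encoding behaves around HF4. Below is a minimal stand-alone sketch of that arithmetic, assuming the layout the assertions imply (fee in the low 40 bits, fee shift in the bits above them, with the shift only honoured from HF4 onward); it is not code from this commit.

// Sketch only, not code from this commit; the constants are assumptions from the note above.
fn main() {
    const FEE_BITS: u64 = 40;
    const FEE_MASK: u64 = (1u64 << FEE_BITS) - 1;

    // Encoded as in FeeFields::new(1, 42): shift in the high bits, fee below.
    let encoded: u64 = (1u64 << FEE_BITS) | 42;

    // Post-HF4 reading: fee and shift are decoded separately.
    let fee = encoded & FEE_MASK; // 42
    let fee_shift = encoded >> FEE_BITS; // 1
    assert_eq!(fee >> fee_shift, 21); // matches tx.shifted_fee(hf4_height) above

    // Pre-HF4 reading: the whole field counts as the fee,
    // which is why tx.fee(hf4_height - 1) == 42 + (1u64 << 40) above.
    assert_eq!(encoded, 42 + (1u64 << 40));
}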

View file

@ -87,7 +87,7 @@ enum_from_primitive! {
/// Max theoretical size of a block filled with outputs.
fn max_block_size() -> u64 {
(global::max_block_weight() / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64
(global::max_block_weight() / consensus::OUTPUT_WEIGHT * 708) as u64
}
// Max msg size when msg type is unknown.

View file

@ -22,7 +22,7 @@ use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction};
use self::core::genesis;
use self::core::global;
use self::core::libtx::{build, reward, ProofBuilder};
use self::core::libtx::{build, reward, ProofBuilder, DEFAULT_BASE_FEE};
use self::core::pow;
use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use self::pool::types::*;
@ -262,7 +262,7 @@ impl PoolFuzzer {
{
TransactionPool::new(
PoolConfig {
accept_fee_base: 0,
accept_fee_base: DEFAULT_BASE_FEE,
max_pool_size: 50,
max_stempool_size: 50,
mineable_max_weight: 10_000,

View file

@ -128,13 +128,13 @@ where
// * maintain dependency ordering
// * maximize cut-through
// * maximize overall fees
let header = self.blockchain.chain_head()?;
let txs = self.bucket_transactions(weighting);
// Iteratively apply the txs to the current chain state,
// rejecting any that do not result in a valid state.
// Verify these txs produce an aggregated tx below max_weight.
// Return a vec of all the valid txs.
let header = self.blockchain.chain_head()?;
let valid_txs = self.validate_raw_txs(&txs, None, &header, weighting)?;
Ok(valid_txs)
}
@ -161,7 +161,12 @@ where
let tx = transaction::aggregate(&txs)?;
// Validate the single aggregate transaction "as pool", not subject to tx weight limits.
tx.validate(Weighting::NoLimit, self.verifier_cache.clone())?;
let header = self.blockchain.chain_head()?;
tx.validate(
Weighting::NoLimit,
self.verifier_cache.clone(),
header.height,
)?;
Ok(Some(tx))
}
@ -229,7 +234,7 @@ where
) -> Result<BlockSums, PoolError> {
// Validate the tx, conditionally checking against weight limits,
// based on weight verification type.
tx.validate(weighting, self.verifier_cache.clone())?;
tx.validate(weighting, self.verifier_cache.clone(), header.height)?;
// Validate the tx against current chain state.
// Check all inputs are in the current UTXO set.
@ -304,7 +309,7 @@ where
tx: &Transaction,
header: &BlockHeader,
) -> Result<BlockSums, PoolError> {
let overage = tx.overage();
let overage = tx.overage(header.height);
let offset = {
let secp = static_secp_instance();
@ -340,19 +345,19 @@ where
// Use our bucket logic to identify the best transaction for eviction and evict it.
// We want to avoid evicting a transaction where another transaction depends on it.
// We want to evict a transaction with low fee_to_weight.
// We want to evict a transaction with low fee_rate.
pub fn evict_transaction(&mut self) {
if let Some(evictable_transaction) = self.bucket_transactions(Weighting::NoLimit).last() {
self.entries.retain(|x| x.tx != *evictable_transaction);
};
}
/// Buckets consist of a vec of txs and track the aggregate fee_to_weight.
/// Buckets consist of a vec of txs and track the aggregate fee_rate.
/// We aggregate (cut-through) dependent transactions within a bucket *unless* adding a tx
/// would reduce the aggregate fee_to_weight, in which case we start a new bucket.
/// Note this new bucket will by definition have a lower fee_to_weight than the bucket
/// would reduce the aggregate fee_rate, in which case we start a new bucket.
/// Note this new bucket will by definition have a lower fee_rate than the bucket
/// containing the tx it depends on.
/// Sorting the buckets by fee_to_weight will therefore preserve dependency ordering,
/// Sorting the buckets by fee_rate will therefore preserve dependency ordering,
/// maximizing both cut-through and overall fees.
fn bucket_transactions(&self, weighting: Weighting) -> Vec<Transaction> {
let mut tx_buckets: Vec<Bucket> = Vec::new();
@ -394,13 +399,14 @@ where
continue;
}
let height = self.blockchain.chain_head().map(|x| x.height).unwrap_or(0);
match insert_pos {
None => {
// No parent tx, just add to the end in its own bucket.
// This is the common case for non 0-conf txs in the txpool.
// We assume the tx is valid here as we validated it on the way into the txpool.
insert_pos = Some(tx_buckets.len());
tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len(), height));
}
Some(pos) => {
// We found a single parent tx, so aggregate in the bucket
@ -412,15 +418,20 @@ where
entry.tx.clone(),
weighting,
self.verifier_cache.clone(),
height,
) {
if new_bucket.fee_to_weight >= bucket.fee_to_weight {
// Only aggregate if it would not reduce the fee_to_weight ratio.
if new_bucket.fee_rate >= bucket.fee_rate {
// Only aggregate if it would not reduce the fee_rate ratio.
tx_buckets[pos] = new_bucket;
} else {
// Otherwise put it in its own bucket at the end.
// Note: This bucket will have a lower fee_to_weight
// Note: This bucket will have a lower fee_rate
// than the bucket it depends on.
tx_buckets.push(Bucket::new(entry.tx.clone(), tx_buckets.len()));
tx_buckets.push(Bucket::new(
entry.tx.clone(),
tx_buckets.len(),
height,
));
}
} else {
// Aggregation failed so discard this new tx.
@ -442,11 +453,11 @@ where
}
}
// Sort buckets by fee_to_weight (descending) and age (oldest first).
// Txs with highest fee_to_weight will be prioritized.
// Aggregation that increases the fee_to_weight of a bucket will prioritize the bucket.
// Sort buckets by fee_rate (descending) and age (oldest first).
// Txs with highest fee_rate will be prioritized.
// Aggregation that increases the fee_rate of a bucket will prioritize the bucket.
// Oldest (based on pool insertion time) will then be prioritized.
tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_to_weight), x.age_idx));
tx_buckets.sort_unstable_by_key(|x| (Reverse(x.fee_rate), x.age_idx));
tx_buckets.into_iter().flat_map(|x| x.raw_txs).collect()
}
@ -504,18 +515,18 @@ where
struct Bucket {
raw_txs: Vec<Transaction>,
fee_to_weight: u64,
fee_rate: u64,
age_idx: usize,
}
impl Bucket {
/// Construct a new bucket with the given tx.
/// also specifies an "age_idx" so we can sort buckets by age
/// as well as fee_to_weight. Txs are maintained in the pool in insert order
/// as well as fee_rate. Txs are maintained in the pool in insert order
/// so buckets with low age_idx contain oldest txs.
fn new(tx: Transaction, age_idx: usize) -> Bucket {
fn new(tx: Transaction, age_idx: usize, height: u64) -> Bucket {
Bucket {
fee_to_weight: tx.fee_to_weight(),
fee_rate: tx.fee_rate(height),
raw_txs: vec![tx],
age_idx,
}
@ -526,13 +537,14 @@ impl Bucket {
new_tx: Transaction,
weighting: Weighting,
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
height: u64,
) -> Result<Bucket, PoolError> {
let mut raw_txs = self.raw_txs.clone();
raw_txs.push(new_tx);
let agg_tx = transaction::aggregate(&raw_txs)?;
agg_tx.validate(weighting, verifier_cache)?;
agg_tx.validate(weighting, verifier_cache, height)?;
Ok(Bucket {
fee_to_weight: agg_tx.fee_to_weight(),
fee_rate: agg_tx.fee_rate(height),
raw_txs: raw_txs,
age_idx: self.age_idx,
})
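To make the rename above concrete: fee_rate is the transaction's (shifted) fee divided by its weight at the given height, and buckets sort by descending fee_rate with age_idx breaking ties, the same key bucket_transactions uses. A rough stand-alone sketch of that ordering follows, with the division assumed from the fee/weight assertions elsewhere in this commit (e.g. 2_500_000 / 88 == 28409):

// Sketch only; stand-in tuples, not the pool's Bucket type.
use std::cmp::Reverse;

fn fee_rate(shifted_fee: u64, weight: u64) -> u64 {
    shifted_fee / weight.max(1)
}

fn main() {
    // (shifted_fee, weight, age_idx) triples standing in for buckets.
    let mut buckets = vec![(90_000u64, 46u64, 1usize), (2_500_000, 88, 0), (30_000, 25, 2)];
    // Highest fee_rate first; on ties, the oldest (lowest age_idx) wins.
    buckets.sort_unstable_by_key(|&(f, w, age_idx)| (Reverse(fee_rate(f, w)), age_idx));
    assert_eq!(fee_rate(2_500_000, 88), 28409);
    assert_eq!(buckets[0], (2_500_000, 88, 0));
}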

View file

@ -182,7 +182,7 @@ where
// NRD kernels only valid post HF3 and if NRD feature enabled.
self.verify_kernel_variants(tx, header)?;
// Do we have the capacity to accept this transaction?
// Does this transaction pay the required fees and fit within the pool capacity?
let acceptability = self.is_acceptable(tx, stem);
let mut evict = false;
if !stem && acceptability.as_ref().err() == Some(&PoolError::OverCapacity) {
@ -193,8 +193,12 @@ where
// Make sure the transaction is valid before anything else.
// Validate tx accounting for max tx weight.
tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())
.map_err(PoolError::InvalidTx)?;
tx.validate(
Weighting::AsTransaction,
self.verifier_cache.clone(),
header.height,
)
.map_err(PoolError::InvalidTx)?;
// Check the tx lock_time is valid based on current chain state.
self.blockchain.verify_tx_lock_height(tx)?;
@ -274,14 +278,19 @@ where
};
// Validate the tx to ensure our converted inputs are correct.
tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;
let header = self.chain_head()?;
tx.validate(
Weighting::AsTransaction,
self.verifier_cache.clone(),
header.height,
)?;
Ok(PoolEntry::new(tx, entry.src))
}
// Evict a transaction from the txpool.
// Uses bucket logic to identify the "last" transaction.
// No other tx depends on it and it has low fee_to_weight.
// No other tx depends on it and it has low fee_rate.
pub fn evict_from_txpool(&mut self) {
self.txpool.evict_transaction()
}
@ -362,14 +371,12 @@ where
return Err(PoolError::OverCapacity);
}
// for a basic transaction (1 input, 2 outputs) -
// (-1 * 1) + (4 * 2) + 1 = 8
// 8 * 10 = 80
if self.config.accept_fee_base > 0 {
let threshold = (tx.tx_weight() as u64) * self.config.accept_fee_base;
if tx.fee() < threshold {
return Err(PoolError::LowFeeTransaction(threshold));
}
// weight for a basic transaction (2 inputs, 2 outputs, 1 kernel) -
// (2 * 1) + (2 * 21) + (1 * 3) = 47
// minfees = 47 * 500_000 = 23_500_000
let header = self.chain_head()?;
if tx.shifted_fee(header.height) < tx.accept_fee(header.height) {
return Err(PoolError::LowFeeTransaction(tx.shifted_fee(header.height)));
}
Ok(())
}
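Spelled out, the fee floor described in the comment above is a weight-times-base product. The following is a sketch under the weights this diff's comments imply (1 per input, 21 per output, 3 per kernel) and the default base of 500_000, not the pool's actual implementation:

// Sketch of the acceptance threshold; weight constants taken from the comments above.
fn accept_fee(num_inputs: u64, num_outputs: u64, num_kernels: u64, accept_fee_base: u64) -> u64 {
    (num_inputs * 1 + num_outputs * 21 + num_kernels * 3) * accept_fee_base
}

fn main() {
    // The "basic" 2-input / 2-output / 1-kernel tx: weight 47, min fee 23_500_000.
    assert_eq!(accept_fee(2, 2, 1, 500_000), 23_500_000);
    // A tx passes is_acceptable when its shifted_fee(height) >= this threshold.
}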

View file

@ -21,6 +21,7 @@ use self::core::core::committed;
use self::core::core::hash::Hash;
use self::core::core::transaction::{self, Transaction};
use self::core::core::{BlockHeader, BlockSums, Inputs, OutputIdentifier};
use self::core::global::DEFAULT_ACCEPT_FEE_BASE;
use chrono::prelude::*;
use failure::Fail;
use grin_core as core;
@ -139,8 +140,9 @@ impl Default for PoolConfig {
}
}
fn default_accept_fee_base() -> u64 {
consensus::MILLI_GRIN
/// Make an output (of weight 21) cost about 1 Grin-cent by default, while keeping a round number
pub fn default_accept_fee_base() -> u64 {
DEFAULT_ACCEPT_FEE_BASE
}
fn default_reorg_cache_period() -> u32 {
30
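Numerically, the new default works out as follows; this is a sketch assuming GRIN_BASE = 1_000_000_000 nanogrin, so one Grin-cent is 10_000_000:

// Sketch only; GRIN_BASE is an assumption stated above, not read from this diff.
fn main() {
    let grin_base: u64 = 1_000_000_000;
    let grin_cent = grin_base / 100; // 10_000_000 nanogrin
    let default_accept_fee_base: u64 = 500_000; // the value used throughout this commit's tests
    let output_weight: u64 = 21;
    // One output costs 21 * 500_000 = 10_500_000 nanogrin, i.e. about one Grin-cent.
    assert_eq!(output_weight * default_accept_fee_base, 10_500_000);
    assert!(output_weight * default_accept_fee_base > grin_cent);
}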

View file

@ -31,6 +31,7 @@ use std::sync::Arc;
fn test_transaction_pool_block_building() -> Result<(), PoolError> {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(1);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.block_building";
@ -48,25 +49,27 @@ fn test_transaction_pool_block_building() -> Result<(), PoolError> {
verifier_cache,
);
add_some_blocks(&chain, 3, &keychain);
// mine enough blocks to get past HF4
add_some_blocks(&chain, 4 * 3, &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 400]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, &[initial_tx], &keychain);
let header = chain.head_header().unwrap();
let root_tx_1 = test_transaction(&keychain, vec![10, 20], vec![24]);
let root_tx_2 = test_transaction(&keychain, vec![30], vec![28]);
let root_tx_3 = test_transaction(&keychain, vec![40], vec![38]);
let root_tx_1 = test_transaction(&keychain, vec![100, 200], vec![240]);
let root_tx_2 = test_transaction(&keychain, vec![300], vec![270]);
let root_tx_3 = test_transaction(&keychain, vec![400], vec![370]);
let child_tx_1 = test_transaction(&keychain, vec![24], vec![22]);
let child_tx_2 = test_transaction(&keychain, vec![38], vec![32]);
let child_tx_1 = test_transaction(&keychain, vec![240], vec![210]);
let child_tx_2 = test_transaction(&keychain, vec![370], vec![320]);
{
// Add the three root txs to the pool.

View file

@ -30,6 +30,7 @@ use std::sync::Arc;
fn test_block_building_max_weight() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(1);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
@ -48,14 +49,18 @@ fn test_block_building_max_weight() {
verifier_cache,
);
add_some_blocks(&chain, 3, &keychain);
// mine past HF4 to see the effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]);
let initial_tx = test_transaction_spending_coinbase(
&keychain,
&header_1,
vec![1_000_000, 2_000_000, 3_000_000, 10_000_000],
);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, &[initial_tx], &keychain);
@ -65,26 +70,32 @@ fn test_block_building_max_weight() {
// Build some dependent txs to add to the txpool.
// We will build a block from a subset of these.
let txs = vec![
test_transaction(&keychain, vec![1000], vec![390, 130, 120, 110]),
test_transaction(&keychain, vec![100], vec![90, 1]),
test_transaction(&keychain, vec![90], vec![80, 2]),
test_transaction(&keychain, vec![200], vec![199]),
test_transaction(&keychain, vec![300], vec![290, 3]),
test_transaction(&keychain, vec![290], vec![280, 4]),
test_transaction(
&keychain,
vec![10_000_000],
vec![3_900_000, 1_300_000, 1_200_000, 1_100_000],
),
test_transaction(&keychain, vec![1_000_000], vec![900_000, 10_000]),
test_transaction(&keychain, vec![900_000], vec![800_000, 20_000]),
test_transaction(&keychain, vec![2_000_000], vec![1_970_000]),
test_transaction(&keychain, vec![3_000_000], vec![2_900_000, 30_000]),
test_transaction(&keychain, vec![2_900_000], vec![2_800_000, 40_000]),
];
// Fees and weights of our original txs in insert order.
assert_eq!(
txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
[250, 9, 8, 1, 7, 6]
txs.iter().map(|x| x.fee(header.height)).collect::<Vec<_>>(),
[2_500_000, 90_000, 80_000, 30_000, 70_000, 60_000]
);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[16, 8, 8, 4, 8, 8]
txs.iter().map(|x| x.weight()).collect::<Vec<_>>(),
[88, 46, 46, 25, 46, 46]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[15625, 1125, 1000, 250, 875, 750]
txs.iter()
.map(|x| x.fee_rate(header.height))
.collect::<Vec<_>>(),
[28409, 1956, 1739, 1200, 1521, 1304]
);
// Populate our txpool with the txs.
@ -101,16 +112,18 @@ fn test_block_building_max_weight() {
// Fees and weights of the "mineable" txs.
assert_eq!(
txs.iter().map(|x| x.fee()).collect::<Vec<_>>(),
[250, 9, 8, 7]
txs.iter().map(|x| x.fee(header.height)).collect::<Vec<_>>(),
[2_500_000, 90_000, 80_000, 70_000]
);
assert_eq!(
txs.iter().map(|x| x.tx_weight()).collect::<Vec<_>>(),
[16, 8, 8, 8]
txs.iter().map(|x| x.weight()).collect::<Vec<_>>(),
[88, 46, 46, 46]
);
assert_eq!(
txs.iter().map(|x| x.fee_to_weight()).collect::<Vec<_>>(),
[15625, 1125, 1000, 875]
txs.iter()
.map(|x| x.fee_rate(header.height))
.collect::<Vec<_>>(),
[28409, 1956, 1739, 1521]
);
add_block(&chain, &txs, &keychain);

View file

@ -30,6 +30,7 @@ use std::sync::Arc;
fn test_transaction_pool_block_reconciliation() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(1);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.block_reconciliation";
@ -47,13 +48,15 @@ fn test_transaction_pool_block_reconciliation() {
verifier_cache,
);
add_some_blocks(&chain, 3, &keychain);
// mine past HF4 to see the effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, &[initial_tx], &keychain);
@ -66,34 +69,34 @@ fn test_transaction_pool_block_reconciliation() {
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(&keychain, vec![10], vec![8]);
let conflict_transaction = test_transaction(&keychain, vec![20], vec![12, 6]);
let valid_transaction = test_transaction(&keychain, vec![30], vec![13, 15]);
let block_transaction = test_transaction(&keychain, vec![1_000], vec![800]);
let conflict_transaction = test_transaction(&keychain, vec![2_000], vec![1_200, 600]);
let valid_transaction = test_transaction(&keychain, vec![3_000], vec![1_300, 1_500]);
// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(&keychain, vec![8], vec![5, 1]);
let block_child = test_transaction(&keychain, vec![800], vec![500, 100]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(&keychain, vec![5], vec![3]);
let pool_child = test_transaction(&keychain, vec![500], vec![300]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(&keychain, vec![12], vec![2]);
let conflict_child = test_transaction(&keychain, vec![1_200], vec![200]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(&keychain, vec![6], vec![4]);
let conflict_valid_child = test_transaction(&keychain, vec![600], vec![400]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(&keychain, vec![13], vec![9]);
let valid_child_conflict = test_transaction(&keychain, vec![1_300], vec![900]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(&keychain, vec![15], vec![11]);
let valid_child_valid = test_transaction(&keychain, vec![1_500], vec![1_100]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(&keychain, vec![2, 11], vec![7]);
let mixed_child = test_transaction(&keychain, vec![200, 1_100], vec![700]);
let txs_to_add = vec![
block_transaction,
@ -122,13 +125,13 @@ fn test_transaction_pool_block_reconciliation() {
// Now we prepare the block that will cause the above conditions to be met.
// First, the transactions we want in the block:
// - Copy of 1
let block_tx_1 = test_transaction(&keychain, vec![10], vec![8]);
let block_tx_1 = test_transaction(&keychain, vec![1_000], vec![800]);
// - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(&keychain, vec![20], vec![6]);
let block_tx_2 = test_transaction(&keychain, vec![2_000], vec![600]);
// - Copy of 4
let block_tx_3 = test_transaction(&keychain, vec![8], vec![5, 1]);
let block_tx_3 = test_transaction(&keychain, vec![800], vec![500, 100]);
// - Output conflict w/ 8
let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);
let block_tx_4 = test_transaction(&keychain, vec![4_000], vec![900, 2_900]);
let block_txs = &[block_tx_1, block_tx_2, block_tx_3, block_tx_4];
add_block(&chain, block_txs, &keychain);

View file

@ -31,6 +31,7 @@ use std::sync::Arc;
fn test_coinbase_maturity() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(50_000_000);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.coinbase_maturity";

View file

@ -36,6 +36,7 @@ use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::convert::TryInto;
use std::fs;
use std::sync::Arc;
@ -79,7 +80,7 @@ where
let prev = chain.head_header().unwrap();
let height = prev.height + 1;
let next_header_info = consensus::next_difficulty(height, chain.difficulty_iter().unwrap());
let fee = txs.iter().map(|x| x.fee()).sum();
let fee = txs.iter().map(|x| x.fee(height)).sum();
let key_id = ExtKeychainPath::new(1, height as u32, 0, 0, 0).to_identifier();
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), &key_id, fee, false).unwrap();
@ -166,7 +167,7 @@ where
{
TransactionPool::new(
PoolConfig {
accept_fee_base: 0,
accept_fee_base: default_accept_fee_base(),
reorg_cache_period: 30,
max_pool_size: 50,
max_stempool_size: 50,
@ -207,7 +208,9 @@ where
}
build::transaction(
KernelFeatures::Plain { fee: fees as u64 },
KernelFeatures::Plain {
fee: (fees as u64).try_into().unwrap(),
},
&tx_elements,
keychain,
&ProofBuilder::new(keychain),
@ -232,7 +235,9 @@ where
keychain,
input_values,
output_values,
KernelFeatures::Plain { fee: fees as u64 },
KernelFeatures::Plain {
fee: (fees as u64).try_into().unwrap(),
},
)
}

View file

@ -34,6 +34,7 @@ use std::sync::Arc;
fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(10);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
@ -59,21 +60,23 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
// Now create tx to spend an early coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
let initial_tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, &[initial_tx], &keychain);
add_some_blocks(&chain, 5, &keychain);
// mine past HF4 to see the effect of set_local_accept_fee_base
add_some_blocks(&chain, 8, &keychain);
let header = chain.head_header().unwrap();
assert_eq!(header.height, 3 * consensus::TESTING_HARD_FORK_INTERVAL);
assert_eq!(header.version, HeaderVersion(4));
assert_eq!(header.height, 4 * consensus::TESTING_HARD_FORK_INTERVAL);
assert_eq!(header.version, HeaderVersion(5));
let (tx1, tx2, tx3) = {
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 6,
fee: 600.into(),
relative_height: NRDRelativeHeight::new(2)?,
});
let msg = kernel.msg_to_sign().unwrap();
@ -95,23 +98,23 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
let tx1 = test_transaction_with_kernel(
&keychain,
vec![10, 20],
vec![24],
vec![1_000, 2_000],
vec![2_400],
kernel.clone(),
excess.clone(),
);
let tx2 = test_transaction_with_kernel(
&keychain,
vec![24],
vec![18],
vec![2_400],
vec![1_800],
kernel2.clone(),
excess.clone(),
);
// Now reuse kernel excess for tx3 but with NRD relative_height=1 (and different fee).
let mut kernel_short = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
fee: 3,
fee: 300.into(),
relative_height: NRDRelativeHeight::new(1)?,
});
let msg_short = kernel_short.msg_to_sign().unwrap();
@ -123,8 +126,8 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
let tx3 = test_transaction_with_kernel(
&keychain,
vec![18],
vec![15],
vec![1_800],
vec![1_500],
kernel_short.clone(),
excess.clone(),
);

View file

@ -56,15 +56,16 @@ fn test_nrd_kernels_disabled() {
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
let tx =
test_transaction_spending_coinbase(&keychain, &header_1, vec![1_000, 2_000, 3_000, 4_000]);
add_block(&chain, &[tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
vec![1_000, 2_000],
vec![2_400],
KernelFeatures::NoRecentDuplicate {
fee: 6,
fee: 600.into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);

View file

@ -32,6 +32,7 @@ use std::sync::Arc;
fn test_nrd_kernels_enabled() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(10);
global::set_local_nrd_enabled(true);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
@ -56,15 +57,20 @@ fn test_nrd_kernels_enabled() {
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
let mg = consensus::MILLI_GRIN;
let tx = test_transaction_spending_coinbase(
&keychain,
&header_1,
vec![1_000 * mg, 2_000 * mg, 3_000 * mg, 4_000 * mg],
);
add_block(&chain, &[tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,
vec![10, 20],
vec![24],
vec![1_000 * mg, 2_000 * mg],
vec![2_400 * mg],
KernelFeatures::NoRecentDuplicate {
fee: 6,
fee: (600 * mg as u32).into(),
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
);
@ -83,7 +89,7 @@ fn test_nrd_kernels_enabled() {
assert_eq!(header.height, 3 * consensus::TESTING_HARD_FORK_INTERVAL);
assert_eq!(header.version, HeaderVersion(4));
// NRD kernel support not enabled via feature flag, so not valid.
// NRD kernel support enabled via feature flag, so valid.
assert_eq!(
pool.add_to_pool(test_source(), tx_1.clone(), false, &header),
Ok(())

View file

@ -32,6 +32,7 @@ use std::sync::Arc;
fn test_the_transaction_pool() {
util::init_test_logger();
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
global::set_local_accept_fee_base(1);
let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();
let db_root = "target/.transaction_pool";
@ -49,7 +50,8 @@ fn test_the_transaction_pool() {
verifier_cache.clone(),
);
add_some_blocks(&chain, 3, &keychain);
// mine past HF4 to see the effect of set_local_accept_fee_base
add_some_blocks(&chain, 4 * 3, &keychain);
let header = chain.head_header().unwrap();
let header_1 = chain.get_header_by_height(1).unwrap();
@ -74,9 +76,9 @@ fn test_the_transaction_pool() {
}
// tx1 spends some outputs from the initial test tx.
let tx1 = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let tx1 = test_transaction(&keychain, vec![500, 600], vec![469, 569]);
// tx2 spends some outputs from both tx1 and the initial test tx.
let tx2 = test_transaction(&keychain, vec![499, 700], vec![498]);
let tx2 = test_transaction(&keychain, vec![469, 700], vec![498]);
{
// Check we have a single initial tx in the pool.
@ -105,7 +107,7 @@ fn test_the_transaction_pool() {
// Test adding a duplicate tx with the same input and outputs.
// Note: not the *same* tx, just same underlying inputs/outputs.
{
let tx1a = test_transaction(&keychain, vec![500, 600], vec![499, 599]);
let tx1a = test_transaction(&keychain, vec![500, 600], vec![469, 569]);
assert!(pool
.add_to_pool(test_source(), tx1a, false, &header)
.is_err());
@ -113,7 +115,7 @@ fn test_the_transaction_pool() {
// Test adding a tx attempting to spend a non-existent output.
{
let bad_tx = test_transaction(&keychain, vec![10_001], vec![10_000]);
let bad_tx = test_transaction(&keychain, vec![10_001], vec![9_900]);
assert!(pool
.add_to_pool(test_source(), bad_tx, false, &header)
.is_err());
@ -130,7 +132,7 @@ fn test_the_transaction_pool() {
// Confirm the tx pool correctly identifies an invalid tx (already spent).
{
let tx3 = test_transaction(&keychain, vec![500], vec![497]);
let tx3 = test_transaction(&keychain, vec![500], vec![467]);
assert!(pool
.add_to_pool(test_source(), tx3, false, &header)
.is_err());
@ -139,9 +141,9 @@ fn test_the_transaction_pool() {
// Now add a couple of txs to the stempool (stem = true).
{
let tx = test_transaction(&keychain, vec![599], vec![598]);
let tx = test_transaction(&keychain, vec![569], vec![538]);
pool.add_to_pool(test_source(), tx, true, &header).unwrap();
let tx2 = test_transaction(&keychain, vec![598], vec![597]);
let tx2 = test_transaction(&keychain, vec![538], vec![507]);
pool.add_to_pool(test_source(), tx2, true, &header).unwrap();
assert_eq!(pool.total_size(), 3);
assert_eq!(pool.stempool.size(), 2);
@ -165,7 +167,7 @@ fn test_the_transaction_pool() {
// Adding a duplicate tx to the stempool will result in it being fluffed.
// This handles the case of the stem path having a cycle in it.
{
let tx = test_transaction(&keychain, vec![597], vec![596]);
let tx = test_transaction(&keychain, vec![507], vec![476]);
pool.add_to_pool(test_source(), tx.clone(), true, &header)
.unwrap();
assert_eq!(pool.total_size(), 4);
@ -185,14 +187,15 @@ fn test_the_transaction_pool() {
// We will do this be adding a new tx to the pool
// that is a superset of a tx already in the pool.
{
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
let tx4 = test_transaction(&keychain, vec![800], vec![769]);
// tx1 and tx2 are already in the txpool (in aggregated form)
// tx4 is the "new" part of this aggregated tx that we care about
let agg_tx = transaction::aggregate(&[tx1.clone(), tx2.clone(), tx4]).unwrap();
let height = 12 + 1;
agg_tx
.validate(Weighting::AsTransaction, verifier_cache.clone())
.validate(Weighting::AsTransaction, verifier_cache.clone(), height)
.unwrap();
pool.add_to_pool(test_source(), agg_tx, false, &header)

View file

@ -150,6 +150,7 @@ fn process_fluff_phase(
agg_tx.validate(
transaction::Weighting::AsTransaction,
verifier_cache.clone(),
header.height,
)?;
tx_pool.add_to_pool(TxSource::Fluff, agg_tx, false, &header)?;

View file

@ -166,7 +166,7 @@ fn build_block(
};
// build the coinbase and the block itself
let fees = txs.iter().map(|tx| tx.fee()).sum();
let fees = txs.iter().map(|tx| tx.fee(head.height)).sum();
let height = head.height + 1;
let block_fees = BlockFees {
fees,

View file

@ -146,7 +146,7 @@ fn real_main() -> i32 {
log_build_info();
// Initialize our global chain_type, feature flags (NRD kernel support currently), and future_time_limit.
// Initialize our global chain_type, feature flags (NRD kernel support currently), accept_fee_base, and future_time_limit.
// These are read via global and not read from config beyond this point.
global::init_global_chain_type(config.members.as_ref().unwrap().server.chain_type);
info!("Chain: {:?}", global::get_chain_type());
@ -160,6 +160,16 @@ fn real_main() -> i32 {
global::init_global_nrd_enabled(true);
}
}
global::init_global_accept_fee_base(
config
.members
.as_ref()
.unwrap()
.server
.pool_config
.accept_fee_base,
);
info!("Accept Fee Base: {:?}", global::get_accept_fee_base());
global::init_global_future_time_limit(config.members.unwrap().server.future_time_limit);
info!("Future Time Limit: {:?}", global::get_future_time_limit());
log_feature_flags();
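Together with the test changes earlier in this commit, the accept_fee_base plumbing follows the usual global/thread-local pattern: the node publishes the configured value once at startup, while tests override it per thread. A usage sketch built only from the global functions visible in this diff; the precedence of the thread-local value over the global one is assumed, mirroring the other global flags:

// Sketch of intended usage; not code from this commit.
use grin_core::global;

fn main() {
    // Node startup: publish the configured value once, process-wide.
    global::init_global_accept_fee_base(global::DEFAULT_ACCEPT_FEE_BASE);
    assert_eq!(global::get_accept_fee_base(), global::DEFAULT_ACCEPT_FEE_BASE);

    // Tests: override for the current thread only, as the pool tests above do.
    global::set_local_accept_fee_base(1);
    assert_eq!(global::get_accept_fee_base(), 1); // thread-local takes precedence (assumed)
}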