mirror of https://github.com/mimblewimble/grin.git
synced 2025-01-20 19:11:08 +03:00
Introduce CommitOnly variant of Inputs (#3419)
* Introduce CommitOnly variant of Inputs. Introduce CommitWrapper so we can sort commit only inputs correctly.
* remember to resort if converting
* write inputs based on variant and protocol version
* read and write protocol version specific inputs
* store full blocks in local db in v3, convert to v2 when relaying to v2 peers
* add debug version_str for inputs
* no assumptions about spent index sort order
* add additional version debug logs
* fix ser/deser tests for proto v3
* cleanup coinbase maturity
* rework pool to better handle v2 conversion robustly
* cleanup txpool add_to_pool
* fix nrd kernel test
* move init conversion earlier
* cleanup
* cleanup based on PR feedback
This commit is contained in:
parent 133089e985
commit 7dc94576bd

27 changed files with 593 additions and 318 deletions
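Before the diff, a condensed, self-contained sketch of the two input representations this change introduces. It is assembled from the hunks below but uses simplified stand-in types: the real CommitWrapper and Input wrap secp Pedersen commitments and derive their ordering via hashable_ord!, not the plain derives shown here.

// Sketch only: simplified stand-ins for the real grin types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Commitment([u8; 33]); // stand-in for a Pedersen commitment

// Commit-only input, the "v3" wire representation (Inputs::CommitOnly).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct CommitWrapper {
    commit: Commitment,
}

// Pre-v3 input carrying output features alongside the commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Input {
    features: u8, // OutputFeatures in the real code
    commit: Commitment,
}

// Wrapper around a vec of inputs, mirroring the enum added below.
#[derive(Debug, Clone)]
enum Inputs {
    CommitOnly(Vec<CommitWrapper>),
    FeaturesAndCommit(Vec<Input>),
}

fn main() {
    // Converting FeaturesAndCommit -> CommitOnly drops the features byte,
    // which changes the sort key, hence "remember to resort if converting".
    let inputs = vec![
        Input { features: 1, commit: Commitment([2; 33]) },
        Input { features: 0, commit: Commitment([1; 33]) },
    ];
    let mut commits: Vec<CommitWrapper> = inputs
        .iter()
        .map(|i| CommitWrapper { commit: i.commit })
        .collect();
    commits.sort_unstable();
    println!("{:?}", Inputs::CommitOnly(commits));
}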
@@ -34,7 +34,7 @@ use crate::types::{
 	BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus,
 };
 use crate::util::secp::pedersen::{Commitment, RangeProof};
-use crate::util::RwLock;
+use crate::{util::RwLock, ChainStore};
 use grin_store::Error::NotFoundErr;
 use std::collections::HashMap;
 use std::fs::{self, File};
@@ -171,6 +171,10 @@ impl Chain {
 	) -> Result<Chain, Error> {
 		let store = Arc::new(store::ChainStore::new(&db_root)?);
 
+		// DB migrations to be run prior to the chain being used.
+		// Migrate full blocks to protocol version v3.
+		Chain::migrate_db_v2_v3(&store)?;
+
 		// open the txhashset, creating a new one if necessary
 		let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;
 
@@ -218,12 +222,6 @@ impl Chain {
 			genesis: genesis.header,
 		};
 
-		// DB migrations to be run prior to the chain being used.
-		{
-			// Migrate full blocks to protocol version v2.
-			chain.migrate_db_v1_v2()?;
-		}
-
 		chain.log_heads()?;
 
 		Ok(chain)
@@ -275,11 +273,12 @@ impl Chain {
 	/// We also need to support relaying blocks with FeaturesAndCommit inputs to peers.
 	/// So we need a way to convert blocks from CommitOnly to FeaturesAndCommit.
 	/// Validating the inputs against the utxo_view allows us to look the outputs up.
-	fn convert_block_v2(&self, block: Block) -> Result<Block, Error> {
+	pub fn convert_block_v2(&self, block: Block) -> Result<Block, Error> {
 		debug!(
-			"convert_block_v2: {} at {}",
+			"convert_block_v2: {} at {} ({} -> v2)",
 			block.header.hash(),
-			block.header.height
+			block.header.height,
+			block.inputs().version_str(),
 		);
 
 		if block.inputs().is_empty() {
@@ -291,18 +290,19 @@ impl Chain {
 
 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
-		let outputs =
+		let inputs: Vec<_> =
 			txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
 				let previous_header = batch.get_previous_header(&block.header)?;
 				pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;
 				ext.extension
 					.utxo_view(ext.header_extension)
-					.validate_inputs(block.inputs(), batch)
+					.validate_inputs(&block.inputs(), batch)
+					.map(|outputs| outputs.into_iter().map(|(out, _)| out).collect())
 			})?;
-		let outputs: Vec<_> = outputs.into_iter().map(|(out, _)| out).collect();
+		let inputs = inputs.as_slice().into();
 		Ok(Block {
 			header: block.header,
-			body: block.body.replace_inputs(outputs.into()),
+			body: block.body.replace_inputs(inputs),
 		})
 	}
 
@@ -397,8 +397,9 @@ impl Chain {
 		// Only do this once we know the header PoW is valid.
 		self.check_orphan(&b, opts)?;
 
-		// Convert block to FeaturesAndCommit inputs.
-		// We know this block is not an orphan and header is valid at this point.
+		// We can only reliably convert to "v2" if not an orphan (may spend output from previous block).
+		// We convert from "v3" to "v2" by looking up outputs to be spent.
+		// This conversion also ensures a block received in "v2" has valid input features (prevents malleability).
 		let b = self.convert_block_v2(b)?;
 
 		let (maybe_new_head, prev_head) = {
@@ -646,7 +647,7 @@ impl Chain {
 	/// that would be spent by the inputs.
 	pub fn validate_inputs(
 		&self,
-		inputs: Inputs,
+		inputs: &Inputs,
 	) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
 		let header_pmmr = self.header_pmmr.read();
 		let txhashset = self.txhashset.read();
@@ -662,12 +663,12 @@ impl Chain {
 
 	/// Verify we are not attempting to spend a coinbase output
 	/// that has not yet sufficiently matured.
-	pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
+	pub fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), Error> {
 		let height = self.next_block_height()?;
 		let header_pmmr = self.header_pmmr.read();
 		let txhashset = self.txhashset.read();
 		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
-			utxo.verify_coinbase_maturity(&tx.inputs(), height, batch)?;
+			utxo.verify_coinbase_maturity(inputs, height, batch)?;
 			Ok(())
 		})
 	}
@@ -1403,14 +1404,13 @@ impl Chain {
 		self.header_pmmr.read().get_header_hash_by_height(height)
 	}
 
-	/// Migrate our local db from v1 to v2.
-	/// This covers blocks which themselves contain transactions.
-	/// Transaction kernels changed in v2 due to "variable size kernels".
-	fn migrate_db_v1_v2(&self) -> Result<(), Error> {
-		let store_v1 = self.store.with_version(ProtocolVersion(1));
-		let batch = store_v1.batch()?;
+	/// Migrate our local db from v2 to v3.
+	/// "commit only" inputs.
+	fn migrate_db_v2_v3(store: &ChainStore) -> Result<(), Error> {
+		let store_v2 = store.with_version(ProtocolVersion(2));
+		let batch = store_v2.batch()?;
 		for (_, block) in batch.blocks_iter()? {
-			batch.migrate_block(&block, ProtocolVersion(2))?;
+			batch.migrate_block(&block, ProtocolVersion(3))?;
 		}
 		batch.commit()?;
 		Ok(())
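The convert_block_v2 path above resolves each commit-only input against the UTXO view. A minimal sketch of that idea under hypothetical simplified types (the real lookup goes through the txhashset extension and returns OutputIdentifier plus MMR position):

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
struct Commitment(u64); // stand-in for a 33-byte Pedersen commitment

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct OutputIdentifier {
    features: u8, // plain vs coinbase in the real code
    commit: Commitment,
}

// Resolve each spent commitment against the unspent set; a missing entry
// maps to the AlreadySpent error in the real validate_inputs.
fn convert_inputs_v2(
    commits: &[Commitment],
    utxo: &HashMap<Commitment, OutputIdentifier>,
) -> Result<Vec<OutputIdentifier>, String> {
    let mut outs = commits
        .iter()
        .map(|c| utxo.get(c).copied().ok_or(format!("already spent: {:?}", c)))
        .collect::<Result<Vec<_>, _>>()?;
    outs.sort_unstable(); // conversion must re-sort (see commit message)
    Ok(outs)
}

fn main() {
    let mut utxo = HashMap::new();
    utxo.insert(Commitment(7), OutputIdentifier { features: 0, commit: Commitment(7) });
    println!("{:?}", convert_inputs_v2(&[Commitment(7)], &utxo));
}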
@@ -84,12 +84,13 @@ pub fn process_block(
 	ctx: &mut BlockContext<'_>,
 ) -> Result<(Option<Tip>, BlockHeader), Error> {
 	debug!(
-		"pipe: process_block {} at {} [in/out/kern: {}/{}/{}]",
+		"pipe: process_block {} at {} [in/out/kern: {}/{}/{}] ({})",
 		b.hash(),
 		b.header.height,
 		b.inputs().len(),
 		b.outputs().len(),
 		b.kernels().len(),
+		b.inputs().version_str(),
 	);
 
 	// Read current chain head from db via the batch.
@@ -206,6 +206,13 @@ impl<'a> Batch<'a> {
 	/// Save the block to the db.
 	/// Note: the block header is not saved to the db here, assumes this has already been done.
 	pub fn save_block(&self, b: &Block) -> Result<(), Error> {
+		debug!(
+			"save_block: {} at {} ({} -> v{})",
+			b.header.hash(),
+			b.header.height,
+			b.inputs().version_str(),
+			self.db.protocol_version(),
+		);
 		self.db.put_ser(&to_key(BLOCK_PREFIX, b.hash())[..], b)?;
 		Ok(())
 	}
@@ -222,7 +229,7 @@ impl<'a> Batch<'a> {
 	/// Block may have been read using a previous protocol version but we do not actually care.
 	pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> {
 		self.db
-			.put_ser_with_version(&to_key(BLOCK_PREFIX, &mut b.hash())[..], b, version)?;
+			.put_ser_with_version(&to_key(BLOCK_PREFIX, b.hash())[..], b, version)?;
 		Ok(())
 	}
 
@@ -1105,7 +1105,7 @@ impl<'a> Extension<'a> {
 		// Remove the spent outputs from the output_pos index.
 		let spent = self
 			.utxo_view(header_ext)
-			.validate_inputs(b.inputs(), batch)?;
+			.validate_inputs(&b.inputs(), batch)?;
 		for (out, pos) in &spent {
 			self.apply_input(out.commitment(), *pos)?;
 			affected_pos.push(pos.pos);
@@ -1371,13 +1371,14 @@ impl<'a> Extension<'a> {
 		}
 
 		// Update output_pos based on "unspending" all spent pos from this block.
-		// This is necessary to ensure the output_pos index correclty reflects a
+		// This is necessary to ensure the output_pos index correctly reflects a
 		// reused output commitment. For example an output at pos 1, spent, reused at pos 2.
 		// The output_pos index should be updated to reflect the old pos 1 when unspent.
 		if let Ok(spent) = spent {
-			let inputs: Vec<_> = block.inputs().into();
-			for (input, pos) in inputs.iter().zip(spent) {
-				batch.save_output_pos_height(&input.commitment(), pos)?;
+			for pos in spent {
+				if let Some(out) = self.output_pmmr.get_data(pos.pos) {
+					batch.save_output_pos_height(&out.commitment(), pos)?;
+				}
 			}
 		}
 
@@ -56,7 +56,7 @@ impl<'a> UTXOView<'a> {
 		for output in block.outputs() {
 			self.validate_output(output, batch)?;
 		}
-		self.validate_inputs(block.inputs(), batch)
+		self.validate_inputs(&block.inputs(), batch)
 	}
 
 	/// Validate a transaction against the current UTXO set.
@@ -70,7 +70,7 @@ impl<'a> UTXOView<'a> {
 		for output in tx.outputs() {
 			self.validate_output(output, batch)?;
 		}
-		self.validate_inputs(tx.inputs(), batch)
+		self.validate_inputs(&tx.inputs(), batch)
 	}
 
 	/// Validate the provided inputs.
@@ -78,10 +78,20 @@ impl<'a> UTXOView<'a> {
 	/// that would be spent by the provided inputs.
 	pub fn validate_inputs(
 		&self,
-		inputs: Inputs,
+		inputs: &Inputs,
 		batch: &Batch<'_>,
 	) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
 		match inputs {
+			Inputs::CommitOnly(inputs) => {
+				let outputs_spent: Result<Vec<_>, Error> = inputs
+					.iter()
+					.map(|input| {
+						self.validate_input(input.commitment(), batch)
+							.and_then(|(out, pos)| Ok((out, pos)))
+					})
+					.collect();
+				outputs_spent
+			}
 			Inputs::FeaturesAndCommit(inputs) => {
 				let outputs_spent: Result<Vec<_>, Error> = inputs
 					.iter()
@@ -93,6 +103,7 @@ impl<'a> UTXOView<'a> {
 						if out == input.into() {
 							Ok((out, pos))
 						} else {
+							error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input);
 							Err(ErrorKind::Other("input mismatch".into()).into())
 						}
 					})
@@ -114,7 +125,15 @@ impl<'a> UTXOView<'a> {
 		let pos = batch.get_output_pos_height(&input)?;
 		if let Some(pos) = pos {
 			if let Some(out) = self.output_pmmr.get_data(pos.pos) {
-				return Ok((out, pos));
+				if out.commitment() == input {
+					return Ok((out, pos));
+				} else {
+					error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input);
+					return Err(ErrorKind::Other(
+						"input mismatch (output_pos index mismatch?)".into(),
+					)
+					.into());
+				}
 			}
 		}
 		Err(ErrorKind::AlreadySpent(input).into())
@@ -151,17 +170,27 @@ impl<'a> UTXOView<'a> {
 		height: u64,
 		batch: &Batch<'_>,
 	) -> Result<(), Error> {
-		// Find the greatest output pos of any coinbase
-		// outputs we are attempting to spend.
-		let inputs: Vec<_> = inputs.into();
-		let pos = inputs
-			.iter()
-			.filter(|x| x.is_coinbase())
-			.filter_map(|x| batch.get_output_pos(&x.commitment()).ok())
-			.max()
-			.unwrap_or(0);
-
-		if pos > 0 {
+		// Lookup the outputs being spent.
+		let spent: Result<Vec<_>, _> = inputs
+			.iter()
+			.map(|x| self.validate_input(x.commitment(), batch))
+			.collect();
+
+		// Find the max pos of any coinbase being spent.
+		let pos = spent?
+			.iter()
+			.filter_map(|(out, pos)| {
+				if out.features.is_coinbase() {
+					Some(pos.pos)
+				} else {
+					None
+				}
+			})
+			.max();
+
+		if let Some(pos) = pos {
 			// If we have not yet reached 1440 blocks then
 			// we can fail immediately as coinbase cannot be mature.
 			if height < global::coinbase_maturity() {
@@ -122,7 +122,7 @@ fn test_coinbase_maturity() {
 
 	// Confirm the tx attempting to spend the coinbase output
 	// is not valid at the current block height given the current chain state.
-	match chain.verify_coinbase_maturity(&coinbase_txn) {
+	match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) {
 		Ok(_) => {}
 		Err(e) => match e.kind() {
 			ErrorKind::ImmatureCoinbase => {}
@@ -204,7 +204,7 @@ fn test_coinbase_maturity() {
 
 	// Confirm the tx attempting to spend the coinbase output
 	// is not valid at the current block height given the current chain state.
-	match chain.verify_coinbase_maturity(&coinbase_txn) {
+	match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) {
 		Ok(_) => {}
 		Err(e) => match e.kind() {
 			ErrorKind::ImmatureCoinbase => {}
@@ -254,7 +254,9 @@ fn test_coinbase_maturity() {
 
 	// Confirm the tx spending the coinbase output is now valid.
 	// The coinbase output has matured sufficiently based on current chain state.
-	chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
+	chain
+		.verify_coinbase_maturity(&coinbase_txn.inputs())
+		.unwrap();
 
 	let txs = &[coinbase_txn];
 	let fees = txs.iter().map(|tx| tx.fee()).sum();
@@ -272,7 +272,7 @@ impl PMMRable for BlockHeader {
 /// Serialization of a block header
 impl Writeable for BlockHeader {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		if writer.serialization_mode() != ser::SerializationMode::Hash {
+		if !writer.serialization_mode().is_hash_mode() {
 			self.write_pre_pow(writer)?;
 		}
 		self.pow.write(writer)?;
@@ -507,8 +507,7 @@ impl Hashed for Block {
 impl Writeable for Block {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		self.header.write(writer)?;
-
-		if writer.serialization_mode() != ser::SerializationMode::Hash {
+		if !writer.serialization_mode().is_hash_mode() {
 			self.body.write(writer)?;
 		}
 		Ok(())
@@ -609,7 +608,7 @@ impl Block {
 		kernels.extend_from_slice(cb.kern_full());
 
 		// Initialize a tx body and sort everything.
-		let body = TransactionBody::init(inputs, &outputs, &kernels, false)?;
+		let body = TransactionBody::init(inputs.into(), &outputs, &kernels, false)?;
 
 		// Finally return the full block.
 		// Note: we have not actually validated the block here,
@@ -331,7 +331,7 @@ impl Writeable for KernelFeatures {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		// Care must be exercised when writing for hashing purposes.
 		// All kernels are hashed using original v1 serialization strategy.
-		if writer.serialization_mode() == ser::SerializationMode::Hash {
+		if writer.serialization_mode().is_hash_mode() {
 			return self.write_v1(writer);
 		}
 
@@ -458,6 +458,8 @@ pub struct TxKernel {
 impl DefaultHashable for TxKernel {}
 hashable_ord!(TxKernel);
 
+/// We want to be able to put kernels in a hashset in the pool.
+/// So we need to be able to hash them.
 impl ::std::hash::Hash for TxKernel {
 	fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
 		let mut vec = Vec::new();
@@ -691,12 +693,23 @@ impl Readable for TransactionBody {
 			return Err(ser::Error::TooLargeReadErr);
 		}
 
-		let inputs = read_multi(reader, num_inputs)?;
+		// Read protocol version specific inputs.
+		let inputs = match reader.protocol_version().value() {
+			0..=2 => {
+				let inputs: Vec<Input> = read_multi(reader, num_inputs)?;
+				Inputs::from(inputs.as_slice())
+			}
+			3..=ser::ProtocolVersion::MAX => {
+				let inputs: Vec<CommitWrapper> = read_multi(reader, num_inputs)?;
+				Inputs::from(inputs.as_slice())
+			}
+		};
+
 		let outputs = read_multi(reader, num_outputs)?;
 		let kernels = read_multi(reader, num_kernels)?;
 
 		// Initialize tx body and verify everything is sorted.
-		let body = TransactionBody::init(&inputs, &outputs, &kernels, true)
+		let body = TransactionBody::init(inputs, &outputs, &kernels, true)
 			.map_err(|_| ser::Error::CorruptedData)?;
 
 		Ok(body)
@@ -751,13 +764,13 @@ impl TransactionBody {
 	/// the provided inputs, outputs and kernels.
 	/// Guarantees inputs, outputs, kernels are sorted lexicographically.
 	pub fn init(
-		inputs: &[Input],
+		inputs: Inputs,
 		outputs: &[Output],
 		kernels: &[TxKernel],
 		verify_sorted: bool,
 	) -> Result<TransactionBody, Error> {
 		let mut body = TransactionBody {
-			inputs: inputs.into(),
+			inputs,
 			outputs: outputs.to_vec(),
 			kernels: kernels.to_vec(),
 		};
@@ -792,11 +805,19 @@ impl TransactionBody {
 	/// inputs, if any, are kept intact.
 	/// Sort order is maintained.
 	pub fn with_input(mut self, input: Input) -> TransactionBody {
-		let mut inputs: Vec<_> = self.inputs.into();
-		if let Err(e) = inputs.binary_search(&input) {
-			inputs.insert(e, input)
+		match &mut self.inputs {
+			Inputs::CommitOnly(inputs) => {
+				let commit = input.into();
+				if let Err(e) = inputs.binary_search(&commit) {
+					inputs.insert(e, commit)
+				};
+			}
+			Inputs::FeaturesAndCommit(inputs) => {
+				if let Err(e) = inputs.binary_search(&input) {
+					inputs.insert(e, input)
+				};
+			}
 		};
-		self.inputs = inputs.into();
 		self
 	}
 
@@ -1166,14 +1187,15 @@ impl Transaction {
 
 	/// Creates a new transaction initialized with
 	/// the provided inputs, outputs, kernels
-	pub fn new(inputs: &[Input], outputs: &[Output], kernels: &[TxKernel]) -> Transaction {
-		let offset = BlindingFactor::zero();
-
+	pub fn new(inputs: Inputs, outputs: &[Output], kernels: &[TxKernel]) -> Transaction {
 		// Initialize a new tx body and sort everything.
 		let body =
 			TransactionBody::init(inputs, outputs, kernels, false).expect("sorting, not verifying");
 
-		Transaction { offset, body }
+		Transaction {
+			offset: BlindingFactor::zero(),
+			body,
+		}
 	}
 
 	/// Creates a new transaction using this transaction as a template
@@ -1398,7 +1420,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
 			kernels + tx.kernels().len(),
 		)
 	});
-	let mut inputs: Vec<Input> = Vec::with_capacity(n_inputs);
+	let mut inputs: Vec<CommitWrapper> = Vec::with_capacity(n_inputs);
 	let mut outputs: Vec<Output> = Vec::with_capacity(n_outputs);
 	let mut kernels: Vec<TxKernel> = Vec::with_capacity(n_kernels);
 
@@ -1427,7 +1449,8 @@ pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
 	// * full set of tx kernels
 	// * sum of all kernel offsets
 	// Note: We sort input/outputs/kernels when building the transaction body internally.
-	let tx = Transaction::new(inputs, outputs, &kernels).with_offset(total_kernel_offset);
+	let tx =
+		Transaction::new(Inputs::from(inputs), outputs, &kernels).with_offset(total_kernel_offset);
 
 	Ok(tx)
 }
@@ -1435,7 +1458,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
 /// Attempt to deaggregate a multi-kernel transaction based on multiple
 /// transactions
 pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result<Transaction, Error> {
-	let mut inputs: Vec<Input> = vec![];
+	let mut inputs: Vec<CommitWrapper> = vec![];
 	let mut outputs: Vec<Output> = vec![];
 	let mut kernels: Vec<TxKernel> = vec![];
 
@@ -1494,7 +1517,10 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result<Transactio
 	kernels.sort_unstable();
 
 	// Build a new tx from the above data.
-	Ok(Transaction::new(&inputs, &outputs, &kernels).with_offset(total_kernel_offset))
+	Ok(
+		Transaction::new(Inputs::from(inputs.as_slice()), &outputs, &kernels)
+			.with_offset(total_kernel_offset),
+	)
 }
 
 /// A transaction input.
@@ -1531,14 +1557,6 @@ impl From<&OutputIdentifier> for Input {
 	}
 }
 
-impl ::std::hash::Hash for Input {
-	fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
-		let mut vec = Vec::new();
-		ser::serialize_default(&mut vec, &self).expect("serialization failed");
-		::std::hash::Hash::hash(&vec, state);
-	}
-}
-
 /// Implementation of Writeable for a transaction Input, defines how to write
 /// an Input as binary.
 impl Writeable for Input {
@@ -1588,30 +1606,102 @@ impl Input {
 		self.features.is_plain()
 	}
 }
 
+/// We need to wrap commitments so they can be sorted with hashable_ord.
+#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
+#[serde(transparent)]
+pub struct CommitWrapper {
+	#[serde(
+		serialize_with = "secp_ser::as_hex",
+		deserialize_with = "secp_ser::commitment_from_hex"
+	)]
+	commit: Commitment,
+}
+
+impl DefaultHashable for CommitWrapper {}
+hashable_ord!(CommitWrapper);
+
+impl From<Commitment> for CommitWrapper {
+	fn from(commit: Commitment) -> Self {
+		CommitWrapper { commit }
+	}
+}
+
+impl From<Input> for CommitWrapper {
+	fn from(input: Input) -> Self {
+		CommitWrapper {
+			commit: input.commitment(),
+		}
+	}
+}
+
+impl From<&Input> for CommitWrapper {
+	fn from(input: &Input) -> Self {
+		CommitWrapper {
+			commit: input.commitment(),
+		}
+	}
+}
+
+impl AsRef<Commitment> for CommitWrapper {
+	fn as_ref(&self) -> &Commitment {
+		&self.commit
+	}
+}
+
+impl Readable for CommitWrapper {
+	fn read<R: Reader>(reader: &mut R) -> Result<CommitWrapper, ser::Error> {
+		let commit = Commitment::read(reader)?;
+		Ok(CommitWrapper { commit })
+	}
+}
+
+impl Writeable for CommitWrapper {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		self.commit.write(writer)
+	}
+}
+
+impl From<Inputs> for Vec<CommitWrapper> {
+	fn from(inputs: Inputs) -> Self {
+		match inputs {
+			Inputs::CommitOnly(inputs) => inputs,
+			Inputs::FeaturesAndCommit(inputs) => {
+				let mut commits: Vec<_> = inputs.iter().map(|input| input.into()).collect();
+				commits.sort_unstable();
+				commits
+			}
+		}
+	}
+}
+
+impl From<&Inputs> for Vec<CommitWrapper> {
+	fn from(inputs: &Inputs) -> Self {
+		match inputs {
+			Inputs::CommitOnly(inputs) => inputs.clone(),
+			Inputs::FeaturesAndCommit(inputs) => {
+				let mut commits: Vec<_> = inputs.iter().map(|input| input.into()).collect();
+				commits.sort_unstable();
+				commits
+			}
+		}
+	}
+}
+
+impl CommitWrapper {
+	/// Wrapped commitment.
+	pub fn commitment(&self) -> Commitment {
+		self.commit
+	}
+}
 /// Wrapper around a vec of inputs.
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+#[serde(untagged)]
 pub enum Inputs {
+	/// Vec of commitments.
+	CommitOnly(Vec<CommitWrapper>),
 	/// Vec of inputs.
 	FeaturesAndCommit(Vec<Input>),
-	// Vec of commitments.
-	// CommitOnly(Vec<Commitment>),
 }
 
-impl From<Inputs> for Vec<Input> {
-	fn from(inputs: Inputs) -> Self {
-		match inputs {
-			Inputs::FeaturesAndCommit(inputs) => inputs,
-		}
-	}
-}
-
-impl From<&Inputs> for Vec<Input> {
-	fn from(inputs: &Inputs) -> Self {
-		match inputs {
-			Inputs::FeaturesAndCommit(inputs) => inputs.to_vec(),
-		}
-	}
-}
-
 impl From<&[Input]> for Inputs {
@@ -1620,35 +1710,62 @@ impl From<&[Input]> for Inputs {
 	}
 }
 
-impl From<Vec<Input>> for Inputs {
-	fn from(inputs: Vec<Input>) -> Self {
-		Inputs::FeaturesAndCommit(inputs)
+impl From<&[CommitWrapper]> for Inputs {
+	fn from(commits: &[CommitWrapper]) -> Self {
+		Inputs::CommitOnly(commits.to_vec())
 	}
 }
 
-impl From<Vec<OutputIdentifier>> for Inputs {
-	fn from(outputs: Vec<OutputIdentifier>) -> Self {
-		let inputs = outputs
-			.into_iter()
+/// Used when converting to v2 compatibility.
+/// We want to preserve output features here.
+impl From<&[OutputIdentifier]> for Inputs {
+	fn from(outputs: &[OutputIdentifier]) -> Self {
+		let mut inputs: Vec<_> = outputs
+			.iter()
 			.map(|out| Input {
 				features: out.features,
 				commit: out.commit,
 			})
 			.collect();
+		inputs.sort_unstable();
 		Inputs::FeaturesAndCommit(inputs)
 	}
 }
 
 impl Default for Inputs {
 	fn default() -> Self {
-		Inputs::FeaturesAndCommit(vec![])
+		Inputs::CommitOnly(vec![])
 	}
 }
 
 impl Writeable for Inputs {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		match self {
-			Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?,
+		// Nothing to write so we are done.
+		if self.is_empty() {
+			return Ok(());
+		}
+
+		// If writing for a hash then simply write all our inputs.
+		if writer.serialization_mode().is_hash_mode() {
+			match self {
+				Inputs::CommitOnly(inputs) => inputs.write(writer)?,
+				Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?,
+			}
+		} else {
+			// Otherwise we are writing full data and need to consider our inputs variant and protocol version.
+			match self {
+				Inputs::CommitOnly(inputs) => match writer.protocol_version().value() {
+					0..=2 => return Err(ser::Error::UnsupportedProtocolVersion),
+					3..=ProtocolVersion::MAX => inputs.write(writer)?,
+				},
+				Inputs::FeaturesAndCommit(inputs) => match writer.protocol_version().value() {
+					0..=2 => inputs.write(writer)?,
+					3..=ProtocolVersion::MAX => {
+						let inputs: Vec<CommitWrapper> = self.into();
+						inputs.write(writer)?;
+					}
+				},
+			}
 		}
 		Ok(())
 	}
@@ -1658,6 +1775,7 @@ impl Inputs {
 	/// Number of inputs.
 	pub fn len(&self) -> usize {
 		match self {
+			Inputs::CommitOnly(inputs) => inputs.len(),
 			Inputs::FeaturesAndCommit(inputs) => inputs.len(),
 		}
 	}
@@ -1670,15 +1788,26 @@ impl Inputs {
 	/// Verify inputs are sorted and unique.
 	fn verify_sorted_and_unique(&self) -> Result<(), ser::Error> {
 		match self {
+			Inputs::CommitOnly(inputs) => inputs.verify_sorted_and_unique(),
 			Inputs::FeaturesAndCommit(inputs) => inputs.verify_sorted_and_unique(),
 		}
 	}
 
 	/// Sort the inputs.
 	fn sort_unstable(&mut self) {
 		match self {
+			Inputs::CommitOnly(inputs) => inputs.sort_unstable(),
 			Inputs::FeaturesAndCommit(inputs) => inputs.sort_unstable(),
 		}
 	}
+
+	/// For debug purposes only. Do not rely on this for anything.
+	pub fn version_str(&self) -> &str {
+		match self {
+			Inputs::CommitOnly(_) => "v3",
+			Inputs::FeaturesAndCommit(_) => "v2",
+		}
+	}
 }
 
 // Enum of various supported kernel "features".
@@ -1752,14 +1881,6 @@ impl AsRef<Commitment> for Output {
 	}
 }
 
-impl ::std::hash::Hash for Output {
-	fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
-		let mut vec = Vec::new();
-		ser::serialize_default(&mut vec, &self).expect("serialization failed");
-		::std::hash::Hash::hash(&vec, state);
-	}
-}
-
 /// Implementation of Writeable for a transaction Output, defines how to write
 /// an Output as binary.
 impl Writeable for Output {
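The Readable/Writeable changes above gate the input encoding on the negotiated protocol version. A compact illustration of the dispatch rules (sizes taken from the serialized-size tests later in this diff; the function itself is illustrative, not part of the codebase):

fn inputs_wire_form(protocol_version: u32, commit_only: bool) -> Result<&'static str, &'static str> {
    match protocol_version {
        // v0..v2 peers expect "features and commit" (1 + 33 bytes per input);
        // a CommitOnly body must be converted first or writing fails with
        // ser::Error::UnsupportedProtocolVersion.
        0..=2 => {
            if commit_only {
                Err("unsupported protocol version")
            } else {
                Ok("features and commit (34 bytes per input)")
            }
        }
        // v3+ always writes commit-only (33 bytes per input); a
        // FeaturesAndCommit body is converted to commits on the fly.
        _ => Ok("commit only (33 bytes per input)"),
    }
}

fn main() {
    assert!(inputs_wire_form(2, true).is_err());
    assert_eq!(inputs_wire_form(3, true).unwrap(), "commit only (33 bytes per input)");
    assert_eq!(inputs_wire_form(2, false).unwrap(), "features and commit (34 bytes per input)");
}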
@@ -23,9 +23,12 @@ use crate::consensus::{
 	PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
 };
 use crate::core::block::HeaderVersion;
-use crate::pow::{
-	self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
-	new_cuckatoo_ctx, PoWContext,
+use crate::{
+	pow::{
+		self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
+		new_cuckatoo_ctx, PoWContext,
+	},
+	ser::ProtocolVersion,
 };
 use std::cell::Cell;
 use util::OneTime;
@@ -42,7 +45,7 @@ use util::OneTime;
 /// Note: We also use a specific (possible different) protocol version
 /// for both the backend database and MMR data files.
 /// This defines the p2p layer protocol version for this node.
-pub const PROTOCOL_VERSION: u32 = 2;
+pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(3);
 
 /// Automated testing edge_bits
 pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
|
|||
DuplicateError,
|
||||
/// Block header version (hard-fork schedule).
|
||||
InvalidBlockVersion,
|
||||
/// Unsupported protocol version
|
||||
UnsupportedProtocolVersion,
|
||||
}
|
||||
|
||||
impl From<io::Error> for Error {
|
||||
|
@ -92,6 +94,7 @@ impl fmt::Display for Error {
|
|||
Error::TooLargeReadErr => f.write_str("too large read"),
|
||||
Error::HexError(ref e) => write!(f, "hex error {:?}", e),
|
||||
Error::InvalidBlockVersion => f.write_str("invalid block version"),
|
||||
Error::UnsupportedProtocolVersion => f.write_str("unsupported protocol version"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -115,6 +118,7 @@ impl error::Error for Error {
|
|||
Error::TooLargeReadErr => "too large read",
|
||||
Error::HexError(_) => "hex error",
|
||||
Error::InvalidBlockVersion => "invalid block version",
|
||||
Error::UnsupportedProtocolVersion => "unsupported protocol version",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -128,6 +132,16 @@ pub enum SerializationMode {
|
|||
Hash,
|
||||
}
|
||||
|
||||
impl SerializationMode {
|
||||
/// Hash mode?
|
||||
pub fn is_hash_mode(&self) -> bool {
|
||||
match self {
|
||||
SerializationMode::Hash => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementations defined how different numbers and binary structures are
|
||||
/// written to an underlying stream or container (depending on implementation).
|
||||
pub trait Writer {
|
||||
|
@ -319,7 +333,7 @@ impl ProtocolVersion {
|
|||
/// negotiation in the p2p layer. Connected peers will negotiate a suitable
|
||||
/// protocol version for serialization/deserialization of p2p messages.
|
||||
pub fn local() -> ProtocolVersion {
|
||||
ProtocolVersion(PROTOCOL_VERSION)
|
||||
PROTOCOL_VERSION
|
||||
}
|
||||
|
||||
/// We need to specify a protocol version for our local database.
|
||||
|
|
|
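The is_hash_mode helper added above exists because hashing must stay stable across protocol versions, so writers branch on hash mode instead of comparing against a specific enum variant at each call site. Shown standalone, simplified to two modes (the real enum has more):

enum SerializationMode {
    Full,
    Hash,
}

impl SerializationMode {
    // Hash mode? Mirrors the helper added in ser.rs.
    fn is_hash_mode(&self) -> bool {
        matches!(self, SerializationMode::Hash)
    }
}

fn main() {
    assert!(SerializationMode::Hash.is_hash_mode());
    assert!(!SerializationMode::Full.is_hash_mode());
}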
@@ -19,7 +19,7 @@ use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion, Untrust
 use crate::core::core::hash::Hashed;
 use crate::core::core::id::ShortIdentifiable;
 use crate::core::core::transaction::{
-	self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, Transaction,
+	self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier, Transaction,
 };
 use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
 use crate::core::core::{Committed, CompactBlock};
@@ -522,9 +522,53 @@ fn block_single_tx_serialized_size() {
 	let prev = BlockHeader::default();
 	let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
 	let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
 
+	// Default protocol version (3)
+	let mut vec = Vec::new();
+	ser::serialize_default(&mut vec, &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_669);
+
+	// Protocol version 3
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(3), &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_669);
+
+	// Protocol version 2.
+	// Note: block must be in "v2" compatibility with "features and commit" inputs for this.
+	// Normally we would convert the block by looking inputs up in utxo but we fake it here for testing.
+	let inputs: Vec<_> = b.inputs().into();
+	let inputs: Vec<_> = inputs
+		.iter()
+		.map(|input| OutputIdentifier {
+			features: OutputFeatures::Plain,
+			commit: input.commitment(),
+		})
+		.collect();
+	let b = Block {
+		header: b.header,
+		body: b.body.replace_inputs(inputs.as_slice().into()),
+	};
+
+	// Protocol version 2
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(2), &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_670);
+
+	// Protocol version 1 (fixed size kernels)
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(1), &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_694);
+
+	// Check we can also serialize a v2 compatibility block in v3 protocol version
+	// without needing to explicitly convert the block.
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(3), &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_669);
+
+	// Default protocol version (3) for completeness
+	let mut vec = Vec::new();
+	ser::serialize_default(&mut vec, &b).expect("serialization failed");
+	assert_eq!(vec.len(), 2_669);
 }
 
 #[test]
@@ -571,25 +615,10 @@ fn block_10_tx_serialized_size() {
 	let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
 	let b = new_block(&txs, &keychain, &builder, &prev, &key_id);
 
-	// Default protocol version.
-	{
-		let mut vec = Vec::new();
-		ser::serialize_default(&mut vec, &b).expect("serialization failed");
-		assert_eq!(vec.len(), 16_836);
-	}
-
-	// Explicit protocol version 1
-	{
-		let mut vec = Vec::new();
-		ser::serialize(&mut vec, ser::ProtocolVersion(1), &b).expect("serialization failed");
-		assert_eq!(vec.len(), 16_932);
-	}
-
 	// Explicit protocol version 2
 	{
 		let mut vec = Vec::new();
 		ser::serialize(&mut vec, ser::ProtocolVersion(2), &b).expect("serialization failed");
-		assert_eq!(vec.len(), 16_836);
+		assert_eq!(vec.len(), 16_826);
 	}
 }
 
@@ -732,14 +761,13 @@ fn same_amount_outputs_copy_range_proof() {
 
 	// now we reconstruct the transaction, swapping the rangeproofs so they
 	// have the wrong privkey
-	let ins: Vec<_> = tx.inputs().into();
 	let mut outs = tx.outputs().to_vec();
 	outs[0].proof = outs[1].proof;
 
 	let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
 	let prev = BlockHeader::default();
 	let b = new_block(
-		&[Transaction::new(&ins, &outs, tx.kernels())],
+		&[Transaction::new(tx.inputs(), &outs, tx.kernels())],
 		&keychain,
 		&builder,
 		&prev,
@@ -784,7 +812,6 @@ fn wrong_amount_range_proof() {
 		.unwrap();
 
 	// we take the range proofs from tx2 into tx1 and rebuild the transaction
-	let ins: Vec<_> = tx1.inputs().into();
 	let mut outs = tx1.outputs().to_vec();
 	outs[0].proof = tx2.outputs()[0].proof;
 	outs[1].proof = tx2.outputs()[1].proof;
@@ -792,7 +819,7 @@ fn wrong_amount_range_proof() {
 	let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
 	let prev = BlockHeader::default();
 	let b = new_block(
-		&[Transaction::new(&ins, &outs, tx1.kernels())],
+		&[Transaction::new(tx1.inputs(), &outs, tx1.kernels())],
 		&keychain,
 		&builder,
 		&prev,
|
|||
//! Common test functions
|
||||
|
||||
use grin_core::core::hash::DefaultHashable;
|
||||
use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction};
|
||||
use grin_core::core::{
|
||||
Block, BlockHeader, KernelFeatures, OutputFeatures, OutputIdentifier, Transaction,
|
||||
};
|
||||
use grin_core::libtx::{
|
||||
build::{self, input, output},
|
||||
proof::{ProofBuild, ProofBuilder},
|
||||
|
@ -64,6 +66,24 @@ pub fn tx1i1o() -> Transaction {
|
|||
tx
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn tx1i10_v2_compatible() -> Transaction {
|
||||
let tx = tx1i1o();
|
||||
|
||||
let inputs: Vec<_> = tx.inputs().into();
|
||||
let inputs: Vec<_> = inputs
|
||||
.iter()
|
||||
.map(|input| OutputIdentifier {
|
||||
features: OutputFeatures::Plain,
|
||||
commit: input.commitment(),
|
||||
})
|
||||
.collect();
|
||||
Transaction {
|
||||
body: tx.body.replace_inputs(inputs.as_slice().into()),
|
||||
..tx
|
||||
}
|
||||
}
|
||||
|
||||
// utility producing a transaction with a single input
|
||||
// and two outputs (one change output)
|
||||
// Note: this tx has an "offset" kernel
|
||||
|
|
|
@@ -21,7 +21,8 @@ use self::core::core::block::Error::KernelLockHeight;
 use self::core::core::hash::{Hashed, ZERO_HASH};
 use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
 use self::core::core::{
-	aggregate, deaggregate, KernelFeatures, Output, Transaction, TxKernel, Weighting,
+	aggregate, deaggregate, KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction,
+	TxKernel, Weighting,
 };
 use self::core::libtx::build::{self, initial_tx, input, output, with_excess};
 use self::core::libtx::{aggsig, ProofBuilder};
@@ -42,26 +43,51 @@ fn test_setup() {
 fn simple_tx_ser() {
 	let tx = tx2i1o();
 
-	// Default protocol version.
-	{
-		let mut vec = Vec::new();
-		ser::serialize_default(&mut vec, &tx).expect("serialization failed");
-		assert_eq!(vec.len(), 947);
-	}
+	// Default protocol version (3).
+	let mut vec = Vec::new();
+	ser::serialize_default(&mut vec, &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 945);
+
+	// Explicit protocol version 3.
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(3), &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 945);
+
+	// We need to convert the tx to v2 compatibility with "features and commitment" inputs
+	// to serialize to any previous protocol version.
+	// Normally we would do this conversion against the utxo and txpool but we fake it here for testing.
+	let inputs: Vec<_> = tx.inputs().into();
+	let inputs: Vec<_> = inputs
+		.iter()
+		.map(|input| OutputIdentifier {
+			features: OutputFeatures::Plain,
+			commit: input.commitment(),
+		})
+		.collect();
+	let tx = Transaction {
+		body: tx.body.replace_inputs(inputs.as_slice().into()),
+		..tx
+	};
 
 	// Explicit protocol version 1.
-	{
-		let mut vec = Vec::new();
-		ser::serialize(&mut vec, ser::ProtocolVersion(1), &tx).expect("serialization failed");
-		assert_eq!(vec.len(), 955);
-	}
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(1), &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 955);
 
 	// Explicit protocol version 2.
-	{
-		let mut vec = Vec::new();
-		ser::serialize(&mut vec, ser::ProtocolVersion(2), &tx).expect("serialization failed");
-		assert_eq!(vec.len(), 947);
-	}
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(2), &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 947);
+
+	// Check we can still serialize to protocol version 3 without explicitly converting the tx.
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, ser::ProtocolVersion(3), &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 945);
+
+	// And default protocol version for completeness.
+	let mut vec = Vec::new();
+	ser::serialize_default(&mut vec, &tx).expect("serialization failed");
+	assert_eq!(vec.len(), 945);
 }
 
 #[test]
@@ -15,7 +15,7 @@
 //! Transaction integration tests
 
 pub mod common;
-use crate::common::tx1i1o;
+use crate::common::tx1i10_v2_compatible;
 use crate::core::core::transaction::{self, Error};
 use crate::core::core::verifier_cache::LruVerifierCache;
 use crate::core::core::{KernelFeatures, Output, OutputFeatures, Transaction, Weighting};
@@ -32,8 +32,10 @@ use util::RwLock;
 // This test ensures we exercise this serialization/deserialization code.
 #[test]
 fn test_transaction_json_ser_deser() {
-	let tx1 = tx1i1o();
+	let tx1 = tx1i10_v2_compatible();
+
 	let value = serde_json::to_value(&tx1).unwrap();
+	println!("{:?}", value);
 
 	assert!(value["offset"].is_string());
 	assert_eq!(value["body"]["inputs"][0]["features"], "Plain");
@@ -50,7 +52,6 @@ fn test_transaction_json_ser_deser() {
 	let tx2: Transaction = serde_json::from_value(value).unwrap();
 	assert_eq!(tx1, tx2);
 
-	let tx1 = tx1i1o();
 	let str = serde_json::to_string(&tx1).unwrap();
 	println!("{}", str);
 	let tx2: Transaction = serde_json::from_str(&str).unwrap();
@@ -252,23 +252,6 @@ impl Peer {
 		self.send(ban_reason_msg, msg::Type::BanReason).map(|_| ())
 	}
 
-	/// Sends the provided block to the remote peer. The request may be dropped
-	/// if the remote peer is known to already have the block.
-	pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
-		if !self.tracking_adapter.has_recv(b.hash()) {
-			trace!("Send block {} to {}", b.hash(), self.info.addr);
-			self.send(b, msg::Type::Block)?;
-			Ok(true)
-		} else {
-			debug!(
-				"Suppress block send {} to {} (already seen)",
-				b.hash(),
-				self.info.addr,
-			);
-			Ok(false)
-		}
-	}
-
 	pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
 		if !self.tracking_adapter.has_recv(b.hash()) {
 			trace!("Send compact block {} to {}", b.hash(), self.info.addr);
@@ -540,8 +523,8 @@ impl ChainAdapter for TrackingAdapter {
 		self.adapter.locate_headers(locator)
 	}
 
-	fn get_block(&self, h: Hash) -> Option<core::Block> {
-		self.adapter.get_block(h)
+	fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option<core::Block> {
+		self.adapter.get_block(h, peer_info)
 	}
 
 	fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
@@ -667,8 +667,8 @@ impl ChainAdapter for Peers {
 		self.adapter.locate_headers(hs)
 	}
 
-	fn get_block(&self, h: Hash) -> Option<core::Block> {
-		self.adapter.get_block(h)
+	fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option<core::Block> {
+		self.adapter.get_block(h, peer_info)
 	}
 
 	fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
@@ -15,7 +15,6 @@
 use crate::chain;
 use crate::conn::{Message, MessageHandler, Tracker};
 use crate::core::core::{self, hash::Hash, hash::Hashed, CompactBlock};
-
 use crate::msg::{
 	BanReason, GetPeerAddrs, Headers, Locator, Msg, PeerAddrs, Ping, Pong, TxHashSetArchive,
 	TxHashSetRequest, Type,
@@ -153,7 +152,7 @@ impl MessageHandler for Protocol {
 				msg.header.msg_len,
 			);
 
-			let bo = adapter.get_block(h);
+			let bo = adapter.get_block(h, &self.peer_info);
 			if let Some(b) = bo {
 				return Ok(Some(Msg::new(Type::Block, b, self.peer_info.version)?));
 			}
@@ -177,7 +176,7 @@ impl MessageHandler for Protocol {
 
 			Type::GetCompactBlock => {
 				let h: Hash = msg.body()?;
-				if let Some(b) = adapter.get_block(h) {
+				if let Some(b) = adapter.get_block(h, &self.peer_info) {
 					let cb: CompactBlock = b.into();
 					Ok(Some(Msg::new(
 						Type::CompactBlock,
@@ -335,7 +335,7 @@ impl ChainAdapter for DummyAdapter {
 	fn locate_headers(&self, _: &[Hash]) -> Result<Vec<core::BlockHeader>, chain::Error> {
 		Ok(vec![])
 	}
-	fn get_block(&self, _: Hash) -> Option<core::Block> {
+	fn get_block(&self, _: Hash, _: &PeerInfo) -> Option<core::Block> {
 		None
 	}
 	fn txhashset_read(&self, _h: Hash) -> Option<TxHashSetRead> {
@@ -599,7 +599,8 @@ pub trait ChainAdapter: Sync + Send {
 	fn locate_headers(&self, locator: &[Hash]) -> Result<Vec<core::BlockHeader>, chain::Error>;
 
 	/// Gets a full block by its hash.
-	fn get_block(&self, h: Hash) -> Option<core::Block>;
+	/// Converts block to v2 compatibility if necessary (based on peer protocol version).
+	fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option<core::Block>;
 
 	/// Provides a reading view into the current txhashset state as well as
 	/// the required indexes for a consumer to rewind to a consistant state
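A sketch of why get_block now carries the peer: blocks are stored locally in v3 but a v2 peer must receive FeaturesAndCommit inputs, so relay converts on the way out (hypothetical simplified types; the real conversion is Chain::convert_block_v2):

#[derive(Clone, Debug)]
enum BlockForm {
    V2FeaturesAndCommit,
    V3CommitOnly,
}

fn block_for_peer(stored: BlockForm, peer_version: u32) -> BlockForm {
    match (stored, peer_version) {
        // v2 peers need the converted form.
        (BlockForm::V3CommitOnly, 0..=2) => BlockForm::V2FeaturesAndCommit,
        (b, _) => b,
    }
}

fn main() {
    println!("{:?}", block_for_peer(BlockForm::V3CommitOnly, 2)); // V2FeaturesAndCommit
    println!("{:?}", block_for_peer(BlockForm::V3CommitOnly, 3)); // V3CommitOnly
}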
@@ -29,7 +29,6 @@ use grin_util as util;
 use std::cmp::Reverse;
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
-use util::secp::pedersen::Commitment;
 use util::static_secp_instance;
 
 pub struct Pool<B, V>
@@ -60,28 +59,9 @@ where
 	}
 
 	/// Does the transaction pool contain an entry for the given transaction?
-	pub fn contains_tx(&self, hash: Hash) -> bool {
-		self.entries.iter().any(|x| x.tx.hash() == hash)
-	}
-
-	pub fn get_tx(&self, hash: Hash) -> Option<Transaction> {
-		self.entries
-			.iter()
-			.find(|x| x.tx.hash() == hash)
-			.map(|x| x.tx.clone())
-	}
-
-	/// Query the tx pool for an individual tx matching the given public excess.
-	/// Used for checking for duplicate NRD kernels in the txpool.
-	pub fn retrieve_tx_by_kernel_excess(&self, excess: Commitment) -> Option<Transaction> {
-		for x in &self.entries {
-			for k in x.tx.kernels() {
-				if k.excess() == excess {
-					return Some(x.tx.clone());
-				}
-			}
-		}
-		None
+	/// Transactions are compared by their kernels.
+	pub fn contains_tx(&self, tx: &Transaction) -> bool {
+		self.entries.iter().any(|x| x.tx.kernels() == tx.kernels())
 	}
 
 	/// Query the tx pool for an individual tx matching the given kernel hash.
@@ -289,15 +269,13 @@ where
 		Ok(valid_txs)
 	}
 
-	/// Convert a transaction for v2 compatibility.
-	/// We may receive a transaction with "commit only" inputs.
-	/// We convert it to "features and commit" so we can safely relay it to v2 peers.
-	/// Converson is done by looking up outputs to be spent in both the pool and the current utxo.
-	pub fn convert_tx_v2(
+	/// Lookup unspent outputs to be spent by the provided transaction.
+	/// We look for unspent outputs in the current txpool and then in the current utxo.
+	pub fn locate_spends(
 		&self,
-		tx: Transaction,
+		tx: &Transaction,
 		extra_tx: Option<Transaction>,
-	) -> Result<Transaction, PoolError> {
+	) -> Result<(Vec<OutputIdentifier>, Vec<OutputIdentifier>), PoolError> {
 		let mut inputs: Vec<_> = tx.inputs().into();
 
 		let agg_tx = self
@@ -312,31 +290,13 @@ where
 		// By applying cut_through to tx inputs and agg_tx outputs we can
 		// determine the outputs being spent from the pool and those still unspent
 		// that need to be looked up via the current utxo.
-		let (inputs, _, _, spent_pool) =
+		let (spent_utxo, _, _, spent_pool) =
 			transaction::cut_through(&mut inputs[..], &mut outputs[..])?;
 
 		// Lookup remaining outputs to be spent from the current utxo.
-		let spent_utxo = self.blockchain.validate_inputs(inputs.into())?;
+		let spent_utxo = self.blockchain.validate_inputs(&spent_utxo.into())?;
 
-		// Combine outputs spent in utxo with outputs spent in pool to give us the
-		// full set of outputs being spent by this transaction.
-		// This is our source of truth for input features.
-		let mut spent = spent_pool.to_vec();
-		spent.extend(spent_utxo);
-		spent.sort();
-
-		// Now build the resulting transaction based on our inputs and outputs from the original transaction.
-		// Remember to use the original kernels and kernel offset.
-		let mut outputs = tx.outputs().to_vec();
-		let (inputs, outputs, _, _) = transaction::cut_through(&mut spent[..], &mut outputs[..])?;
-		let inputs: Vec<_> = inputs.iter().map(|out| out.into()).collect();
-		let tx = Transaction::new(inputs.as_slice(), outputs, tx.kernels()).with_offset(tx.offset);
-
-		// Validate the tx to ensure our converted inputs are correct.
-		tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())
-			.map_err(PoolError::InvalidTx)?;
-
-		Ok(tx)
+		Ok((spent_pool.to_vec(), spent_utxo))
 	}
 
 	fn apply_tx_to_block_sums(
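locate_spends above splits a transaction's inputs into those spending outputs of other pool transactions and those that must still exist in the chain utxo. The cut_through call performs that partition in the real code; a toy equivalent over bare commitments (hypothetical types):

// Partition inputs: those found among pool outputs are spent_pool,
// the remainder must be validated against the chain utxo.
fn locate_spends(inputs: &[u64], pool_outputs: &[u64]) -> (Vec<u64>, Vec<u64>) {
    let (spent_pool, spent_utxo): (Vec<u64>, Vec<u64>) = inputs
        .iter()
        .copied()
        .partition(|c| pool_outputs.contains(c));
    (spent_pool, spent_utxo)
}

fn main() {
    let (pool, utxo) = locate_spends(&[1, 2, 3], &[2]);
    assert_eq!((pool, utxo), (vec![2], vec![1, 3]));
}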
|
@ -20,7 +20,9 @@
|
|||
use self::core::core::hash::{Hash, Hashed};
|
||||
use self::core::core::id::ShortId;
|
||||
use self::core::core::verifier_cache::VerifierCache;
|
||||
use self::core::core::{transaction, Block, BlockHeader, HeaderVersion, Transaction, Weighting};
|
||||
use self::core::core::{
|
||||
transaction, Block, BlockHeader, HeaderVersion, OutputIdentifier, Transaction, Weighting,
|
||||
};
|
||||
use self::core::global;
|
||||
use self::util::RwLock;
|
||||
use crate::pool::Pool;
|
||||
|
@ -88,22 +90,11 @@ where
|
|||
// Add tx to stempool (passing in all txs from txpool to validate against).
|
||||
fn add_to_stempool(
|
||||
&mut self,
|
||||
entry: PoolEntry,
|
||||
entry: &PoolEntry,
|
||||
header: &BlockHeader,
|
||||
) -> Result<PoolEntry, PoolError> {
|
||||
let txpool_agg = self.txpool.all_transactions_aggregate(None)?;
|
||||
|
||||
// Convert the tx to v2 looking for unspent outputs in both stempool and txpool, and utxo.
|
||||
let src = entry.src;
|
||||
let tx = entry.tx;
|
||||
let tx_v2 = self.stempool.convert_tx_v2(tx, txpool_agg.clone())?;
|
||||
let entry = PoolEntry::new(tx_v2, src);
|
||||
|
||||
self.stempool
|
||||
.add_to_pool(entry.clone(), txpool_agg, header)?;
|
||||
|
||||
// If all is good return our pool entry with the converted tx.
|
||||
Ok(entry)
|
||||
extra_tx: Option<Transaction>,
|
||||
) -> Result<(), PoolError> {
|
||||
self.stempool.add_to_pool(entry.clone(), extra_tx, header)
|
||||
}
|
||||
|
||||
fn add_to_reorg_cache(&mut self, entry: &PoolEntry) {
|
||||
|
@ -118,34 +109,20 @@ where
|
|||
debug!("added tx to reorg_cache: size now {}", cache.len());
|
||||
}
|
||||
|
||||
fn add_to_txpool(
|
||||
&mut self,
|
||||
entry: PoolEntry,
|
||||
header: &BlockHeader,
|
||||
) -> Result<PoolEntry, PoolError> {
|
||||
// First deaggregate the tx based on current txpool txs.
|
||||
let entry = if entry.tx.kernels().len() == 1 {
|
||||
entry
|
||||
} else {
|
||||
let tx = entry.tx.clone();
|
||||
let txs = self.txpool.find_matching_transactions(tx.kernels());
|
||||
// Deaggregate this tx against the txpool.
|
||||
// Returns the new deaggregated tx or the original tx if no deaggregation.
|
||||
fn deaggregate_tx(&self, entry: PoolEntry) -> Result<PoolEntry, PoolError> {
|
||||
if entry.tx.kernels().len() > 1 {
|
||||
let txs = self.txpool.find_matching_transactions(entry.tx.kernels());
|
||||
if !txs.is_empty() {
|
||||
let tx = transaction::deaggregate(tx, &txs)?;
|
||||
|
||||
// Validate this deaggregated tx "as tx", subject to regular tx weight limits.
|
||||
tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;
|
||||
|
||||
PoolEntry::new(tx, TxSource::Deaggregate)
|
||||
} else {
|
||||
entry
|
||||
let tx = transaction::deaggregate(entry.tx, &txs)?;
|
||||
return Ok(PoolEntry::new(tx, TxSource::Deaggregate));
|
||||
}
|
||||
};
|
||||
|
||||
// Convert the deaggregated tx to v2 looking for unspent outputs in the txpool, and utxo.
|
||||
let src = entry.src;
|
||||
let tx_v2 = self.txpool.convert_tx_v2(entry.tx, None)?;
|
||||
let entry = PoolEntry::new(tx_v2, src);
|
||||
}
|
||||
Ok(entry)
|
||||
}
|
||||
|
||||
fn add_to_txpool(&mut self, entry: &PoolEntry, header: &BlockHeader) -> Result<(), PoolError> {
|
||||
self.txpool.add_to_pool(entry.clone(), None, header)?;
|
||||
|
||||
// We now need to reconcile the stempool based on the new state of the txpool.
|
||||
|
@ -153,8 +130,7 @@ where
|
|||
let txpool_agg = self.txpool.all_transactions_aggregate(None)?;
|
||||
self.stempool.reconcile(txpool_agg, header)?;
|
||||
|
||||
// If all is good return our pool entry with the deaggregated and converted tx.
|
||||
Ok(entry)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the tx kernel variants and ensure they can all be accepted to the txpool/stempool
|
||||
|
@ -188,18 +164,26 @@ where
|
|||
// Our stempool is private and we do not want to reveal anything about the txs contained.
|
||||
// If this is a stem tx and is already present in stempool then fluff by adding to txpool.
|
||||
// Otherwise if already present in txpool return a "duplicate tx" error.
|
||||
if stem && self.stempool.contains_tx(tx.hash()) {
|
||||
if stem && self.stempool.contains_tx(&tx) {
|
||||
return self.add_to_pool(src, tx, false, header);
|
||||
} else if self.txpool.contains_tx(tx.hash()) {
|
||||
} else if self.txpool.contains_tx(&tx) {
|
||||
return Err(PoolError::DuplicateTx);
|
||||
}
|
||||
|
||||
// Attempt to deaggregate the tx if not stem tx.
|
||||
let entry = if stem {
|
||||
PoolEntry::new(tx, src)
|
||||
} else {
|
||||
self.deaggregate_tx(PoolEntry::new(tx, src))?
|
||||
};
|
||||
let ref tx = entry.tx;
|
||||
|
||||
// Check this tx is valid based on current header version.
|
||||
// NRD kernels only valid post HF3 and if NRD feature enabled.
|
||||
self.verify_kernel_variants(&tx, header)?;
|
||||
self.verify_kernel_variants(tx, header)?;
|
||||
|
||||
// Do we have the capacity to accept this transaction?
|
||||
let acceptability = self.is_acceptable(&tx, stem);
|
||||
let acceptability = self.is_acceptable(tx, stem);
|
||||
let mut evict = false;
|
||||
if !stem && acceptability.as_ref().err() == Some(&PoolError::OverCapacity) {
|
||||
evict = true;
|
||||
|
@@ -213,23 +197,47 @@ where
            .map_err(PoolError::InvalidTx)?;

        // Check the tx lock_time is valid based on current chain state.
        self.blockchain.verify_tx_lock_height(&tx)?;
        self.blockchain.verify_tx_lock_height(tx)?;

        // If stem we want to account for the txpool.
        let extra_tx = if stem {
            self.txpool.all_transactions_aggregate(None)?
        } else {
            None
        };

        // Locate outputs being spent from pool and current utxo.
        let (spent_pool, spent_utxo) = if stem {
            self.stempool.locate_spends(tx, extra_tx.clone())
        } else {
            self.txpool.locate_spends(tx, None)
        }?;

        // Check coinbase maturity before we go any further.
        self.blockchain.verify_coinbase_maturity(&tx)?;
        let coinbase_inputs: Vec<_> = spent_utxo
            .iter()
            .filter(|x| x.is_coinbase())
            .cloned()
            .collect();
        self.blockchain
            .verify_coinbase_maturity(&coinbase_inputs.as_slice().into())?;

        // Convert the tx to "v2" compatibility with "features and commit" inputs.
        let ref entry = self.convert_tx_v2(entry, &spent_pool, &spent_utxo)?;

        // If this is a stem tx then attempt to add it to stempool.
        // If the adapter fails to accept the new stem tx then fallback to fluff via txpool.
        if stem {
            let entry = self.add_to_stempool(PoolEntry::new(tx.clone(), src), header)?;
            if self.adapter.stem_tx_accepted(&entry).is_ok() {
            self.add_to_stempool(entry, header, extra_tx)?;
            if self.adapter.stem_tx_accepted(entry).is_ok() {
                return Ok(());
            }
        }

        let entry = self.add_to_txpool(PoolEntry::new(tx, src), header)?;
        self.add_to_reorg_cache(&entry);
        self.adapter.tx_accepted(&entry);
        // Add tx to txpool.
        self.add_to_txpool(entry, header)?;
        self.add_to_reorg_cache(entry);
        self.adapter.tx_accepted(entry);

        // Transaction passed all the checks but we have to make space for it
        if evict {
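Note the reworked maturity check: instead of inspecting the whole tx, it now runs only against the coinbase subset of the outputs the tx actually spends from the utxo. A minimal sketch with hypothetical stub types (StubOutputId, a simplified height-based maturity rule standing in for the real header-chain check):

#[derive(Clone, Debug, PartialEq)]
enum OutputFeatures {
    Plain,
    Coinbase,
}

#[derive(Clone, Debug)]
struct StubOutputId {
    features: OutputFeatures,
    height: u64, // height the output was created at (sketch only)
}

impl StubOutputId {
    fn is_coinbase(&self) -> bool {
        self.features == OutputFeatures::Coinbase
    }
}

// Every spent coinbase output must be at least `maturity` blocks old.
fn verify_coinbase_maturity(spent_utxo: &[StubOutputId], head: u64, maturity: u64) -> bool {
    spent_utxo
        .iter()
        .filter(|x| x.is_coinbase())
        .all(|x| head >= x.height + maturity)
}

fn main() {
    let spent = vec![
        StubOutputId { features: OutputFeatures::Plain, height: 10 },
        StubOutputId { features: OutputFeatures::Coinbase, height: 100 },
    ];
    assert!(verify_coinbase_maturity(&spent, 1540, 1440));
    assert!(!verify_coinbase_maturity(&spent, 1500, 1440)); // coinbase still immature
}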
@@ -239,6 +247,38 @@ where
        Ok(())
    }

    /// Convert a transaction for v2 compatibility.
    /// We may receive a transaction with "commit only" inputs.
    /// We convert it to "features and commit" so we can safely relay it to v2 peers.
    /// Conversion is done using outputs previously looked up in both the pool and the current utxo.
    fn convert_tx_v2(
        &self,
        entry: PoolEntry,
        spent_pool: &[OutputIdentifier],
        spent_utxo: &[OutputIdentifier],
    ) -> Result<PoolEntry, PoolError> {
        let tx = entry.tx;
        debug!(
            "convert_tx_v2: {} ({} -> v2)",
            tx.hash(),
            tx.inputs().version_str(),
        );

        let mut inputs = spent_utxo.to_vec();
        inputs.extend_from_slice(spent_pool);
        inputs.sort_unstable();

        let tx = Transaction {
            body: tx.body.replace_inputs(inputs.as_slice().into()),
            ..tx
        };

        // Validate the tx to ensure our converted inputs are correct.
        tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;

        Ok(PoolEntry::new(tx, entry.src))
    }

    // Evict a transaction from the txpool.
    // Uses bucket logic to identify the "last" transaction.
    // No other tx depends on it and it has low fee_to_weight.
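The core of convert_tx_v2 is rebuilding the input set from the looked-up outputs and re-sorting, since "features and commit" inputs must stay in consensus sort order after the two sources are merged. A self-contained sketch of just that step, with StubInput as a hypothetical stand-in for OutputIdentifier and a 4-byte array standing in for the 33-byte Pedersen commitment:

// Rebuild "features and commit" inputs from outputs looked up in the
// pool and the utxo, then re-sort.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct StubInput {
    commit: [u8; 4],
    coinbase: bool,
}

fn convert_inputs(spent_utxo: &[StubInput], spent_pool: &[StubInput]) -> Vec<StubInput> {
    let mut inputs = spent_utxo.to_vec();
    inputs.extend_from_slice(spent_pool);
    // Inputs are consensus-sorted; resort after merging the two sources.
    inputs.sort_unstable();
    inputs
}

fn main() {
    let utxo = vec![StubInput { commit: [9, 0, 0, 0], coinbase: true }];
    let pool = vec![StubInput { commit: [1, 0, 0, 0], coinbase: false }];
    let inputs = convert_inputs(&utxo, &pool);
    assert_eq!(inputs[0].commit, [1, 0, 0, 0]); // sorted by commitment
}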
@@ -265,7 +305,7 @@ where
            header.hash(),
        );
        for entry in entries {
            let _ = self.add_to_txpool(entry, header);
            let _ = self.add_to_txpool(&entry, header);
        }
        debug!(
            "reconcile_reorg_cache: block: {:?} ... done.",

@@ -275,7 +275,7 @@ impl From<committed::Error> for PoolError {
pub trait BlockChain: Sync + Send {
    /// Verify any coinbase outputs being spent
    /// have matured sufficiently.
    fn verify_coinbase_maturity(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;
    fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), PoolError>;

    /// Verify any coinbase outputs being spent
    /// have matured sufficiently.
@@ -287,7 +287,7 @@ pub trait BlockChain: Sync + Send {
    /// Validate inputs against the current utxo.
    /// Returns the vec of output identifiers that would be spent
    /// by these inputs if they can all be successfully spent.
    fn validate_inputs(&self, inputs: Inputs) -> Result<Vec<OutputIdentifier>, PoolError>;
    fn validate_inputs(&self, inputs: &Inputs) -> Result<Vec<OutputIdentifier>, PoolError>;

    fn chain_head(&self) -> Result<BlockHeader, PoolError>;

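For readers following the trait change: both methods now take borrowed Inputs, and maturity is checked against inputs directly rather than a full transaction. A compilable stub-typed sketch of the reshaped trait (Inputs, OutputIdentifier, NoopChain here are simplified placeholders, not the real grin types):

struct Inputs(Vec<[u8; 4]>);
struct OutputIdentifier([u8; 4]);
struct BlockHeader;

#[derive(Debug)]
enum PoolError {
    ImmatureCoinbase,
}

trait BlockChain: Sync + Send {
    fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), PoolError>;
    fn validate_inputs(&self, inputs: &Inputs) -> Result<Vec<OutputIdentifier>, PoolError>;
    fn chain_head(&self) -> Result<BlockHeader, PoolError>;
}

struct NoopChain;

impl BlockChain for NoopChain {
    fn verify_coinbase_maturity(&self, _inputs: &Inputs) -> Result<(), PoolError> {
        Ok(()) // a real impl would return PoolError::ImmatureCoinbase on failure
    }
    fn validate_inputs(&self, inputs: &Inputs) -> Result<Vec<OutputIdentifier>, PoolError> {
        Ok(inputs.0.iter().map(|c| OutputIdentifier(*c)).collect())
    }
    fn chain_head(&self) -> Result<BlockHeader, PoolError> {
        Ok(BlockHeader)
    }
}

fn main() {
    let chain = NoopChain;
    assert_eq!(chain.validate_inputs(&Inputs(vec![[0; 4]])).unwrap().len(), 1);
}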
@@ -141,10 +141,20 @@ fn test_transaction_pool_block_reconciliation() {
    pool.reconcile_block(&block).unwrap();

    assert_eq!(pool.total_size(), 4);
    assert_eq!(pool.txpool.entries[0].tx, valid_transaction);
    assert_eq!(pool.txpool.entries[1].tx, pool_child);
    assert_eq!(pool.txpool.entries[2].tx, conflict_valid_child);
    assert_eq!(pool.txpool.entries[3].tx, valid_child_valid);
    // Compare the various txs by their kernels as entries in the pool are "v2" compatibility.
    assert_eq!(
        pool.txpool.entries[0].tx.kernels(),
        valid_transaction.kernels()
    );
    assert_eq!(pool.txpool.entries[1].tx.kernels(), pool_child.kernels());
    assert_eq!(
        pool.txpool.entries[2].tx.kernels(),
        conflict_valid_child.kernels()
    );
    assert_eq!(
        pool.txpool.entries[3].tx.kernels(),
        valid_child_valid.kernels()
    );

    // Cleanup db directory
    clean_output_dir(db_root.into());

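Why the test switched to comparing kernels: once the pool converts an entry to v2 compatibility, its inputs carry a different representation than the originally submitted tx, so whole-tx equality fails even though it is logically the same transaction; the kernels are untouched by conversion. A stub-typed illustration (StubTx and StubInputs are hypothetical):

#[derive(Clone, Debug, PartialEq)]
enum StubInputs {
    CommitOnly(Vec<u8>),
    FeaturesAndCommit(Vec<(u8, u8)>),
}

#[derive(Clone, Debug, PartialEq)]
struct StubTx {
    inputs: StubInputs,
    kernels: Vec<u64>, // kernel excess stand-ins
}

fn main() {
    let v3 = StubTx { inputs: StubInputs::CommitOnly(vec![7]), kernels: vec![42] };
    let v2 = StubTx { inputs: StubInputs::FeaturesAndCommit(vec![(0, 7)]), kernels: vec![42] };
    assert_ne!(v3, v2); // input representation differs...
    assert_eq!(v3.kernels, v2.kernels); // ...but the kernels identify the tx
}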
@@ -136,16 +136,16 @@ impl BlockChain for ChainAdapter {
        })
    }

    fn validate_inputs(&self, inputs: Inputs) -> Result<Vec<OutputIdentifier>, PoolError> {
    fn validate_inputs(&self, inputs: &Inputs) -> Result<Vec<OutputIdentifier>, PoolError> {
        self.chain
            .validate_inputs(inputs)
            .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect::<Vec<_>>())
            .map_err(|_| PoolError::Other("failed to validate inputs".into()))
    }

    fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> {
    fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), PoolError> {
        self.chain
            .verify_coinbase_maturity(tx)
            .verify_coinbase_maturity(inputs)
            .map_err(|_| PoolError::ImmatureCoinbase)
    }

@@ -87,6 +87,12 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
        aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
    kernel.verify().unwrap();

    // Generate a 2nd NRD kernel sharing the same excess commitment but with different signature.
    let mut kernel2 = kernel.clone();
    kernel2.excess_sig =
        aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
    kernel2.verify().unwrap();

    let tx1 = test_transaction_with_kernel(
        &keychain,
        vec![10, 20],
@@ -99,7 +105,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
        &keychain,
        vec![24],
        vec![18],
        kernel.clone(),
        kernel2.clone(),
        excess.clone(),
    );

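The point of kernel2 above: NRD (no-recent-duplicate) rules key on the kernel excess commitment, not on the signature, so a re-signed kernel with the same excess still counts as a duplicate instance for the relative-height check. A toy sketch of that keying (StubKernel and nrd_duplicate are hypothetical; real kernels carry a 33-byte excess and an aggsig signature):

#[derive(Clone, Debug)]
struct StubKernel {
    excess: [u8; 4], // stand-in for the excess commitment
    sig: [u8; 4],    // stand-in for the signature
}

// Duplicate detection considers only the excess commitment.
fn nrd_duplicate(a: &StubKernel, b: &StubKernel) -> bool {
    a.excess == b.excess
}

fn main() {
    let k1 = StubKernel { excess: [1, 2, 3, 4], sig: [9, 9, 9, 9] };
    let mut k2 = k1.clone();
    k2.sig = [8, 8, 8, 8]; // re-signed, same excess
    assert!(nrd_duplicate(&k1, &k2));
}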
@@ -32,6 +32,7 @@ use crate::core::core::transaction::Transaction;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{BlockHeader, BlockSums, CompactBlock, Inputs, OutputIdentifier};
use crate::core::pow::Difficulty;
use crate::core::ser::ProtocolVersion;
use crate::core::{core, global};
use crate::p2p;
use crate::p2p::types::PeerInfo;

@@ -171,6 +172,12 @@ where
        // push the freshly hydrated block through the chain pipeline
        match core::Block::hydrate_from(cb, &[]) {
            Ok(block) => {
                debug!(
                    "successfully hydrated (empty) block: {} at {} ({})",
                    block.header.hash(),
                    block.header.height,
                    block.inputs().version_str(),
                );
                if !self.sync_state.is_syncing() {
                    for hook in &self.hooks {
                        hook.on_block_received(&block, &peer_info.addr);
@@ -231,7 +238,12 @@ where
            .validate(&prev.total_kernel_offset, self.verifier_cache.clone())
            .is_ok()
        {
            debug!("successfully hydrated block from tx pool!");
            debug!(
                "successfully hydrated block: {} at {} ({})",
                block.header.hash(),
                block.header.height,
                block.inputs().version_str(),
            );
            self.process_block(block, peer_info, chain::Options::NONE)
        } else if self.sync_state.status() == SyncStatus::NoSync {
            debug!("adapter: block invalid after hydration, requesting full block");
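The version_str calls in these logs are a debug helper on the inputs variant. A sketch of its likely shape, with StubInputs as a hypothetical type and the exact returned strings an assumption for illustration:

enum StubInputs {
    CommitOnly(Vec<[u8; 4]>),
    FeaturesAndCommit(Vec<(u8, [u8; 4])>),
}

impl StubInputs {
    // Report which wire representation the inputs carry.
    fn version_str(&self) -> &'static str {
        match self {
            StubInputs::CommitOnly(_) => "v3",
            StubInputs::FeaturesAndCommit(_) => "v2",
        }
    }
}

fn main() {
    let new_style = StubInputs::CommitOnly(vec![]);
    let old_style = StubInputs::FeaturesAndCommit(vec![]);
    println!("{} / {}", new_style.version_str(), old_style.version_str());
}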
@@ -355,12 +367,15 @@ where
    }

    /// Gets a full block by its hash.
    fn get_block(&self, h: Hash) -> Option<core::Block> {
        let b = self.chain().get_block(&h);
        match b {
            Ok(b) => Some(b),
            _ => None,
        }
    /// Will convert to v2 compatibility based on peer protocol version.
    fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option<core::Block> {
        self.chain()
            .get_block(&h)
            .map(|b| match peer_info.version.value() {
                0..=2 => self.chain().convert_block_v2(b).ok(),
                3..=ProtocolVersion::MAX => Some(b),
            })
            .unwrap_or(None)
    }

    /// Provides a reading view into the current txhashset state as well as

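Note the match arms here: blocks are stored locally in v3 form, so peers advertising protocol version 2 or lower receive the converted ("features and commit") block, while v3+ peers get it as-is, matching the commit's "convert to v2 when relaying to v2 peers". A stand-alone sketch of the same rule with hypothetical stub types (StubBlock, get_block_for_peer):

#[derive(Clone, Debug)]
struct StubBlock {
    v2_inputs: bool, // true once converted to "features and commit"
}

fn convert_block_v2(mut b: StubBlock) -> Option<StubBlock> {
    b.v2_inputs = true; // stand-in for the utxo-lookup based conversion
    Some(b)
}

fn get_block_for_peer(b: StubBlock, peer_version: u32) -> Option<StubBlock> {
    match peer_version {
        0..=2 => convert_block_v2(b), // v2 peers need features-and-commit inputs
        _ => Some(b),                 // v3+ peers accept commit-only inputs
    }
}

fn main() {
    let b = StubBlock { v2_inputs: false };
    assert!(get_block_for_peer(b.clone(), 2).unwrap().v2_inputs);
    assert!(!get_block_for_peer(b, 3).unwrap().v2_inputs);
}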
@@ -938,16 +953,16 @@ impl pool::BlockChain for PoolToChainAdapter {
            .map_err(|_| pool::PoolError::Other("failed to validate tx".to_string()))
    }

    fn validate_inputs(&self, inputs: Inputs) -> Result<Vec<OutputIdentifier>, pool::PoolError> {
    fn validate_inputs(&self, inputs: &Inputs) -> Result<Vec<OutputIdentifier>, pool::PoolError> {
        self.chain()
            .validate_inputs(inputs)
            .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect::<Vec<_>>())
            .map_err(|_| pool::PoolError::Other("failed to validate tx".to_string()))
    }

    fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
    fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), pool::PoolError> {
        self.chain()
            .verify_coinbase_maturity(tx)
            .verify_coinbase_maturity(inputs)
            .map_err(|_| pool::PoolError::ImmatureCoinbase)
    }

@@ -73,7 +73,7 @@ where
    }
}

const DEFAULT_DB_VERSION: ProtocolVersion = ProtocolVersion(2);
const DEFAULT_DB_VERSION: ProtocolVersion = ProtocolVersion(3);

/// LMDB-backed store facilitating data access and serialization. All writes
/// are done through a Batch abstraction providing atomicity.
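Bumping the store default to protocol version 3 means locally persisted blocks are written with commit-only inputs. A sketch of version-dispatched serialization under stated assumptions: the byte layout shown (features byte plus commitment for v2, commitment only for v3) is a simplification of the real ser code, and Input/serialize_input are hypothetical names:

#[derive(Clone, Copy)]
struct ProtocolVersion(u32);

const DEFAULT_DB_VERSION: ProtocolVersion = ProtocolVersion(3);

struct Input {
    features: u8,
    commit: [u8; 4],
}

// Write inputs differently depending on the protocol version in use.
fn serialize_input(buf: &mut Vec<u8>, input: &Input, version: ProtocolVersion) {
    if version.0 >= 3 {
        // v3: commit only
        buf.extend_from_slice(&input.commit);
    } else {
        // v2: features byte followed by the commitment
        buf.push(input.features);
        buf.extend_from_slice(&input.commit);
    }
}

fn main() {
    let input = Input { features: 1, commit: [0xAA; 4] };
    let mut v3 = Vec::new();
    serialize_input(&mut v3, &input, DEFAULT_DB_VERSION);
    let mut v2 = Vec::new();
    serialize_input(&mut v2, &input, ProtocolVersion(2));
    assert_eq!(v3.len() + 1, v2.len()); // v2 carries the extra features byte
}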
@@ -157,11 +157,16 @@ impl Store {
            env: self.env.clone(),
            db: self.db.clone(),
            name: self.name.clone(),
            version: version,
            version,
            alloc_chunk_size,
        }
    }

    /// Protocol version for the store.
    pub fn protocol_version(&self) -> ProtocolVersion {
        self.version
    }

    /// Opens the database environment
    pub fn open(&self) -> Result<(), Error> {
        let mut w = self.db.write();
@@ -275,7 +280,7 @@ impl Store {
    ) -> Result<Option<T>, Error> {
        let res: lmdb::error::Result<&[u8]> = access.get(&db, key);
        match res.to_opt() {
            Ok(Some(mut res)) => match ser::deserialize(&mut res, self.version) {
            Ok(Some(mut res)) => match ser::deserialize(&mut res, self.protocol_version()) {
                Ok(res) => Ok(Some(res)),
                Err(e) => Err(Error::SerErr(format!("{}", e))),
            },
@@ -310,7 +315,7 @@ impl Store {
            cursor,
            seek: false,
            prefix: from.to_vec(),
            version: self.version,
            version: self.protocol_version(),
            _marker: marker::PhantomData,
        })
    }
@@ -348,7 +353,12 @@ impl<'a> Batch<'a> {
    /// Writes a single key and its `Writeable` value to the db.
    /// Encapsulates serialization using the (default) version configured on the store instance.
    pub fn put_ser<W: ser::Writeable>(&self, key: &[u8], value: &W) -> Result<(), Error> {
        self.put_ser_with_version(key, value, self.store.version)
        self.put_ser_with_version(key, value, self.store.protocol_version())
    }

    /// Protocol version used by this batch.
    pub fn protocol_version(&self) -> ProtocolVersion {
        self.store.protocol_version()
    }

    /// Writes a single key and its `Writeable` value to the db.
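The batch-level accessor keeps all writes funnelled through one version source: put_ser delegates to put_ser_with_version using the store's default, and callers that need a specific wire format can still pass an explicit version. A minimal sketch of the delegation pattern (Store, Batch, and the println-backed put_ser_with_version are simplified placeholders, not the real LMDB-backed API):

#[derive(Clone, Copy, Debug)]
struct ProtocolVersion(u32);

struct Store {
    version: ProtocolVersion,
}

impl Store {
    fn protocol_version(&self) -> ProtocolVersion {
        self.version
    }
}

struct Batch<'a> {
    store: &'a Store,
}

impl<'a> Batch<'a> {
    fn protocol_version(&self) -> ProtocolVersion {
        self.store.protocol_version()
    }
    fn put_ser_with_version(&self, key: &[u8], value: &[u8], version: ProtocolVersion) {
        println!("put {:?} = {:?} at {:?}", key, value, version);
    }
    // Default write path: use the store's configured protocol version.
    fn put_ser(&self, key: &[u8], value: &[u8]) {
        self.put_ser_with_version(key, value, self.protocol_version())
    }
}

fn main() {
    let store = Store { version: ProtocolVersion(3) };
    let batch = Batch { store: &store };
    batch.put_ser(b"key", b"value");
}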