mirror of https://github.com/mimblewimble/grin.git
synced 2025-04-23 02:41:14 +03:00
NRD rules and "recent" kernel pos index (#3302)
* variants for output_pos linked list entries (head/tail/middle/unique) next and prev and Vec<u8> lmdb keys
* get_pos on enum
* break list and list entries out into separate enums
* track output features in the new output_pos index, so we can determine coinbase maturity
* push entry impl for none and unique
* some test coverage for output_pos_list
* commit
* wip - FooListEntry
* use instance of the index
* linked list of output_pos and commit_pos both now supported
* linked_list
* cleanup and rename
* rename
* peek_pos
* push some, peek some, pop some
* cleanup
* commit pos cleanup
* split list and entry out into separate db prefixes
* cleanup and add placeholder for pop_back
* pop_pos_back (for popping off the back of the linked list), test coverage for pop_pos_back
* wip
* placeholder for prune via a trait; pos must always increase in the index
* rewind kernel_pos_idx when calling rewind_single_block
* RewindableListIndex with rewind support
* test coverage for rewindable list index
* test coverage for rewind back to 0
* rewind past end of list
* add tests for kernel_pos_idx with multiple commits
* commit
* cleanup
* hook NRD relative lock height validation into block processing and tx validation
* cleanup
* set local chain type for kernel_idx tests
* add test coverage for NRD rules in block processing
* NRD test coverage and cleanup
* NRD relative height 1 test
* test coverage for NRD kernels in block processing
* cleanup
* start of test coverage for txpool NRD kernel rules
* wip
* rework pool tests to use real chain (was mock chain) to better reflect reality (tx/block validation rules etc.)
* cleanup
* cleanup pruneable trait for kernel pos index
* add clear() to kernel_pos idx and test coverage
* hook kernel_pos rebuild into node startup, compaction and fast sync
* verify full NRD history on fast sync
* return early if nrd disabled
* fix header sync issue
parent b98e5e06a6
commit 20e5c1910b
22 changed files with 2209 additions and 66 deletions
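The heart of this change is the NoRecentDuplicate (NRD) rule: a kernel with a given excess commitment is only valid if at least relative_height blocks have passed since the most recent kernel with the same excess. A minimal sketch of the check, using simplified standalone types rather than the real chain types (the actual implementation is apply_kernel_rules in the txhashset diff below; KernelPos and check_nrd_lock here are illustrative only):

// Minimal sketch of the NRD relative height check, with simplified
// standalone types rather than the real chain types shown further down.
struct KernelPos {
    height: u64, // height of the block containing the previous NRD kernel instance
}

fn check_nrd_lock(
    prev: Option<&KernelPos>, // most recent index entry for this excess, if any
    new_height: u64,          // height the new kernel instance is being applied at
    relative_height: u64,     // lock height carried by the NRD kernel
) -> Result<(), &'static str> {
    if let Some(prev) = prev {
        // saturating_sub mirrors the real implementation: heights only move forward.
        let diff = new_height.saturating_sub(prev.height);
        if diff < relative_height {
            return Err("NRD relative height lock not met");
        }
    }
    Ok(())
}

fn main() {
    let prev = KernelPos { height: 9 };
    // relative_height = 2: a duplicate at height 10 is too soon, height 11 is fine.
    assert!(check_nrd_lock(Some(&prev), 10, 2).is_err());
    assert!(check_nrd_lock(Some(&prev), 11, 2).is_ok());
}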
Cargo.lock (generated, 1 change)
@@ -893,6 +893,7 @@ dependencies = [
 "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
 "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
 "croaring-mw 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "failure 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
 "failure_derive 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -26,7 +26,7 @@ use std::sync::{Arc, Weak};
 // boilerplate of dealing with `Weak`.
 pub fn w<T>(weak: &Weak<T>) -> Result<Arc<T>, Error> {
     weak.upgrade()
-        .ok_or_else(|| ErrorKind::Internal("failed to upgrade weak refernce".to_owned()).into())
+        .ok_or_else(|| ErrorKind::Internal("failed to upgrade weak reference".to_owned()).into())
 }

 /// Internal function to retrieves an output by a given commitment
@@ -16,6 +16,7 @@ byteorder = "1"
 failure = "0.1"
 failure_derive = "0.1"
 croaring = { version = "0.4.5", package = "croaring-mw", features = ["compat"] }
+enum_primitive = "0.1"
 log = "0.4"
 serde = "1"
 serde_derive = "1"
@@ -19,7 +19,8 @@ use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
 use crate::core::core::merkle_proof::MerkleProof;
 use crate::core::core::verifier_cache::VerifierCache;
 use crate::core::core::{
-    Block, BlockHeader, BlockSums, Committed, Output, OutputIdentifier, Transaction, TxKernel,
+    Block, BlockHeader, BlockSums, Committed, KernelFeatures, Output, OutputIdentifier,
+    Transaction, TxKernel,
 };
 use crate::core::global;
 use crate::core::pow;
@@ -195,12 +196,12 @@ impl Chain {
         &mut txhashset,
     )?;

-    // Initialize the output_pos index based on UTXO set.
-    // This is fast as we only look for stale and missing entries
-    // and do not need to rebuild the entire index.
+    // Initialize the output_pos index based on UTXO set
+    // and NRD kernel_pos index based on recent kernel history.
     {
         let batch = store.batch()?;
         txhashset.init_output_pos_index(&header_pmmr, &batch)?;
+        txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
         batch.commit()?;
     }
@@ -296,6 +297,11 @@ impl Chain {
     /// Returns true if it has been added to the longest chain
     /// or false if it has added to a fork (or orphan?).
     fn process_block_single(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
+        // Process the header first.
+        // If invalid then fail early.
+        // If valid then continue with block processing with header_head committed to db etc.
+        self.process_block_header(&b.header, opts)?;
+
         let (maybe_new_head, prev_head) = {
             let mut header_pmmr = self.header_pmmr.write();
             let mut txhashset = self.txhashset.write();
@@ -513,13 +519,38 @@ impl Chain {
         })
     }

-    /// Validate the tx against the current UTXO set.
+    /// Validate the tx against the current UTXO set and recent kernels (NRD relative lock heights).
     pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
+        self.validate_tx_against_utxo(tx)?;
+        self.validate_tx_kernels(tx)?;
+        Ok(())
+    }
+
+    /// Validates NRD relative height locks against "recent" kernel history.
+    /// Applies the kernels to the current kernel MMR in a readonly extension.
+    /// The extension and the db batch are discarded.
+    /// The batch ensures duplicate NRD kernels within the tx are handled correctly.
+    fn validate_tx_kernels(&self, tx: &Transaction) -> Result<(), Error> {
+        let has_nrd_kernel = tx.kernels().iter().any(|k| match k.features {
+            KernelFeatures::NoRecentDuplicate { .. } => true,
+            _ => false,
+        });
+        if !has_nrd_kernel {
+            return Ok(());
+        }
+        let mut header_pmmr = self.header_pmmr.write();
+        let mut txhashset = self.txhashset.write();
+        txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+            let height = self.next_block_height()?;
+            ext.extension.apply_kernels(tx.kernels(), height, batch)
+        })
+    }
+
     fn validate_tx_against_utxo(&self, tx: &Transaction) -> Result<(), Error> {
         let header_pmmr = self.header_pmmr.read();
         let txhashset = self.txhashset.read();
         txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
-            utxo.validate_tx(tx, batch)?;
-            Ok(())
+            utxo.validate_tx(tx, batch)
         })
     }
@@ -929,8 +960,16 @@ impl Chain {
         Some(&header),
     )?;

-    // Validate the full kernel history (kernel MMR root for every block header).
-    self.validate_kernel_history(&header, &txhashset)?;
+    // Validate the full kernel history.
+    // Check kernel MMR root for every block header.
+    // Check NRD relative height rules for full kernel history.
+    {
+        self.validate_kernel_history(&header, &txhashset)?;
+
+        let header_pmmr = self.header_pmmr.read();
+        let batch = self.store.batch()?;
+        txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch)?;
+    }

     // all good, prepare a new batch and update all the required records
     debug!("txhashset_write: rewinding a 2nd time (writeable)");
@@ -979,6 +1018,9 @@ impl Chain {
     // Rebuild our output_pos index in the db based on fresh UTXO set.
     txhashset.init_output_pos_index(&header_pmmr, &batch)?;

+    // Rebuild our NRD kernel_pos index based on recent kernel history.
+    txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
+
     // Commit all the changes to the db.
     batch.commit()?;
@@ -1115,6 +1157,9 @@ impl Chain {
     // Make sure our output_pos index is consistent with the UTXO set.
     txhashset.init_output_pos_index(&header_pmmr, &batch)?;

+    // Rebuild our NRD kernel_pos index based on recent kernel history.
+    txhashset.init_recent_kernel_pos_index(&header_pmmr, &batch)?;
+
     // Commit all the above db changes.
     batch.commit()?;
@@ -122,6 +122,9 @@ pub enum ErrorKind {
     /// Tx not valid based on lock_height.
     #[fail(display = "Transaction Lock Height")]
     TxLockHeight,
+    /// Tx is not valid due to NRD relative_height restriction.
+    #[fail(display = "NRD Relative Height")]
+    NRDRelativeHeight,
     /// No chain exists and genesis block is required
     #[fail(display = "Genesis Block Required")]
     GenesisBlockRequired,
@@ -23,6 +23,9 @@
 #[macro_use]
 extern crate bitflags;

+#[macro_use]
+extern crate enum_primitive;
+
 #[macro_use]
 extern crate serde_derive;
 #[macro_use]
@@ -35,6 +38,7 @@ use grin_util as util;

 mod chain;
 mod error;
+pub mod linked_list;
 pub mod pipe;
 pub mod store;
 pub mod txhashset;
chain/src/linked_list.rs (new file, 582 lines)
@@ -0,0 +1,582 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implements "linked list" storage primitive for lmdb index supporting multiple entries.

use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
use crate::store::Batch;
use crate::types::CommitPos;
use crate::util::secp::pedersen::Commitment;
use enum_primitive::FromPrimitive;
use grin_store as store;
use std::marker::PhantomData;
use store::{to_key, to_key_u64, Error};

enum_from_primitive! {
    #[derive(Copy, Clone, Debug, PartialEq)]
    enum ListWrapperVariant {
        Single = 0,
        Multi = 1,
    }
}

impl Writeable for ListWrapperVariant {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        writer.write_u8(*self as u8)
    }
}

impl Readable for ListWrapperVariant {
    fn read<R: Reader>(reader: &mut R) -> Result<ListWrapperVariant, ser::Error> {
        ListWrapperVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData)
    }
}

enum_from_primitive! {
    #[derive(Copy, Clone, Debug, PartialEq)]
    enum ListEntryVariant {
        // Start at 2 here to differentiate from ListWrapperVariant above.
        Head = 2,
        Tail = 3,
        Middle = 4,
    }
}

impl Writeable for ListEntryVariant {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        writer.write_u8(*self as u8)
    }
}

impl Readable for ListEntryVariant {
    fn read<R: Reader>(reader: &mut R) -> Result<ListEntryVariant, ser::Error> {
        ListEntryVariant::from_u8(reader.read_u8()?).ok_or(ser::Error::CorruptedData)
    }
}

/// Index supporting a list of (duplicate) entries per commitment.
/// Each entry will be at a unique MMR pos.
pub trait ListIndex {
    /// List type
    type List: Readable + Writeable;

    /// List entry type
    type Entry: ListIndexEntry;

    /// Construct a key for the list.
    fn list_key(&self, commit: Commitment) -> Vec<u8>;

    /// Construct a key for an individual entry in the list.
    fn entry_key(&self, commit: Commitment, pos: u64) -> Vec<u8>;

    /// Returns either a "Single" with embedded "pos" or a "list" with "head" and "tail".
    /// Key is "prefix|commit".
    /// Note the key for an individual entry in the list is "prefix|commit|pos".
    fn get_list(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<Self::List>, Error> {
        batch.db.get_ser(&self.list_key(commit))
    }

    /// Returns one of "head", "tail" or "middle" entry variants.
    /// Key is "prefix|commit|pos".
    fn get_entry(
        &self,
        batch: &Batch<'_>,
        commit: Commitment,
        pos: u64,
    ) -> Result<Option<Self::Entry>, Error> {
        batch.db.get_ser(&self.entry_key(commit, pos))
    }

    /// Peek the head of the list for the specified commitment.
    fn peek_pos(
        &self,
        batch: &Batch<'_>,
        commit: Commitment,
    ) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;

    /// Push a pos onto the list for the specified commitment.
    fn push_pos(
        &self,
        batch: &Batch<'_>,
        commit: Commitment,
        new_pos: <Self::Entry as ListIndexEntry>::Pos,
    ) -> Result<(), Error>;

    /// Pop a pos off the list for the specified commitment.
    fn pop_pos(
        &self,
        batch: &Batch<'_>,
        commit: Commitment,
    ) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;
}

/// Supports "rewind" given the provided commit and a pos to rewind back to.
pub trait RewindableListIndex {
    /// Rewind the index for the given commitment to the specified position.
    fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error>;
}

/// A pruneable list index supports pruning of old data from the index lists.
/// This allows us to efficiently maintain an index of "recent" kernel data.
/// We can maintain a window of 2 weeks of recent data, discarding anything older than this.
pub trait PruneableListIndex: ListIndex {
    /// Clear all data from the index.
    /// Used when rebuilding the index.
    fn clear(&self, batch: &Batch<'_>) -> Result<(), Error>;

    /// Prune old data.
    fn prune(&self, batch: &Batch<'_>, commit: Commitment, cutoff_pos: u64) -> Result<(), Error>;

    /// Pop a pos off the back of the list (used for pruning old data).
    fn pop_pos_back(
        &self,
        batch: &Batch<'_>,
        commit: Commitment,
    ) -> Result<Option<<Self::Entry as ListIndexEntry>::Pos>, Error>;
}

/// Wrapper for the list to handle either `Single` or `Multi` entries.
/// Optimized for the common case where we have a single entry in the list.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum ListWrapper<T> {
    /// List with a single entry.
    /// Allows direct access to the pos.
    Single {
        /// The MMR pos where this single entry is located.
        pos: T,
    },
    /// List with multiple entries.
    /// Maintains head and tail of the underlying linked list.
    Multi {
        /// Head of the linked list.
        head: u64,
        /// Tail of the linked list.
        tail: u64,
    },
}

impl<T> Writeable for ListWrapper<T>
where
    T: Writeable,
{
    /// Write first byte representing the variant, followed by variant specific data.
    /// "Single" is optimized with embedded "pos".
    /// "Multi" has references to "head" and "tail".
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        match self {
            ListWrapper::Single { pos } => {
                ListWrapperVariant::Single.write(writer)?;
                pos.write(writer)?;
            }
            ListWrapper::Multi { head, tail } => {
                ListWrapperVariant::Multi.write(writer)?;
                writer.write_u64(*head)?;
                writer.write_u64(*tail)?;
            }
        }
        Ok(())
    }
}

impl<T> Readable for ListWrapper<T>
where
    T: Readable,
{
    /// Read the first byte to determine what needs to be read beyond that.
    fn read<R: Reader>(reader: &mut R) -> Result<ListWrapper<T>, ser::Error> {
        let entry = match ListWrapperVariant::read(reader)? {
            ListWrapperVariant::Single => ListWrapper::Single {
                pos: T::read(reader)?,
            },
            ListWrapperVariant::Multi => ListWrapper::Multi {
                head: reader.read_u64()?,
                tail: reader.read_u64()?,
            },
        };
        Ok(entry)
    }
}

/// Index supporting multiple duplicate entries.
pub struct MultiIndex<T> {
    phantom: PhantomData<*const T>,
    list_prefix: u8,
    entry_prefix: u8,
}

impl<T> MultiIndex<T> {
    /// Initialize a new multi index with the specified list and entry prefixes.
    pub fn init(list_prefix: u8, entry_prefix: u8) -> MultiIndex<T> {
        MultiIndex {
            phantom: PhantomData,
            list_prefix,
            entry_prefix,
        }
    }
}

impl<T> ListIndex for MultiIndex<T>
where
    T: PosEntry,
{
    type List = ListWrapper<T>;
    type Entry = ListEntry<T>;

    fn list_key(&self, commit: Commitment) -> Vec<u8> {
        to_key(self.list_prefix, &mut commit.as_ref().to_vec())
    }

    fn entry_key(&self, commit: Commitment, pos: u64) -> Vec<u8> {
        to_key_u64(self.entry_prefix, &mut commit.as_ref().to_vec(), pos)
    }

    fn peek_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
        match self.get_list(batch, commit)? {
            None => Ok(None),
            Some(ListWrapper::Single { pos }) => Ok(Some(pos)),
            Some(ListWrapper::Multi { head, .. }) => {
                if let Some(ListEntry::Head { pos, .. }) = self.get_entry(batch, commit, head)? {
                    Ok(Some(pos))
                } else {
                    Err(Error::OtherErr("expected head to be head variant".into()))
                }
            }
        }
    }

    fn push_pos(&self, batch: &Batch<'_>, commit: Commitment, new_pos: T) -> Result<(), Error> {
        match self.get_list(batch, commit)? {
            None => {
                let list = ListWrapper::Single { pos: new_pos };
                batch.db.put_ser(&self.list_key(commit), &list)?;
            }
            Some(ListWrapper::Single { pos: current_pos }) => {
                if new_pos.pos() <= current_pos.pos() {
                    return Err(Error::OtherErr("pos must be increasing".into()));
                }

                let head = ListEntry::Head {
                    pos: new_pos,
                    next: current_pos.pos(),
                };
                let tail = ListEntry::Tail {
                    pos: current_pos,
                    prev: new_pos.pos(),
                };
                let list: ListWrapper<T> = ListWrapper::Multi {
                    head: new_pos.pos(),
                    tail: current_pos.pos(),
                };
                batch
                    .db
                    .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?;
                batch
                    .db
                    .put_ser(&self.entry_key(commit, current_pos.pos()), &tail)?;
                batch.db.put_ser(&self.list_key(commit), &list)?;
            }
            Some(ListWrapper::Multi { head, tail }) => {
                if new_pos.pos() <= head {
                    return Err(Error::OtherErr("pos must be increasing".into()));
                }

                if let Some(ListEntry::Head {
                    pos: current_pos,
                    next: current_next,
                }) = self.get_entry(batch, commit, head)?
                {
                    let head = ListEntry::Head {
                        pos: new_pos,
                        next: current_pos.pos(),
                    };
                    let middle = ListEntry::Middle {
                        pos: current_pos,
                        next: current_next,
                        prev: new_pos.pos(),
                    };
                    let list: ListWrapper<T> = ListWrapper::Multi {
                        head: new_pos.pos(),
                        tail,
                    };
                    batch
                        .db
                        .put_ser(&self.entry_key(commit, new_pos.pos()), &head)?;
                    batch
                        .db
                        .put_ser(&self.entry_key(commit, current_pos.pos()), &middle)?;
                    batch.db.put_ser(&self.list_key(commit), &list)?;
                } else {
                    return Err(Error::OtherErr("expected head to be head variant".into()));
                }
            }
        }
        Ok(())
    }

    /// Pop the head of the list.
    /// Returns the output_pos.
    /// Returns None if list was empty.
    fn pop_pos(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
        match self.get_list(batch, commit)? {
            None => Ok(None),
            Some(ListWrapper::Single { pos }) => {
                batch.delete(&self.list_key(commit))?;
                Ok(Some(pos))
            }
            Some(ListWrapper::Multi { head, tail }) => {
                if let Some(ListEntry::Head {
                    pos: current_pos,
                    next: current_next,
                }) = self.get_entry(batch, commit, head)?
                {
                    match self.get_entry(batch, commit, current_next)? {
                        Some(ListEntry::Middle { pos, next, .. }) => {
                            let head = ListEntry::Head { pos, next };
                            let list: ListWrapper<T> = ListWrapper::Multi {
                                head: pos.pos(),
                                tail,
                            };
                            batch.delete(&self.entry_key(commit, current_pos.pos()))?;
                            batch
                                .db
                                .put_ser(&self.entry_key(commit, pos.pos()), &head)?;
                            batch.db.put_ser(&self.list_key(commit), &list)?;
                            Ok(Some(current_pos))
                        }
                        Some(ListEntry::Tail { pos, .. }) => {
                            let list = ListWrapper::Single { pos };
                            batch.delete(&self.entry_key(commit, current_pos.pos()))?;
                            batch.db.put_ser(&self.list_key(commit), &list)?;
                            Ok(Some(current_pos))
                        }
                        Some(_) => Err(Error::OtherErr("next was unexpected".into())),
                        None => Err(Error::OtherErr("next missing".into())),
                    }
                } else {
                    Err(Error::OtherErr("expected head to be head variant".into()))
                }
            }
        }
    }
}

/// List index that supports rewind.
impl<T: PosEntry> RewindableListIndex for MultiIndex<T> {
    fn rewind(&self, batch: &Batch<'_>, commit: Commitment, rewind_pos: u64) -> Result<(), Error> {
        while self
            .peek_pos(batch, commit)?
            .map(|x| x.pos() > rewind_pos)
            .unwrap_or(false)
        {
            self.pop_pos(batch, commit)?;
        }
        Ok(())
    }
}

impl<T: PosEntry> PruneableListIndex for MultiIndex<T> {
    fn clear(&self, batch: &Batch<'_>) -> Result<(), Error> {
        let mut list_count = 0;
        let mut entry_count = 0;
        let prefix = to_key(self.list_prefix, "");
        for (key, _) in batch.db.iter::<ListWrapper<T>>(&prefix)? {
            let _ = batch.delete(&key);
            list_count += 1;
        }
        let prefix = to_key(self.entry_prefix, "");
        for (key, _) in batch.db.iter::<ListEntry<T>>(&prefix)? {
            let _ = batch.delete(&key);
            entry_count += 1;
        }
        debug!(
            "clear: lists deleted: {}, entries deleted: {}",
            list_count, entry_count
        );
        Ok(())
    }

    /// Pruning will be more performant than full rebuild but not yet necessary.
    fn prune(
        &self,
        _batch: &Batch<'_>,
        _commit: Commitment,
        _cutoff_pos: u64,
    ) -> Result<(), Error> {
        unimplemented!(
            "we currently rebuild index on startup/compaction, pruning not yet implemented"
        );
    }

    /// Pop off the back/tail of the linked list.
    /// Used when pruning old data.
    fn pop_pos_back(&self, batch: &Batch<'_>, commit: Commitment) -> Result<Option<T>, Error> {
        match self.get_list(batch, commit)? {
            None => Ok(None),
            Some(ListWrapper::Single { pos }) => {
                batch.delete(&self.list_key(commit))?;
                Ok(Some(pos))
            }
            Some(ListWrapper::Multi { head, tail }) => {
                if let Some(ListEntry::Tail {
                    pos: current_pos,
                    prev: current_prev,
                }) = self.get_entry(batch, commit, tail)?
                {
                    match self.get_entry(batch, commit, current_prev)? {
                        Some(ListEntry::Middle { pos, prev, .. }) => {
                            let tail = ListEntry::Tail { pos, prev };
                            let list: ListWrapper<T> = ListWrapper::Multi {
                                head,
                                tail: pos.pos(),
                            };
                            batch.delete(&self.entry_key(commit, current_pos.pos()))?;
                            batch
                                .db
                                .put_ser(&self.entry_key(commit, pos.pos()), &tail)?;
                            batch.db.put_ser(&self.list_key(commit), &list)?;
                            Ok(Some(current_pos))
                        }
                        Some(ListEntry::Head { pos, .. }) => {
                            let list = ListWrapper::Single { pos };
                            batch.delete(&self.entry_key(commit, current_pos.pos()))?;
                            batch.db.put_ser(&self.list_key(commit), &list)?;
                            Ok(Some(current_pos))
                        }
                        Some(_) => Err(Error::OtherErr("prev was unexpected".into())),
                        None => Err(Error::OtherErr("prev missing".into())),
                    }
                } else {
                    Err(Error::OtherErr("expected tail to be tail variant".into()))
                }
            }
        }
    }
}

/// Something that tracks pos (in an MMR).
pub trait PosEntry: Readable + Writeable + Copy {
    /// Accessor for the underlying (MMR) pos.
    fn pos(&self) -> u64;
}

impl PosEntry for CommitPos {
    fn pos(&self) -> u64 {
        self.pos
    }
}

/// Entry maintained in the list index.
pub trait ListIndexEntry: Readable + Writeable {
    /// Type of the underlying pos indexed in the list.
    type Pos: PosEntry;

    /// Accessor for the underlying pos.
    fn get_pos(&self) -> Self::Pos;
}

impl<T> ListIndexEntry for ListEntry<T>
where
    T: PosEntry,
{
    type Pos = T;

    /// Read the common pos from the various enum variants.
    fn get_pos(&self) -> Self::Pos {
        match self {
            Self::Head { pos, .. } => *pos,
            Self::Tail { pos, .. } => *pos,
            Self::Middle { pos, .. } => *pos,
        }
    }
}

/// Head|Middle|Tail variants for the linked list entries.
pub enum ListEntry<T> {
    /// Head of the list.
    Head {
        /// The thing in the list.
        pos: T,
        /// The next entry in the list.
        next: u64,
    },
    /// Tail of the list.
    Tail {
        /// The thing in the list.
        pos: T,
        /// The previous entry in the list.
        prev: u64,
    },
    /// An entry in the middle of the list.
    Middle {
        /// The thing in the list.
        pos: T,
        /// The next entry in the list.
        next: u64,
        /// The previous entry in the list.
        prev: u64,
    },
}

impl<T> Writeable for ListEntry<T>
where
    T: Writeable,
{
    /// Write first byte representing the variant, followed by variant specific data.
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        match self {
            ListEntry::Head { pos, next } => {
                ListEntryVariant::Head.write(writer)?;
                pos.write(writer)?;
                writer.write_u64(*next)?;
            }
            ListEntry::Tail { pos, prev } => {
                ListEntryVariant::Tail.write(writer)?;
                pos.write(writer)?;
                writer.write_u64(*prev)?;
            }
            ListEntry::Middle { pos, next, prev } => {
                ListEntryVariant::Middle.write(writer)?;
                pos.write(writer)?;
                writer.write_u64(*next)?;
                writer.write_u64(*prev)?;
            }
        }
        Ok(())
    }
}

impl<T> Readable for ListEntry<T>
where
    T: Readable,
{
    /// Read the first byte to determine what needs to be read beyond that.
    fn read<R: Reader>(reader: &mut R) -> Result<ListEntry<T>, ser::Error> {
        let entry = match ListEntryVariant::read(reader)? {
            ListEntryVariant::Head => ListEntry::Head {
                pos: T::read(reader)?,
                next: reader.read_u64()?,
            },
            ListEntryVariant::Tail => ListEntry::Tail {
                pos: T::read(reader)?,
                prev: reader.read_u64()?,
            },
            ListEntryVariant::Middle => ListEntry::Middle {
                pos: T::read(reader)?,
                next: reader.read_u64()?,
                prev: reader.read_u64()?,
            },
        };
        Ok(entry)
    }
}
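To make the push/peek/pop semantics of the index above concrete, here is a hedged, self-contained model of the ListWrapper state transitions, using an in-memory Vec in place of the lmdb-backed linked list (ModelIndex and its methods are illustrative only, not part of the file above):

// Illustrative model of the ListWrapper state transitions from linked_list.rs,
// with an in-memory Vec standing in for the lmdb-backed index. Positions must
// be strictly increasing; pushes go on the head, pops come off the head.
#[derive(Debug, PartialEq)]
enum ListWrapper {
    Single { pos: u64 },
    Multi { head: u64, tail: u64 },
}

struct ModelIndex {
    // Newest first, mirroring head -> tail order of the linked list.
    entries: Vec<u64>,
}

impl ModelIndex {
    fn state(&self) -> Option<ListWrapper> {
        match self.entries.as_slice() {
            [] => None,
            [pos] => Some(ListWrapper::Single { pos: *pos }),
            [head, .., tail] => Some(ListWrapper::Multi { head: *head, tail: *tail }),
        }
    }
    fn push_pos(&mut self, new_pos: u64) -> Result<(), &'static str> {
        if self.entries.first().map(|&h| new_pos <= h).unwrap_or(false) {
            return Err("pos must be increasing");
        }
        self.entries.insert(0, new_pos);
        Ok(())
    }
    fn pop_pos(&mut self) -> Option<u64> {
        if self.entries.is_empty() {
            None
        } else {
            Some(self.entries.remove(0))
        }
    }
}

fn main() {
    let mut idx = ModelIndex { entries: vec![] };
    idx.push_pos(1).unwrap();
    assert_eq!(idx.state(), Some(ListWrapper::Single { pos: 1 }));
    idx.push_pos(2).unwrap();
    assert_eq!(idx.state(), Some(ListWrapper::Multi { head: 2, tail: 1 }));
    assert!(idx.push_pos(2).is_err()); // duplicate/decreasing pos rejected
    assert_eq!(idx.pop_pos(), Some(2)); // pops return newest first
    assert_eq!(idx.state(), Some(ListWrapper::Single { pos: 1 }));
}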
@@ -227,9 +227,6 @@ pub fn sync_block_headers(
 /// Note: In contrast to processing a full block we treat "already known" as success
 /// to allow processing to continue (for header itself).
 pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
-    // Check this header is not an orphan, we must know about the previous header to continue.
-    let prev_header = ctx.batch.get_previous_header(&header)?;
-
     // If we have already processed the full block for this header then done.
     // Note: "already known" in this context is success so subsequent processing can continue.
     {
@@ -239,6 +236,9 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
        }
    }

+    // Check this header is not an orphan, we must know about the previous header to continue.
+    let prev_header = ctx.batch.get_previous_header(&header)?;
+
     // If we have not yet seen the full block then check if we have seen this header.
     // If it does not increase total_difficulty beyond our current header_head
     // then we can (re)accept this header and process the full block (or request it).
@@ -19,6 +19,7 @@ use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::{Block, BlockHeader, BlockSums};
 use crate::core::pow::Difficulty;
 use crate::core::ser::ProtocolVersion;
+use crate::linked_list::MultiIndex;
 use crate::types::{CommitPos, Tip};
 use crate::util::secp::pedersen::Commitment;
 use croaring::Bitmap;
@@ -35,6 +36,12 @@ const HEAD_PREFIX: u8 = b'H';
 const TAIL_PREFIX: u8 = b'T';
 const HEADER_HEAD_PREFIX: u8 = b'G';
 const OUTPUT_POS_PREFIX: u8 = b'p';
+
+/// Prefix for NRD kernel pos index lists.
+pub const NRD_KERNEL_LIST_PREFIX: u8 = b'K';
+/// Prefix for NRD kernel pos index entries.
+pub const NRD_KERNEL_ENTRY_PREFIX: u8 = b'k';
+
 const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B';
 const BLOCK_SUMS_PREFIX: u8 = b'M';
 const BLOCK_SPENT_PREFIX: u8 = b'S';
@@ -61,9 +68,7 @@ impl ChainStore {
            db: db_with_version,
        }
    }
-}
-
-impl ChainStore {
     /// The current chain head.
     pub fn head(&self) -> Result<Tip, Error> {
         option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned())
@@ -144,7 +149,8 @@ impl ChainStore {
 /// An atomic batch in which all changes can be committed all at once or
 /// discarded on error.
 pub struct Batch<'a> {
-    db: store::Batch<'a>,
+    /// The underlying db instance.
+    pub db: store::Batch<'a>,
 }

 impl<'a> Batch<'a> {
@@ -475,3 +481,10 @@ impl<'a> Iterator for DifficultyIter<'a> {
        }
    }
 }
+
+/// Init the NRD "recent history" kernel index backed by the underlying db.
+/// List index supports multiple entries per key, maintaining insertion order.
+/// Allows for fast lookup of the most recent entry per excess commitment.
+pub fn nrd_recent_kernel_index() -> MultiIndex<CommitPos> {
+    MultiIndex::init(NRD_KERNEL_LIST_PREFIX, NRD_KERNEL_ENTRY_PREFIX)
+}
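Given the two prefixes, each commitment ends up with one wrapper record plus one record per duplicate instance. A sketch of the resulting key shapes, assuming keys are simply the prefix byte followed by the commitment bytes and, for entries, a big-endian u64 pos; the exact byte layout (including any separator bytes) is an assumption here, the real helpers being to_key/to_key_u64 in grin_store:

// Illustrative reconstruction of the two key shapes used by the NRD kernel index.
// Assumed layout: prefix byte, then commitment bytes, then (entries only) a
// big-endian u64 pos. Any separator bytes the real helpers add are omitted.
fn list_key(prefix: u8, commit: &[u8]) -> Vec<u8> {
    // "prefix|commit": one wrapper record per excess commitment.
    let mut key = vec![prefix];
    key.extend_from_slice(commit);
    key
}

fn entry_key(prefix: u8, commit: &[u8], pos: u64) -> Vec<u8> {
    // "prefix|commit|pos": one record per duplicate instance of the commitment.
    let mut key = list_key(prefix, commit);
    key.extend_from_slice(&pos.to_be_bytes());
    key
}

fn main() {
    let commit = [0xabu8; 33]; // a pedersen commitment is 33 bytes
    let list = list_key(b'K', &commit); // NRD_KERNEL_LIST_PREFIX
    let entry = entry_key(b'k', &commit, 42); // NRD_KERNEL_ENTRY_PREFIX
    assert_eq!(list.len(), 1 + 33);
    assert_eq!(entry.len(), 1 + 33 + 8);
}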
@@ -15,14 +15,19 @@
 //! Utility structs to handle the 3 MMRs (output, rangeproof,
 //! kernel) along the overall header MMR conveniently and transactionally.

+use crate::core::consensus::WEEK_HEIGHT;
 use crate::core::core::committed::Committed;
 use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::merkle_proof::MerkleProof;
 use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR};
-use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel};
+use crate::core::core::{
+    Block, BlockHeader, Input, KernelFeatures, Output, OutputIdentifier, TxKernel,
+};
 use crate::core::global;
 use crate::core::ser::{PMMRable, ProtocolVersion};
 use crate::error::{Error, ErrorKind};
+use crate::linked_list::{ListIndex, PruneableListIndex, RewindableListIndex};
-use crate::store::{Batch, ChainStore};
+use crate::store::{self, Batch, ChainStore};
 use crate::txhashset::bitmap_accumulator::BitmapAccumulator;
 use crate::txhashset::{RewindableKernelView, UTXOView};
 use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
@@ -375,6 +380,86 @@ impl TxHashSet {
         Ok(())
     }

+    /// (Re)build the NRD kernel_pos index based on 2 weeks of recent kernel history.
+    pub fn init_recent_kernel_pos_index(
+        &self,
+        header_pmmr: &PMMRHandle<BlockHeader>,
+        batch: &Batch<'_>,
+    ) -> Result<(), Error> {
+        let head = batch.head()?;
+        let cutoff = head.height.saturating_sub(WEEK_HEIGHT * 2);
+        let cutoff_hash = header_pmmr.get_header_hash_by_height(cutoff)?;
+        let cutoff_header = batch.get_block_header(&cutoff_hash)?;
+        self.verify_kernel_pos_index(&cutoff_header, header_pmmr, batch)
+    }
+
+    /// Verify and (re)build the NRD kernel_pos index from the provided header onwards.
+    pub fn verify_kernel_pos_index(
+        &self,
+        from_header: &BlockHeader,
+        header_pmmr: &PMMRHandle<BlockHeader>,
+        batch: &Batch<'_>,
+    ) -> Result<(), Error> {
+        if !global::is_nrd_enabled() {
+            return Ok(());
+        }
+
+        let now = Instant::now();
+        let kernel_index = store::nrd_recent_kernel_index();
+        kernel_index.clear(batch)?;
+
+        let prev_size = if from_header.height == 0 {
+            0
+        } else {
+            let prev_header = batch.get_previous_header(&from_header)?;
+            prev_header.kernel_mmr_size
+        };
+
+        debug!(
+            "verify_kernel_pos_index: header: {} at {}, prev kernel_mmr_size: {}",
+            from_header.hash(),
+            from_header.height,
+            prev_size,
+        );
+
+        let kernel_pmmr =
+            ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
+
+        let mut current_pos = prev_size + 1;
+        let mut current_header = from_header.clone();
+        let mut count = 0;
+        while current_pos <= self.kernel_pmmr_h.last_pos {
+            if pmmr::is_leaf(current_pos) {
+                if let Some(kernel) = kernel_pmmr.get_data(current_pos) {
+                    match kernel.features {
+                        KernelFeatures::NoRecentDuplicate { .. } => {
+                            while current_pos > current_header.kernel_mmr_size {
+                                let hash = header_pmmr
+                                    .get_header_hash_by_height(current_header.height + 1)?;
+                                current_header = batch.get_block_header(&hash)?;
+                            }
+                            let new_pos = CommitPos {
+                                pos: current_pos,
+                                height: current_header.height,
+                            };
+                            apply_kernel_rules(&kernel, new_pos, batch)?;
+                            count += 1;
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            current_pos += 1;
+        }
+
+        debug!(
+            "verify_kernel_pos_index: pushed {} entries to the index, took {}s",
+            count,
+            now.elapsed().as_secs(),
+        );
+        Ok(())
+    }
+
     /// (Re)build the output_pos index to be consistent with the current UTXO set.
     /// Remove any "stale" index entries that do not correspond to outputs in the UTXO set.
     /// Add any missing index entries based on UTXO set.
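The "recent history" window above is two weeks of blocks. A quick worked example of the cutoff arithmetic, assuming Grin's one-minute block interval (WEEK_HEIGHT = 10_080, the constant imported from core::consensus above):

// Worked example of the index cutoff used by init_recent_kernel_pos_index.
// WEEK_HEIGHT = 7 * 24 * 60 assumes a one-minute block interval; saturating_sub
// keeps the cutoff at the genesis height on chains younger than two weeks.
const WEEK_HEIGHT: u64 = 7 * 24 * 60; // 10_080

fn cutoff_height(head_height: u64) -> u64 {
    head_height.saturating_sub(WEEK_HEIGHT * 2)
}

fn main() {
    assert_eq!(cutoff_height(500_000), 500_000 - 20_160);
    // A chain younger than two weeks keeps its full kernel history indexed.
    assert_eq!(cutoff_height(15_000), 0);
}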
@@ -453,7 +538,7 @@ impl TxHashSet {
        }
    }
     debug!(
-        "init_height_pos_index: added entries for {} utxos, took {}s",
+        "init_output_pos_index: added entries for {} utxos, took {}s",
         total_outputs,
         now.elapsed().as_secs(),
     );
@@ -933,16 +1018,16 @@ impl<'a> Extension<'a> {
     // Remove the spent output from the output_pos index.
     let mut spent = vec![];
     for input in b.inputs() {
-        let spent_pos = self.apply_input(input, batch)?;
-        affected_pos.push(spent_pos.pos);
+        let pos = self.apply_input(input, batch)?;
+        affected_pos.push(pos.pos);
         batch.delete_output_pos_height(&input.commitment())?;
-        spent.push(spent_pos);
+        spent.push(pos);
     }
     batch.save_spent_index(&b.hash(), &spent)?;

-    for kernel in b.kernels() {
-        self.apply_kernel(kernel)?;
-    }
+    // Apply the kernels to the kernel MMR.
+    // Note: This validates any NRD relative height locks via the "recent" kernel index.
+    self.apply_kernels(b.kernels(), b.header.height, batch)?;

     // Update our BitmapAccumulator based on affected outputs (both spent and created).
     self.apply_to_bitmap_accumulator(&affected_pos)?;
@@ -1037,12 +1122,32 @@ impl<'a> Extension<'a> {
         Ok(output_pos)
     }

+    /// Apply kernels to the kernel MMR.
+    /// Validate any NRD relative height locks via the "recent" kernel index.
+    /// Note: This is used for both block processing and tx validation.
+    /// In the block processing case we use the block height.
+    /// In the tx validation case we use the "next" block height based on current chain head.
+    pub fn apply_kernels(
+        &mut self,
+        kernels: &[TxKernel],
+        height: u64,
+        batch: &Batch<'_>,
+    ) -> Result<(), Error> {
+        for kernel in kernels {
+            let pos = self.apply_kernel(kernel)?;
+            let commit_pos = CommitPos { pos, height };
+            apply_kernel_rules(kernel, commit_pos, batch)?;
+        }
+        Ok(())
+    }
+
     /// Push kernel onto MMR (hash and data files).
-    fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
-        self.kernel_pmmr
+    fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<u64, Error> {
+        let pos = self
+            .kernel_pmmr
             .push(kernel)
             .map_err(&ErrorKind::TxHashSetErr)?;
-        Ok(())
+        Ok(pos)
     }

     /// Build a Merkle proof for the given output and the block
@@ -1109,7 +1214,8 @@ impl<'a> Extension<'a> {
     let mut affected_pos = vec![];
     let mut current = head_header;
     while header.height < current.height {
-        let mut affected_pos_single_block = self.rewind_single_block(&current, batch)?;
+        let block = batch.get_block(&current.hash())?;
+        let mut affected_pos_single_block = self.rewind_single_block(&block, batch)?;
         affected_pos.append(&mut affected_pos_single_block);
         current = batch.get_previous_header(&current)?;
     }
@@ -1126,11 +1232,10 @@ impl<'a> Extension<'a> {
     // Rewind the MMRs and the output_pos index.
     // Returns a vec of "affected_pos" so we can apply the necessary updates to the bitmap
     // accumulator in a single pass for all rewound blocks.
-    fn rewind_single_block(
-        &mut self,
-        header: &BlockHeader,
-        batch: &Batch<'_>,
-    ) -> Result<Vec<u64>, Error> {
+    fn rewind_single_block(&mut self, block: &Block, batch: &Batch<'_>) -> Result<Vec<u64>, Error> {
+        let header = &block.header;
+        let prev_header = batch.get_previous_header(&header)?;
+
         // The spent index allows us to conveniently "unspend" everything in a block.
         let spent = batch.get_spent_index(&header.hash());
@@ -1149,7 +1254,7 @@ impl<'a> Extension<'a> {
     if header.height == 0 {
         self.rewind_mmrs_to_pos(0, 0, &spent_pos)?;
     } else {
-        let prev = batch.get_previous_header(&header)?;
+        let prev = batch.get_previous_header(header)?;
         self.rewind_mmrs_to_pos(prev.output_mmr_size, prev.kernel_mmr_size, &spent_pos)?;
     }
@@ -1160,7 +1265,6 @@ impl<'a> Extension<'a> {
     affected_pos.push(self.output_pmmr.last_pos);

     // Remove any entries from the output_pos created by the block being rewound.
-    let block = batch.get_block(&header.hash())?;
     let mut missing_count = 0;
     for out in block.outputs() {
         if batch.delete_output_pos_height(&out.commitment()).is_err() {
@@ -1176,6 +1280,17 @@ impl<'a> Extension<'a> {
     );
    }

+    // If NRD feature flag is enabled rewind the kernel_pos index
+    // for any NRD kernels in the block being rewound.
+    if global::is_nrd_enabled() {
+        let kernel_index = store::nrd_recent_kernel_index();
+        for kernel in block.kernels() {
+            if let KernelFeatures::NoRecentDuplicate { .. } = kernel.features {
+                kernel_index.rewind(batch, kernel.excess(), prev_header.kernel_mmr_size)?;
+            }
+        }
+    }
+
     // Update output_pos based on "unspending" all spent pos from this block.
     // This is necessary to ensure the output_pos index correctly reflects a
     // reused output commitment. For example an output at pos 1, spent, reused at pos 2.
@@ -1649,3 +1764,36 @@ fn input_pos_to_rewind(
    }
     Ok(bitmap)
 }
+
+/// If NRD enabled then enforce NRD relative height rules.
+fn apply_kernel_rules(kernel: &TxKernel, pos: CommitPos, batch: &Batch<'_>) -> Result<(), Error> {
+    if !global::is_nrd_enabled() {
+        return Ok(());
+    }
+    match kernel.features {
+        KernelFeatures::NoRecentDuplicate {
+            relative_height, ..
+        } => {
+            let kernel_index = store::nrd_recent_kernel_index();
+            debug!("checking NRD index: {:?}", kernel.excess());
+            if let Some(prev) = kernel_index.peek_pos(batch, kernel.excess())? {
+                let diff = pos.height.saturating_sub(prev.height);
+                debug!(
+                    "NRD check: {}, {:?}, {:?}",
+                    pos.height, prev, relative_height
+                );
+                if diff < relative_height.into() {
+                    return Err(ErrorKind::NRDRelativeHeight.into());
+                }
+            }
+            debug!(
+                "pushing entry to NRD index: {:?}: {:?}",
+                kernel.excess(),
+                pos,
+            );
+            kernel_index.push_pos(batch, kernel.excess(), pos)?;
+        }
+        _ => {}
+    }
+    Ok(())
+}
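As a sanity check on the arithmetic in apply_kernel_rules before reading the tests further down: kernels are applied in order within a single batch, so a second identical NRD excess in the same block sees the first at the same height and diff = 0, which is why a block containing both tx1 and tx2 fails in the tests below. A compact restatement (nrd_ok is illustrative, not chain API):

// Restating the apply_kernel_rules check with the heights used by the tests
// below (first NRD kernel lands at height 9).
fn nrd_ok(prev_height: u64, new_height: u64, relative_height: u64) -> bool {
    new_height.saturating_sub(prev_height) >= relative_height
}

fn main() {
    assert!(!nrd_ok(9, 9, 2)); // same block: always rejected for relative_height >= 1
    assert!(!nrd_ok(9, 10, 2)); // one block later: still locked
    assert!(nrd_ok(9, 11, 2)); // two blocks later: lock satisfied
    assert!(nrd_ok(9, 10, 1)); // relative_height = 1 only excludes the same block
}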
@@ -303,7 +303,7 @@ impl OutputRoots {
 }

 /// Minimal struct representing a known MMR position and associated block height.
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub struct CommitPos {
     /// MMR position
     pub pos: u64,
@@ -530,13 +530,11 @@ fn longer_fork() {
 fn spend_rewind_spend() {
     global::set_local_chain_type(ChainTypes::AutomatedTesting);
     util::init_test_logger();
-    clean_output_dir(".grin_spend_rewind_spend");
+    let chain_dir = ".grin_spend_rewind_spend";
+    clean_output_dir(chain_dir);

     {
-        let chain = init_chain(
-            ".grin_spend_rewind_spend",
-            pow::mine_genesis_block().unwrap(),
-        );
+        let chain = init_chain(chain_dir, pow::mine_genesis_block().unwrap());
         let prev = chain.head_header().unwrap();
         let kc = ExtKeychain::from_random_seed(false).unwrap();
         let pb = ProofBuilder::new(&kc);

@@ -601,7 +599,7 @@ fn spend_rewind_spend() {
    }
 }

-    clean_output_dir(".grin_spend_rewind_spend");
+    clean_output_dir(chain_dir);
 }

 #[test]
chain/tests/nrd_validation_rules.rs (new file, 400 lines)
@ -0,0 +1,400 @@
|
|||
// Copyright 2020 The Grin Developers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod chain_test_helper;
|
||||
|
||||
use grin_chain as chain;
|
||||
use grin_core as core;
|
||||
use grin_keychain as keychain;
|
||||
use grin_util as util;
|
||||
|
||||
use self::chain_test_helper::{clean_output_dir, genesis_block, init_chain};
|
||||
use crate::chain::{Chain, Error, Options};
|
||||
use crate::core::core::{
|
||||
Block, BlockHeader, KernelFeatures, NRDRelativeHeight, Transaction, TxKernel,
|
||||
};
|
||||
use crate::core::libtx::{aggsig, build, reward, ProofBuilder};
|
||||
use crate::core::{consensus, global, pow};
|
||||
use crate::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Identifier, Keychain};
|
||||
use chrono::Duration;
|
||||
|
||||
fn build_block<K>(
|
||||
chain: &Chain,
|
||||
keychain: &K,
|
||||
key_id: &Identifier,
|
||||
txs: Vec<Transaction>,
|
||||
) -> Result<Block, Error>
|
||||
where
|
||||
K: Keychain,
|
||||
{
|
||||
let prev = chain.head_header()?;
|
||||
build_block_from_prev(&prev, chain, keychain, key_id, txs)
|
||||
}
|
||||
|
||||
fn build_block_from_prev<K>(
|
||||
prev: &BlockHeader,
|
||||
chain: &Chain,
|
||||
keychain: &K,
|
||||
key_id: &Identifier,
|
||||
txs: Vec<Transaction>,
|
||||
) -> Result<Block, Error>
|
||||
where
|
||||
K: Keychain,
|
||||
{
|
||||
let next_header_info =
|
||||
consensus::next_difficulty(prev.height, chain.difficulty_iter().unwrap());
|
||||
let fee = txs.iter().map(|x| x.fee()).sum();
|
||||
let reward =
|
||||
reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap();
|
||||
|
||||
let mut block = Block::new(prev, txs, next_header_info.clone().difficulty, reward)?;
|
||||
|
||||
block.header.timestamp = prev.timestamp + Duration::seconds(60);
|
||||
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
|
||||
|
||||
chain.set_txhashset_roots(&mut block)?;
|
||||
|
||||
block.header.pow.proof.edge_bits = global::min_edge_bits();
|
||||
pow::pow_size(
|
||||
&mut block.header,
|
||||
next_header_info.difficulty,
|
||||
global::proofsize(),
|
||||
global::min_edge_bits(),
|
||||
)
|
||||
.unwrap();
|
||||
Ok(block)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_block_nrd_validation() -> Result<(), Error> {
|
||||
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
|
||||
global::set_local_nrd_enabled(true);
|
||||
|
||||
util::init_test_logger();
|
||||
|
||||
let chain_dir = ".grin.nrd_kernel";
|
||||
clean_output_dir(chain_dir);
|
||||
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let genesis = genesis_block(&keychain);
|
||||
let chain = init_chain(chain_dir, genesis.clone());
|
||||
|
||||
for n in 1..9 {
|
||||
let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier();
|
||||
let block = build_block(&chain, &keychain, &key_id, vec![])?;
|
||||
chain.process_block(block, Options::NONE)?;
|
||||
}
|
||||
|
||||
assert_eq!(chain.head()?.height, 8);
|
||||
|
||||
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
|
||||
fee: 20000,
|
||||
relative_height: NRDRelativeHeight::new(2)?,
|
||||
});
|
||||
|
||||
// // Construct the message to be signed.
|
||||
let msg = kernel.msg_to_sign().unwrap();
|
||||
|
||||
// // Generate a kernel with public excess and associated signature.
|
||||
let excess = BlindingFactor::rand(&keychain.secp());
|
||||
let skey = excess.secret_key(&keychain.secp()).unwrap();
|
||||
kernel.excess = keychain.secp().commit(0, skey).unwrap();
|
||||
let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
|
||||
kernel.excess_sig =
|
||||
aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
|
||||
kernel.verify().unwrap();
|
||||
|
||||
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
|
||||
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
|
||||
|
||||
let tx1 = build::transaction_with_kernel(
|
||||
vec![
|
||||
build::coinbase_input(consensus::REWARD, key_id1.clone()),
|
||||
build::output(consensus::REWARD - 20000, key_id2.clone()),
|
||||
],
|
||||
kernel.clone(),
|
||||
excess.clone(),
|
||||
&keychain,
|
||||
&builder,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let tx2 = build::transaction_with_kernel(
|
||||
vec![
|
||||
build::input(consensus::REWARD - 20000, key_id2.clone()),
|
||||
build::output(consensus::REWARD - 40000, key_id3.clone()),
|
||||
],
|
||||
kernel.clone(),
|
||||
excess.clone(),
|
||||
&keychain,
|
||||
&builder,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier();
|
||||
let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier();
|
||||
let key_id11 = ExtKeychainPath::new(1, 11, 0, 0, 0).to_identifier();
|
||||
|
||||
// Block containing both tx1 and tx2 is invalid.
|
||||
// Not valid for two duplicate NRD kernels to co-exist in same block.
|
||||
// Jump through some hoops to build an invalid block by disabling the feature flag.
|
||||
// TODO - We need a good way of building invalid stuff in tests.
|
||||
let block_invalid_9 = {
|
||||
global::set_local_nrd_enabled(false);
|
||||
let block = build_block(&chain, &keychain, &key_id9, vec![tx1.clone(), tx2.clone()])?;
|
||||
global::set_local_nrd_enabled(true);
|
||||
block
|
||||
};
|
||||
assert!(chain.process_block(block_invalid_9, Options::NONE).is_err());
|
||||
|
||||
assert_eq!(chain.head()?.height, 8);
|
||||
|
||||
// Block containing tx1 is valid.
|
||||
let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()])?;
|
||||
chain.process_block(block_valid_9, Options::NONE)?;
|
||||
|
||||
// Block at height 10 is invalid if it contains tx2 due to NRD rule (relative_height=2).
|
||||
// Jump through some hoops to build an invalid block by disabling the feature flag.
|
||||
// TODO - We need a good way of building invalid stuff in tests.
|
||||
let block_invalid_10 = {
|
||||
global::set_local_nrd_enabled(false);
|
||||
let block = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()])?;
|
||||
global::set_local_nrd_enabled(true);
|
||||
block
|
||||
};
|
||||
|
||||
assert!(chain
|
||||
.process_block(block_invalid_10, Options::NONE)
|
||||
.is_err());
|
||||
|
||||
// Block at height 10 is valid if we do not include tx2.
|
||||
let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![])?;
|
||||
chain.process_block(block_valid_10, Options::NONE)?;
|
||||
|
||||
// Block at height 11 is valid with tx2 as NRD rule is met (relative_height=2).
|
||||
let block_valid_11 = build_block(&chain, &keychain, &key_id11, vec![tx2.clone()])?;
|
||||
chain.process_block(block_valid_11, Options::NONE)?;
|
||||
|
||||
clean_output_dir(chain_dir);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> {
|
||||
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
|
||||
global::set_local_nrd_enabled(true);
|
||||
|
||||
util::init_test_logger();
|
||||
|
||||
let chain_dir = ".grin.nrd_kernel_relative_height_1";
|
||||
clean_output_dir(chain_dir);
|
||||
|
||||
let keychain = ExtKeychain::from_random_seed(false).unwrap();
|
||||
let builder = ProofBuilder::new(&keychain);
|
||||
let genesis = genesis_block(&keychain);
|
||||
let chain = init_chain(chain_dir, genesis.clone());
|
||||
|
||||
for n in 1..9 {
|
||||
let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier();
|
||||
let block = build_block(&chain, &keychain, &key_id, vec![])?;
|
||||
chain.process_block(block, Options::NONE)?;
|
||||
}
|
||||
|
||||
assert_eq!(chain.head()?.height, 8);
|
||||
|
||||
let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
|
||||
fee: 20000,
|
||||
relative_height: NRDRelativeHeight::new(1)?,
|
||||
});
|
||||
|
||||
// // Construct the message to be signed.
|
||||
let msg = kernel.msg_to_sign().unwrap();
|
||||
|
||||
// // Generate a kernel with public excess and associated signature.
|
||||
let excess = BlindingFactor::rand(&keychain.secp());
|
||||
let skey = excess.secret_key(&keychain.secp()).unwrap();
|
||||
kernel.excess = keychain.secp().commit(0, skey).unwrap();
|
||||
let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
|
||||
kernel.excess_sig =
|
||||
aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
|
||||
kernel.verify().unwrap();
|
||||
|
||||
let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
|
||||
let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
|
||||
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
|
||||
|
||||
let tx1 = build::transaction_with_kernel(
|
||||
vec![
|
||||
build::coinbase_input(consensus::REWARD, key_id1.clone()),
|
||||
build::output(consensus::REWARD - 20000, key_id2.clone()),
|
||||
],
|
||||
kernel.clone(),
|
||||
excess.clone(),
|
||||
&keychain,
|
||||
&builder,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let tx2 = build::transaction_with_kernel(
|
||||
vec![
|
||||
build::input(consensus::REWARD - 20000, key_id2.clone()),
|
||||
build::output(consensus::REWARD - 40000, key_id3.clone()),
|
||||
],
|
||||
kernel.clone(),
|
||||
excess.clone(),
|
||||
&keychain,
|
||||
&builder,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier();
|
||||
let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier();
|
||||
|
||||
// Block containing both tx1 and tx2 is invalid.
|
||||
// Not valid for two duplicate NRD kernels to co-exist in same block.
|
||||
// Jump through some hoops here to build an "invalid" block.
|
||||
// TODO - We need a good way of building invalid stuff for tests.
|
||||
let block_invalid_9 = {
|
||||
global::set_local_nrd_enabled(false);
|
||||
let block = build_block(&chain, &keychain, &key_id9, vec![tx1.clone(), tx2.clone()])?;
|
||||
global::set_local_nrd_enabled(true);
|
||||
block
|
||||
};
|
||||
|
||||
assert!(chain.process_block(block_invalid_9, Options::NONE).is_err());
|
||||
|
||||
assert_eq!(chain.head()?.height, 8);
|
||||
|
||||
// Block containing tx1 is valid.
|
||||
let block_valid_9 = build_block(&chain, &keychain, &key_id9, vec![tx1.clone()])?;
|
||||
chain.process_block(block_valid_9, Options::NONE)?;
|
||||
|
||||
// Block at height 10 is valid with tx2 as NRD rule is met (relative_height=1).
|
||||
let block_valid_10 = build_block(&chain, &keychain, &key_id10, vec![tx2.clone()])?;
|
||||
chain.process_block(block_valid_10, Options::NONE)?;
|
||||
|
||||
clean_output_dir(chain_dir);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
fn process_block_nrd_validation_fork() -> Result<(), Error> {
	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
	global::set_local_nrd_enabled(true);

	util::init_test_logger();

	let chain_dir = ".grin.nrd_kernel_fork";
	clean_output_dir(chain_dir);

	let keychain = ExtKeychain::from_random_seed(false).unwrap();
	let builder = ProofBuilder::new(&keychain);
	let genesis = genesis_block(&keychain);
	let chain = init_chain(chain_dir, genesis.clone());

	for n in 1..9 {
		let key_id = ExtKeychainPath::new(1, n, 0, 0, 0).to_identifier();
		let block = build_block(&chain, &keychain, &key_id, vec![])?;
		chain.process_block(block, Options::NONE)?;
	}

	let header_8 = chain.head_header()?;
	assert_eq!(header_8.height, 8);

	let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
		fee: 20000,
		relative_height: NRDRelativeHeight::new(2)?,
	});

	// Construct the message to be signed.
	let msg = kernel.msg_to_sign().unwrap();

	// Generate a kernel with public excess and associated signature.
	let excess = BlindingFactor::rand(&keychain.secp());
	let skey = excess.secret_key(&keychain.secp()).unwrap();
	kernel.excess = keychain.secp().commit(0, skey).unwrap();
	let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
	kernel.excess_sig =
		aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
	kernel.verify().unwrap();

	let key_id1 = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
	let key_id2 = ExtKeychainPath::new(1, 2, 0, 0, 0).to_identifier();
	let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();

	let tx1 = build::transaction_with_kernel(
		vec![
			build::coinbase_input(consensus::REWARD, key_id1.clone()),
			build::output(consensus::REWARD - 20000, key_id2.clone()),
		],
		kernel.clone(),
		excess.clone(),
		&keychain,
		&builder,
	)
	.unwrap();

	let tx2 = build::transaction_with_kernel(
		vec![
			build::input(consensus::REWARD - 20000, key_id2.clone()),
			build::output(consensus::REWARD - 40000, key_id3.clone()),
		],
		kernel.clone(),
		excess.clone(),
		&keychain,
		&builder,
	)
	.unwrap();

	let key_id9 = ExtKeychainPath::new(1, 9, 0, 0, 0).to_identifier();
	let key_id10 = ExtKeychainPath::new(1, 10, 0, 0, 0).to_identifier();
	let key_id11 = ExtKeychainPath::new(1, 11, 0, 0, 0).to_identifier();

	// Block containing tx1 is valid.
	let block_valid_9 =
		build_block_from_prev(&header_8, &chain, &keychain, &key_id9, vec![tx1.clone()])?;
	chain.process_block(block_valid_9.clone(), Options::NONE)?;

	// Block at height 10 is valid if we do not include tx2.
	let block_valid_10 =
		build_block_from_prev(&block_valid_9.header, &chain, &keychain, &key_id10, vec![])?;
	chain.process_block(block_valid_10, Options::NONE)?;

	// Process an alternative "fork" block also at height 9.
	// The "other" block at height 9 should not affect this one in terms of NRD kernels
	// as the recent kernel index should be rewound.
	let block_valid_9b =
		build_block_from_prev(&header_8, &chain, &keychain, &key_id9, vec![tx1.clone()])?;
	chain.process_block(block_valid_9b.clone(), Options::NONE)?;

	// Process an alternative block at height 10 on this same fork.
	let block_valid_10b =
		build_block_from_prev(&block_valid_9b.header, &chain, &keychain, &key_id10, vec![])?;
	chain.process_block(block_valid_10b.clone(), Options::NONE)?;

	// Block at height 11 is valid with tx2 as the NRD rule is met (relative_height=2).
	let block_valid_11b = build_block_from_prev(
		&block_valid_10b.header,
		&chain,
		&keychain,
		&key_id11,
		vec![tx2.clone()],
	)?;
	chain.process_block(block_valid_11b, Options::NONE)?;

	clean_output_dir(chain_dir);
	Ok(())
}

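The fork scenario above leans on the recent kernel index being rewound when blocks are processed on an alternative branch. A sketch of that rewind step, assuming the `rewind(batch, commit, pos)` semantics demonstrated by the store tests below; the reorg plumbing around it (function name, `Batch` signature) is hypothetical:

	// Hypothetical reorg step: before replaying blocks from a fork, rewind the
	// NRD kernel index so that only entries at or below the fork point remain.
	fn rewind_kernel_index_to_fork_point<I: RewindableListIndex>(
		index: &I,
		batch: &Batch<'_>, // db batch type assumed from the store API
		commit: Commitment,
		fork_point_pos: u64,
	) -> Result<(), Error> {
		// Entries with pos > fork_point_pos are removed; rewinding past the end
		// of the list (or back to 0) is safe, as the rewind tests below show.
		index.rewind(batch, commit, fork_point_pos)
	}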
589	chain/tests/store_kernel_pos_index.rs	(new file)
@@ -0,0 +1,589 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::chain::linked_list::{ListIndex, ListWrapper, PruneableListIndex, RewindableListIndex};
use crate::chain::store::{self, ChainStore};
use crate::chain::types::CommitPos;
use crate::core::global;
use crate::util::secp::pedersen::Commitment;
use grin_chain as chain;
use grin_core as core;
use grin_store;
use grin_util as util;
mod chain_test_helper;
use self::chain_test_helper::clean_output_dir;
use crate::grin_store::Error;

fn setup_test() {
	util::init_test_logger();
	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
}

#[test]
fn test_store_kernel_idx() {
	setup_test();
	let chain_dir = ".grin_idx_1";
	clean_output_dir(chain_dir);

	let commit = Commitment::from_vec(vec![]);

	let store = ChainStore::new(chain_dir).unwrap();
	let batch = store.batch().unwrap();
	let index = store::nrd_recent_kernel_index();

	assert_eq!(index.peek_pos(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit), Ok(None));

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 2, height: 2 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })),
	);

	// Pos must always increase.
	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Err(Error::OtherErr("pos must be increasing".into())),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
		Err(Error::OtherErr("pos must be increasing".into())),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })),
	);

	assert_eq!(
		index.pop_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 2, height: 2 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })),
	);

	assert_eq!(
		index.pop_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 2, height: 2 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })),
	);

	assert_eq!(
		index.pop_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 2, height: 2 })),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(
		index.pop_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(index.peek_pos(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit), Ok(None));

	// Cleanup chain directory
	clean_output_dir(chain_dir);
}

#[test]
fn test_store_kernel_idx_pop_back() {
	setup_test();
	let chain_dir = ".grin_idx_2";
	clean_output_dir(chain_dir);

	let commit = Commitment::from_vec(vec![]);

	let store = ChainStore::new(chain_dir).unwrap();
	let batch = store.batch().unwrap();
	let index = store::nrd_recent_kernel_index();

	assert_eq!(index.peek_pos(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit), Ok(None));

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(
		index.pop_pos_back(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(index.peek_pos(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit), Ok(None));

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })),
	);

	assert_eq!(
		index.pop_pos_back(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 2 })),
	);

	assert_eq!(
		index.pop_pos_back(&batch, commit),
		Ok(Some(CommitPos { pos: 2, height: 2 })),
	);

	assert_eq!(
		index.peek_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 3, height: 3 }
		})),
	);

	assert_eq!(
		index.pop_pos_back(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(index.peek_pos(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit), Ok(None));

	clean_output_dir(chain_dir);
}

#[test]
fn test_store_kernel_idx_rewind() {
	setup_test();
	let chain_dir = ".grin_idx_3";
	clean_output_dir(chain_dir);

	let commit = Commitment::from_vec(vec![]);

	let store = ChainStore::new(chain_dir).unwrap();
	let batch = store.batch().unwrap();
	let index = store::nrd_recent_kernel_index();

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })),
	);

	assert_eq!(index.rewind(&batch, commit, 1), Ok(()),);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	// Check we can safely noop rewind.
	assert_eq!(index.rewind(&batch, commit, 2), Ok(()),);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(index.rewind(&batch, commit, 1), Ok(()),);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	// Check we can rewind back to 0.
	assert_eq!(index.rewind(&batch, commit, 0), Ok(()),);

	assert_eq!(index.get_list(&batch, commit), Ok(None),);

	assert_eq!(index.rewind(&batch, commit, 0), Ok(()),);

	// Now check we can rewind past the end of a list safely.

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
		Ok(()),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.pop_pos_back(&batch, commit),
		Ok(Some(CommitPos { pos: 1, height: 1 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 2 })),
	);

	assert_eq!(index.rewind(&batch, commit, 1), Ok(()),);

	assert_eq!(index.get_list(&batch, commit), Ok(None),);

	clean_output_dir(chain_dir);
}

#[test]
fn test_store_kernel_idx_multiple_commits() {
	setup_test();
	let chain_dir = ".grin_idx_4";
	clean_output_dir(chain_dir);

	let commit = Commitment::from_vec(vec![]);
	let commit2 = Commitment::from_vec(vec![1]);

	let store = ChainStore::new(chain_dir).unwrap();
	let batch = store.batch().unwrap();
	let index = store::nrd_recent_kernel_index();

	assert_eq!(index.get_list(&batch, commit), Ok(None));
	assert_eq!(index.get_list(&batch, commit2), Ok(None));

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
		Ok(()),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(index.get_list(&batch, commit2), Ok(None));

	assert_eq!(
		index.push_pos(&batch, commit2, CommitPos { pos: 2, height: 2 }),
		Ok(()),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(
		index.get_list(&batch, commit2),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 2, height: 2 }
		})),
	);

	assert_eq!(
		index.push_pos(&batch, commit, CommitPos { pos: 3, height: 3 }),
		Ok(()),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Multi { head: 3, tail: 1 })),
	);

	assert_eq!(
		index.get_list(&batch, commit2),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 2, height: 2 }
		})),
	);

	assert_eq!(
		index.pop_pos(&batch, commit),
		Ok(Some(CommitPos { pos: 3, height: 3 })),
	);

	assert_eq!(
		index.get_list(&batch, commit),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 1, height: 1 }
		})),
	);

	assert_eq!(
		index.get_list(&batch, commit2),
		Ok(Some(ListWrapper::Single {
			pos: CommitPos { pos: 2, height: 2 }
		})),
	);

	clean_output_dir(chain_dir);
}

#[test]
fn test_store_kernel_idx_clear() -> Result<(), Error> {
	setup_test();
	let chain_dir = ".grin_idx_clear";
	clean_output_dir(chain_dir);

	let commit = Commitment::from_vec(vec![]);
	let commit2 = Commitment::from_vec(vec![1]);

	let store = ChainStore::new(chain_dir)?;
	let index = store::nrd_recent_kernel_index();

	// Add a couple of single entries to the index and commit the batch.
	{
		let batch = store.batch()?;
		assert_eq!(index.peek_pos(&batch, commit), Ok(None));
		assert_eq!(index.get_list(&batch, commit), Ok(None));

		assert_eq!(
			index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
			Ok(()),
		);

		assert_eq!(
			index.push_pos(
				&batch,
				commit2,
				CommitPos {
					pos: 10,
					height: 10
				}
			),
			Ok(()),
		);

		assert_eq!(
			index.peek_pos(&batch, commit),
			Ok(Some(CommitPos { pos: 1, height: 1 })),
		);

		assert_eq!(
			index.get_list(&batch, commit),
			Ok(Some(ListWrapper::Single {
				pos: CommitPos { pos: 1, height: 1 }
			})),
		);

		assert_eq!(
			index.peek_pos(&batch, commit2),
			Ok(Some(CommitPos {
				pos: 10,
				height: 10
			})),
		);

		assert_eq!(
			index.get_list(&batch, commit2),
			Ok(Some(ListWrapper::Single {
				pos: CommitPos {
					pos: 10,
					height: 10
				}
			})),
		);

		batch.commit()?;
	}

	// Clear the index and confirm everything was deleted as expected.
	{
		let batch = store.batch()?;
		assert_eq!(index.clear(&batch), Ok(()));
		assert_eq!(index.peek_pos(&batch, commit), Ok(None));
		assert_eq!(index.get_list(&batch, commit), Ok(None));
		assert_eq!(index.peek_pos(&batch, commit2), Ok(None));
		assert_eq!(index.get_list(&batch, commit2), Ok(None));
		batch.commit()?;
	}

	// Add multiple entries to the index, commit the batch.
	{
		let batch = store.batch()?;
		assert_eq!(
			index.push_pos(&batch, commit, CommitPos { pos: 1, height: 1 }),
			Ok(()),
		);
		assert_eq!(
			index.push_pos(&batch, commit, CommitPos { pos: 2, height: 2 }),
			Ok(()),
		);
		assert_eq!(
			index.peek_pos(&batch, commit),
			Ok(Some(CommitPos { pos: 2, height: 2 })),
		);
		assert_eq!(
			index.get_list(&batch, commit),
			Ok(Some(ListWrapper::Multi { head: 2, tail: 1 })),
		);
		batch.commit()?;
	}

	// Clear the index and confirm everything was deleted as expected.
	{
		let batch = store.batch()?;
		assert_eq!(index.clear(&batch), Ok(()));
		assert_eq!(index.peek_pos(&batch, commit), Ok(None));
		assert_eq!(index.get_list(&batch, commit), Ok(None));
		batch.commit()?;
	}

	clean_output_dir(chain_dir);
	Ok(())
}

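A note on the shapes asserted throughout the tests above: each commitment maps either to a single position or to a linked list bounded by head and tail positions. The enum below is an inferred sketch of that wrapper, reconstructed from the assertions rather than copied from the chain's linked_list module:

	// Inferred from the test assertions (CommitPos as in the tests): a sketch
	// of the per-commitment list wrapper, not the store's actual definition.
	enum ListWrapper {
		Single { pos: CommitPos },      // exactly one entry for this commitment
		Multi { head: u64, tail: u64 }, // list bounded by newest (head) and oldest (tail) pos
	}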
@@ -909,6 +909,36 @@ impl TransactionBody {
		Ok(())
	}

	// It is never valid to have multiple duplicate NRD kernels (by public excess)
	// in the same transaction or block. We check this here.
	// We skip this check if the NRD feature is not enabled.
	fn verify_no_nrd_duplicates(&self) -> Result<(), Error> {
		if !global::is_nrd_enabled() {
			return Ok(());
		}

		let mut nrd_excess: Vec<Commitment> = self
			.kernels
			.iter()
			.filter(|x| match x.features {
				KernelFeatures::NoRecentDuplicate { .. } => true,
				_ => false,
			})
			.map(|x| x.excess())
			.collect();

		// Sort and dedup, then compare lengths to look for duplicates.
		nrd_excess.sort();
		let original_count = nrd_excess.len();
		nrd_excess.dedup();
		let dedup_count = nrd_excess.len();
		if original_count == dedup_count {
			Ok(())
		} else {
			Err(Error::InvalidNRDRelativeHeight)
		}
	}
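The duplicate detection above uses a sort-then-dedup length comparison. The same idiom in isolation, as a generic sketch (not code from this change):

	// Returns true if `items` contains any duplicate values.
	fn has_duplicates<T: Ord + Clone>(items: &[T]) -> bool {
		let mut sorted = items.to_vec();
		sorted.sort();
		let original_count = sorted.len();
		sorted.dedup(); // removes consecutive equal elements in the sorted vec
		sorted.len() != original_count // any removal implies a duplicate existed
	}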
	// Verify that inputs|outputs|kernels are sorted in lexicographical order
	// and that there are no duplicates (they are all unique within this transaction).
	fn verify_sorted(&self) -> Result<(), Error> {

@@ -970,6 +1000,7 @@
	/// * kernel signature verification
	pub fn validate_read(&self, weighting: Weighting) -> Result<(), Error> {
		self.verify_weight(weighting)?;
		self.verify_no_nrd_duplicates()?;
		self.verify_sorted()?;
		self.verify_cut_through()?;
		Ok(())

@@ -1227,8 +1258,8 @@
		weighting: Weighting,
		verifier: Arc<RwLock<dyn VerifierCache>>,
	) -> Result<(), Error> {
-		self.body.validate(weighting, verifier)?;
		self.body.verify_features()?;
+		self.body.validate(weighting, verifier)?;
		self.verify_kernel_sums(self.overage(), self.offset.clone())?;
		Ok(())
	}

@@ -29,6 +29,7 @@ use grin_util as util;
use std::cmp::Reverse;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use util::secp::pedersen::Commitment;
use util::static_secp_instance;

pub struct Pool<B, V>

@@ -70,6 +71,19 @@ where
			.map(|x| x.tx.clone())
	}

	/// Query the tx pool for an individual tx matching the given public excess.
	/// Used for checking for duplicate NRD kernels in the txpool.
	pub fn retrieve_tx_by_kernel_excess(&self, excess: Commitment) -> Option<Transaction> {
		for x in &self.entries {
			for k in x.tx.kernels() {
				if k.excess() == excess {
					return Some(x.tx.clone());
				}
			}
		}
		None
	}

	/// Query the tx pool for an individual tx matching the given kernel hash.
	pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Option<Transaction> {
		for x in &self.entries {

@@ -197,7 +211,6 @@ where
		// Validate the aggregated tx (existing pool + new tx), ignoring tx weight limits.
		// Validate against known chain state at the provided header.
		self.validate_raw_tx(&agg_tx, header, Weighting::NoLimit)?;

		// If we get here successfully then we can safely add the entry to the pool.
		self.log_pool_add(&entry, header);
		self.entries.push(entry);

@@ -425,6 +438,7 @@ where
		tx_buckets.into_iter().flat_map(|x| x.raw_txs).collect()
	}

	/// TODO - This is kernel based. How does this interact with NRD?
	pub fn find_matching_transactions(&self, kernels: &[TxKernel]) -> Vec<Transaction> {
		// While the inputs/outputs can be cut-through, the kernel will stay intact.
		// In order to deaggregate a tx we look for txs with the same kernel.

@@ -160,13 +160,18 @@ where
		stem: bool,
		header: &BlockHeader,
	) -> Result<(), PoolError> {
-		// Quick check to deal with common case of seeing the *same* tx
-		// broadcast from multiple peers simultaneously.
-		if !stem && self.txpool.contains_tx(tx.hash()) {
+		// Quick check for duplicate txs.
+		// Our stempool is private and we do not want to reveal anything about the txs contained.
+		// If this is a stem tx and is already present in the stempool then fluff by adding to the txpool.
+		// Otherwise if already present in the txpool return a "duplicate tx" error.
+		if stem && self.stempool.contains_tx(tx.hash()) {
+			return self.add_to_pool(src, tx, false, header);
+		} else if self.txpool.contains_tx(tx.hash()) {
			return Err(PoolError::DuplicateTx);
		}
+
+		// Check this tx is valid based on current header version.
+		// NRD kernels are only valid post HF3 and if the NRD feature is enabled.
+		self.verify_kernel_variants(&tx, header)?;

		// Do we have the capacity to accept this transaction?

@@ -195,20 +200,19 @@ where
			tx,
		};

-		// If not stem then we are fluff.
-		// If this is a stem tx then attempt to stem.
-		// Any problems during stem, fall back to fluff.
-		if !stem
-			|| self
-				.add_to_stempool(entry.clone(), header)
-				.and_then(|_| self.adapter.stem_tx_accepted(&entry))
-				.is_err()
-		{
-			self.add_to_txpool(entry.clone(), header)?;
-			self.add_to_reorg_cache(entry.clone());
-			self.adapter.tx_accepted(&entry);
-		}
+		// If this is a stem tx then attempt to add it to the stempool.
+		// If the adapter fails to accept the new stem tx then fall back to fluff via the txpool.
+		if stem {
+			self.add_to_stempool(entry.clone(), header)?;
+			if self.adapter.stem_tx_accepted(&entry).is_ok() {
+				return Ok(());
+			}
+		}
+
+		self.add_to_txpool(entry.clone(), header)?;
+		self.add_to_reorg_cache(entry.clone());
+		self.adapter.tx_accepted(&entry);

		// Transaction passed all the checks but we have to make space for it
		if evict {
			self.evict_from_txpool();

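The rework above changes how add_to_pool routes stem vs fluff txs. A compact model of the new decision flow, illustrative only (the names mirror the surrounding diff, but this is not the pool's actual code):

	// Minimal model of the routing implemented above.
	enum Route {
		Fluff,           // add to txpool (public broadcast)
		Stem,            // add to stempool; fall back to Fluff if the adapter rejects it
		RejectDuplicate, // tx already present in the txpool
	}

	fn route_tx(stem: bool, in_stempool: bool, in_txpool: bool) -> Route {
		if stem && in_stempool {
			// Seeing a known stem tx again: fluff it rather than reveal
			// anything about the private stempool contents.
			Route::Fluff
		} else if in_txpool {
			Route::RejectDuplicate
		} else if stem {
			Route::Stem
		} else {
			Route::Fluff
		}
	}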
@@ -227,6 +227,9 @@ pub enum PoolError {
	/// NRD kernels are not valid if disabled locally via "feature flag".
	#[fail(display = "NRD kernel not enabled")]
	NRDKernelNotEnabled,
	/// NRD kernels are not valid if the relative_height rule is not met.
	#[fail(display = "NRD kernel relative height")]
	NRDKernelRelativeHeight,
	/// Other kinds of error (not yet pulled out into meaningful errors).
	#[fail(display = "General pool error {}", _0)]
	Other(String),

@@ -234,7 +237,10 @@ pub enum PoolError {

impl From<transaction::Error> for PoolError {
	fn from(e: transaction::Error) -> PoolError {
-		PoolError::InvalidTx(e)
+		match e {
+			transaction::Error::InvalidNRDRelativeHeight => PoolError::NRDKernelRelativeHeight,
+			e @ _ => PoolError::InvalidTx(e),
+		}
	}
}

@@ -19,12 +19,12 @@ use self::chain::Chain;
use self::core::consensus;
use self::core::core::hash::Hash;
use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache};
-use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction};
+use self::core::core::{Block, BlockHeader, BlockSums, KernelFeatures, Transaction, TxKernel};
use self::core::genesis;
use self::core::global;
use self::core::libtx::{build, reward, ProofBuilder};
use self::core::pow;
-use self::keychain::{ExtKeychain, ExtKeychainPath, Keychain};
+use self::keychain::{BlindingFactor, ExtKeychain, ExtKeychainPath, Keychain};
use self::pool::types::*;
use self::pool::TransactionPool;
use self::util::RwLock;

@@ -127,9 +127,11 @@ impl BlockChain for ChainAdapter {
	}

	fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
-		self.chain
-			.validate_tx(tx)
-			.map_err(|_| PoolError::Other("failed to validate tx".into()))
+		self.chain.validate_tx(tx).map_err(|e| match e.kind() {
+			chain::ErrorKind::Transaction(txe) => txe.into(),
+			chain::ErrorKind::NRDRelativeHeight => PoolError::NRDKernelRelativeHeight,
+			_ => PoolError::Other("failed to validate tx".into()),
+		})
	}

	fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> {

@@ -254,6 +256,38 @@ where
	.unwrap()
}

pub fn test_transaction_with_kernel<K>(
	keychain: &K,
	input_values: Vec<u64>,
	output_values: Vec<u64>,
	kernel: TxKernel,
	excess: BlindingFactor,
) -> Transaction
where
	K: Keychain,
{
	let mut tx_elements = Vec::new();

	for input_value in input_values {
		let key_id = ExtKeychain::derive_key_id(1, input_value as u32, 0, 0, 0);
		tx_elements.push(build::input(input_value, key_id));
	}

	for output_value in output_values {
		let key_id = ExtKeychain::derive_key_id(1, output_value as u32, 0, 0, 0);
		tx_elements.push(build::output(output_value, key_id));
	}

	build::transaction_with_kernel(
		tx_elements,
		kernel,
		excess,
		keychain,
		&ProofBuilder::new(keychain),
	)
	.unwrap()
}

pub fn test_source() -> TxSource {
	TxSource::Broadcast
}

265	pool/tests/nrd_kernel_relative_height.rs	(new file)
@@ -0,0 +1,265 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod common;

use self::core::consensus;
use self::core::core::hash::Hashed;
use self::core::core::verifier_cache::LruVerifierCache;
use self::core::core::{HeaderVersion, KernelFeatures, NRDRelativeHeight, TxKernel};
use self::core::global;
use self::core::libtx::aggsig;
use self::keychain::{BlindingFactor, ExtKeychain, Keychain};
use self::pool::types::PoolError;
use self::util::RwLock;
use crate::common::*;
use grin_core as core;
use grin_keychain as keychain;
use grin_pool as pool;
use grin_util as util;
use std::sync::Arc;

#[test]
fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
	util::init_test_logger();
	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
	global::set_local_nrd_enabled(true);

	let keychain: ExtKeychain = Keychain::from_random_seed(false).unwrap();

	let db_root = "target/.nrd_kernel_relative_height";
	clean_output_dir(db_root.into());

	let genesis = genesis_block(&keychain);
	let chain = Arc::new(init_chain(db_root, genesis));
	let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new()));

	// Initialize a new pool with our chain adapter.
	let mut pool = init_transaction_pool(
		Arc::new(ChainAdapter {
			chain: chain.clone(),
		}),
		verifier_cache,
	);

	add_some_blocks(&chain, 3, &keychain);

	let header_1 = chain.get_header_by_height(1).unwrap();

	// Now create a tx to spend an early coinbase (now matured).
	// Provides us with some useful outputs to test with.
	let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);

	// Mine that initial tx so we can spend it with multiple txs.
	add_block(&chain, vec![initial_tx], &keychain);

	add_some_blocks(&chain, 5, &keychain);

	let header = chain.head_header().unwrap();

	assert_eq!(header.height, consensus::TESTING_THIRD_HARD_FORK);
	assert_eq!(header.version, HeaderVersion(4));

	let (tx1, tx2, tx3) = {
		let mut kernel = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
			fee: 6,
			relative_height: NRDRelativeHeight::new(2)?,
		});
		let msg = kernel.msg_to_sign().unwrap();

		// Generate a kernel with public excess and associated signature.
		let excess = BlindingFactor::rand(&keychain.secp());
		let skey = excess.secret_key(&keychain.secp()).unwrap();
		kernel.excess = keychain.secp().commit(0, skey).unwrap();
		let pubkey = &kernel.excess.to_pubkey(&keychain.secp()).unwrap();
		kernel.excess_sig =
			aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap();
		kernel.verify().unwrap();

		let tx1 = test_transaction_with_kernel(
			&keychain,
			vec![10, 20],
			vec![24],
			kernel.clone(),
			excess.clone(),
		);

		let tx2 = test_transaction_with_kernel(
			&keychain,
			vec![24],
			vec![18],
			kernel.clone(),
			excess.clone(),
		);

		// Now reuse the kernel excess for tx3 but with NRD relative_height=1 (and a different fee).
		let mut kernel_short = TxKernel::with_features(KernelFeatures::NoRecentDuplicate {
			fee: 3,
			relative_height: NRDRelativeHeight::new(1)?,
		});
		let msg_short = kernel_short.msg_to_sign().unwrap();
		kernel_short.excess = kernel.excess;
		kernel_short.excess_sig =
			aggsig::sign_with_blinding(&keychain.secp(), &msg_short, &excess, Some(&pubkey))
				.unwrap();
		kernel_short.verify().unwrap();

		let tx3 = test_transaction_with_kernel(
			&keychain,
			vec![18],
			vec![15],
			kernel_short.clone(),
			excess.clone(),
		);

		(tx1, tx2, tx3)
	};

	// Confirm we can successfully add tx1 with NRD kernel to the stempool.
	assert_eq!(
		pool.add_to_pool(test_source(), tx1.clone(), true, &header),
		Ok(()),
	);
	assert_eq!(pool.stempool.size(), 1);

	// Confirm we cannot add tx2 to the stempool while tx1 is in there (duplicate NRD kernels).
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), true, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	// Confirm we can successfully add tx1 with NRD kernel to the txpool,
	// removing the existing instance of tx1 from the stempool in the process.
	assert_eq!(
		pool.add_to_pool(test_source(), tx1.clone(), false, &header),
		Ok(()),
	);
	assert_eq!(pool.txpool.size(), 1);
	assert_eq!(pool.stempool.size(), 0);

	// Confirm we cannot add tx2 to the stempool while tx1 is in the txpool (duplicate NRD kernels).
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), true, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	// Confirm we cannot add tx2 to the txpool while tx1 is in there (duplicate NRD kernels).
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), false, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	assert_eq!(pool.total_size(), 1);
	assert_eq!(pool.txpool.size(), 1);
	assert_eq!(pool.stempool.size(), 0);

	let txs = pool.prepare_mineable_transactions().unwrap();
	assert_eq!(txs.len(), 1);

	// Mine a block containing tx1 from the txpool.
	add_block(&chain, txs, &keychain);
	let header = chain.head_header().unwrap();
	let block = chain.get_block(&header.hash()).unwrap();

	// Confirm the stempool/txpool is empty after reconciling the new block.
	pool.reconcile_block(&block)?;
	assert_eq!(pool.total_size(), 0);
	assert_eq!(pool.txpool.size(), 0);
	assert_eq!(pool.stempool.size(), 0);

	// Confirm we cannot add tx2 to the stempool with tx1 in the previous block (NRD relative_height=2).
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), true, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	// Confirm we cannot add tx2 to the txpool with tx1 in the previous block (NRD relative_height=2).
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), false, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	// Add another block so the NRD relative_height rule is now met.
	add_block(&chain, vec![], &keychain);
	let header = chain.head_header().unwrap();

	// Confirm we can now add tx2 to the stempool with the NRD relative_height rule met.
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), true, &header),
		Ok(())
	);
	assert_eq!(pool.total_size(), 0);
	assert_eq!(pool.txpool.size(), 0);
	assert_eq!(pool.stempool.size(), 1);

	// Confirm we cannot yet add tx3 to the stempool (NRD relative_height=1).
	assert_eq!(
		pool.add_to_pool(test_source(), tx3.clone(), true, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	// Confirm we can now add tx2 to the txpool with the NRD relative_height rule met.
	assert_eq!(
		pool.add_to_pool(test_source(), tx2.clone(), false, &header),
		Ok(())
	);

	// Confirm we cannot yet add tx3 to the txpool (NRD relative_height=1).
	assert_eq!(
		pool.add_to_pool(test_source(), tx3.clone(), false, &header),
		Err(PoolError::NRDKernelRelativeHeight)
	);

	assert_eq!(pool.total_size(), 1);
	assert_eq!(pool.txpool.size(), 1);
	assert_eq!(pool.stempool.size(), 0);

	let txs = pool.prepare_mineable_transactions().unwrap();
	assert_eq!(txs.len(), 1);

	// Mine a block containing tx2 from the txpool.
	add_block(&chain, txs, &keychain);
	let header = chain.head_header().unwrap();
	let block = chain.get_block(&header.hash()).unwrap();
	pool.reconcile_block(&block)?;

	assert_eq!(pool.total_size(), 0);
	assert_eq!(pool.txpool.size(), 0);
	assert_eq!(pool.stempool.size(), 0);

	// Confirm we can now add tx3 to the stempool with tx2 in the immediately previous block (NRD relative_height=1).
	assert_eq!(
		pool.add_to_pool(test_source(), tx3.clone(), true, &header),
		Ok(())
	);

	assert_eq!(pool.total_size(), 0);
	assert_eq!(pool.txpool.size(), 0);
	assert_eq!(pool.stempool.size(), 1);

	// Confirm we can now add tx3 to the txpool with tx2 in the immediately previous block (NRD relative_height=1).
	assert_eq!(
		pool.add_to_pool(test_source(), tx3.clone(), false, &header),
		Ok(())
	);

	assert_eq!(pool.total_size(), 1);
	assert_eq!(pool.txpool.size(), 1);
	assert_eq!(pool.stempool.size(), 0);

	// Cleanup db directory
	clean_output_dir(db_root.into());

	Ok(())
}

@@ -165,12 +165,14 @@ fn test_the_transaction_pool() {
	pool.add_to_pool(test_source(), tx.clone(), true, &header)
		.unwrap();
	assert_eq!(pool.total_size(), 4);
	assert_eq!(pool.txpool.size(), 4);
	assert_eq!(pool.stempool.size(), 1);

	// Duplicate stem tx, so fluff it, adding it to the txpool and removing it from the stempool.
	pool.add_to_pool(test_source(), tx.clone(), true, &header)
		.unwrap();
	assert_eq!(pool.total_size(), 5);
	assert_eq!(pool.txpool.size(), 5);
	assert!(pool.stempool.is_empty());
}

@@ -47,6 +47,9 @@ pub enum Error {
	/// Wraps a serialization error for Writeable or Readable
	#[fail(display = "Serialization Error")]
	SerErr(String),
	/// Other error
	#[fail(display = "Other Error")]
	OtherErr(String),
}

impl From<lmdb::error::Error> for Error {