pass slices around and not refs to vecs

* pass slices around and not refs to vecs

* use slice.swap()

* use inputs() not body.inputs
This commit is contained in:
Antioch Peverell 2020-07-27 11:07:18 +01:00 committed by GitHub
parent 105f50b26b
commit 80841f16d2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
29 changed files with 272 additions and 275 deletions

View file

@ -212,8 +212,9 @@ impl<'a> Batch<'a> {
/// We maintain a "spent" index for each full block to allow the output_pos
/// to be easily reverted during rewind.
pub fn save_spent_index(&self, h: &Hash, spent: &Vec<CommitPos>) -> Result<(), Error> {
self.db.put_ser(&to_key(BLOCK_SPENT_PREFIX, h)[..], spent)?;
pub fn save_spent_index(&self, h: &Hash, spent: &[CommitPos]) -> Result<(), Error> {
self.db
.put_ser(&to_key(BLOCK_SPENT_PREFIX, h)[..], &spent.to_vec())?;
Ok(())
}

View file

@ -91,9 +91,8 @@ where
let reward =
libtx::reward::output(keychain, &libtx::ProofBuilder::new(keychain), &pk, 0, false)
.unwrap();
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
.unwrap();
let mut b = core::core::Block::new(&prev, &[], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;

View file

@ -37,7 +37,7 @@ where
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap();
let mut block = Block::new(&prev, txs, next_header_info.clone().difficulty, reward).unwrap();
let mut block = Block::new(&prev, &txs, next_header_info.clone().difficulty, reward).unwrap();
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@ -87,7 +87,7 @@ fn mine_block_with_nrd_kernel_and_nrd_feature_enabled() {
fee: 20000,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
vec![
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(consensus::REWARD - 20000, key_id2.clone()),
],
@ -134,7 +134,7 @@ fn mine_invalid_block_with_nrd_kernel_and_nrd_feature_enabled_before_hf() {
fee: 20000,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
vec![
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(consensus::REWARD - 20000, key_id2.clone()),
],

View file

@ -570,7 +570,7 @@ fn spend_rewind_spend() {
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
vec![
&[
build::coinbase_input(consensus::REWARD, key_id_coinbase.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
],
@ -579,7 +579,7 @@ fn spend_rewind_spend() {
)
.unwrap();
let b = prepare_block_tx(&kc, &head, &chain, 6, vec![&tx1]);
let b = prepare_block_tx(&kc, &head, &chain, 6, &[tx1.clone()]);
head = b.header.clone();
chain
.process_block(b.clone(), chain::Options::SKIP_POW)
@ -595,7 +595,7 @@ fn spend_rewind_spend() {
// Now mine a competing block also spending the same coinbase output from earlier.
// Rewind back prior to the tx that spends it to "unspend" it.
{
let b = prepare_block_tx(&kc, &rewind_to, &chain, 6, vec![&tx1]);
let b = prepare_block_tx(&kc, &rewind_to, &chain, 6, &[tx1]);
chain
.process_block(b.clone(), chain::Options::SKIP_POW)
.unwrap();
@ -644,7 +644,7 @@ fn spend_in_fork_and_compact() {
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
vec![
&[
build::coinbase_input(consensus::REWARD, key_id2.clone()),
build::output(consensus::REWARD - 20000, key_id30.clone()),
],
@ -653,7 +653,7 @@ fn spend_in_fork_and_compact() {
)
.unwrap();
let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
let next = prepare_block_tx(&kc, &fork_head, &chain, 7, &[tx1.clone()]);
let prev_main = next.header.clone();
chain
.process_block(next.clone(), chain::Options::SKIP_POW)
@ -662,7 +662,7 @@ fn spend_in_fork_and_compact() {
let tx2 = build::transaction(
KernelFeatures::Plain { fee: 20000 },
vec![
&[
build::input(consensus::REWARD - 20000, key_id30.clone()),
build::output(consensus::REWARD - 40000, key_id31.clone()),
],
@ -671,7 +671,7 @@ fn spend_in_fork_and_compact() {
)
.unwrap();
let next = prepare_block_tx(&kc, &prev_main, &chain, 9, vec![&tx2]);
let next = prepare_block_tx(&kc, &prev_main, &chain, 9, &[tx2.clone()]);
let prev_main = next.header.clone();
chain.process_block(next, chain::Options::SKIP_POW).unwrap();
@ -679,11 +679,11 @@ fn spend_in_fork_and_compact() {
chain.validate(false).unwrap();
// mine 2 forked blocks from the first
let fork = prepare_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
let fork = prepare_block_tx(&kc, &fork_head, &chain, 6, &[tx1.clone()]);
let prev_fork = fork.header.clone();
chain.process_block(fork, chain::Options::SKIP_POW).unwrap();
let fork_next = prepare_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]);
let fork_next = prepare_block_tx(&kc, &prev_fork, &chain, 8, &[tx2.clone()]);
let prev_fork = fork_next.header.clone();
chain
.process_block(fork_next, chain::Options::SKIP_POW)
@ -771,7 +771,7 @@ fn output_header_mappings() {
.unwrap();
reward_outputs.push(reward.0.clone());
let mut b =
core::core::Block::new(&prev, vec![], next_header_info.clone().difficulty, reward)
core::core::Block::new(&prev, &[], next_header_info.clone().difficulty, reward)
.unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@ -834,7 +834,7 @@ fn prepare_block_key_idx<K>(
where
K: Keychain,
{
let mut b = prepare_block_nosum(kc, prev, diff, key_idx, vec![]);
let mut b = prepare_block_nosum(kc, prev, diff, key_idx, &[]);
chain.set_txhashset_roots(&mut b).unwrap();
b
}
@ -845,7 +845,7 @@ fn prepare_block_tx<K>(
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
txs: &[Transaction],
) -> Block
where
K: Keychain,
@ -860,7 +860,7 @@ fn prepare_block_tx_key_idx<K>(
chain: &Chain,
diff: u64,
key_idx: u32,
txs: Vec<&Transaction>,
txs: &[Transaction],
) -> Block
where
K: Keychain,
@ -875,7 +875,7 @@ fn prepare_block_nosum<K>(
prev: &BlockHeader,
diff: u64,
key_idx: u32,
txs: Vec<&Transaction>,
txs: &[Transaction],
) -> Block
where
K: Keychain,
@ -886,12 +886,7 @@ where
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward =
libtx::reward::output(kc, &libtx::ProofBuilder::new(kc), &key_id, fees, false).unwrap();
let mut b = match core::core::Block::new(
prev,
txs.into_iter().cloned().collect(),
Difficulty::from_num(diff),
reward,
) {
let mut b = match core::core::Block::new(prev, txs, Difficulty::from_num(diff), reward) {
Err(e) => panic!("{:?}", e),
Ok(b) => b,
};

View file

@ -58,7 +58,7 @@ where
let reward =
reward::output(keychain, &ProofBuilder::new(keychain), key_id, fee, false).unwrap();
let mut block = Block::new(prev, txs, next_header_info.clone().difficulty, reward)?;
let mut block = Block::new(prev, &txs, next_header_info.clone().difficulty, reward)?;
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@ -121,7 +121,7 @@ fn process_block_nrd_validation() -> Result<(), Error> {
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let tx1 = build::transaction_with_kernel(
vec![
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(consensus::REWARD - 20000, key_id2.clone()),
],
@ -133,7 +133,7 @@ fn process_block_nrd_validation() -> Result<(), Error> {
.unwrap();
let tx2 = build::transaction_with_kernel(
vec![
&[
build::input(consensus::REWARD - 20000, key_id2.clone()),
build::output(consensus::REWARD - 40000, key_id3.clone()),
],
@ -237,7 +237,7 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> {
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let tx1 = build::transaction_with_kernel(
vec![
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(consensus::REWARD - 20000, key_id2.clone()),
],
@ -249,7 +249,7 @@ fn process_block_nrd_validation_relative_height_1() -> Result<(), Error> {
.unwrap();
let tx2 = build::transaction_with_kernel(
vec![
&[
build::input(consensus::REWARD - 20000, key_id2.clone()),
build::output(consensus::REWARD - 40000, key_id3.clone()),
],
@ -336,7 +336,7 @@ fn process_block_nrd_validation_fork() -> Result<(), Error> {
let key_id3 = ExtKeychainPath::new(1, 3, 0, 0, 0).to_identifier();
let tx1 = build::transaction_with_kernel(
vec![
&[
build::coinbase_input(consensus::REWARD, key_id1.clone()),
build::output(consensus::REWARD - 20000, key_id2.clone()),
],
@ -348,7 +348,7 @@ fn process_block_nrd_validation_fork() -> Result<(), Error> {
.unwrap();
let tx2 = build::transaction_with_kernel(
vec![
&[
build::input(consensus::REWARD - 20000, key_id2.clone()),
build::output(consensus::REWARD - 40000, key_id3.clone()),
],

View file

@ -68,7 +68,7 @@ fn test_coinbase_maturity() {
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false).unwrap();
let mut block = core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@ -101,7 +101,7 @@ fn test_coinbase_maturity() {
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
&[
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
],
@ -110,7 +110,7 @@ fn test_coinbase_maturity() {
)
.unwrap();
let txs = vec![coinbase_txn.clone()];
let txs = &[coinbase_txn.clone()];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
@ -149,8 +149,7 @@ fn test_coinbase_maturity() {
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false).unwrap();
let mut block =
core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@ -184,7 +183,7 @@ fn test_coinbase_maturity() {
// this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
&[
build::coinbase_input(amount, key_id1.clone()),
build::output(amount - 2, key_id2.clone()),
],
@ -193,7 +192,7 @@ fn test_coinbase_maturity() {
)
.unwrap();
let txs = vec![coinbase_txn.clone()];
let txs = &[coinbase_txn.clone()];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
@ -232,7 +231,7 @@ fn test_coinbase_maturity() {
let reward = libtx::reward::output(&keychain, &builder, &pk, 0, false).unwrap();
let mut block =
core::core::Block::new(&prev, vec![], Difficulty::min(), reward).unwrap();
core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
let next_header_info =
consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
block.header.timestamp = prev.timestamp + Duration::seconds(60);
@ -257,7 +256,7 @@ fn test_coinbase_maturity() {
// The coinbase output has matured sufficiently based on current chain state.
chain.verify_coinbase_maturity(&coinbase_txn).unwrap();
let txs = vec![coinbase_txn];
let txs = &[coinbase_txn];
let fees = txs.iter().map(|tx| tx.fee()).sum();
let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
let reward = libtx::reward::output(&keychain, &builder, &key_id4, fees, false).unwrap();

View file

@ -538,7 +538,7 @@ impl Block {
#[warn(clippy::new_ret_no_self)]
pub fn new(
prev: &BlockHeader,
txs: Vec<Transaction>,
txs: &[Transaction],
difficulty: Difficulty,
reward_output: (Output, TxKernel),
) -> Result<Block, Error> {
@ -557,7 +557,7 @@ impl Block {
/// Hydrate a block from a compact block.
/// Note: caller must validate the block themselves, we do not validate it
/// here.
pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> {
pub fn hydrate_from(cb: CompactBlock, txs: &[Transaction]) -> Result<Block, Error> {
trace!("block: hydrate_from: {}, {} txs", cb.hash(), txs.len(),);
let header = cb.header.clone();
@ -568,10 +568,9 @@ impl Block {
// collect all the inputs, outputs and kernels from the txs
for tx in txs {
let tb: TransactionBody = tx.into();
all_inputs.extend(tb.inputs);
all_outputs.extend(tb.outputs);
all_kernels.extend(tb.kernels);
all_inputs.extend(tx.inputs());
all_outputs.extend(tx.outputs());
all_kernels.extend(tx.kernels());
}
// include the coinbase output(s) and kernel(s) from the compact_block
@ -587,7 +586,7 @@ impl Block {
let all_kernels = Vec::from_iter(all_kernels);
// Initialize a tx body and sort everything.
let body = TransactionBody::init(all_inputs, all_outputs, all_kernels, false)?;
let body = TransactionBody::init(&all_inputs, &all_outputs, &all_kernels, false)?;
// Finally return the full block.
// Note: we have not actually validated the block here,
@ -608,7 +607,7 @@ impl Block {
/// that all transactions are valid and calculates the Merkle tree.
pub fn from_reward(
prev: &BlockHeader,
txs: Vec<Transaction>,
txs: &[Transaction],
reward_out: Output,
reward_kern: TxKernel,
difficulty: Difficulty,
@ -663,32 +662,32 @@ impl Block {
}
/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
pub fn inputs(&self) -> &[Input] {
&self.body.inputs
}
/// Get inputs mutable
pub fn inputs_mut(&mut self) -> &mut Vec<Input> {
pub fn inputs_mut(&mut self) -> &mut [Input] {
&mut self.body.inputs
}
/// Get outputs
pub fn outputs(&self) -> &Vec<Output> {
pub fn outputs(&self) -> &[Output] {
&self.body.outputs
}
/// Get outputs mutable
pub fn outputs_mut(&mut self) -> &mut Vec<Output> {
pub fn outputs_mut(&mut self) -> &mut [Output] {
&mut self.body.outputs
}
/// Get kernels
pub fn kernels(&self) -> &Vec<TxKernel> {
pub fn kernels(&self) -> &[TxKernel] {
&self.body.kernels
}
/// Get kernels mut
pub fn kernels_mut(&mut self) -> &mut Vec<TxKernel> {
pub fn kernels_mut(&mut self) -> &mut [TxKernel] {
&mut self.body.kernels
}
@ -702,14 +701,12 @@ impl Block {
/// elimination is stable with respect to the order of inputs and outputs.
/// Method consumes the block.
pub fn cut_through(self) -> Result<Block, Error> {
let mut inputs = self.inputs().clone();
let mut outputs = self.outputs().clone();
transaction::cut_through(&mut inputs, &mut outputs)?;
let kernels = self.kernels().clone();
let mut inputs = self.inputs().to_vec();
let mut outputs = self.outputs().to_vec();
let (inputs, outputs) = transaction::cut_through(&mut inputs, &mut outputs)?;
// Initialize tx body and sort everything.
let body = TransactionBody::init(inputs, outputs, kernels, false)?;
let body = TransactionBody::init(inputs, outputs, self.kernels(), false)?;
Ok(Block {
header: self.header,
@ -809,7 +806,7 @@ impl Block {
// Verify any absolute kernel lock heights.
fn verify_kernel_lock_heights(&self) -> Result<(), Error> {
for k in &self.body.kernels {
for k in self.kernels() {
// check we have no kernels with lock_heights greater than current height
// no tx can be included in a block earlier than its lock_height
if let KernelFeatures::HeightLocked { lock_height, .. } = k.features {
@ -825,7 +822,7 @@ impl Block {
// NRD kernels were introduced in HF3 and are not valid for block version < 4.
// Blocks prior to HF3 containing any NRD kernel(s) are invalid.
fn verify_nrd_kernels_for_header_version(&self) -> Result<(), Error> {
if self.body.kernels.iter().any(|k| k.is_nrd()) {
if self.kernels().iter().any(|k| k.is_nrd()) {
if !global::is_nrd_enabled() {
return Err(Error::NRDKernelNotEnabled);
}

View file

@ -146,17 +146,17 @@ impl CompactBlock {
}
/// Get kern_ids
pub fn kern_ids(&self) -> &Vec<ShortId> {
pub fn kern_ids(&self) -> &[ShortId] {
&self.body.kern_ids
}
/// Get full (coinbase) kernels
pub fn kern_full(&self) -> &Vec<TxKernel> {
pub fn kern_full(&self) -> &[TxKernel] {
&self.body.kern_full
}
/// Get full (coinbase) outputs
pub fn out_full(&self) -> &Vec<Output> {
pub fn out_full(&self) -> &[Output] {
&self.body.out_full
}
}

View file

@ -440,7 +440,7 @@ impl From<committed::Error> for Error {
/// amount to zero.
/// The signature signs the fee and the lock_height, which are retained for
/// signature validation.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub struct TxKernel {
/// Options for a kernel's structure or use
pub features: KernelFeatures,
@ -710,7 +710,7 @@ impl Readable for TransactionBody {
let kernels = read_multi(reader, kernel_len)?;
// Initialize tx body and verify everything is sorted.
let body = TransactionBody::init(inputs, outputs, kernels, true)
let body = TransactionBody::init(&inputs, &outputs, &kernels, true)
.map_err(|_| ser::Error::CorruptedData)?;
Ok(body)
@ -737,6 +737,12 @@ impl Default for TransactionBody {
}
}
impl From<Transaction> for TransactionBody {
fn from(tx: Transaction) -> Self {
tx.body
}
}
impl TransactionBody {
/// Creates a new empty transaction (no inputs or outputs, zero fee).
pub fn empty() -> TransactionBody {
@ -758,15 +764,15 @@ impl TransactionBody {
/// the provided inputs, outputs and kernels.
/// Guarantees inputs, outputs, kernels are sorted lexicographically.
pub fn init(
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
inputs: &[Input],
outputs: &[Output],
kernels: &[TxKernel],
verify_sorted: bool,
) -> Result<TransactionBody, Error> {
let mut body = TransactionBody {
inputs,
outputs,
kernels,
inputs: inputs.to_vec(),
outputs: outputs.to_vec(),
kernels: kernels.to_vec(),
};
if verify_sorted {
@ -1075,12 +1081,6 @@ impl PartialEq for Transaction {
}
}
impl Into<TransactionBody> for Transaction {
fn into(self) -> TransactionBody {
self.body
}
}
/// Implementation of Writeable for a fully blinded transaction, defines how to
/// write the transaction as binary.
impl Writeable for Transaction {
@ -1140,7 +1140,7 @@ impl Transaction {
/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
pub fn new(inputs: &[Input], outputs: &[Output], kernels: &[TxKernel]) -> Transaction {
let offset = BlindingFactor::zero();
// Initialize a new tx body and sort everything.
@ -1195,32 +1195,32 @@ impl Transaction {
}
/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
pub fn inputs(&self) -> &[Input] {
&self.body.inputs
}
/// Get inputs mutable
pub fn inputs_mut(&mut self) -> &mut Vec<Input> {
pub fn inputs_mut(&mut self) -> &mut [Input] {
&mut self.body.inputs
}
/// Get outputs
pub fn outputs(&self) -> &Vec<Output> {
pub fn outputs(&self) -> &[Output] {
&self.body.outputs
}
/// Get outputs mutable
pub fn outputs_mut(&mut self) -> &mut Vec<Output> {
pub fn outputs_mut(&mut self) -> &mut [Output] {
&mut self.body.outputs
}
/// Get kernels
pub fn kernels(&self) -> &Vec<TxKernel> {
pub fn kernels(&self) -> &[TxKernel] {
&self.body.kernels
}
/// Get kernels mut
pub fn kernels_mut(&mut self) -> &mut Vec<TxKernel> {
pub fn kernels_mut(&mut self) -> &mut [TxKernel] {
&mut self.body.kernels
}
@ -1290,7 +1290,10 @@ impl Transaction {
/// from the Vec. Provides a simple way to cut-through a block or aggregated
/// transaction. The elimination is stable with respect to the order of inputs
/// and outputs.
pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result<(), Error> {
pub fn cut_through<'a>(
inputs: &'a mut [Input],
outputs: &'a mut [Output],
) -> Result<(&'a [Input], &'a [Output]), Error> {
// assemble output commitments set, checking they're all unique
outputs.sort_unstable();
if outputs.windows(2).any(|pair| pair[0] == pair[1]) {
@ -1303,11 +1306,11 @@ pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result
while inputs_idx < inputs.len() && outputs_idx < outputs.len() {
match inputs[inputs_idx].hash().cmp(&outputs[outputs_idx].hash()) {
Ordering::Less => {
inputs[inputs_idx - ncut] = inputs[inputs_idx];
inputs.swap(inputs_idx - ncut, inputs_idx);
inputs_idx += 1;
}
Ordering::Greater => {
outputs[outputs_idx - ncut] = outputs[outputs_idx];
outputs.swap(outputs_idx - ncut, outputs_idx);
outputs_idx += 1;
}
Ordering::Equal => {
@ -1317,27 +1320,40 @@ pub fn cut_through(inputs: &mut Vec<Input>, outputs: &mut Vec<Output>) -> Result
}
}
}
// Cut elements that have already been copied
outputs.drain(outputs_idx - ncut..outputs_idx);
inputs.drain(inputs_idx - ncut..inputs_idx);
Ok(())
// Make sure we move any of the remaining inputs into the slice to be returned.
while inputs_idx < inputs.len() {
inputs.swap(inputs_idx - ncut, inputs_idx);
inputs_idx += 1;
}
// Make sure we move any of the remaining outputs into the slice to be returned.
while outputs_idx < outputs.len() {
outputs.swap(outputs_idx - ncut, outputs_idx);
outputs_idx += 1;
}
Ok((
&inputs[..inputs.len() - ncut],
&outputs[..outputs.len() - ncut],
))
}
/// Aggregate a vec of txs into a multi-kernel tx with cut_through.
pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
pub fn aggregate(txs: &[Transaction]) -> Result<Transaction, Error> {
// convenience short-circuiting
if txs.is_empty() {
return Ok(Transaction::empty());
} else if txs.len() == 1 {
return Ok(txs.pop().unwrap());
return Ok(txs[0].clone());
}
let mut n_inputs = 0;
let mut n_outputs = 0;
let mut n_kernels = 0;
for tx in txs.iter() {
n_inputs += tx.body.inputs.len();
n_outputs += tx.body.outputs.len();
n_kernels += tx.body.kernels.len();
n_inputs += tx.inputs().len();
n_outputs += tx.outputs().len();
n_kernels += tx.kernels().len();
}
let mut inputs: Vec<Input> = Vec::with_capacity(n_inputs);
@ -1347,17 +1363,17 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
// we will sum these together at the end to give us the overall offset for the
// transaction
let mut kernel_offsets: Vec<BlindingFactor> = Vec::with_capacity(txs.len());
for mut tx in txs {
for tx in txs {
// we will sum these later to give a single aggregate offset
kernel_offsets.push(tx.offset);
kernel_offsets.push(tx.offset.clone());
inputs.append(&mut tx.body.inputs);
outputs.append(&mut tx.body.outputs);
kernels.append(&mut tx.body.kernels);
inputs.extend_from_slice(tx.inputs());
outputs.extend_from_slice(tx.outputs());
kernels.extend_from_slice(tx.kernels());
}
// Sort inputs and outputs during cut_through.
cut_through(&mut inputs, &mut outputs)?;
let (inputs, outputs) = cut_through(&mut inputs, &mut outputs)?;
// Now sort kernels.
kernels.sort_unstable();
@ -1371,14 +1387,14 @@ pub fn aggregate(mut txs: Vec<Transaction>) -> Result<Transaction, Error> {
// * cut-through outputs
// * full set of tx kernels
// * sum of all kernel offsets
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
let tx = Transaction::new(inputs, outputs, &kernels).with_offset(total_kernel_offset);
Ok(tx)
}
/// Attempt to deaggregate a multi-kernel transaction based on multiple
/// transactions
pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transaction, Error> {
pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result<Transaction, Error> {
let mut inputs: Vec<Input> = vec![];
let mut outputs: Vec<Output> = vec![];
let mut kernels: Vec<TxKernel> = vec![];
@ -1389,19 +1405,19 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
let tx = aggregate(txs)?;
for mk_input in mk_tx.body.inputs {
if !tx.body.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
inputs.push(mk_input);
for mk_input in mk_tx.inputs() {
if !tx.inputs().contains(&mk_input) && !inputs.contains(mk_input) {
inputs.push(*mk_input);
}
}
for mk_output in mk_tx.body.outputs {
if !tx.body.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
outputs.push(mk_output);
for mk_output in mk_tx.outputs() {
if !tx.outputs().contains(&mk_output) && !outputs.contains(mk_output) {
outputs.push(*mk_output);
}
}
for mk_kernel in mk_tx.body.kernels {
if !tx.body.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
kernels.push(mk_kernel);
for mk_kernel in mk_tx.kernels() {
if !tx.kernels().contains(&mk_kernel) && !kernels.contains(mk_kernel) {
kernels.push(*mk_kernel);
}
}
@ -1436,7 +1452,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
kernels.sort_unstable();
// Build a new tx from the above data.
let tx = Transaction::new(inputs, outputs, kernels).with_offset(total_kernel_offset);
let tx = Transaction::new(&inputs, &outputs, &kernels).with_offset(total_kernel_offset);
Ok(tx)
}

View file

@ -182,7 +182,7 @@ where
///
pub fn partial_transaction<K, B>(
tx: Transaction,
elems: Vec<Box<Append<K, B>>>,
elems: &[Box<Append<K, B>>],
keychain: &K,
builder: &B,
) -> Result<(Transaction, BlindingFactor), Error>
@ -203,7 +203,7 @@ where
/// In the real world we use signature aggregation across multiple participants.
pub fn transaction<K, B>(
features: KernelFeatures,
elems: Vec<Box<Append<K, B>>>,
elems: &[Box<Append<K, B>>],
keychain: &K,
builder: &B,
) -> Result<Transaction, Error>
@ -230,7 +230,7 @@ where
/// NOTE: Only used in tests (for convenience).
/// Cannot recommend passing private excess around like this in the real world.
pub fn transaction_with_kernel<K, B>(
elems: Vec<Box<Append<K, B>>>,
elems: &[Box<Append<K, B>>],
kernel: TxKernel,
excess: BlindingFactor,
keychain: &K,
@ -284,7 +284,7 @@ mod test {
let tx = transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
@ -306,7 +306,7 @@ mod test {
let tx = transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&[input(10, key_id1), input(12, key_id2), output(20, key_id3)],
&keychain,
&builder,
)
@ -327,7 +327,7 @@ mod test {
let tx = transaction(
KernelFeatures::Plain { fee: 4 },
vec![input(6, key_id1), output(2, key_id2)],
&[input(6, key_id1), output(2, key_id2)],
&keychain,
&builder,
)

View file

@ -59,12 +59,17 @@ fn too_large_block() {
}
parts.append(&mut vec![input(500000, pks.pop().unwrap())]);
let tx =
build::transaction(KernelFeatures::Plain { fee: 2 }, parts, &keychain, &builder).unwrap();
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
&parts,
&keychain,
&builder,
)
.unwrap();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx], &keychain, &builder, &prev, &key_id);
assert!(b
.validate(&BlindingFactor::zero(), verifier_cache())
.is_err());
@ -95,16 +100,17 @@ fn block_with_nrd_kernel_pre_post_hf3() {
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let mut tx = build::transaction(
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 2,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
vec![input(7, key_id1), output(5, key_id2)],
&[input(7, key_id1), output(5, key_id2)],
&keychain,
&builder,
)
.unwrap();
let txs = &[tx];
let prev_height = TESTING_THIRD_HARD_FORK - 2;
let prev = BlockHeader {
@ -113,7 +119,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -134,7 +140,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -155,7 +161,7 @@ fn block_with_nrd_kernel_pre_post_hf3() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -179,17 +185,19 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
let key_id1 = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let mut tx = build::transaction(
let tx = build::transaction(
KernelFeatures::NoRecentDuplicate {
fee: 2,
relative_height: NRDRelativeHeight::new(1440).unwrap(),
},
vec![input(7, key_id1), output(5, key_id2)],
&[input(7, key_id1), output(5, key_id2)],
&keychain,
&builder,
)
.unwrap();
let txs = &[tx];
let prev_height = TESTING_THIRD_HARD_FORK - 2;
let prev = BlockHeader {
height: prev_height,
@ -197,7 +205,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -218,7 +226,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -240,7 +248,7 @@ fn block_with_nrd_kernel_nrd_not_enabled() {
..BlockHeader::default()
};
let b = new_block(
vec![&mut tx],
txs,
&keychain,
&builder,
&prev,
@ -265,10 +273,10 @@ fn block_with_cut_through() {
let key_id2 = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let mut btx1 = tx2i1o();
let mut btx2 = build::transaction(
let btx1 = tx2i1o();
let btx2 = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(7, key_id1), output(5, key_id2.clone())],
&[input(7, key_id1), output(5, key_id2.clone())],
&keychain,
&builder,
)
@ -276,16 +284,10 @@ fn block_with_cut_through() {
// spending tx2 - reuse key_id2
let mut btx3 = txspend1i1o(5, &keychain, &builder, key_id2, key_id3);
let btx3 = txspend1i1o(5, &keychain, &builder, key_id2, key_id3);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(
vec![&mut btx1, &mut btx2, &mut btx3],
&keychain,
&builder,
&prev,
&key_id,
);
let b = new_block(&[btx1, btx2, btx3], &keychain, &builder, &prev, &key_id);
// block should have been automatically compacted (including reward
// output) and should still be valid
@ -302,7 +304,7 @@ fn empty_block_with_coinbase_is_valid() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
assert_eq!(b.inputs().len(), 0);
assert_eq!(b.outputs().len(), 1);
@ -341,7 +343,7 @@ fn remove_coinbase_output_flag() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let mut b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let mut b = new_block(&[], &keychain, &builder, &prev, &key_id);
assert!(b.outputs()[0].is_coinbase());
b.outputs_mut()[0].features = OutputFeatures::Plain;
@ -365,7 +367,7 @@ fn remove_coinbase_kernel_flag() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let mut b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let mut b = new_block(&[], &keychain, &builder, &prev, &key_id);
assert!(b.kernels()[0].is_coinbase());
b.kernels_mut()[0].features = KernelFeatures::Plain { fee: 0 };
@ -408,7 +410,7 @@ fn serialize_deserialize_block_header() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
let header1 = b.header;
let mut vec = Vec::new();
@ -427,7 +429,7 @@ fn serialize_deserialize_block() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
@ -447,7 +449,7 @@ fn empty_block_serialized_size() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
assert_eq!(vec.len(), 1_096);
@ -461,7 +463,7 @@ fn block_single_tx_serialized_size() {
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &b).expect("serialization failed");
assert_eq!(vec.len(), 2_670);
@ -474,7 +476,7 @@ fn empty_compact_block_serialized_size() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
@ -489,7 +491,7 @@ fn compact_block_single_tx_serialized_size() {
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
@ -509,7 +511,7 @@ fn block_10_tx_serialized_size() {
}
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(txs.iter().collect(), &keychain, &builder, &prev, &key_id);
let b = new_block(&txs, &keychain, &builder, &prev, &key_id);
// Default protocol version.
{
@ -546,7 +548,7 @@ fn compact_block_10_tx_serialized_size() {
}
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(txs.iter().collect(), &keychain, &builder, &prev, &key_id);
let b = new_block(&txs, &keychain, &builder, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &cb).expect("serialization failed");
@ -561,7 +563,7 @@ fn compact_block_hash_with_nonce() {
let tx = tx1i2o();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx.clone()], &keychain, &builder, &prev, &key_id);
let cb1: CompactBlock = b.clone().into();
let cb2: CompactBlock = b.clone().into();
@ -593,7 +595,7 @@ fn convert_block_to_compact_block() {
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
let cb: CompactBlock = b.clone().into();
assert_eq!(cb.out_full().len(), 1);
@ -617,9 +619,9 @@ fn hydrate_empty_compact_block() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
let cb: CompactBlock = b.clone().into();
let hb = Block::hydrate_from(cb, vec![]).unwrap();
let hb = Block::hydrate_from(cb, &[]).unwrap();
assert_eq!(hb.header, b.header);
assert_eq!(hb.outputs(), b.outputs());
assert_eq!(hb.kernels(), b.kernels());
@ -633,7 +635,7 @@ fn serialize_deserialize_compact_block() {
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![&tx1], &keychain, &builder, &prev, &key_id);
let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id);
let mut cb1: CompactBlock = b.into();
@ -664,7 +666,7 @@ fn same_amount_outputs_copy_range_proof() {
let tx = build::transaction(
KernelFeatures::Plain { fee: 1 },
vec![input(7, key_id1), output(3, key_id2), output(3, key_id3)],
&[input(7, key_id1), output(3, key_id2), output(3, key_id3)],
&keychain,
&builder,
)
@ -673,14 +675,14 @@ fn same_amount_outputs_copy_range_proof() {
// now we reconstruct the transaction, swapping the rangeproofs so they
// have the wrong privkey
let ins = tx.inputs();
let mut outs = tx.outputs().clone();
let mut outs = tx.outputs().to_vec();
let kernels = tx.kernels();
outs[0].proof = outs[1].proof;
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
vec![&mut Transaction::new(ins.clone(), outs, kernels.clone())],
&[Transaction::new(ins, &outs, kernels)],
&keychain,
&builder,
&prev,
@ -707,7 +709,7 @@ fn wrong_amount_range_proof() {
let tx1 = build::transaction(
KernelFeatures::Plain { fee: 1 },
vec![
&[
input(7, key_id1.clone()),
output(3, key_id2.clone()),
output(3, key_id3.clone()),
@ -718,7 +720,7 @@ fn wrong_amount_range_proof() {
.unwrap();
let tx2 = build::transaction(
KernelFeatures::Plain { fee: 1 },
vec![input(7, key_id1), output(2, key_id2), output(4, key_id3)],
&[input(7, key_id1), output(2, key_id2), output(4, key_id3)],
&keychain,
&builder,
)
@ -726,7 +728,7 @@ fn wrong_amount_range_proof() {
// we take the range proofs from tx2 into tx1 and rebuild the transaction
let ins = tx1.inputs();
let mut outs = tx1.outputs().clone();
let mut outs = tx1.outputs().to_vec();
let kernels = tx1.kernels();
outs[0].proof = tx2.outputs()[0].proof;
outs[1].proof = tx2.outputs()[1].proof;
@ -734,7 +736,7 @@ fn wrong_amount_range_proof() {
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
vec![&mut Transaction::new(ins.clone(), outs, kernels.clone())],
&[Transaction::new(ins, &outs, kernels)],
&keychain,
&builder,
&prev,
@ -756,7 +758,7 @@ fn validate_header_proof() {
let builder = ProofBuilder::new(&keychain);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(vec![], &keychain, &builder, &prev, &key_id);
let b = new_block(&[], &keychain, &builder, &prev, &key_id);
let mut header_buf = vec![];
{

View file

@ -34,13 +34,15 @@ pub fn tx2i1o() -> Transaction {
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
build::transaction(
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), input(11, key_id2), output(19, key_id3)],
&[input(10, key_id1), input(11, key_id2), output(19, key_id3)],
&keychain,
&builder,
)
.unwrap()
.unwrap();
tx
}
// utility producing a transaction with a single input and output
@ -51,13 +53,15 @@ pub fn tx1i1o() -> Transaction {
let key_id1 = keychain::ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
build::transaction(
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(5, key_id1), output(3, key_id2)],
&[input(5, key_id1), output(3, key_id2)],
&keychain,
&builder,
)
.unwrap()
.unwrap();
tx
}
// utility producing a transaction with a single input
@ -71,20 +75,22 @@ pub fn tx1i2o() -> Transaction {
let key_id2 = keychain::ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let key_id3 = keychain::ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
build::transaction(
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(6, key_id1), output(3, key_id2), output(1, key_id3)],
&[input(6, key_id1), output(3, key_id2), output(1, key_id3)],
&keychain,
&builder,
)
.unwrap()
.unwrap();
tx
}
// utility to create a block without worrying about the key or previous
// header
#[allow(dead_code)]
pub fn new_block<K, B>(
txs: Vec<&Transaction>,
txs: &[Transaction],
keychain: &K,
builder: &B,
previous_header: &BlockHeader,
@ -96,13 +102,7 @@ where
{
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward_output = reward::output(keychain, builder, &key_id, fees, false).unwrap();
Block::new(
&previous_header,
txs.into_iter().cloned().collect(),
Difficulty::min(),
reward_output,
)
.unwrap()
Block::new(&previous_header, txs, Difficulty::min(), reward_output).unwrap()
}
// utility producing a transaction that spends an output with the provided
@ -121,7 +121,7 @@ where
{
build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(v, key_id1), output(3, key_id2)],
&[input(v, key_id1), output(3, key_id2)],
keychain,
builder,
)

View file

@ -93,9 +93,9 @@ fn create_chain_sim(diff: u64) -> Vec<(HeaderInfo, DiffStats)> {
)]
}
fn get_diff_stats(chain_sim: &Vec<HeaderInfo>) -> DiffStats {
fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats {
// Fill out some difficulty stats for convenience
let diff_iter = chain_sim.clone();
let diff_iter = chain_sim.to_vec();
let last_blocks: Vec<HeaderInfo> = global::difficulty_data_to_vector(diff_iter.iter().cloned());
let mut last_time = last_blocks[0].timestamp;

View file

@ -105,7 +105,7 @@ fn test_zero_commit_fails() {
// blinding should fail as signing with a zero r*G shouldn't work
let res = build::transaction(
KernelFeatures::Plain { fee: 0 },
vec![input(10, key_id1.clone()), output(10, key_id1)],
&[input(10, key_id1.clone()), output(10, key_id1)],
&keychain,
&builder,
);
@ -128,7 +128,7 @@ fn build_tx_kernel() {
// first build a valid tx with corresponding blinding factor
let tx = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![input(10, key_id1), output(5, key_id2), output(3, key_id3)],
&[input(10, key_id1), output(5, key_id2), output(3, key_id3)],
&keychain,
&builder,
)
@ -181,7 +181,7 @@ fn build_two_half_kernels() {
kernel.verify().unwrap();
let tx1 = build::transaction_with_kernel(
vec![input(10, key_id1), output(8, key_id2.clone())],
&[input(10, key_id1), output(8, key_id2.clone())],
kernel.clone(),
excess.clone(),
&keychain,
@ -190,7 +190,7 @@ fn build_two_half_kernels() {
.unwrap();
let tx2 = build::transaction_with_kernel(
vec![input(8, key_id2), output(6, key_id3)],
&[input(8, key_id2), output(6, key_id3)],
kernel.clone(),
excess.clone(),
&keychain,
@ -240,7 +240,7 @@ fn transaction_cut_through() {
let vc = verifier_cache();
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(vec![tx1, tx2]).unwrap();
let tx3 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
}
@ -261,9 +261,9 @@ fn multi_kernel_transaction_deaggregation() {
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
let tx34 = aggregate(vec![tx3, tx4]).unwrap();
let tx1234 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx1234
.validate(Weighting::AsTransaction, vc.clone())
@ -271,13 +271,13 @@ fn multi_kernel_transaction_deaggregation() {
assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx34.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx34 = deaggregate(tx1234.clone(), vec![tx12.clone()]).unwrap();
let deaggregated_tx34 = deaggregate(tx1234.clone(), &[tx12.clone()]).unwrap();
assert!(deaggregated_tx34
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
assert_eq!(tx34, deaggregated_tx34);
let deaggregated_tx12 = deaggregate(tx1234, vec![tx34]).unwrap();
let deaggregated_tx12 = deaggregate(tx1234, &[tx34]).unwrap();
assert!(deaggregated_tx12
.validate(Weighting::AsTransaction, vc.clone())
@ -298,13 +298,13 @@ fn multi_kernel_transaction_deaggregation_2() {
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx3 = deaggregate(tx123, vec![tx12]).unwrap();
let deaggregated_tx3 = deaggregate(tx123, &[tx12]).unwrap();
assert!(deaggregated_tx3
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@ -324,14 +324,14 @@ fn multi_kernel_transaction_deaggregation_3() {
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(vec![tx1, tx3]).unwrap();
let tx2 = aggregate(vec![tx2]).unwrap();
let tx123 = aggregate(&[tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(&[tx1, tx3]).unwrap();
let tx2 = aggregate(&[tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx13 = deaggregate(tx123, vec![tx2]).unwrap();
let deaggregated_tx13 = deaggregate(tx123, &[tx2]).unwrap();
assert!(deaggregated_tx13
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@ -355,7 +355,7 @@ fn multi_kernel_transaction_deaggregation_4() {
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx12345 = aggregate(vec![
let tx12345 = aggregate(&[
tx1.clone(),
tx2.clone(),
tx3.clone(),
@ -367,7 +367,7 @@ fn multi_kernel_transaction_deaggregation_4() {
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
let deaggregated_tx5 = deaggregate(tx12345, vec![tx1, tx2, tx3, tx4]).unwrap();
let deaggregated_tx5 = deaggregate(tx12345, &[tx1, tx2, tx3, tx4]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@ -391,7 +391,7 @@ fn multi_kernel_transaction_deaggregation_5() {
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx5.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx12345 = aggregate(vec![
let tx12345 = aggregate(&[
tx1.clone(),
tx2.clone(),
tx3.clone(),
@ -399,14 +399,14 @@ fn multi_kernel_transaction_deaggregation_5() {
tx5.clone(),
])
.unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
let tx34 = aggregate(vec![tx3, tx4]).unwrap();
let tx12 = aggregate(&[tx1, tx2]).unwrap();
let tx34 = aggregate(&[tx3, tx4]).unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
let deaggregated_tx5 = deaggregate(tx12345, vec![tx12, tx34]).unwrap();
let deaggregated_tx5 = deaggregate(tx12345, &[tx12, tx34]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@ -426,18 +426,18 @@ fn basic_transaction_deaggregation() {
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
// now build a "cut_through" tx from tx1 and tx2
let tx3 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx3 = aggregate(&[tx1.clone(), tx2.clone()]).unwrap();
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx1 = deaggregate(tx3.clone(), vec![tx2.clone()]).unwrap();
let deaggregated_tx1 = deaggregate(tx3.clone(), &[tx2.clone()]).unwrap();
assert!(deaggregated_tx1
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
assert_eq!(tx1, deaggregated_tx1);
let deaggregated_tx2 = deaggregate(tx3, vec![tx1]).unwrap();
let deaggregated_tx2 = deaggregate(tx3, &[tx1]).unwrap();
assert!(deaggregated_tx2
.validate(Weighting::AsTransaction, vc.clone())
@ -455,7 +455,7 @@ fn hash_output() {
let tx = build::transaction(
KernelFeatures::Plain { fee: 1 },
vec![input(75, key_id1), output(42, key_id2), output(32, key_id3)],
&[input(75, key_id1), output(42, key_id2), output(32, key_id3)],
&keychain,
&builder,
)
@ -520,7 +520,7 @@ fn tx_build_exchange() {
let tx = Transaction::empty()
.with_kernel(TxKernel::with_features(KernelFeatures::Plain { fee: 2 }));
let (tx, sum) =
build::partial_transaction(tx, vec![in1, in2, output(1, key_id3)], &keychain, &builder)
build::partial_transaction(tx, &[in1, in2, output(1, key_id3)], &keychain, &builder)
.unwrap();
(tx, sum)
@ -531,7 +531,7 @@ fn tx_build_exchange() {
// ready for broadcast.
let tx_final = build::transaction(
KernelFeatures::Plain { fee: 2 },
vec![
&[
initial_tx(tx_alice),
with_excess(blind_sum),
output(4, key_id4),
@ -555,7 +555,7 @@ fn reward_empty_block() {
let previous_header = BlockHeader::default();
let b = new_block(vec![], &keychain, &builder, &previous_header, &key_id);
let b = new_block(&[], &keychain, &builder, &previous_header, &key_id);
b.cut_through()
.unwrap()
@ -572,18 +572,12 @@ fn reward_with_tx_block() {
let vc = verifier_cache();
let mut tx1 = tx2i1o();
let tx1 = tx2i1o();
tx1.validate(Weighting::AsTransaction, vc.clone()).unwrap();
let previous_header = BlockHeader::default();
let block = new_block(
vec![&mut tx1],
&keychain,
&builder,
&previous_header,
&key_id,
);
let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id);
block
.cut_through()
.unwrap()
@ -600,17 +594,11 @@ fn simple_block() {
let vc = verifier_cache();
let mut tx1 = tx2i1o();
let mut tx2 = tx1i1o();
let tx1 = tx2i1o();
let tx2 = tx1i1o();
let previous_header = BlockHeader::default();
let b = new_block(
vec![&mut tx1, &mut tx2],
&keychain,
&builder,
&previous_header,
&key_id,
);
let b = new_block(&[tx1, tx2], &keychain, &builder, &previous_header, &key_id);
b.validate(&BlindingFactor::zero(), vc.clone()).unwrap();
}
@ -633,7 +621,7 @@ fn test_block_with_timelocked_tx() {
fee: 2,
lock_height: 1,
},
vec![input(5, key_id1.clone()), output(3, key_id2.clone())],
&[input(5, key_id1.clone()), output(3, key_id2.clone())],
&keychain,
&builder,
)
@ -642,7 +630,7 @@ fn test_block_with_timelocked_tx() {
let previous_header = BlockHeader::default();
let b = new_block(
vec![&tx1],
&[tx1],
&keychain,
&builder,
&previous_header,
@ -657,14 +645,14 @@ fn test_block_with_timelocked_tx() {
fee: 2,
lock_height: 2,
},
vec![input(5, key_id1), output(3, key_id2)],
&[input(5, key_id1), output(3, key_id2)],
&keychain,
&builder,
)
.unwrap();
let previous_header = BlockHeader::default();
let b = new_block(vec![&tx1], &keychain, &builder, &previous_header, &key_id3);
let b = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id3);
match b.validate(&BlindingFactor::zero(), vc.clone()) {
Err(KernelLockHeight(height)) => {

View file

@ -171,7 +171,7 @@ where
return Ok(None);
}
let tx = transaction::aggregate(txs)?;
let tx = transaction::aggregate(&txs)?;
// Validate the single aggregate transaction "as pool", not subject to tx weight limits.
tx.validate(Weighting::NoLimit, self.verifier_cache.clone())?;
@ -205,7 +205,7 @@ where
// Create a single aggregated tx from the existing pool txs and the
// new entry
txs.push(entry.tx.clone());
transaction::aggregate(txs)?
transaction::aggregate(&txs)?
};
// Validate aggregated tx (existing pool + new tx), ignoring tx weight limits.
@ -269,7 +269,7 @@ where
candidate_txs.push(tx.clone());
// Build a single aggregate tx from candidate txs.
let agg_tx = transaction::aggregate(candidate_txs)?;
let agg_tx = transaction::aggregate(&candidate_txs)?;
// We know the tx is valid if the entire aggregate tx is valid.
if self.validate_raw_tx(&agg_tx, header, weighting).is_ok() {
@ -514,7 +514,7 @@ impl Bucket {
) -> Result<Bucket, PoolError> {
let mut raw_txs = self.raw_txs.clone();
raw_txs.push(new_tx);
let agg_tx = transaction::aggregate(raw_txs.clone())?;
let agg_tx = transaction::aggregate(&raw_txs)?;
agg_tx.validate(weighting, verifier_cache)?;
Ok(Bucket {
fee_to_weight: agg_tx.fee_to_weight(),

View file

@ -113,7 +113,7 @@ where
if entry.tx.kernels().len() > 1 {
let txs = self.txpool.find_matching_transactions(entry.tx.kernels());
if !txs.is_empty() {
let tx = transaction::deaggregate(entry.tx, txs)?;
let tx = transaction::deaggregate(entry.tx, &txs)?;
// Validate this deaggregated tx "as tx", subject to regular tx weight limits.
tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?;

View file

@ -57,7 +57,7 @@ fn test_transaction_pool_block_building() -> Result<(), PoolError> {
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
add_block(&chain, &[initial_tx], &keychain);
let header = chain.head_header().unwrap();
@ -83,7 +83,7 @@ fn test_transaction_pool_block_building() -> Result<(), PoolError> {
let txs = pool.prepare_mineable_transactions()?;
add_block(&chain, txs, &keychain);
add_block(&chain, &txs, &keychain);
// Get full block from head of the chain (block we just processed).
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();

View file

@ -58,7 +58,7 @@ fn test_block_building_max_weight() {
test_transaction_spending_coinbase(&keychain, &header_1, vec![100, 200, 300, 1000]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
add_block(&chain, &[initial_tx], &keychain);
let header = chain.head_header().unwrap();
@ -113,7 +113,7 @@ fn test_block_building_max_weight() {
[15625, 1125, 1000, 875]
);
add_block(&chain, txs, &keychain);
add_block(&chain, &txs, &keychain);
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();
// Check contents of the block itself (including coinbase reward).

View file

@ -56,7 +56,7 @@ fn test_transaction_pool_block_reconciliation() {
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
add_block(&chain, &[initial_tx], &keychain);
let header = chain.head_header().unwrap();
@ -130,7 +130,7 @@ fn test_transaction_pool_block_reconciliation() {
// - Output conflict w/ 8
let block_tx_4 = test_transaction(&keychain, vec![40], vec![9, 31]);
let block_txs = vec![block_tx_1, block_tx_2, block_tx_3, block_tx_4];
let block_txs = &[block_tx_1, block_tx_2, block_tx_3, block_tx_4];
add_block(&chain, block_txs, &keychain);
let block = chain.get_block(&chain.head().unwrap().hash()).unwrap();

View file

@ -49,7 +49,7 @@ fn test_coinbase_maturity() {
);
// Add a single block, introducing coinbase output to be spent later.
add_block(&chain, vec![], &keychain);
add_block(&chain, &[], &keychain);
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![100]);

View file

@ -66,11 +66,11 @@ where
K: Keychain,
{
for _ in 0..count {
add_block(chain, vec![], keychain);
add_block(chain, &[], keychain);
}
}
pub fn add_block<K>(chain: &Chain, txs: Vec<Transaction>, keychain: &K)
pub fn add_block<K>(chain: &Chain, txs: &[Transaction], keychain: &K)
where
K: Keychain,
{
@ -198,7 +198,7 @@ where
build::transaction(
KernelFeatures::Plain { fee: fees as u64 },
tx_elements,
&tx_elements,
keychain,
&ProofBuilder::new(keychain),
)
@ -249,7 +249,7 @@ where
build::transaction(
kernel_features,
tx_elements,
&tx_elements,
keychain,
&ProofBuilder::new(keychain),
)
@ -279,7 +279,7 @@ where
}
build::transaction_with_kernel(
tx_elements,
&tx_elements,
kernel,
excess,
keychain,

View file

@ -62,7 +62,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
let initial_tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs.
add_block(&chain, vec![initial_tx], &keychain);
add_block(&chain, &[initial_tx], &keychain);
add_some_blocks(&chain, 5, &keychain);
@ -168,7 +168,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
assert_eq!(txs.len(), 1);
// Mine block containing tx1 from the txpool.
add_block(&chain, txs, &keychain);
add_block(&chain, &txs, &keychain);
let header = chain.head_header().unwrap();
let block = chain.get_block(&header.hash()).unwrap();
@ -191,7 +191,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
);
// Add another block so NRD relative_height rule is now met.
add_block(&chain, vec![], &keychain);
add_block(&chain, &[], &keychain);
let header = chain.head_header().unwrap();
// Confirm we can now add tx2 to stempool with NRD relative_height rule met.
@ -229,7 +229,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> {
assert_eq!(txs.len(), 1);
// Mine block containing tx2 from the txpool.
add_block(&chain, txs, &keychain);
add_block(&chain, &txs, &keychain);
let header = chain.head_header().unwrap();
let block = chain.get_block(&header.hash()).unwrap();
pool.reconcile_block(&block)?;

View file

@ -57,7 +57,7 @@ fn test_nrd_kernels_disabled() {
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
add_block(&chain, &[tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,

View file

@ -57,7 +57,7 @@ fn test_nrd_kernels_enabled() {
// Spend the initial coinbase.
let header_1 = chain.get_header_by_height(1).unwrap();
let tx = test_transaction_spending_coinbase(&keychain, &header_1, vec![10, 20, 30, 40]);
add_block(&chain, vec![tx], &keychain);
add_block(&chain, &[tx], &keychain);
let tx_1 = test_transaction_with_kernel_features(
&keychain,

View file

@ -184,7 +184,7 @@ fn test_the_transaction_pool() {
let tx4 = test_transaction(&keychain, vec![800], vec![799]);
// tx1 and tx2 are already in the txpool (in aggregated form)
// tx4 is the "new" part of this aggregated tx that we care about
let agg_tx = transaction::aggregate(vec![tx1.clone(), tx2.clone(), tx4]).unwrap();
let agg_tx = transaction::aggregate(&[tx1.clone(), tx2.clone(), tx4]).unwrap();
agg_tx
.validate(Weighting::AsTransaction, verifier_cache.clone())

View file

@ -169,7 +169,7 @@ where
let cb_hash = cb.hash();
if cb.kern_ids().is_empty() {
// push the freshly hydrated block through the chain pipeline
match core::Block::hydrate_from(cb, vec![]) {
match core::Block::hydrate_from(cb, &[]) {
Ok(block) => {
if !self.sync_state.is_syncing() {
for hook in &self.hooks {
@ -211,7 +211,7 @@ where
return Ok(true);
}
let block = match core::Block::hydrate_from(cb.clone(), txs) {
let block = match core::Block::hydrate_from(cb.clone(), &txs) {
Ok(block) => {
if !self.sync_state.is_syncing() {
for hook in &self.hooks {

View file

@ -146,7 +146,7 @@ fn process_fluff_phase(
fluffable_txs.len()
);
let agg_tx = transaction::aggregate(fluffable_txs)?;
let agg_tx = transaction::aggregate(&fluffable_txs)?;
agg_tx.validate(
transaction::Weighting::AsTransaction,
verifier_cache.clone(),

View file

@ -175,7 +175,7 @@ fn build_block(
};
let (output, kernel, block_fees) = get_coinbase(wallet_listener_url, block_fees)?;
let mut b = core::Block::from_reward(&head, txs, output, kernel, difficulty.difficulty)?;
let mut b = core::Block::from_reward(&head, &txs, output, kernel, difficulty.difficulty)?;
// making sure we're not spending time mining a useless block
b.validate(&head.total_kernel_offset, verifier_cache)?;

View file

@ -488,14 +488,14 @@ impl<T: TableViewItem<H> + PartialEq, H: Eq + Hash + Copy + Clone + 'static> Tab
}
/// Returns a immutable reference to the items contained within the table.
pub fn borrow_items(&mut self) -> &Vec<T> {
pub fn borrow_items(&mut self) -> &[T] {
&self.items
}
/// Returns a mutable reference to the items contained within the table.
///
/// Can be used to modify the items in place.
pub fn borrow_items_mut(&mut self) -> &mut Vec<T> {
pub fn borrow_items_mut(&mut self) -> &mut [T] {
&mut self.items
}