Refactor compact block body (#1394)

* introduce CompactBlockBody

* rustfmt

* implement From<Block> for CompactBlock

* rustfmt

* remove debug logging

* wip

* rustfmt
Antioch Peverell 2018-08-22 20:19:37 +01:00 committed by GitHub
parent 105bfacaa6
commit 7dfca6077c
14 changed files with 424 additions and 264 deletions
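In practice the API change for callers looks like this. A minimal sketch (summarize and block are illustrative, not part of the commit; it assumes the grin_core crate from this repo as a dependency):

use grin_core::core::{Block, CompactBlock};

// Hypothetical helper showing the post-refactor API surface.
fn summarize(block: Block) -> (usize, usize, usize) {
    // Before: let cb = block.as_compact_block();
    // After: the From<Block> impl consumes the block.
    let cb: CompactBlock = block.into();
    // out_full/kern_full/kern_ids are now private on CompactBlockBody,
    // so reads go through the accessor methods.
    (cb.out_full().len(), cb.kern_full().len(), cb.kern_ids().len())
}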

View file

@@ -619,7 +619,7 @@ impl BlockHandler {
fn get_compact_block(&self, h: &Hash) -> Result<CompactBlockPrintable, Error> {
let block = w(&self.chain).get_block(h).context(ErrorKind::NotFound)?;
Ok(CompactBlockPrintable::from_compact_block(
&block.as_compact_block(),
&block.into(),
w(&self.chain),
))
}

View file

@@ -601,12 +601,12 @@ impl CompactBlockPrintable {
) -> CompactBlockPrintable {
let block = chain.get_block(&cb.hash()).unwrap();
let out_full = cb
.out_full
.out_full()
.iter()
.map(|x| OutputPrintable::from_output(x, chain.clone(), Some(&block.header), false))
.collect();
let kern_full = cb
.kern_full
.kern_full()
.iter()
.map(|x| TxKernelPrintable::from_txkernel(x))
.collect();
@@ -614,7 +614,7 @@ impl CompactBlockPrintable {
header: BlockHeaderPrintable::from_header(&cb.header),
out_full,
kern_full,
kern_ids: cb.kern_ids.iter().map(|x| x.to_hex()).collect(),
kern_ids: cb.kern_ids().iter().map(|x| x.to_hex()).collect(),
}
}
}

View file

@@ -26,7 +26,7 @@ use chrono::Duration;
use std::fs;
use std::sync::Arc;
use chain::types::{NoopAdapter, Tip};
use chain::types::NoopAdapter;
use chain::Chain;
use core::core::target::Difficulty;
use core::core::{Block, BlockHeader, Transaction};

View file

@@ -28,7 +28,7 @@ use std::sync::Arc;
use chain::types::NoopAdapter;
use chain::ErrorKind;
use core::core::target::Difficulty;
use core::core::{transaction, OutputIdentifier};
use core::core::transaction;
use core::global::{self, ChainTypes};
use core::{consensus, pow};
use keychain::{ExtKeychain, Keychain};

View file

@@ -16,23 +16,22 @@
use chrono::naive::{MAX_DATE, MIN_DATE};
use chrono::prelude::{DateTime, NaiveDateTime, Utc};
use rand::{thread_rng, Rng};
use std::collections::HashSet;
use std::fmt;
use std::iter::FromIterator;
use consensus::{self, reward, REWARD};
use core::committed::{self, Committed};
use core::compact_block::{CompactBlock, CompactBlockBody};
use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable;
use core::target::Difficulty;
use core::{
transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Proof, ShortId,
Transaction, TransactionBody, TxKernel,
transaction, Commitment, Input, KernelFeatures, Output, OutputFeatures, Proof, Transaction,
TransactionBody, TxKernel,
};
use global;
use keychain::{self, BlindingFactor};
use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use ser::{self, Readable, Reader, Writeable, Writer};
use util::{secp, secp_static, static_secp_instance, LOGGER};
/// Errors thrown by Block validation
@@ -268,80 +267,6 @@ impl BlockHeader {
}
}
/// Compact representation of a full block.
/// Each input/output/kernel is represented as a short_id.
/// A node is reasonably likely to have already seen all tx data (tx broadcast
/// before block) and can go request missing tx data from peers if necessary to
/// hydrate a compact block into a full block.
#[derive(Debug, Clone)]
pub struct CompactBlock {
/// The header with metadata and commitments to the rest of the data
pub header: BlockHeader,
/// Nonce for connection specific short_ids
pub nonce: u64,
/// List of full outputs - specifically the coinbase output(s)
pub out_full: Vec<Output>,
/// List of full kernels - specifically the coinbase kernel(s)
pub kern_full: Vec<TxKernel>,
/// List of transaction kernels, excluding those in the full list
/// (short_ids)
pub kern_ids: Vec<ShortId>,
}
/// Implementation of Writeable for a compact block, defines how to write the
/// block to a binary writer. Differentiates between writing the block for the
/// purpose of full serialization and the one of just extracting a hash.
impl Writeable for CompactBlock {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.header.write(writer)?;
if writer.serialization_mode() != ser::SerializationMode::Hash {
writer.write_u64(self.nonce)?;
ser_multiwrite!(
writer,
[write_u64, self.out_full.len() as u64],
[write_u64, self.kern_full.len() as u64],
[write_u64, self.kern_ids.len() as u64]
);
let mut out_full = self.out_full.clone();
let mut kern_full = self.kern_full.clone();
let mut kern_ids = self.kern_ids.clone();
// Consensus rule that everything is sorted in lexicographical order on the
// wire.
out_full.write_sorted(writer)?;
kern_full.write_sorted(writer)?;
kern_ids.write_sorted(writer)?;
}
Ok(())
}
}
/// Implementation of Readable for a compact block, defines how to read a
/// compact block from a binary stream.
impl Readable for CompactBlock {
fn read(reader: &mut Reader) -> Result<CompactBlock, ser::Error> {
let header = BlockHeader::read(reader)?;
let (nonce, out_full_len, kern_full_len, kern_id_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64);
let out_full = read_and_verify_sorted(reader, out_full_len as u64)?;
let kern_full = read_and_verify_sorted(reader, kern_full_len as u64)?;
let kern_ids = read_and_verify_sorted(reader, kern_id_len)?;
Ok(CompactBlock {
header,
nonce,
out_full,
kern_full,
kern_ids,
})
}
}
/// A block as expressed in the MimbleWimble protocol. The reward is
/// non-explicit, assumed to be deducible from block height (similar to
/// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
@@ -375,7 +300,10 @@ impl Readable for Block {
let header = BlockHeader::read(reader)?;
let body = TransactionBody::read(reader)?;
// Now validate the body and treat any validation error as corrupted data.
body.validate(true).map_err(|_| ser::Error::CorruptedData)?;
Ok(Block {
header: header,
body: body,
@@ -446,6 +374,8 @@ impl Block {
txs.len(),
);
let header = cb.header.clone();
let mut all_inputs = HashSet::new();
let mut all_outputs = HashSet::new();
let mut all_kernels = HashSet::new();
@@ -459,64 +389,24 @@ impl Block {
}
// include the coinbase output(s) and kernel(s) from the compact_block
all_outputs.extend(cb.out_full);
all_kernels.extend(cb.kern_full);
{
let body: CompactBlockBody = cb.into();
all_outputs.extend(body.out_full);
all_kernels.extend(body.kern_full);
}
// convert the sets to vecs
let mut all_inputs = Vec::from_iter(all_inputs);
let mut all_outputs = Vec::from_iter(all_outputs);
let mut all_kernels = Vec::from_iter(all_kernels);
let all_inputs = Vec::from_iter(all_inputs);
let all_outputs = Vec::from_iter(all_outputs);
let all_kernels = Vec::from_iter(all_kernels);
// sort them all lexicographically
all_inputs.sort();
all_outputs.sort();
all_kernels.sort();
// Initialize a tx body and sort everything.
let body = TransactionBody::init(all_inputs, all_outputs, all_kernels, false)?;
// finally return the full block
// Note: we have not actually validated the block here
// leave it to the caller to actually validate the block
Block {
header: cb.header,
body: TransactionBody::new(all_inputs, all_outputs, all_kernels),
}.cut_through()
}
/// Generate the compact block representation.
pub fn as_compact_block(&self) -> CompactBlock {
let header = self.header.clone();
let nonce = thread_rng().next_u64();
let mut out_full = self
.body
.outputs
.iter()
.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
.cloned()
.collect::<Vec<_>>();
let mut kern_full = vec![];
let mut kern_ids = vec![];
for k in self.kernels() {
if k.features.contains(KernelFeatures::COINBASE_KERNEL) {
kern_full.push(k.clone());
} else {
kern_ids.push(k.short_id(&header.hash(), nonce));
}
}
// sort all the lists
out_full.sort();
kern_full.sort();
kern_ids.sort();
CompactBlock {
header,
nonce,
out_full,
kern_full,
kern_ids,
}
// Finally return the full block.
// Note: we have not actually validated the block here,
// caller must validate the block.
Block { header, body }.cut_through()
}
/// Build a new empty block from a specified header
@@ -627,13 +517,14 @@ impl Block {
let mut outputs = self.outputs().clone();
transaction::cut_through(&mut inputs, &mut outputs)?;
let kernels = self.kernels().clone();
// Initialize tx body and sort everything.
let body = TransactionBody::init(inputs, outputs, kernels, false)?;
Ok(Block {
header: BlockHeader {
pow: self.header.pow,
total_difficulty: self.header.total_difficulty,
..self.header
},
body: TransactionBody::new(inputs, outputs, self.body.kernels),
header: self.header,
body,
})
}
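Compaction and hydration are intended as inverses whenever the receiving node already holds all of the block's transactions. A round-trip sketch under that assumption (roundtrip and its arguments are illustrative; the hydrate_from signature is the one exercised by the tests further down):

use grin_core::core::block::Error;
use grin_core::core::{Block, CompactBlock, Transaction};

fn roundtrip(block: Block, txs: Vec<Transaction>) -> Result<(), Error> {
    // Compact: coinbase output(s)/kernel(s) stay full, all other kernels
    // collapse to nonce-salted short_ids.
    let cb: CompactBlock = block.clone().into();
    // Hydrate back; per the note above, the result is not yet validated.
    let hydrated = Block::hydrate_from(cb, txs)?;
    assert_eq!(hydrated.header, block.header);
    Ok(())
}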

View file

@@ -0,0 +1,232 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Compact Blocks.
use rand::{thread_rng, Rng};
use consensus::VerifySortOrder;
use core::block::{Block, BlockHeader, Error};
use core::hash::Hashed;
use core::id::ShortIdentifiable;
use core::{KernelFeatures, Output, OutputFeatures, ShortId, TxKernel};
use ser::{self, read_multi, Readable, Reader, Writeable, Writer};
/// Container for full outputs and kernels and kern_ids for a compact block.
#[derive(Debug, Clone)]
pub struct CompactBlockBody {
/// List of full outputs - specifically the coinbase output(s)
pub out_full: Vec<Output>,
/// List of full kernels - specifically the coinbase kernel(s)
pub kern_full: Vec<TxKernel>,
/// List of transaction kernels, excluding those in the full list
/// (short_ids)
pub kern_ids: Vec<ShortId>,
}
impl CompactBlockBody {
fn init(
out_full: Vec<Output>,
kern_full: Vec<TxKernel>,
kern_ids: Vec<ShortId>,
verify_sorted: bool,
) -> Result<Self, Error> {
let body = CompactBlockBody {
out_full,
kern_full,
kern_ids,
};
if verify_sorted {
// If we are verifying sort order then verify and
// return an error if not sorted lexicographically.
body.verify_sorted()?;
Ok(body)
} else {
// If we are not verifying sort order then sort in place and return.
let mut body = body;
body.sort();
Ok(body)
}
}
/// Sort everything.
fn sort(&mut self) {
self.out_full.sort();
self.kern_full.sort();
self.kern_ids.sort();
}
fn validate(&self) -> Result<(), Error> {
self.verify_sorted()?;
Ok(())
}
// Verify everything is sorted in lexicographical order.
fn verify_sorted(&self) -> Result<(), Error> {
self.out_full.verify_sort_order()?;
self.kern_full.verify_sort_order()?;
self.kern_ids.verify_sort_order()?;
Ok(())
}
}
impl Readable for CompactBlockBody {
fn read(reader: &mut Reader) -> Result<CompactBlockBody, ser::Error> {
let (out_full_len, kern_full_len, kern_id_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64);
let out_full = read_multi(reader, out_full_len)?;
let kern_full = read_multi(reader, kern_full_len)?;
let kern_ids = read_multi(reader, kern_id_len)?;
// Initialize compact block body, verifying sort order.
let body = CompactBlockBody::init(out_full, kern_full, kern_ids, true)
.map_err(|_| ser::Error::CorruptedData)?;
Ok(body)
}
}
impl Writeable for CompactBlockBody {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(
writer,
[write_u64, self.out_full.len() as u64],
[write_u64, self.kern_full.len() as u64],
[write_u64, self.kern_ids.len() as u64]
);
self.out_full.write(writer)?;
self.kern_full.write(writer)?;
self.kern_ids.write(writer)?;
Ok(())
}
}
impl Into<CompactBlockBody> for CompactBlock {
fn into(self) -> CompactBlockBody {
self.body
}
}
/// Compact representation of a full block.
/// Each input/output/kernel is represented as a short_id.
/// A node is reasonably likely to have already seen all tx data (tx broadcast
/// before block) and can go request missing tx data from peers if necessary to
/// hydrate a compact block into a full block.
#[derive(Debug, Clone)]
pub struct CompactBlock {
/// The header with metadata and commitments to the rest of the data
pub header: BlockHeader,
/// Nonce for connection specific short_ids
pub nonce: u64,
/// Container for out_full, kern_full and kern_ids in the compact block.
body: CompactBlockBody,
}
impl CompactBlock {
fn validate(&self) -> Result<(), Error> {
self.body.validate()?;
Ok(())
}
/// Get kern_ids
pub fn kern_ids(&self) -> &Vec<ShortId> {
&self.body.kern_ids
}
/// Get full (coinbase) kernels
pub fn kern_full(&self) -> &Vec<TxKernel> {
&self.body.kern_full
}
/// Get full (coinbase) outputs
pub fn out_full(&self) -> &Vec<Output> {
&self.body.out_full
}
}
impl From<Block> for CompactBlock {
fn from(block: Block) -> Self {
let header = block.header.clone();
let nonce = thread_rng().next_u64();
let out_full = block
.outputs()
.iter()
.filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
.cloned()
.collect::<Vec<_>>();
let mut kern_full = vec![];
let mut kern_ids = vec![];
for k in block.kernels() {
if k.features.contains(KernelFeatures::COINBASE_KERNEL) {
kern_full.push(k.clone());
} else {
kern_ids.push(k.short_id(&header.hash(), nonce));
}
}
// Initialize a compact block body and sort everything.
let body = CompactBlockBody::init(out_full, kern_full, kern_ids, false)
.expect("sorting, not verifying");
CompactBlock {
header,
nonce,
body,
}
}
}
/// Implementation of Writeable for a compact block, defines how to write the
/// block to a binary writer. Differentiates between writing the block for the
/// purpose of full serialization and the one of just extracting a hash.
impl Writeable for CompactBlock {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.header.write(writer)?;
if writer.serialization_mode() != ser::SerializationMode::Hash {
writer.write_u64(self.nonce)?;
self.body.write(writer)?;
}
Ok(())
}
}
/// Implementation of Readable for a compact block, defines how to read a
/// compact block from a binary stream.
impl Readable for CompactBlock {
fn read(reader: &mut Reader) -> Result<CompactBlock, ser::Error> {
let header = BlockHeader::read(reader)?;
let nonce = reader.read_u64()?;
let body = CompactBlockBody::read(reader)?;
let cb = CompactBlock {
header,
nonce,
body,
};
// Now validate the compact block and treat any validation error as corrupted data.
cb.validate().map_err(|_| ser::Error::CorruptedData)?;
Ok(cb)
}
}
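Because read() rebuilds the body via CompactBlockBody::init(.., verify_sorted = true) and then calls validate(), a compact block whose lists arrive out of lexicographical order is rejected as corrupted data. A wire round-trip sketch (wire_roundtrip is an illustrative helper):

use grin_core::core::CompactBlock;
use grin_core::ser;

fn wire_roundtrip(cb: &CompactBlock) -> CompactBlock {
    // Layout: header | nonce | list lengths | out_full | kern_full | kern_ids.
    let mut vec = Vec::new();
    ser::serialize(&mut vec, cb).expect("serialization failed");
    // read() verifies sort order and validates, mapping any failure
    // to ser::Error::CorruptedData.
    ser::deserialize(&mut &vec[..]).expect("deserialization failed")
}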

View file

@@ -16,6 +16,7 @@
pub mod block;
pub mod committed;
pub mod compact_block;
pub mod hash;
pub mod id;
pub mod merkle_proof;
@@ -33,6 +34,7 @@ use util::secp::pedersen::Commitment;
pub use self::block::*;
pub use self::committed::Committed;
pub use self::compact_block::*;
pub use self::id::ShortId;
pub use self::transaction::*;
use core::hash::Hashed;

View file

@@ -19,18 +19,15 @@ use std::cmp::Ordering;
use std::collections::HashSet;
use std::{error, fmt};
use util::secp::pedersen::{Commitment, RangeProof};
use util::secp::{self, Message, Signature};
use util::{kernel_sig_msg, static_secp_instance};
use consensus::{self, VerifySortOrder};
use core::hash::Hashed;
use core::{committed, Committed};
use keychain::{self, BlindingFactor};
use ser::{
self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer,
};
use ser::{self, read_multi, PMMRable, Readable, Reader, Writeable, Writer};
use util;
use util::secp::pedersen::{Commitment, RangeProof};
use util::secp::{self, Message, Signature};
use util::{kernel_sig_msg, static_secp_instance};
bitflags! {
/// Options for a kernel's structure or use
@@ -240,7 +237,7 @@ impl PMMRable for TxKernel {
}
}
/// TransactionBody is acommon abstraction for transaction and block
/// TransactionBody is a common abstraction for transaction and block
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TransactionBody {
/// List of inputs spent by the transaction.
@@ -269,15 +266,9 @@ impl Writeable for TransactionBody {
[write_u64, self.kernels.len() as u64]
);
// Consensus rule that everything is sorted in lexicographical order on the
// wire.
let mut inputs = self.inputs.clone();
let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone();
inputs.write_sorted(writer)?;
outputs.write_sorted(writer)?;
kernels.write_sorted(writer)?;
self.inputs.write(writer)?;
self.outputs.write(writer)?;
self.kernels.write(writer)?;
Ok(())
}
@@ -290,15 +281,17 @@ impl Readable for TransactionBody {
let (input_len, output_len, kernel_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64);
let inputs = read_and_verify_sorted(reader, input_len)?;
let outputs = read_and_verify_sorted(reader, output_len)?;
let kernels = read_and_verify_sorted(reader, kernel_len)?;
// TODO at this point we know how many input, outputs and kernels
// we are about to read. We may want to call a variant of
// verify_weight() here as a quick early check.
let body = TransactionBody {
inputs,
outputs,
kernels,
};
let inputs = read_multi(reader, input_len)?;
let outputs = read_multi(reader, output_len)?;
let kernels = read_multi(reader, kernel_len)?;
// Initialize tx body and verify everything is sorted.
let body = TransactionBody::init(inputs, outputs, kernels, true)
.map_err(|_| ser::Error::CorruptedData)?;
Ok(body)
}
@@ -334,22 +327,44 @@ impl TransactionBody {
}
}
/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(
/// Sort the inputs|outputs|kernels.
pub fn sort(&mut self) {
self.inputs.sort();
self.outputs.sort();
self.kernels.sort();
}
/// Creates a new transaction body initialized with
/// the provided inputs, outputs and kernels.
/// Guarantees inputs, outputs, kernels are sorted lexicographically.
pub fn init(
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
) -> TransactionBody {
TransactionBody {
verify_sorted: bool,
) -> Result<TransactionBody, Error> {
let body = TransactionBody {
inputs: inputs,
outputs: outputs,
kernels: kernels,
};
if verify_sorted {
// If we are verifying sort order then verify and
// return an error if not sorted lexicographically.
body.verify_sorted()?;
Ok(body)
} else {
// If we are not verifying sort order then sort in place and return.
let mut body = body;
body.sort();
Ok(body)
}
}
/// Builds a new body with the provided inputs added. Existing
/// inputs, if any, are kept intact.
/// Sort order is maintained.
pub fn with_input(self, input: Input) -> TransactionBody {
let mut new_ins = self.inputs;
new_ins.push(input);
@@ -362,6 +377,7 @@ impl TransactionBody {
/// Builds a new TransactionBody with the provided output added. Existing
/// outputs, if any, are kept intact.
/// Sort order is maintained.
pub fn with_output(self, output: Output) -> TransactionBody {
let mut new_outs = self.outputs;
new_outs.push(output);
@@ -372,6 +388,19 @@ impl TransactionBody {
}
}
/// Builds a new TransactionBody with the provided kernel added. Existing
/// kernels, if any, are kept intact.
/// Sort order is maintained.
pub fn with_kernel(self, kernel: TxKernel) -> TransactionBody {
let mut new_kerns = self.kernels;
new_kerns.push(kernel);
new_kerns.sort();
TransactionBody {
kernels: new_kerns,
..self
}
}
/// Total fee for a TransactionBody is the sum of fees of all kernels.
pub fn fee(&self) -> u64 {
self.kernels
@@ -557,7 +586,6 @@ impl Writeable for Transaction {
impl Readable for Transaction {
fn read(reader: &mut Reader) -> Result<Transaction, ser::Error> {
let offset = BlindingFactor::read(reader)?;
let body = TransactionBody::read(reader)?;
let tx = Transaction { offset, body };
@@ -603,10 +631,13 @@ impl Transaction {
/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
Transaction {
offset: BlindingFactor::zero(),
body: TransactionBody::new(inputs, outputs, kernels),
}
let offset = BlindingFactor::zero();
// Initialize a new tx body and sort everything.
let body =
TransactionBody::init(inputs, outputs, kernels, false).expect("sorting, not verifying");
Transaction { offset, body }
}
/// Creates a new transaction using this transaction as a template
@@ -620,6 +651,7 @@ impl Transaction {
/// Builds a new transaction with the provided inputs added. Existing
/// inputs, if any, are kept intact.
/// Sort order is maintained.
pub fn with_input(self, input: Input) -> Transaction {
Transaction {
body: self.body.with_input(input),
@@ -629,6 +661,7 @@ impl Transaction {
/// Builds a new transaction with the provided output added. Existing
/// outputs, if any, are kept intact.
/// Sort order is maintained.
pub fn with_output(self, output: Output) -> Transaction {
Transaction {
body: self.body.with_output(output),
@@ -636,6 +669,16 @@ impl Transaction {
}
}
/// Builds a new transaction with the provided kernel added. Existing
/// kernels, if any, are kept intact.
/// Sort order is maintained.
pub fn with_kernel(self, kernel: TxKernel) -> Transaction {
Transaction {
body: self.body.with_kernel(kernel),
..self
}
}
/// Get inputs
pub fn inputs(&self) -> &Vec<Input> {
&self.body.inputs
@@ -768,10 +811,13 @@ pub fn aggregate(
outputs.push(out);
kernels.push(kernel);
}
kernels.sort();
// Sort inputs and outputs during cut_through.
cut_through(&mut inputs, &mut outputs)?;
// Now sort kernels.
kernels.sort();
// now sum the kernel_offsets up to give us an aggregate offset for the
// transaction
let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?;
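The verify_sorted flag gives TransactionBody::init() two roles: constructors sort on the caller's behalf, while the read path requires the data to arrive already sorted. A sketch of both uses (build_bodies is an illustrative helper; the three vecs are whatever inputs, outputs and kernels are at hand):

use grin_core::core::{Input, Output, TransactionBody, TxKernel};

fn build_bodies(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) {
    // Constructor path: sorts in place, so ordering alone can never fail.
    let _sorted = TransactionBody::init(inputs.clone(), outputs.clone(), kernels.clone(), false)
        .expect("sorting, not verifying");
    // Read path: anything not already in lexicographical order is an error,
    // which TransactionBody::read maps to ser::Error::CorruptedData.
    if TransactionBody::init(inputs, outputs, kernels, true).is_err() {
        // reject as unsorted / corrupted
    }
}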

View file

@@ -20,7 +20,7 @@
//! `serialize` or `deserialize` functions on them as appropriate.
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use consensus::{self, VerifySortOrder};
use consensus;
use core::hash::{Hash, Hashed};
use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
use std::io::{self, Read, Write};
@@ -205,19 +205,8 @@ pub trait Writeable {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error>;
}
/// Trait to allow a collection of Writeables to be written in lexicographical
/// sort order.
pub trait WriteableSorted {
/// Write the data but sort it first.
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error>;
}
/// Reads a collection of serialized items into a Vec
/// and verifies they are lexicographically ordered.
///
/// A consensus rule requires everything is sorted lexicographically to avoid
/// leaking any information through specific ordering of items.
pub fn read_and_verify_sorted<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
/// Reads multiple serialized items into a Vec.
pub fn read_multi<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
where
T: Readable + Hashed + Writeable,
{
@@ -226,7 +215,6 @@ where
return Err(Error::TooLargeReadErr);
}
let result: Vec<T> = try!((0..count).map(|_| T::read(reader)).collect());
result.verify_sort_order()?;
Ok(result)
}
@@ -486,19 +474,6 @@ where
}
}
impl<T> WriteableSorted for Vec<T>
where
T: Writeable + Ord,
{
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error> {
self.sort();
for elmt in self {
elmt.write(writer)?;
}
Ok(())
}
}
impl<'a, A: Writeable> Writeable for &'a A {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
Writeable::write(*self, writer)
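With WriteableSorted gone, writers emit collections verbatim; the sort invariant is established when a body is constructed, not re-imposed on every write. A sketch of the resulting pattern for a hypothetical wrapper type, mirroring what TransactionBody::write and CompactBlockBody::write now do:

use grin_core::core::ShortId;
use grin_core::ser::{self, Writeable, Writer};

// Hypothetical newtype whose constructor is assumed to keep the vec sorted.
struct SortedIds(Vec<ShortId>);

impl Writeable for SortedIds {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        writer.write_u64(self.0.len() as u64)?;
        // No sort here: the data was sorted (or verified sorted) at
        // construction time, so it can be written straight through.
        self.0.write(writer)
    }
}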

View file

@@ -25,7 +25,7 @@ use common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use grin_core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
use grin_core::core::block::Error;
use grin_core::core::hash::Hashed;
use grin_core::core::id::{ShortId, ShortIdentifiable};
use grin_core::core::id::ShortIdentifiable;
use grin_core::core::Committed;
use grin_core::core::{Block, BlockHeader, CompactBlock, KernelFeatures, OutputFeatures};
use grin_core::{global, ser};
@@ -273,8 +273,9 @@ fn empty_compact_block_serialized_size() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![], &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_260;
assert_eq!(vec.len(), target_len);
}
@@ -286,8 +287,9 @@ fn compact_block_single_tx_serialized_size() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_266;
assert_eq!(vec.len(), target_len);
}
@@ -323,8 +325,9 @@ fn compact_block_10_tx_serialized_size() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id);
let cb: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
ser::serialize(&mut vec, &cb).expect("serialization failed");
let target_len = 1_320;
assert_eq!(vec.len(), target_len,);
}
@@ -336,8 +339,8 @@ fn compact_block_hash_with_nonce() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![&tx], &keychain, &prev, &key_id);
let cb1 = b.as_compact_block();
let cb2 = b.as_compact_block();
let cb1: CompactBlock = b.clone().into();
let cb2: CompactBlock = b.clone().into();
// random nonce will not affect the hash of the compact block itself
// hash is based on header POW only
@@ -345,16 +348,16 @@
assert_eq!(b.hash(), cb1.hash());
assert_eq!(cb1.hash(), cb2.hash());
assert!(cb1.kern_ids[0] != cb2.kern_ids[0]);
assert!(cb1.kern_ids()[0] != cb2.kern_ids()[0]);
// check we can identify the specified kernel from the short_id
// correctly in both of the compact_blocks
assert_eq!(
cb1.kern_ids[0],
cb1.kern_ids()[0],
tx.kernels()[0].short_id(&cb1.hash(), cb1.nonce)
);
assert_eq!(
cb2.kern_ids[0],
cb2.kern_ids()[0],
tx.kernels()[0].short_id(&cb2.hash(), cb2.nonce)
);
}
@@ -366,14 +369,14 @@ fn convert_block_to_compact_block() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let cb = b.as_compact_block();
let cb: CompactBlock = b.clone().into();
assert_eq!(cb.out_full.len(), 1);
assert_eq!(cb.kern_full.len(), 1);
assert_eq!(cb.kern_ids.len(), 1);
assert_eq!(cb.out_full().len(), 1);
assert_eq!(cb.kern_full().len(), 1);
assert_eq!(cb.kern_ids().len(), 1);
assert_eq!(
cb.kern_ids[0],
cb.kern_ids()[0],
b.kernels()
.iter()
.find(|x| !x.features.contains(KernelFeatures::COINBASE_KERNEL))
@@ -388,7 +391,7 @@ fn hydrate_empty_compact_block() {
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![], &keychain, &prev, &key_id);
let cb = b.as_compact_block();
let cb: CompactBlock = b.clone().into();
let hb = Block::hydrate_from(cb, vec![]).unwrap();
assert_eq!(hb.header, b.header);
assert_eq!(hb.outputs(), b.outputs());
@@ -397,18 +400,25 @@
#[test]
fn serialize_deserialize_compact_block() {
let b = CompactBlock {
header: BlockHeader::default(),
nonce: 0,
out_full: vec![],
kern_full: vec![],
kern_ids: vec![ShortId::zero()],
};
let keychain = ExtKeychain::from_random_seed().unwrap();
let tx1 = tx1i2o();
let prev = BlockHeader::default();
let key_id = keychain.derive_key_id(1).unwrap();
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut cb1: CompactBlock = b.into();
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let b2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap();
ser::serialize(&mut vec, &cb1).expect("serialization failed");
assert_eq!(b.header, b2.header);
assert_eq!(b.kern_ids, b2.kern_ids);
// After header serialization the timestamp loses its 'nanos' precision; that is the designed behavior.
// To avoid an assertion failure caused by the 'nanos' difference, we strip 'nanos' from cb1.header as well.
let origin_ts = cb1.header.timestamp;
cb1.header.timestamp =
origin_ts - Duration::nanoseconds(origin_ts.timestamp_subsec_nanos() as i64);
let cb2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap();
assert_eq!(cb1.header, cb2.header);
assert_eq!(cb1.kern_ids(), cb2.kern_ids());
}

View file

@@ -21,6 +21,7 @@ use std::sync::Arc;
use conn::{Message, MessageHandler, Response};
use core::core;
use core::core::hash::Hash;
use core::core::CompactBlock;
use msg::{
BanReason, GetPeerAddrs, Headers, Locator, PeerAddrs, Ping, Pong, SockAddr, TxHashSetArchive,
TxHashSetRequest, Type,
@@ -129,17 +130,13 @@ impl MessageHandler for Protocol {
let h: Hash = msg.body()?;
if let Some(b) = adapter.get_block(h) {
let cb = b.as_compact_block();
// serialize and send the block over in compact representation
// if we have txs in the block send a compact block
// but if block is empty -
// to allow us to test all code paths, randomly choose to send
// either the block or the compact block
let mut rng = rand::thread_rng();
if cb.kern_ids.is_empty() && rng.gen() {
if b.kernels().len() == 1 && rng.gen() {
debug!(
LOGGER,
"handle_payload: GetCompactBlock: empty block, sending full block",
@@ -147,6 +144,7 @@ impl MessageHandler for Protocol {
Ok(Some(msg.respond(Type::Block, b)))
} else {
let cb: CompactBlock = b.into();
Ok(Some(msg.respond(Type::CompactBlock, cb)))
}
} else {

View file

@@ -66,7 +66,7 @@ where
let short_id = kernel.short_id(&cb.hash(), cb.nonce);
// if any kernel matches then keep the tx for later
if cb.kern_ids.contains(&short_id) {
if cb.kern_ids().contains(&short_id) {
txs.push(x.tx.clone());
break;
}

View file

@@ -25,10 +25,10 @@ use std::time::Instant;
use chain::{self, ChainAdapter, Options, Tip};
use common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStatus};
use core::core::block::BlockHeader;
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::core::transaction::Transaction;
use core::core::{BlockHeader, CompactBlock};
use core::{core, global};
use p2p;
use pool;
@@ -122,25 +122,25 @@ impl p2p::ChainAdapter for NetToChainAdapter {
bhash,
cb.header.height,
addr,
cb.out_full.len(),
cb.kern_full.len(),
cb.kern_ids.len(),
cb.out_full().len(),
cb.kern_full().len(),
cb.kern_ids().len(),
);
if cb.kern_ids.is_empty() {
let cbh = cb.hash();
let cb_hash = cb.hash();
if cb.kern_ids().is_empty() {
// push the freshly hydrated block through the chain pipeline
match core::Block::hydrate_from(cb, vec![]) {
Ok(block) => self.process_block(block, addr),
Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cbh, e);
debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e);
return false;
}
}
} else {
// check at least the header is valid before hydrating
if let Err(e) = w(&self.chain).process_block_header(&cb.header, self.chain_opts()) {
debug!(LOGGER, "Invalid compact block header {}: {}", cb.hash(), e);
debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e);
return !e.is_bad_data();
}
@@ -634,11 +634,10 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
if opts.contains(Options::MINE) {
// propagate compact block out if we mined the block
// but broadcast full block if we have no txs
let cb = b.as_compact_block();
if cb.kern_ids.is_empty() {
// in the interest of testing all code paths
// randomly decide how we send an empty block out
// TODO - lock this down once we are comfortable it works...
let cb: CompactBlock = b.clone().into();
if cb.kern_ids().is_empty() {
// In the interest of exercising all code paths
// randomly decide how we send an empty block out.
let mut rng = rand::thread_rng();
if rng.gen() {
wo(&self.peers).broadcast_block(&b);
@@ -657,7 +656,7 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
if rng.gen() {
wo(&self.peers).broadcast_header(&b.header);
} else {
let cb = b.as_compact_block();
let cb = b.clone().into();
wo(&self.peers).broadcast_compact_block(&cb);
}
}

View file

@@ -196,7 +196,7 @@ where
K: Keychain,
{
let mut ctx = Context { keychain };
let (mut tx, kern, sum) = elems.iter().fold(
let (tx, kern, sum) = elems.iter().fold(
(Transaction::empty(), TxKernel::empty(), BlindSum::new()),
|acc, elem| elem(&mut ctx, acc),
);
@@ -204,7 +204,8 @@ where
// we only support building a tx with a single kernel via build::transaction()
assert!(tx.kernels().is_empty());
tx.kernels_mut().push(kern);
let tx = tx.with_kernel(kern);
Ok((tx, blind_sum))
}
@@ -220,14 +221,19 @@ where
let (mut tx, blind_sum) = partial_transaction(elems, keychain)?;
assert_eq!(tx.kernels().len(), 1);
let kern = {
let mut kern = tx.kernels_mut().remove(0);
let msg = secp::Message::from_slice(&kernel_sig_msg(kern.fee, kern.lock_height))?;
let skey = blind_sum.secret_key(&keychain.secp())?;
kern.excess = keychain.secp().commit(0, skey)?;
kern.excess_sig = aggsig::sign_with_blinding(&keychain.secp(), &msg, &blind_sum).unwrap();
kern
};
tx.kernels_mut().push(kern);
// Now build a new tx with this single kernel.
let tx = tx.with_kernel(kern);
assert_eq!(tx.kernels().len(), 1);
Ok(tx)
}
@@ -264,7 +270,8 @@ where
tx.offset = k2.clone();
assert!(tx.kernels().is_empty());
tx.kernels_mut().push(kern);
let tx = tx.with_kernel(kern);
assert_eq!(tx.kernels().len(), 1);
Ok(tx)
}
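The switch from tx.kernels_mut().push(kern) to tx.with_kernel(kern) is more than cosmetic: with_kernel re-sorts the kernel list, preserving the lexicographical invariant that init() and the read path now enforce. A sketch assuming tx carries no kernels yet and kern is a fully signed kernel (attach_single_kernel is illustrative):

use grin_core::core::{Transaction, TxKernel};

fn attach_single_kernel(tx: Transaction, kern: TxKernel) -> Transaction {
    // Consumes tx and returns a new one with the kernel inserted in
    // sorted position, keeping the body's sort invariant intact.
    let tx = tx.with_kernel(kern);
    assert_eq!(tx.kernels().len(), 1);
    tx
}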