we can now hydrate empty compact blocks and process them (#675)

* we can now hydrate empty compact blocks and process them

* add some tests to verify size of
various serialized blocks and compact blocks

* add test: hydrate_empty_block

* fix broken test
This commit is contained in:
Antioch Peverell 2018-02-01 16:40:55 -05:00 committed by GitHub
parent 1e50d56c7e
commit 5fe06e3e3b
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 209 additions and 24 deletions

View file

@ -398,6 +398,53 @@ impl Block {
Ok(block)
}
/// Hydrate a full block from a compact block.
///
/// TODO - only supporting empty compact blocks for now (coinbase output/kernel only)
/// eventually we want to support any compact blocks.
/// We need to differentiate between a block with missing entries (not in tx pool)
/// and a truly invalid block (which will get the peer banned),
/// so we need to consider how to do this safely/robustly.
/// Presumably at this point we are confident we can generate a full block with no
/// missing pieces, but we cannot fully validate it until we push it through the
/// pipeline, at which point the peer runs the risk of getting banned.
pub fn hydrate_from(
	cb: CompactBlock,
	_inputs: Vec<Input>,
	_outputs: Vec<Output>,
	_kernels: Vec<TxKernel>,
) -> Block {
	debug!(
		LOGGER,
		"block: hydrate_from: {}, {} cb outputs, {} cb kernels, {} tx kern_ids",
		cb.hash(),
		cb.out_full.len(),
		cb.kern_full.len(),
		cb.kern_ids.len(),
	);

	// we only support "empty" compact block for now
	assert!(cb.kern_ids.is_empty());

	// With no kern_ids there is nothing to pull from the tx pool; the compact
	// block already carries the full coinbase output/kernel, so the hydrated
	// block is built entirely from what the compact block contains.
	let mut inputs: Vec<Input> = vec![];
	let mut outputs = cb.out_full;
	let mut kernels = cb.kern_full;

	inputs.sort();
	outputs.sort();
	kernels.sort();

	Block {
		header: cb.header,
		inputs: inputs,
		outputs: outputs,
		kernels: kernels,
	}.cut_through()
}
/// Generate the compact block representation.
pub fn as_compact_block(&self) -> CompactBlock {
let header = self.header.clone();
@ -780,7 +827,7 @@ mod test {
use core::hash::ZERO_HASH;
use core::Transaction;
use core::build::{self, input, output, with_fee};
use core::test::tx2i1o;
use core::test::{tx1i2o, tx2i1o};
use keychain::{Identifier, Keychain};
use consensus::{MAX_BLOCK_WEIGHT, BLOCK_OUTPUT_WEIGHT};
use std::time::Instant;
@ -974,10 +1021,104 @@ mod test {
assert_eq!(b.kernels, b2.kernels);
}
#[test]
fn empty_block_serialized_size() {
	// A block containing no transactions must serialize to a fixed, known size.
	let keychain = Keychain::from_random_seed().unwrap();
	let block = new_block(vec![], &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block).expect("serialization failed");
	assert_eq!(buf.len(), 5_676);
}
#[test]
fn block_single_tx_serialized_size() {
	// A block with one 1-input/2-output tx must serialize to a known size.
	let keychain = Keychain::from_random_seed().unwrap();
	let tx = tx1i2o();
	let block = new_block(vec![&tx], &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block).expect("serialization failed");
	assert_eq!(buf.len(), 16_224);
}
#[test]
fn empty_compact_block_serialized_size() {
	// The compact form of an empty block must serialize to a known size.
	let keychain = Keychain::from_random_seed().unwrap();
	let block = new_block(vec![], &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block.as_compact_block()).expect("serialization failed");
	assert_eq!(buf.len(), 5_662);
}
#[test]
fn compact_block_single_tx_serialized_size() {
	// Compact form of a one-tx block: only the kernel short id is added,
	// so the size stays close to the empty compact block.
	let keychain = Keychain::from_random_seed().unwrap();
	let tx = tx1i2o();
	let block = new_block(vec![&tx], &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block.as_compact_block()).expect("serialization failed");
	assert_eq!(buf.len(), 5_668);
}
#[test]
fn block_10_tx_serialized_size() {
	// A block carrying ten 1-input/2-output txs must serialize to a known size.
	let keychain = Keychain::from_random_seed().unwrap();
	let txs: Vec<_> = (0..10).map(|_| tx1i2o()).collect();
	let block = new_block(txs.iter().collect(), &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block).expect("serialization failed");
	assert_eq!(buf.len(), 111_156);
}
#[test]
fn compact_block_10_tx_serialized_size() {
	// Compact form of a ten-tx block: one short id per kernel, so the size
	// grows only marginally over the empty compact block.
	let keychain = Keychain::from_random_seed().unwrap();
	let txs: Vec<_> = (0..10).map(|_| tx1i2o()).collect();
	let block = new_block(txs.iter().collect(), &keychain);
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &block.as_compact_block()).expect("serialization failed");
	assert_eq!(buf.len(), 5_722);
}
#[test]
fn convert_block_to_compact_block() {
let keychain = Keychain::from_random_seed().unwrap();
let tx1 = tx2i1o();
let tx1 = tx1i2o();
let b = new_block(vec![&tx1], &keychain);
let cb = b.as_compact_block();
@ -996,6 +1137,17 @@ mod test {
);
}
#[test]
fn hydrate_empty_compact_block() {
	// Round-trip: empty block -> compact block -> hydrated block must
	// preserve the header, outputs and kernels (no tx pool data needed).
	let keychain = Keychain::from_random_seed().unwrap();
	let original = new_block(vec![], &keychain);
	let compact = original.as_compact_block();
	let hydrated = Block::hydrate_from(compact, vec![], vec![], vec![]);
	assert_eq!(hydrated.header, original.header);
	assert_eq!(hydrated.outputs, original.outputs);
	assert_eq!(hydrated.kernels, original.kernels);
}
#[test]
fn serialize_deserialize_compact_block() {
let b = CompactBlock {

View file

@ -537,4 +537,24 @@ mod test {
).map(|(tx, _)| tx)
.unwrap()
}
// utility: build a transaction with one input and two outputs
// (one of which plays the role of the change output)
pub fn tx1i2o() -> Transaction {
	let keychain = keychain::Keychain::from_random_seed().unwrap();
	let key_id1 = keychain.derive_key_id(1).unwrap();
	let key_id2 = keychain.derive_key_id(2).unwrap();
	let key_id3 = keychain.derive_key_id(3).unwrap();

	// 6 in = 3 out + 1 out + 2 fee
	let parts = vec![
		input(6, ZERO_HASH, key_id1),
		output(3, key_id2),
		output(1, key_id3),
		with_fee(2),
	];
	let (tx, _) = build::transaction(parts, &keychain).unwrap();
	tx
}
}

View file

@ -68,46 +68,43 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
let bhash = b.hash();
debug!(
LOGGER,
"Received block {} at {} from {}, going to process.",
bhash,
b.hash(),
b.header.height,
addr,
);
// pushing the new block through the chain pipeline
let res = self.chain.process_block(b, self.chain_opts());
if let Err(ref e) = res {
debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
if e.is_bad_block() {
debug!(LOGGER, "block_received: {} is a bad block, resetting head", bhash);
let _ = self.chain.reset_head();
return false;
}
};
true
self.process_block(b)
}
fn compact_block_received(&self, bh: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = bh.hash();
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash();
debug!(
LOGGER,
"Received compact_block {} at {} from {}, going to process.",
bhash,
bh.header.height,
cb.header.height,
addr,
);
debug!(
LOGGER,
"*** cannot hydrate compact block (not yet implemented), falling back to requesting full block",
);
if cb.kern_ids.is_empty() {
let block = core::Block::hydrate_from(cb, vec![], vec![], vec![]);
self.request_block(&bh.header, &addr);
// push the freshly hydrated block through the chain pipeline
self.process_block(block)
} else {
// TODO - do we need to validate the header here to be sure it is not total garbage?
true
debug!(
LOGGER,
"*** cannot hydrate non-empty compact block (not yet implemented), \
falling back to requesting full block",
);
self.request_block(&cb.header, &addr);
true
}
}
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
@ -298,6 +295,22 @@ impl NetToChainAdapter {
}
}
// Push a new block through the chain pipeline.
// Returns false (and resets the chain head) only when the chain reports
// the block as bad; any other refusal is logged but still returns true.
fn process_block(&self, b: core::Block) -> bool {
	let bhash = b.hash();
	match self.chain.process_block(b, self.chain_opts()) {
		Err(ref e) => {
			debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
			if e.is_bad_block() {
				debug!(LOGGER, "adapter: process_block: {} is a bad block, resetting head", bhash);
				let _ = self.chain.reset_head();
				false
			} else {
				true
			}
		}
		_ => true,
	}
}
// After receiving a compact block if we cannot successfully hydrate
// it into a full block then fallback to requesting the full block
// from the same peer that gave us the compact block