ShortId implementation (and CompactBlock) (#637)

* [wip] short_id implementation (first attempt)
todo - make this more reusable (a trait?) so we can use it for inputs/outputs/kernels easily

* factor short_id support out into ShortIdentifiable trait

* block can now be converted to compact_block
rename existing block.compact() -> block.cut_through()

* expose compact block representation via block api endpoint
optional with ?compact query param
AntiochP 2018-01-19 17:43:02 -05:00 committed by GitHub
parent f9726e8154
commit 9085e548f7
9 changed files with 418 additions and 25 deletions
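For orientation, here is a minimal usage sketch of what this commit adds. It is not code from the diff below; the grin_core crate name and module paths are assumptions, and `b` stands for an already-constructed Block (construction elided).

// Sketch only -- crate/module paths are assumptions, not part of this commit.
use grin_core::core::{Block, CompactBlock, ShortId};
use grin_core::core::id::ShortIdentifiable;
use grin_core::ser;

fn demo(b: &Block) -> Result<(), ser::Error> {
    // Convert the full block into its compact representation.
    let cb: CompactBlock = b.as_compact_block();

    // Every kernel (and input/output) is referenced by a 6-byte short_id
    // keyed on the block hash.
    for kernel in &b.kernels {
        let id: ShortId = kernel.short_id(&b.hash());
        println!("kernel short_id: {}", id.to_hex());
    }

    // Compact blocks serialize and deserialize like any other core type.
    let mut vec = Vec::new();
    ser::serialize(&mut vec, &cb)?;
    let cb2: CompactBlock = ser::deserialize(&mut &vec[..])?;
    assert_eq!(cb.kernels, cb2.kernels);

    // The same representation is exposed over the API via
    // GET /v1/blocks/<hash>?compact (see the handler changes below).
    Ok(())
}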


@@ -385,9 +385,13 @@ impl Handler for ChainHandler {
}
}
/// Gets block details given either a hash or height.
/// GET /v1/blocks/<hash>
/// GET /v1/blocks/<height>
///
/// Optionally return results as "compact blocks" by passing the "?compact" query param
/// GET /v1/blocks/<hash>?compact
///
pub struct BlockHandler {
pub chain: Arc<chain::Chain>,
}
@@ -398,6 +402,11 @@ impl BlockHandler {
Ok(BlockPrintable::from_block(&block, self.chain.clone(), false))
}
fn get_compact_block(&self, h: &Hash) -> Result<CompactBlockPrintable, Error> {
let block = self.chain.clone().get_block(h).map_err(|_| Error::NotFound)?;
Ok(CompactBlockPrintable::from_compact_block(&block.as_compact_block()))
}
// Try to decode the string as a height or a hash.
fn parse_input(&self, input: String) -> Result<Hash, Error> {
if let Ok(height) = input.parse() {
@@ -426,9 +435,22 @@ impl Handler for BlockHandler {
}
let el = *path_elems.last().unwrap();
let h = try!(self.parse_input(el.to_string()));
let mut compact = false;
if let Ok(params) = req.get_ref::<UrlEncodedQuery>() {
if let Some(_) = params.get("compact") {
compact = true;
}
}
if compact {
let b = try!(self.get_compact_block(&h));
json_response(&b)
} else {
let b = try!(self.get_block(&h));
json_response(&b)
}
}
}
// Get basic information about the transaction pool.


@@ -367,6 +367,30 @@ impl BlockPrintable {
}
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CompactBlockPrintable {
/// The block header
pub header: BlockHeaderPrintable,
/// Inputs (hex short_ids)
pub inputs: Vec<String>,
/// Outputs (hex short_ids)
pub outputs: Vec<String>,
/// Kernels (hex short_ids)
pub kernels: Vec<String>,
}
impl CompactBlockPrintable {
/// Convert a compact block into a printable representation suitable for api response
pub fn from_compact_block(cb: &core::CompactBlock) -> CompactBlockPrintable {
CompactBlockPrintable {
header: BlockHeaderPrintable::from_header(&cb.header),
inputs: cb.inputs.iter().map(|x| x.to_hex()).collect(),
outputs: cb.outputs.iter().map(|x| x.to_hex()).collect(),
kernels: cb.kernels.iter().map(|x| x.to_hex()).collect(),
}
}
}
// For wallet reconstruction, include the header info along with the
// transactions in the block
#[derive(Debug, Serialize, Deserialize, Clone)]


@@ -13,6 +13,7 @@ num-bigint = "^0.1.35"
rand = "^0.3"
serde = "~1.0.8"
serde_derive = "~1.0.8"
siphasher = "~0.1"
time = "^0.1"
lazy_static = "~0.2.8"
grin_keychain = { path = "../keychain" }


@@ -24,6 +24,7 @@ use core::{
Input,
Output,
OutputIdentifier,
ShortId,
SwitchCommitHash,
Proof,
TxKernel,
@@ -34,6 +35,7 @@ use core::{
use consensus;
use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD, VerifySortOrder};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable;
use core::target::Difficulty;
use core::transaction;
use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted};
@@ -203,6 +205,73 @@ impl Readable for BlockHeader {
}
}
/// Compact representation of a full block.
/// Each input/output/kernel is represented as a short_id.
/// A node is reasonably likely to have already seen all tx data (tx broadcast before block)
/// and can go request missing tx data from peers if necessary to hydrate a compact block
/// into a full block.
#[derive(Debug, Clone)]
pub struct CompactBlock {
/// The header with metadata and commitments to the rest of the data
pub header: BlockHeader,
/// List of transaction inputs (short_ids)
pub inputs: Vec<ShortId>,
/// List of transaction outputs (short_ids)
pub outputs: Vec<ShortId>,
/// List of transaction kernels (short_ids)
pub kernels: Vec<ShortId>,
}
/// Implementation of Writeable for a compact block, defines how to write the block to a
/// binary writer. Differentiates between writing the block for the purpose of
/// full serialization and the one of just extracting a hash.
impl Writeable for CompactBlock {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(self.header.write(writer));
if writer.serialization_mode() != ser::SerializationMode::Hash {
ser_multiwrite!(
writer,
[write_u64, self.inputs.len() as u64],
[write_u64, self.outputs.len() as u64],
[write_u64, self.kernels.len() as u64]
);
let mut inputs = self.inputs.clone();
let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone();
// Consensus rule that everything is sorted in lexicographical order on the wire.
try!(inputs.write_sorted(writer));
try!(outputs.write_sorted(writer));
try!(kernels.write_sorted(writer));
}
Ok(())
}
}
/// Implementation of Readable for a compact block, defines how to read a compact block
/// from a binary stream.
impl Readable for CompactBlock {
fn read(reader: &mut Reader) -> Result<CompactBlock, ser::Error> {
let header = try!(BlockHeader::read(reader));
let (input_len, output_len, kernel_len) =
ser_multiread!(reader, read_u64, read_u64, read_u64);
let inputs = read_and_verify_sorted(reader, input_len)?;
let outputs = read_and_verify_sorted(reader, output_len)?;
let kernels = read_and_verify_sorted(reader, kernel_len)?;
Ok(CompactBlock {
header,
inputs,
outputs,
kernels,
})
}
}
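As an aside, the hydration idea mentioned in the CompactBlock doc comment above (matching short_ids against transaction data the node has already seen) might look roughly like the following. This is an illustrative sketch only, not part of this commit; the crate/module paths and the `local_kernels` slice are assumptions.

use grin_core::core::{CompactBlock, TxKernel};
use grin_core::core::hash::Hashed;
use grin_core::core::id::ShortIdentifiable;

// Hypothetical helper: which locally-known kernels (e.g. from earlier tx
// broadcasts) does this compact block reference? Anything not resolved here
// would have to be requested from peers before the block can be hydrated.
fn resolve_kernels<'a>(cb: &CompactBlock, local_kernels: &'a [TxKernel]) -> Vec<&'a TxKernel> {
    // short_ids are keyed on the block hash (which is the header hash)
    let block_hash = cb.header.hash();
    local_kernels
        .iter()
        .filter(|k| cb.kernels.contains(&k.short_id(&block_hash)))
        .collect()
}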
/// A block as expressed in the MimbleWimble protocol. The reward is
/// non-explicit, assumed to be deducible from block height (similar to
/// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
@@ -321,6 +390,37 @@ impl Block {
Ok(block)
}
/// Generate the compact block representation.
pub fn as_compact_block(&self) -> CompactBlock {
let header = self.header.clone();
let block_hash = self.hash();
let mut inputs = self.inputs
.iter()
.map(|x| x.short_id(&block_hash))
.collect::<Vec<_>>();
let mut outputs = self.outputs
.iter()
.map(|x| x.short_id(&block_hash))
.collect::<Vec<_>>();
let mut kernels = self.kernels
.iter()
.map(|x| x.short_id(&block_hash))
.collect::<Vec<_>>();
// sort all the lists of short_ids
inputs.sort();
outputs.sort();
kernels.sort();
CompactBlock {
header,
inputs,
outputs,
kernels,
}
}
/// Builds a new block ready to mine from the header of the previous block,
/// a vector of transactions and the reward information. Checks
/// that all transactions are valid and calculates the Merkle tree.
@@ -380,11 +480,10 @@ impl Block {
inputs: inputs,
outputs: outputs,
kernels: kernels,
}.cut_through(),
)
}
/// Blockhash, computed using only the header
pub fn hash(&self) -> Hash {
self.header.hash()
@@ -396,15 +495,15 @@ impl Block {
}
/// Matches any output with a potential spending input, eliminating them
/// from the block. Provides a simple way to cut-through the block. The
/// elimination is stable with respect to the order of inputs and outputs.
///
/// NOTE: exclude coinbase from cut-through process
/// if a block contains a new coinbase output and
/// is a transaction spending a previous coinbase
/// we do not want to cut-through (all coinbase must be preserved)
///
pub fn cut_through(&self) -> Block {
let in_set = self.inputs
.iter()
.map(|inp| inp.commitment())
@@ -416,17 +515,17 @@ impl Block {
.map(|out| out.commitment())
.collect::<HashSet<_>>();
let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
let new_inputs = self.inputs
.iter()
.filter(|inp| !to_cut_through.contains(&inp.commitment()))
.map(|&inp| inp)
.collect::<Vec<_>>();
let new_outputs = self.outputs
.iter()
.filter(|out| !to_cut_through.contains(&out.commitment()))
.map(|&out| out)
.collect::<Vec<_>>();
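For intuition, the cut-through rule above amounts to removing any commitment that appears both as an input and as an output of the same block. A standalone toy sketch (not part of the diff), using string labels in place of real Pedersen commitments:

use std::collections::HashSet;

fn main() {
    // Commitments stand in as plain labels here.
    let in_set: HashSet<&str> = ["A", "B"].iter().cloned().collect();
    let out_set: HashSet<&str> = ["B", "C"].iter().cloned().collect();

    // Anything spent and created within the same block is cut through.
    let to_cut_through: HashSet<&str> = in_set.intersection(&out_set).cloned().collect();

    let new_inputs: Vec<&str> = in_set.difference(&to_cut_through).cloned().collect();
    let new_outputs: Vec<&str> = out_set.difference(&to_cut_through).cloned().collect();

    assert_eq!(new_inputs, vec!["A"]);
    assert_eq!(new_outputs, vec!["C"]);
}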
@@ -467,7 +566,7 @@ impl Block {
inputs: all_inputs,
outputs: all_outputs,
kernels: all_kernels,
}.cut_through()
}
/// Validates all the elements in a block that can be checked without
@@ -745,8 +844,8 @@ mod test {
}
#[test]
// builds a block with a tx spending another and check that cut_through occurred
fn block_with_cut_through() {
let keychain = Keychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();
@@ -882,9 +981,37 @@ mod test {
ser::serialize(&mut vec, &b).expect("serialization failed");
let b2: Block = ser::deserialize(&mut &vec[..]).unwrap();
assert_eq!(b.header, b2.header);
assert_eq!(b.inputs, b2.inputs);
assert_eq!(b.outputs, b2.outputs);
assert_eq!(b.kernels, b2.kernels);
}
#[test]
fn convert_block_to_compact_block() {
let keychain = Keychain::from_random_seed().unwrap();
let b = new_block(vec![], &keychain);
let cb = b.as_compact_block();
assert_eq!(cb.kernels.len(), 1);
assert_eq!(cb.kernels[0], b.kernels[0].short_id(&b.hash()));
}
#[test]
fn serialize_deserialize_compact_block() {
let b = CompactBlock {
header: BlockHeader::default(),
inputs: vec![ShortId::zero(), ShortId::zero()],
outputs: vec![ShortId::zero(), ShortId::zero(), ShortId::zero()],
kernels: vec![ShortId::zero()],
};
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let b2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap();
assert_eq!(b.header, b2.header);
assert_eq!(b.inputs, b2.inputs);
assert_eq!(b.outputs, b2.outputs);
assert_eq!(b.kernels, b2.kernels);
}
}

core/src/core/id.rs (new file)

@@ -0,0 +1,175 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! short ids for compact blocks
use std::cmp::min;
use byteorder::{LittleEndian, ByteOrder};
use siphasher::sip::SipHasher24;
use core::hash::{Hash, Hashed};
use ser;
use ser::{Reader, Readable, Writer, Writeable};
use util;
/// The size of a short id used to identify inputs|outputs|kernels (6 bytes)
pub const SHORT_ID_SIZE: usize = 6;
/// A trait for types that have a short_id (inputs/outputs/kernels)
pub trait ShortIdentifiable {
/// The short_id of the instance.
fn short_id(&self, block_hash: &Hash) -> ShortId;
}
impl<H: Hashed> ShortIdentifiable for H {
/// Generate a short_id via the following -
///
/// * extract k0/k1 from block_hash (first two u64 values)
/// * initialize a siphasher24 with k0/k1
/// * self.hash() passing in the siphasher24 instance
/// * drop the 2 most significant bytes (to return a 6 byte short_id)
///
fn short_id(&self, block_hash: &Hash) -> ShortId {
// we "use" core::hash::Hash in the outer namespace
// so doing this here in the fn to minimize collateral damage/confusion
use std::hash::Hasher;
// extract k0/k1 from the block_hash
let k0 = LittleEndian::read_u64(&block_hash.0[0..8]);
let k1 = LittleEndian::read_u64(&block_hash.0[8..16]);
// initialize a siphasher24 with k0/k1
let mut sip_hasher = SipHasher24::new_with_keys(k0, k1);
// hash our id (self.hash()) using the siphasher24 instance
sip_hasher.write(&self.hash().to_vec()[..]);
let res = sip_hasher.finish();
// construct a short_id from the resulting bytes (dropping the 2 most significant bytes)
let mut buf = [0; 8];
LittleEndian::write_u64(&mut buf, res);
ShortId::from_bytes(&buf[0..6])
}
}
/// Short id for identifying inputs/outputs/kernels
#[derive(PartialEq, Clone, PartialOrd, Ord, Eq, Serialize, Deserialize)]
pub struct ShortId([u8; 6]);
impl ::std::fmt::Debug for ShortId {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
try!(write!(f, "{}(", stringify!(ShortId)));
try!(write!(f, "{}", self.to_hex()));
write!(f, ")")
}
}
impl Readable for ShortId {
fn read(reader: &mut Reader) -> Result<ShortId, ser::Error> {
let v = try!(reader.read_fixed_bytes(SHORT_ID_SIZE));
let mut a = [0; SHORT_ID_SIZE];
for i in 0..a.len() {
a[i] = v[i];
}
Ok(ShortId(a))
}
}
impl Writeable for ShortId {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_fixed_bytes(&self.0)
}
}
impl ShortId {
/// Build a new short_id from a byte slice
pub fn from_bytes(bytes: &[u8]) -> ShortId {
let mut hash = [0; SHORT_ID_SIZE];
for i in 0..min(SHORT_ID_SIZE, bytes.len()) {
hash[i] = bytes[i];
}
ShortId(hash)
}
/// Hex string representation of a short_id
pub fn to_hex(&self) -> String {
util::to_hex(self.0.to_vec())
}
/// Reconstructs a short_id from a hex string.
pub fn from_hex(hex: &str) -> Result<ShortId, ser::Error> {
let bytes = util::from_hex(hex.to_string())
.map_err(|_| ser::Error::HexError(format!("short_id from_hex error")))?;
Ok(ShortId::from_bytes(&bytes))
}
/// The zero short_id, convenient for generating a short_id for testing.
pub fn zero() -> ShortId {
ShortId::from_bytes(&[0])
}
}
#[cfg(test)]
mod test {
use super::*;
use ser::{Writeable, Writer};
#[test]
fn test_short_id() {
// minimal struct for testing
// make it implement Writeable, and therefore Hashed and ShortIdentifiable
struct Foo(u64);
impl Writeable for Foo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u64(self.0)?;
Ok(())
}
}
let foo = Foo(0);
let expected_hash = Hash::from_hex(
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::zero();
println!("{:?}", foo.short_id(&other_hash));
assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("e973960ba690").unwrap());
let foo = Foo(5);
let expected_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::zero();
println!("{:?}", foo.short_id(&other_hash));
assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("f0c06e838e59").unwrap());
let foo = Foo(5);
let expected_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::from_hex(
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
).unwrap();
println!("{:?}", foo.short_id(&other_hash));
assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("95bf0ca12d5b").unwrap());
}
}


@@ -1,4 +1,4 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
pub mod block;
pub mod build;
pub mod hash;
pub mod id;
pub mod pmmr;
pub mod target;
pub mod transaction;
@@ -33,6 +34,7 @@ use util::secp::pedersen::*;
pub use self::block::*;
pub use self::transaction::*;
pub use self::id::ShortId;
use self::hash::Hashed;
use ser::{Error, Readable, Reader, Writeable, Writer};
use global;
@@ -396,7 +398,7 @@ mod test {
&key_id,
Difficulty::minimum(),
).unwrap();
b.cut_through().validate().unwrap();
}
#[test]
@@ -414,7 +416,7 @@ mod test {
&key_id,
Difficulty::minimum(),
).unwrap();
b.cut_through().validate().unwrap();
}
#[test]


@@ -1,4 +1,4 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -541,7 +541,7 @@ impl SwitchCommitHash {
SwitchCommitHash(h)
}
/// Reconstructs a switch commit hash from a byte slice.
pub fn from_bytes(bytes: &[u8]) -> SwitchCommitHash {
let mut hash = [0; SWITCH_COMMIT_HASH_SIZE];
for i in 0..min(SWITCH_COMMIT_HASH_SIZE, bytes.len()) {
@@ -550,12 +550,12 @@ impl SwitchCommitHash {
SwitchCommitHash(hash)
}
/// Hex string representation of a switch commitment hash.
pub fn to_hex(&self) -> String {
util::to_hex(self.0.to_vec())
}
/// Reconstructs a switch commit hash from a hex string.
pub fn from_hex(hex: &str) -> Result<SwitchCommitHash, ser::Error> {
let bytes = util::from_hex(hex.to_string())
.map_err(|_| ser::Error::HexError(format!("switch_commit_hash from_hex error")))?;
@@ -884,6 +884,7 @@ impl ops::Add for SumCommit {
#[cfg(test)]
mod test {
use super::*;
use core::id::{ShortId, ShortIdentifiable};
use keychain::Keychain;
use util::secp;
@@ -1008,4 +1009,39 @@ mod test {
assert!(commit == commit_2);
assert!(switch_commit == switch_commit_2);
}
#[test]
fn input_short_id() {
let keychain = Keychain::from_seed(&[0; 32]).unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let commit = keychain.commit(5, &key_id).unwrap();
let input = Input {
features: DEFAULT_OUTPUT,
commit: commit,
out_block: None,
};
let block_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
let short_id = input.short_id(&block_hash);
assert_eq!(short_id, ShortId::from_hex("ff2c91d85fcd").unwrap());
// now generate the short_id for a *very* similar output (single feature flag different)
// and check it generates a different short_id
let input = Input {
features: COINBASE_OUTPUT,
commit: commit,
out_block: None,
};
let block_hash = Hash::from_hex(
"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
).unwrap();
let short_id = input.short_id(&block_hash);
assert_eq!(short_id, ShortId::from_hex("b91a8d669bf9").unwrap());
}
}


@@ -34,6 +34,7 @@ extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate siphasher;
#[macro_use]
extern crate slog;
extern crate time;


@@ -555,6 +555,11 @@ impl AsFixedBytes for [u8; 4] {
return 4;
}
}
impl AsFixedBytes for [u8; 6] {
fn len(&self) -> usize {
return 6;
}
}
impl AsFixedBytes for [u8; 8] {
fn len(&self) -> usize {
return 8;