diff --git a/api/src/handlers.rs b/api/src/handlers.rs
index 58e56df3c..6e36f47c0 100644
--- a/api/src/handlers.rs
+++ b/api/src/handlers.rs
@@ -385,9 +385,13 @@ impl Handler for ChainHandler {
 	}
 }
 
-// Gets block details given either a hash or height.
-// GET /v1/block/<hash>
-// GET /v1/block/<height>
+/// Gets block details given either a hash or height.
+/// GET /v1/blocks/<hash>
+/// GET /v1/blocks/<height>
+///
+/// Optionally return results as "compact blocks" by passing "?compact" query param
+/// GET /v1/blocks/<hash>?compact
+///
 pub struct BlockHandler {
 	pub chain: Arc<chain::Chain>,
 }
@@ -398,6 +402,11 @@ impl BlockHandler {
 		Ok(BlockPrintable::from_block(&block, self.chain.clone(), false))
 	}
 
+	fn get_compact_block(&self, h: &Hash) -> Result<CompactBlockPrintable, Error> {
+		let block = self.chain.clone().get_block(h).map_err(|_| Error::NotFound)?;
+		Ok(CompactBlockPrintable::from_compact_block(&block.as_compact_block()))
+	}
+
 	// Try to decode the string as a height or a hash.
 	fn parse_input(&self, input: String) -> Result<Hash, Error> {
 		if let Ok(height) = input.parse() {
@@ -426,8 +435,21 @@ impl Handler for BlockHandler {
 		}
 		let el = *path_elems.last().unwrap();
 		let h = try!(self.parse_input(el.to_string()));
-		let b = try!(self.get_block(&h));
-		json_response(&b)
+
+		let mut compact = false;
+		if let Ok(params) = req.get_ref::<UrlEncodedQuery>() {
+			if let Some(_) = params.get("compact") {
+				compact = true;
+			}
+		}
+
+		if compact {
+			let b = try!(self.get_compact_block(&h));
+			json_response(&b)
+		} else {
+			let b = try!(self.get_block(&h));
+			json_response(&b)
+		}
 	}
 }
 
diff --git a/api/src/types.rs b/api/src/types.rs
index 5dea62f29..8c483c05e 100644
--- a/api/src/types.rs
+++ b/api/src/types.rs
@@ -367,6 +367,30 @@ impl BlockPrintable {
 	}
 }
 
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct CompactBlockPrintable {
+	/// The block header
+	pub header: BlockHeaderPrintable,
+	/// Inputs (hex short_ids)
+	pub inputs: Vec<String>,
+	/// Outputs (hex short_ids)
+	pub outputs: Vec<String>,
+	/// Kernels (hex short_ids)
+	pub kernels: Vec<String>,
+}
+
+impl CompactBlockPrintable {
+	/// Convert a compact block into a printable representation suitable for api response
+	pub fn from_compact_block(cb: &core::CompactBlock) -> CompactBlockPrintable {
+		CompactBlockPrintable {
+			header: BlockHeaderPrintable::from_header(&cb.header),
+			inputs: cb.inputs.iter().map(|x| x.to_hex()).collect(),
+			outputs: cb.outputs.iter().map(|x| x.to_hex()).collect(),
+			kernels: cb.kernels.iter().map(|x| x.to_hex()).collect(),
+		}
+	}
+}
+
 // For wallet reconstruction, include the header info along with the
 // transactions in the block
 #[derive(Debug, Serialize, Deserialize, Clone)]
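For illustration only (not part of this change): a rough sketch of how the two new api pieces above fit together, i.e. what the `?compact` variant of the endpoint ends up serializing. Crate and module paths (`grin_api::types`, `grin_core`, `serde_json`) are assumptions made for the sketch, not taken from this diff.

extern crate grin_api;
extern crate grin_core;
extern crate serde_json;

use grin_api::types::CompactBlockPrintable; // module path assumed for illustration
use grin_core::core::Block;

// Mirrors BlockHandler::get_compact_block + json_response: full block ->
// compact block -> printable struct (hex short_ids) -> JSON body.
fn compact_block_json(b: &Block) -> Result<String, serde_json::Error> {
	let cb = CompactBlockPrintable::from_compact_block(&b.as_compact_block());
	serde_json::to_string_pretty(&cb)
}
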
diff --git a/core/Cargo.toml b/core/Cargo.toml
index 6201ef54b..31ac8a7d9 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -13,6 +13,7 @@ num-bigint = "^0.1.35"
 rand = "^0.3"
 serde = "~1.0.8"
 serde_derive = "~1.0.8"
+siphasher = "~0.1"
 time = "^0.1"
 lazy_static = "~0.2.8"
 grin_keychain = { path = "../keychain" }
diff --git a/core/src/core/block.rs b/core/src/core/block.rs
index 31da8168e..9dd7fffc5 100644
--- a/core/src/core/block.rs
+++ b/core/src/core/block.rs
@@ -24,6 +24,7 @@ use core::{
 	Input,
 	Output,
 	OutputIdentifier,
+	ShortId,
 	SwitchCommitHash,
 	Proof,
 	TxKernel,
@@ -34,6 +35,7 @@ use core::{
 use consensus;
 use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD, VerifySortOrder};
 use core::hash::{Hash, Hashed, ZERO_HASH};
+use core::id::ShortIdentifiable;
 use core::target::Difficulty;
 use core::transaction;
 use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted};
@@ -203,6 +205,73 @@ impl Readable for BlockHeader {
 	}
 }
 
+/// Compact representation of a full block.
+/// Each input/output/kernel is represented as a short_id.
+/// A node is reasonably likely to have already seen all tx data (tx broadcast before block)
+/// and can go request missing tx data from peers if necessary to hydrate a compact block
+/// into a full block.
+#[derive(Debug, Clone)]
+pub struct CompactBlock {
+	/// The header with metadata and commitments to the rest of the data
+	pub header: BlockHeader,
+	/// List of transaction inputs (short_ids)
+	pub inputs: Vec<ShortId>,
+	/// List of transaction outputs (short_ids)
+	pub outputs: Vec<ShortId>,
+	/// List of transaction kernels (short_ids)
+	pub kernels: Vec<ShortId>,
+}
+
+/// Implementation of Writeable for a compact block, defines how to write the block to a
+/// binary writer. Differentiates between writing the block for the purpose of
+/// full serialization and the one of just extracting a hash.
+impl Writeable for CompactBlock {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		try!(self.header.write(writer));
+
+		if writer.serialization_mode() != ser::SerializationMode::Hash {
+			ser_multiwrite!(
+				writer,
+				[write_u64, self.inputs.len() as u64],
+				[write_u64, self.outputs.len() as u64],
+				[write_u64, self.kernels.len() as u64]
+			);
+
+			let mut inputs = self.inputs.clone();
+			let mut outputs = self.outputs.clone();
+			let mut kernels = self.kernels.clone();
+
+			// Consensus rule that everything is sorted in lexicographical order on the wire.
+			try!(inputs.write_sorted(writer));
+			try!(outputs.write_sorted(writer));
+			try!(kernels.write_sorted(writer));
+		}
+		Ok(())
+	}
+}
+
+/// Implementation of Readable for a compact block, defines how to read a compact block
+/// from a binary stream.
+impl Readable for CompactBlock {
+	fn read(reader: &mut Reader) -> Result<CompactBlock, ser::Error> {
+		let header = try!(BlockHeader::read(reader));
+
+		let (input_len, output_len, kernel_len) =
+			ser_multiread!(reader, read_u64, read_u64, read_u64);
+
+		let inputs = read_and_verify_sorted(reader, input_len)?;
+		let outputs = read_and_verify_sorted(reader, output_len)?;
+		let kernels = read_and_verify_sorted(reader, kernel_len)?;
+
+		Ok(CompactBlock {
+			header,
+			inputs,
+			outputs,
+			kernels,
+		})
+	}
+}
+
 /// A block as expressed in the MimbleWimble protocol. The reward is
 /// non-explicit, assumed to be deducible from block height (similar to
 /// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
@@ -321,6 +390,37 @@ impl Block {
 		Ok(block)
 	}
 
+	/// Generate the compact block representation.
+	pub fn as_compact_block(&self) -> CompactBlock {
+		let header = self.header.clone();
+		let block_hash = self.hash();
+
+		let mut inputs = self.inputs
+			.iter()
+			.map(|x| x.short_id(&block_hash))
+			.collect::<Vec<_>>();
+		let mut outputs = self.outputs
+			.iter()
+			.map(|x| x.short_id(&block_hash))
+			.collect::<Vec<_>>();
+		let mut kernels = self.kernels
+			.iter()
+			.map(|x| x.short_id(&block_hash))
+			.collect::<Vec<_>>();
+
+		// sort all the lists of short_ids
+		inputs.sort();
+		outputs.sort();
+		kernels.sort();
+
+		CompactBlock {
+			header,
+			inputs,
+			outputs,
+			kernels,
+		}
+	}
+
 	/// Builds a new block ready to mine from the header of the previous block,
 	/// a vector of transactions and the reward information. Checks
 	/// that all transactions are valid and calculates the Merkle tree.
@@ -380,11 +480,10 @@ impl Block {
 				inputs: inputs,
 				outputs: outputs,
 				kernels: kernels,
-			}.compact(),
+			}.cut_through(),
 		)
 	}
 
-	/// Blockhash, computed using only the header
 	pub fn hash(&self) -> Hash {
 		self.header.hash()
 	}
@@ -396,15 +495,15 @@
 	}
 
 	/// Matches any output with a potential spending input, eliminating them
-	/// from the block. Provides a simple way to compact the block. The
-	/// elimination is stable with respect to inputs and outputs order.
+	/// from the block. Provides a simple way to cut-through the block. The
+	/// elimination is stable with respect to the order of inputs and outputs.
 	///
-	/// NOTE: exclude coinbase from compaction process
+	/// NOTE: exclude coinbase from cut-through process
 	/// if a block contains a new coinbase output and
 	/// is a transaction spending a previous coinbase
-	/// we do not want to compact these away
+	/// we do not want to cut-through (all coinbase must be preserved)
 	///
-	pub fn compact(&self) -> Block {
+	pub fn cut_through(&self) -> Block {
 		let in_set = self.inputs
 			.iter()
 			.map(|inp| inp.commitment())
 			.collect::<HashSet<_>>();
@@ -416,17 +515,17 @@ impl Block {
 			.map(|out| out.commitment())
 			.collect::<HashSet<_>>();
 
-		let commitments_to_compact = in_set.intersection(&out_set).collect::<HashSet<_>>();
+		let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();
 
 		let new_inputs = self.inputs
 			.iter()
-			.filter(|inp| !commitments_to_compact.contains(&inp.commitment()))
+			.filter(|inp| !to_cut_through.contains(&inp.commitment()))
 			.map(|&inp| inp)
 			.collect::<Vec<_>>();
 
 		let new_outputs = self.outputs
 			.iter()
-			.filter(|out| !commitments_to_compact.contains(&out.commitment()))
+			.filter(|out| !to_cut_through.contains(&out.commitment()))
 			.map(|&out| out)
 			.collect::<Vec<_>>();
 
@@ -467,7 +566,7 @@ impl Block {
 			inputs: all_inputs,
 			outputs: all_outputs,
 			kernels: all_kernels,
-		}.compact()
+		}.cut_through()
 	}
 
 	/// Validates all the elements in a block that can be checked without
@@ -745,8 +844,8 @@ mod test {
 	}
 
 	#[test]
-	// builds a block with a tx spending another and check if merging occurred
-	fn compactable_block() {
+	// builds a block with a tx spending another and check that cut_through occurred
+	fn block_with_cut_through() {
 		let keychain = Keychain::from_random_seed().unwrap();
 		let key_id1 = keychain.derive_key_id(1).unwrap();
 		let key_id2 = keychain.derive_key_id(2).unwrap();
@@ -882,9 +981,37 @@ mod test {
 		ser::serialize(&mut vec, &b).expect("serialization failed");
 		let b2: Block = ser::deserialize(&mut &vec[..]).unwrap();
 
+		assert_eq!(b.header, b2.header);
 		assert_eq!(b.inputs, b2.inputs);
 		assert_eq!(b.outputs, b2.outputs);
 		assert_eq!(b.kernels, b2.kernels);
 	}
+
+	#[test]
+	fn convert_block_to_compact_block() {
+		let keychain = Keychain::from_random_seed().unwrap();
+		let b = new_block(vec![], &keychain);
+		let cb = b.as_compact_block();
+		assert_eq!(cb.kernels.len(), 1);
+		assert_eq!(cb.kernels[0], b.kernels[0].short_id(&b.hash()));
+	}
+
+	#[test]
+	fn serialize_deserialize_compact_block() {
+		let b = CompactBlock {
+			header: BlockHeader::default(),
+			inputs: vec![ShortId::zero(), ShortId::zero()],
+			outputs: vec![ShortId::zero(), ShortId::zero(), ShortId::zero()],
+			kernels: vec![ShortId::zero()],
+		};
+
+		let mut vec = Vec::new();
+		ser::serialize(&mut vec, &b).expect("serialization failed");
+		let b2: CompactBlock = ser::deserialize(&mut &vec[..]).unwrap();
+
+		assert_eq!(b.header, b2.header);
+		assert_eq!(b.inputs, b2.inputs);
+		assert_eq!(b.outputs, b2.outputs);
+		assert_eq!(b.kernels, b2.kernels);
+	}
 }
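For illustration only (not part of this change): a small sketch of the wire format that the CompactBlock Writeable/Readable impls above define — header first, then three u64 lengths, then the sorted short_ids at 6 bytes each. The `grin_core` crate paths are assumed for the sketch.

extern crate grin_core;

use grin_core::core::{BlockHeader, CompactBlock, ShortId};
use grin_core::ser;

fn main() {
	// A compact block with no inputs/outputs/kernels: header plus the three u64 lengths.
	let empty = CompactBlock {
		header: BlockHeader::default(),
		inputs: vec![],
		outputs: vec![],
		kernels: vec![],
	};

	// Same header, four short_ids in total across the three lists.
	let with_ids = CompactBlock {
		header: BlockHeader::default(),
		inputs: vec![ShortId::zero()],
		outputs: vec![ShortId::zero(), ShortId::zero()],
		kernels: vec![ShortId::zero()],
	};

	let mut v1 = vec![];
	let mut v2 = vec![];
	ser::serialize(&mut v1, &empty).expect("serialization failed");
	ser::serialize(&mut v2, &with_ids).expect("serialization failed");

	// Each short_id costs exactly SHORT_ID_SIZE (6) bytes on the wire.
	assert_eq!(v2.len(), v1.len() + 4 * 6);
}
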
diff --git a/core/src/core/id.rs b/core/src/core/id.rs
new file mode 100644
index 000000000..816c80c3c
--- /dev/null
+++ b/core/src/core/id.rs
@@ -0,0 +1,175 @@
+// Copyright 2018 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! short ids for compact blocks
+
+use std::cmp::min;
+
+use byteorder::{LittleEndian, ByteOrder};
+use siphasher::sip::SipHasher24;
+
+use core::hash::{Hash, Hashed};
+use ser;
+use ser::{Reader, Readable, Writer, Writeable};
+use util;
+
+
+/// The size of a short id used to identify inputs|outputs|kernels (6 bytes)
+pub const SHORT_ID_SIZE: usize = 6;
+
+/// A trait for types that have a short_id (inputs/outputs/kernels)
+pub trait ShortIdentifiable {
+	/// The short_id of the instance.
+	fn short_id(&self, block_hash: &Hash) -> ShortId;
+}
+
+impl<H: Hashed> ShortIdentifiable for H {
+	/// Generate a short_id via the following -
+	///
+	/// * extract k0/k1 from block_hash (first two u64 values)
+	/// * initialize a siphasher24 with k0/k1
+	/// * self.hash() passing in the siphasher24 instance
+	/// * drop the 2 most significant bytes (to return a 6 byte short_id)
+	///
+	fn short_id(&self, block_hash: &Hash) -> ShortId {
+		// we "use" core::hash::Hash in the outer namespace
+		// so doing this here in the fn to minimize collateral damage/confusion
+		use std::hash::Hasher;
+
+		// extract k0/k1 from the block_hash
+		let k0 = LittleEndian::read_u64(&block_hash.0[0..8]);
+		let k1 = LittleEndian::read_u64(&block_hash.0[8..16]);
+
+		// initialize a siphasher24 with k0/k1
+		let mut sip_hasher = SipHasher24::new_with_keys(k0, k1);
+
+		// hash our id (self.hash()) using the siphasher24 instance
+		sip_hasher.write(&self.hash().to_vec()[..]);
+		let res = sip_hasher.finish();
+
+		// construct a short_id from the resulting bytes (dropping the 2 most significant bytes)
+		let mut buf = [0; 8];
+		LittleEndian::write_u64(&mut buf, res);
+		ShortId::from_bytes(&buf[0..6])
+	}
+}
+
+/// Short id for identifying inputs/outputs/kernels
+#[derive(PartialEq, Clone, PartialOrd, Ord, Eq, Serialize, Deserialize)]
+pub struct ShortId([u8; 6]);
+
+impl ::std::fmt::Debug for ShortId {
+	fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+		try!(write!(f, "{}(", stringify!(ShortId)));
+		try!(write!(f, "{}", self.to_hex()));
+		write!(f, ")")
+	}
+}
+
+impl Readable for ShortId {
+	fn read(reader: &mut Reader) -> Result<ShortId, ser::Error> {
+		let v = try!(reader.read_fixed_bytes(SHORT_ID_SIZE));
+		let mut a = [0; SHORT_ID_SIZE];
+		for i in 0..a.len() {
+			a[i] = v[i];
+		}
+		Ok(ShortId(a))
+	}
+}
+
+impl Writeable for ShortId {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_fixed_bytes(&self.0)
+	}
+}
+
+impl ShortId {
+	/// Build a new short_id from a byte slice
+	pub fn from_bytes(bytes: &[u8]) -> ShortId {
+		let mut hash = [0; SHORT_ID_SIZE];
+		for i in 0..min(SHORT_ID_SIZE, bytes.len()) {
+			hash[i] = bytes[i];
+		}
+		ShortId(hash)
+	}
+
+	/// Hex string representation of a short_id
+	pub fn to_hex(&self) -> String {
+		util::to_hex(self.0.to_vec())
+	}
+
+	/// Reconstructs a short_id from a hex string.
+	pub fn from_hex(hex: &str) -> Result<ShortId, ser::Error> {
+		let bytes = util::from_hex(hex.to_string())
+			.map_err(|_| ser::Error::HexError(format!("short_id from_hex error")))?;
+		Ok(ShortId::from_bytes(&bytes))
+	}
+
+	/// The zero short_id, convenient for generating a short_id for testing.
+	pub fn zero() -> ShortId {
+		ShortId::from_bytes(&[0])
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use super::*;
+	use ser::{Writeable, Writer};
+
+
+	#[test]
+	fn test_short_id() {
+		// minimal struct for testing
+		// make it implement Writeable, therefore Hashable, therefore ShortIdentifiable
+		struct Foo(u64);
+		impl Writeable for Foo {
+			fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+				writer.write_u64(self.0)?;
+				Ok(())
+			}
+		}
+
+		let foo = Foo(0);
+		let expected_hash = Hash::from_hex(
+			"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
+		).unwrap();
+		assert_eq!(foo.hash(), expected_hash);
+
+		let other_hash = Hash::zero();
+		println!("{:?}", foo.short_id(&other_hash));
+		assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("e973960ba690").unwrap());
+
+		let foo = Foo(5);
+		let expected_hash = Hash::from_hex(
+			"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
+		).unwrap();
+		assert_eq!(foo.hash(), expected_hash);
+
+		let other_hash = Hash::zero();
+		println!("{:?}", foo.short_id(&other_hash));
+		assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("f0c06e838e59").unwrap());
+
+		let foo = Foo(5);
+		let expected_hash = Hash::from_hex(
+			"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
+		).unwrap();
+		assert_eq!(foo.hash(), expected_hash);
+
+		let other_hash = Hash::from_hex(
+			"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
+		).unwrap();
+		println!("{:?}", foo.short_id(&other_hash));
+		assert_eq!(foo.short_id(&other_hash), ShortId::from_hex("95bf0ca12d5b").unwrap());
+	}
+}
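For illustration only (not part of this change): a standalone sketch of the short_id scheme documented on ShortIdentifiable above — key SipHash-2-4 with the first 16 bytes of the block hash, hash the item's own 32-byte hash, keep the 6 least significant bytes. It uses only the siphasher and byteorder crates already pulled in by this diff; the function name and the zeroed inputs are made up for the sketch.

extern crate byteorder;
extern crate siphasher;

use std::hash::Hasher;

use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24;

fn short_id_bytes(block_hash: &[u8; 32], item_hash: &[u8; 32]) -> [u8; 6] {
	// k0/k1 are the first two little-endian u64s of the block hash
	let k0 = LittleEndian::read_u64(&block_hash[0..8]);
	let k1 = LittleEndian::read_u64(&block_hash[8..16]);

	// feed the item's hash through a keyed siphasher24
	let mut sip = SipHasher24::new_with_keys(k0, k1);
	sip.write(item_hash);
	let res = sip.finish();

	// drop the 2 most significant bytes of the 8-byte result
	let mut buf = [0u8; 8];
	LittleEndian::write_u64(&mut buf, res);
	let mut short_id = [0u8; 6];
	short_id.copy_from_slice(&buf[0..6]);
	short_id
}

fn main() {
	let id = short_id_bytes(&[0u8; 32], &[0u8; 32]);
	println!("{:?}", id);
}
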
diff --git a/core/src/core/mod.rs b/core/src/core/mod.rs
index 3fa932d78..ffee5fe02 100644
--- a/core/src/core/mod.rs
+++ b/core/src/core/mod.rs
@@ -1,4 +1,4 @@
-// Copyright 2016 The Grin Developers
+// Copyright 2018 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 pub mod block;
 pub mod build;
 pub mod hash;
+pub mod id;
 pub mod pmmr;
 pub mod target;
 pub mod transaction;
@@ -33,6 +34,7 @@ use util::secp::pedersen::*;
 
 pub use self::block::*;
 pub use self::transaction::*;
+pub use self::id::ShortId;
 use self::hash::Hashed;
 use ser::{Error, Readable, Reader, Writeable, Writer};
 use global;
@@ -396,7 +398,7 @@ mod test {
 			&key_id,
 			Difficulty::minimum(),
 		).unwrap();
-		b.compact().validate().unwrap();
+		b.cut_through().validate().unwrap();
 	}
 
 	#[test]
@@ -414,7 +416,7 @@ mod test {
 			&key_id,
 			Difficulty::minimum(),
 		).unwrap();
-		b.compact().validate().unwrap();
+		b.cut_through().validate().unwrap();
 	}
 
 	#[test]
diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index 0aaddeab0..83b01bb68 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -1,4 +1,4 @@
-// Copyright 2016 The Grin Developers
+// Copyright 2018 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -541,7 +541,7 @@ impl SwitchCommitHash {
 		SwitchCommitHash(h)
 	}
 
-	/// Reconstructs a switch commit hash from an array of bytes.
+	/// Reconstructs a switch commit hash from a byte slice.
 	pub fn from_bytes(bytes: &[u8]) -> SwitchCommitHash {
 		let mut hash = [0; SWITCH_COMMIT_HASH_SIZE];
 		for i in 0..min(SWITCH_COMMIT_HASH_SIZE, bytes.len()) {
@@ -550,12 +550,12 @@ impl SwitchCommitHash {
 		SwitchCommitHash(hash)
 	}
 
-	/// Hex string represenation of a switch commitment hash.
+	/// Hex string representation of a switch commitment hash.
 	pub fn to_hex(&self) -> String {
 		util::to_hex(self.0.to_vec())
 	}
 
-	/// Reconstrcuts a switch commit hash from a hex string.
+	/// Reconstructs a switch commit hash from a hex string.
 	pub fn from_hex(hex: &str) -> Result<SwitchCommitHash, ser::Error> {
 		let bytes = util::from_hex(hex.to_string())
 			.map_err(|_| ser::Error::HexError(format!("switch_commit_hash from_hex error")))?;
@@ -884,6 +884,7 @@ impl ops::Add for SumCommit {
 #[cfg(test)]
 mod test {
 	use super::*;
+	use core::id::{ShortId, ShortIdentifiable};
 	use keychain::Keychain;
 	use util::secp;
 
@@ -1008,4 +1009,39 @@ mod test {
 		assert!(commit == commit_2);
 		assert!(switch_commit == switch_commit_2);
 	}
+
+	#[test]
+	fn input_short_id() {
+		let keychain = Keychain::from_seed(&[0; 32]).unwrap();
+		let key_id = keychain.derive_key_id(1).unwrap();
+		let commit = keychain.commit(5, &key_id).unwrap();
+
+		let input = Input {
+			features: DEFAULT_OUTPUT,
+			commit: commit,
+			out_block: None,
+		};
+
+		let block_hash = Hash::from_hex(
+			"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
+		).unwrap();
+
+		let short_id = input.short_id(&block_hash);
+		assert_eq!(short_id, ShortId::from_hex("ff2c91d85fcd").unwrap());
+
+		// now generate the short_id for a *very* similar output (single feature flag different)
+		// and check it generates a different short_id
+		let input = Input {
+			features: COINBASE_OUTPUT,
+			commit: commit,
+			out_block: None,
+		};
+
+		let block_hash = Hash::from_hex(
+			"3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673",
+		).unwrap();
+
+		let short_id = input.short_id(&block_hash);
+		assert_eq!(short_id, ShortId::from_hex("b91a8d669bf9").unwrap());
+	}
 }
diff --git a/core/src/lib.rs b/core/src/lib.rs
index af76e1d90..5d4581518 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -34,6 +34,7 @@ extern crate rand;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
+extern crate siphasher;
 #[macro_use]
 extern crate slog;
 extern crate time;
diff --git a/core/src/ser.rs b/core/src/ser.rs
index 3170558cb..6afde4039 100644
--- a/core/src/ser.rs
+++ b/core/src/ser.rs
@@ -555,6 +555,11 @@ impl AsFixedBytes for [u8; 4] {
 	fn len(&self) -> usize {
 		return 4;
 	}
 }
+impl AsFixedBytes for [u8; 6] {
+	fn len(&self) -> usize {
+		return 6;
+	}
+}
 impl AsFixedBytes for [u8; 8] {
 	fn len(&self) -> usize {
 		return 8;