grin/core/src/ser.rs

// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Serialization and deserialization layer specialized for binary encoding.
//! Ensures consistency and safety. Basically a minimal subset of
//! rustc_serialize customized for our needs.
//!
//! To use it simply implement `Writeable` or `Readable` and then use the
//! `serialize` or `deserialize` functions on them as appropriate.
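//!
//! A minimal usage sketch (the `Foo` type below is hypothetical, shown for
//! illustration only):
//!
//! ```ignore
//! struct Foo {
//!     value: u64,
//! }
//!
//! impl Writeable for Foo {
//!     fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
//!         writer.write_u64(self.value)
//!     }
//! }
//!
//! impl Readable for Foo {
//!     fn read(reader: &mut Reader) -> Result<Foo, Error> {
//!         Ok(Foo { value: reader.read_u64()? })
//!     }
//! }
//!
//! // round-trip through an in-memory buffer
//! let bytes = ser_vec(&Foo { value: 42 }).unwrap();
//! let decoded: Foo = deserialize(&mut &bytes[..]).unwrap();
//! ```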
use std::{cmp, error, fmt};
use std::io::{self, Read, Write};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{Identifier, BlindingFactor, IDENTIFIER_SIZE};
use consensus;
use consensus::VerifySortOrder;
use core::hash::Hashed;
use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash};
use util::secp::pedersen::Commitment;
use util::secp::pedersen::RangeProof;
use util::secp::Signature;
use util::secp::constants::{
MAX_PROOF_SIZE,
PEDERSEN_COMMITMENT_SIZE,
AGG_SIGNATURE_SIZE,
SECRET_KEY_SIZE,
};
/// Possible errors deriving from serializing or deserializing.
#[derive(Debug)]
pub enum Error {
/// Wraps an io error produced when reading or writing
IOErr(io::Error),
/// Expected a given value that wasn't found
UnexpectedData {
/// What we wanted
expected: Vec<u8>,
/// What we got
received: Vec<u8>,
},
/// Data wasn't in a consumable format
CorruptedData,
/// When asked to read too much data
TooLargeReadErr,
/// Consensus rule failure (currently sort order)
ConsensusError(consensus::Error),
/// Error from from_hex deserialization
HexError(String),
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::IOErr(e)
}
}
impl From<consensus::Error> for Error {
fn from(e: consensus::Error) -> Error {
Error::ConsensusError(e)
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::IOErr(ref e) => write!(f, "{}", e),
Error::UnexpectedData {
expected: ref e,
received: ref r,
} => write!(f, "expected {:?}, got {:?}", e, r),
Error::CorruptedData => f.write_str("corrupted data"),
Error::TooLargeReadErr => f.write_str("too large read"),
Error::ConsensusError(ref e) => write!(f, "consensus error {:?}", e),
Error::HexError(ref e) => write!(f, "hex error {:?}", e),
}
}
}
impl error::Error for Error {
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::IOErr(ref e) => Some(e),
_ => None,
}
}
fn description(&self) -> &str {
match *self {
Error::IOErr(ref e) => error::Error::description(e),
Error::UnexpectedData {
expected: _,
received: _,
} => "unexpected data",
Error::CorruptedData => "corrupted data",
Error::TooLargeReadErr => "too large read",
Error::ConsensusError(_) => "consensus error (sort order)",
Error::HexError(_) => "hex error",
}
}
}
/// Signal to a serializable object how much of its data should be serialized
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum SerializationMode {
/// Serialize everything sufficiently to fully reconstruct the object
Full,
/// Serialize the data that defines the object
Hash,
/// Serialize everything that a signer of the object should know
SigHash,
/// Serialize for local storage, for instance in the case where
/// an output doesn't wish to store its range proof
Storage,
}
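// A Writeable implementation can branch on the writer's mode to decide how
// much data to emit, e.g. omitting a range proof when only hashing. A
// hypothetical sketch (the `commit` and `proof` fields are illustrative only):
//
//   fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
//       self.commit.write(writer)?;
//       if writer.serialization_mode() == SerializationMode::Full {
//           writer.write_bytes(&self.proof)?;
//       }
//       Ok(())
//   }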
/// Implementations define how different numbers and binary structures are
/// written to an underlying stream or container (depending on implementation).
pub trait Writer {
/// The mode this serializer is writing in
fn serialization_mode(&self) -> SerializationMode;
/// Writes a u8 as bytes
fn write_u8(&mut self, n: u8) -> Result<(), Error> {
self.write_fixed_bytes(&[n])
}
/// Writes a u16 as bytes
fn write_u16(&mut self, n: u16) -> Result<(), Error> {
let mut bytes = [0; 2];
BigEndian::write_u16(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a u32 as bytes
fn write_u32(&mut self, n: u32) -> Result<(), Error> {
let mut bytes = [0; 4];
BigEndian::write_u32(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a u64 as bytes
fn write_u64(&mut self, n: u64) -> Result<(), Error> {
let mut bytes = [0; 8];
BigEndian::write_u64(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes an i64 as bytes
fn write_i64(&mut self, n: i64) -> Result<(), Error> {
let mut bytes = [0; 8];
BigEndian::write_i64(&mut bytes, n);
self.write_fixed_bytes(&bytes)
}
/// Writes a variable number of bytes. The length is encoded as a 64-bit
/// prefix.
fn write_bytes<T: AsFixedBytes>(&mut self, bytes: &T) -> Result<(), Error> {
try!(self.write_u64(bytes.as_ref().len() as u64));
self.write_fixed_bytes(bytes)
}
/// Writes a fixed number of bytes from something that can turn itself into
/// a `&[u8]`. The reader is expected to know the actual length on read.
fn write_fixed_bytes<T: AsFixedBytes>(&mut self, fixed: &T) -> Result<(), Error>;
}
/// Implementations define how different numbers and binary structures are
/// read from an underlying stream or container (depending on implementation).
pub trait Reader {
/// Read a u8 from the underlying Read
fn read_u8(&mut self) -> Result<u8, Error>;
/// Read a u16 from the underlying Read
fn read_u16(&mut self) -> Result<u16, Error>;
/// Read a u32 from the underlying Read
fn read_u32(&mut self) -> Result<u32, Error>;
/// Read a u64 from the underlying Read
fn read_u64(&mut self) -> Result<u64, Error>;
/// Read an i64 from the underlying Read
fn read_i64(&mut self) -> Result<i64, Error>;
/// Read a variable size vector from the underlying Read. Expects a u64 length
/// prefix first before the data bytes.
fn read_vec(&mut self) -> Result<Vec<u8>, Error>;
/// Read a variable size vector from the underlying Read. Expects a u64 length
/// prefix first before the data bytes, limited to max bytes.
fn read_limited_vec(&mut self, max: usize) -> Result<Vec<u8>, Error>;
/// Read a fixed number of bytes from the underlying reader.
fn read_fixed_bytes(&mut self, length: usize) -> Result<Vec<u8>, Error>;
/// Consumes a byte from the reader, producing an error if it doesn't have
/// the expected value
fn expect_u8(&mut self, val: u8) -> Result<u8, Error>;
}
/// Trait that every type that can be serialized as binary must implement.
/// Writes directly to a Writer, a utility type thinly wrapping an
/// underlying Write implementation.
pub trait Writeable {
/// Write the data held by this Writeable to the provided writer
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error>;
}
/// Trait to allow a collection of Writeables to be written in lexicographical sort order.
pub trait WriteableSorted {
/// Write the data but sort it first.
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error>;
}
/// Reads a collection of serialized items into a Vec
/// and verifies they are lexicographically ordered.
///
/// A consensus rule requires everything is sorted lexicographically to avoid
/// leaking any information through specific ordering of items.
pub fn read_and_verify_sorted<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
where
T: Readable + Hashed + Writeable,
{
let result: Vec<T> = try!((0..count).map(|_| T::read(reader)).collect());
result.verify_sort_order()?;
Ok(result)
}
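// Typical use, assuming `count` was read from a preceding length field and the
// collection was originally written with `write_sorted` (the `Output` type is
// illustrative only):
//
//   let outputs: Vec<Output> = read_and_verify_sorted(reader, count)?;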
/// Trait that every type that can be deserialized from binary must implement.
/// Reads directly from a Reader, a utility type thinly wrapping an
/// underlying Read implementation.
pub trait Readable
where
Self: Sized,
{
/// Reads the data necessary for this Readable from the provided reader
fn read(reader: &mut Reader) -> Result<Self, Error>;
}
/// Deserializes a Readable from any std::io::Read implementation.
pub fn deserialize<T: Readable>(source: &mut Read) -> Result<T, Error> {
let mut reader = BinReader { source: source };
T::read(&mut reader)
}
/// Serializes a Writeable into any std::io::Write implementation.
pub fn serialize<W: Writeable>(sink: &mut Write, thing: &W) -> Result<(), Error> {
let mut writer = BinWriter { sink: sink };
thing.write(&mut writer)
}
/// Utility function to serialize a writeable directly in memory using a
/// Vec<u8>.
pub fn ser_vec<W: Writeable>(thing: &W) -> Result<Vec<u8>, Error> {
let mut vec = Vec::new();
try!(serialize(&mut vec, thing));
Ok(vec)
}
/// Utility to read from a binary source
struct BinReader<'a> {
source: &'a mut Read,
}
/// Utility wrapper for an underlying byte Reader. Defines higher level methods
/// to read numbers, byte vectors, hashes, etc.
impl<'a> Reader for BinReader<'a> {
fn read_u8(&mut self) -> Result<u8, Error> {
self.source.read_u8().map_err(Error::IOErr)
}
fn read_u16(&mut self) -> Result<u16, Error> {
self.source.read_u16::<BigEndian>().map_err(Error::IOErr)
}
fn read_u32(&mut self) -> Result<u32, Error> {
self.source.read_u32::<BigEndian>().map_err(Error::IOErr)
}
fn read_u64(&mut self) -> Result<u64, Error> {
self.source.read_u64::<BigEndian>().map_err(Error::IOErr)
}
fn read_i64(&mut self) -> Result<i64, Error> {
self.source.read_i64::<BigEndian>().map_err(Error::IOErr)
}
/// Read a variable size vector from the underlying Read. Expects a u64 length
/// prefix first before the data bytes.
fn read_vec(&mut self) -> Result<Vec<u8>, Error> {
let len = try!(self.read_u64());
self.read_fixed_bytes(len as usize)
}
/// Read a limited variable size vector from the underlying Read. Expects a
/// u64 length prefix and reads at most max bytes.
fn read_limited_vec(&mut self, max: usize) -> Result<Vec<u8>, Error> {
let len = cmp::min(max, try!(self.read_u64()) as usize);
self.read_fixed_bytes(len as usize)
}
fn read_fixed_bytes(&mut self, length: usize) -> Result<Vec<u8>, Error> {
// not reading more than 100k in a single read
if length > 100000 {
return Err(Error::TooLargeReadErr);
}
let mut buf = vec![0; length];
self.source
.read_exact(&mut buf)
.map(move |_| buf)
.map_err(Error::IOErr)
}
fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {
let b = try!(self.read_u8());
if b == val {
Ok(b)
} else {
Err(Error::UnexpectedData {
expected: vec![val],
received: vec![b],
})
}
}
}
impl Readable for Commitment {
fn read(reader: &mut Reader) -> Result<Commitment, Error> {
let a = try!(reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE));
let mut c = [0; PEDERSEN_COMMITMENT_SIZE];
for i in 0..PEDERSEN_COMMITMENT_SIZE {
c[i] = a[i];
}
Ok(Commitment(c))
}
}
impl Writeable for Commitment {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_fixed_bytes(self)
}
}
impl Writeable for BlindingFactor {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_fixed_bytes(self)
}
}
impl Readable for BlindingFactor {
fn read(reader: &mut Reader) -> Result<BlindingFactor, Error> {
let bytes = reader.read_fixed_bytes(SECRET_KEY_SIZE)?;
Ok(BlindingFactor::from_slice(&bytes))
}
}
impl Writeable for Identifier {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_fixed_bytes(self)
}
}
impl Readable for Identifier {
fn read(reader: &mut Reader) -> Result<Identifier, Error> {
let bytes = reader.read_fixed_bytes(IDENTIFIER_SIZE)?;
Ok(Identifier::from_bytes(&bytes))
}
}
impl Writeable for RangeProof {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_bytes(self)
}
}
impl Readable for RangeProof {
fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
let p = reader.read_limited_vec(MAX_PROOF_SIZE)?;
let mut a = [0; MAX_PROOF_SIZE];
for i in 0..p.len() {
a[i] = p[i];
}
Ok(RangeProof {
proof: a,
plen: p.len(),
})
}
}
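// The serialized form of a range proof is an 8 byte length prefix (written by
// `write_bytes` above) followed by up to MAX_PROOF_SIZE proof bytes, hence the
// fixed element length below.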
impl PMMRable for RangeProof {
fn len() -> usize {
MAX_PROOF_SIZE + 8
}
}
impl Readable for Signature {
fn read(reader: &mut Reader) -> Result<Signature, Error> {
let a = try!(reader.read_fixed_bytes(AGG_SIGNATURE_SIZE));
let mut c = [0; AGG_SIGNATURE_SIZE];
for i in 0..AGG_SIGNATURE_SIZE {
c[i] = a[i];
}
Ok(Signature::from_raw_data(&c).unwrap())
}
}
impl Writeable for Signature {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_fixed_bytes(self)
}
}
/// Utility wrapper for an underlying byte Writer. Defines higher level methods
/// to write numbers, byte vectors, hashes, etc.
struct BinWriter<'a> {
sink: &'a mut Write,
}
impl<'a> Writer for BinWriter<'a> {
fn serialization_mode(&self) -> SerializationMode {
SerializationMode::Full
}
fn write_fixed_bytes<T: AsFixedBytes>(&mut self, fixed: &T) -> Result<(), Error> {
let bs = fixed.as_ref();
try!(self.sink.write_all(bs));
Ok(())
}
}
macro_rules! impl_int {
($int: ty, $w_fn: ident, $r_fn: ident) => {
impl Writeable for $int {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.$w_fn(*self)
}
}
impl Readable for $int {
fn read(reader: &mut Reader) -> Result<$int, Error> {
reader.$r_fn()
}
}
}
}
impl_int!(u8, write_u8, read_u8);
impl_int!(u16, write_u16, read_u16);
impl_int!(u32, write_u32, read_u32);
impl_int!(u64, write_u64, read_u64);
impl_int!(i64, write_i64, read_i64);
impl<T> Readable for Vec<T>
where
T: Readable,
{
fn read(reader: &mut Reader) -> Result<Vec<T>, Error> {
let mut buf = Vec::new();
loop {
let elem = T::read(reader);
match elem {
Ok(e) => buf.push(e),
Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => {
break
}
Err(e) => return Err(e),
}
}
Ok(buf)
}
}
impl<T> Writeable for Vec<T>
where
T: Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
for elmt in self {
elmt.write(writer)?;
}
Ok(())
}
}
impl<T> WriteableSorted for Vec<T>
where
T: Writeable + Ord,
{
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error> {
self.sort();
for elmt in self {
elmt.write(writer)?;
}
Ok(())
}
}
impl<'a, A: Writeable> Writeable for &'a A {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
Writeable::write(*self, writer)
}
}
impl<A: Writeable, B: Writeable> Writeable for (A, B) {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
try!(Writeable::write(&self.0, writer));
Writeable::write(&self.1, writer)
}
}
impl<A: Readable, B: Readable> Readable for (A, B) {
fn read(reader: &mut Reader) -> Result<(A, B), Error> {
Ok((try!(Readable::read(reader)), try!(Readable::read(reader))))
}
}
impl<A: Writeable, B: Writeable, C: Writeable> Writeable for (A, B, C) {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
try!(Writeable::write(&self.0, writer));
try!(Writeable::write(&self.1, writer));
Writeable::write(&self.2, writer)
}
}
impl<A: Writeable, B: Writeable, C: Writeable, D: Writeable> Writeable for (A, B, C, D) {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
try!(Writeable::write(&self.0, writer));
try!(Writeable::write(&self.1, writer));
try!(Writeable::write(&self.2, writer));
Writeable::write(&self.3, writer)
}
}
impl<A: Readable, B: Readable, C: Readable> Readable for (A, B, C) {
fn read(reader: &mut Reader) -> Result<(A, B, C), Error> {
Ok((
try!(Readable::read(reader)),
try!(Readable::read(reader)),
try!(Readable::read(reader)),
))
}
}
impl<A: Readable, B: Readable, C: Readable, D: Readable> Readable for (A, B, C, D) {
fn read(reader: &mut Reader) -> Result<(A, B, C, D), Error> {
Ok((
try!(Readable::read(reader)),
try!(Readable::read(reader)),
try!(Readable::read(reader)),
try!(Readable::read(reader)),
))
}
}
impl Writeable for [u8; 4] {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
writer.write_bytes(self)
}
}
/// Trait for types that can serialize and report their size
pub trait PMMRable: Readable + Writeable + Hashed + Clone {
/// Length in bytes
fn len() -> usize;
}
/// Useful marker trait on types that can be sized byte slices
pub trait AsFixedBytes: Sized + AsRef<[u8]> {
/// The length in bytes
fn len(&self) -> usize;
}
impl<'a> AsFixedBytes for &'a [u8] {
fn len(&self) -> usize {
return 1;
}
}
impl AsFixedBytes for Vec<u8> {
fn len(&self) -> usize {
return self.len();
}
}
impl AsFixedBytes for [u8; 1] {
fn len(&self) -> usize {
return 1;
}
}
impl AsFixedBytes for [u8; 2] {
fn len(&self) -> usize {
return 2;
}
}
impl AsFixedBytes for [u8; 4] {
fn len(&self) -> usize {
return 4;
}
}
impl AsFixedBytes for [u8; 6] {
fn len(&self) -> usize {
return 6;
}
}
impl AsFixedBytes for [u8; 8] {
fn len(&self) -> usize {
return 8;
}
}
impl AsFixedBytes for [u8; 20] {
fn len(&self) -> usize {
return 20;
}
}
impl AsFixedBytes for [u8; 32] {
fn len(&self) -> usize {
return 32;
}
}
impl AsFixedBytes for String {
fn len(&self) -> usize {
return self.len();
}
}
impl AsFixedBytes for ::core::hash::Hash {
fn len(&self) -> usize {
return 32;
}
}
impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
fn len(&self) -> usize {
return self.plen;
}
}
impl AsFixedBytes for ::util::secp::Signature {
fn len(&self) -> usize {
return 64;
}
}
impl AsFixedBytes for ::util::secp::pedersen::Commitment {
fn len(&self) -> usize {
return PEDERSEN_COMMITMENT_SIZE;
}
}
impl AsFixedBytes for BlindingFactor {
fn len(&self) -> usize {
return SECRET_KEY_SIZE;
}
}
impl AsFixedBytes for SwitchCommitHash {
fn len(&self) -> usize {
return SWITCH_COMMIT_HASH_SIZE;
}
}
impl AsFixedBytes for ::keychain::Identifier {
fn len(&self) -> usize {
return IDENTIFIER_SIZE;
}
}
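
// A minimal, illustrative test sketch: round-trip a primitive through the
// in-memory helpers above and exercise `expect_u8`. These tests only rely on
// behaviour documented in this file.
#[cfg(test)]
mod test {
	use super::*;

	#[test]
	fn round_trip_u64() {
		let n: u64 = 0xdeadbeef;
		let vec = ser_vec(&n).unwrap();
		// a u64 is written as 8 big-endian bytes
		assert_eq!(vec.len(), 8);
		let n2: u64 = deserialize(&mut &vec[..]).unwrap();
		assert_eq!(n, n2);
	}

	#[test]
	fn expect_u8_mismatch() {
		let buf = vec![1u8];
		let mut slice: &[u8] = &buf[..];
		let mut reader = BinReader { source: &mut slice };
		// expecting 2 but the stream holds 1, so we get UnexpectedData back
		match reader.expect_u8(2) {
			Err(Error::UnexpectedData { .. }) => (),
			_ => panic!("expected UnexpectedData"),
		}
	}
}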