Reduce number of allocations in to_key calls (#3311)

We have to make an extra allocation per db get request because the key generation function `to_key` takes a `Vec`. Taking a byte slice (`AsRef<[u8]>`, to be precise) also simplifies the code a bit.
This commit is contained in:
hashmap 2020-04-30 17:47:44 +02:00 committed by GitHub
parent a82041d0ed
commit 8a22fb516a
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 55 additions and 81 deletions

View file

@ -88,23 +88,21 @@ impl ChainStore {
/// Get full block. /// Get full block.
pub fn get_block(&self, h: &Hash) -> Result<Block, Error> { pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h)), || {
self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())), format!("BLOCK: {}", h)
|| format!("BLOCK: {}", h), })
)
} }
/// Does this full block exist? /// Does this full block exist?
pub fn block_exists(&self, h: &Hash) -> Result<bool, Error> { pub fn block_exists(&self, h: &Hash) -> Result<bool, Error> {
self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec())) self.db.exists(&to_key(BLOCK_PREFIX, h))
} }
/// Get block_sums for the block hash. /// Get block_sums for the block hash.
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> { pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h)), || {
self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, &mut h.to_vec())), format!("Block sums for block: {}", h)
|| format!("Block sums for block: {}", h), })
)
} }
/// Get previous header. /// Get previous header.
@ -114,11 +112,9 @@ impl ChainStore {
/// Get block header. /// Get block header.
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> { pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h)), || {
self.db format!("BLOCK HEADER: {}", h)
.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())), })
|| format!("BLOCK HEADER: {}", h),
)
} }
/// Get PMMR pos for the given output commitment. /// Get PMMR pos for the given output commitment.
@ -134,8 +130,7 @@ impl ChainStore {
/// Get PMMR pos and block height for the given output commitment. /// Get PMMR pos and block height for the given output commitment.
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> { pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> {
self.db self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit))
.get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()))
} }
/// Builds a new batch to be used with this store. /// Builds a new batch to be used with this store.
@ -192,41 +187,35 @@ impl<'a> Batch<'a> {
/// get block /// get block
pub fn get_block(&self, h: &Hash) -> Result<Block, Error> { pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_PREFIX, h)), || {
self.db.get_ser(&to_key(BLOCK_PREFIX, &mut h.to_vec())), format!("Block with hash: {}", h)
|| format!("Block with hash: {}", h), })
)
} }
/// Does the block exist? /// Does the block exist?
pub fn block_exists(&self, h: &Hash) -> Result<bool, Error> { pub fn block_exists(&self, h: &Hash) -> Result<bool, Error> {
self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec())) self.db.exists(&to_key(BLOCK_PREFIX, h))
} }
/// Save the block to the db. /// Save the block to the db.
/// Note: the block header is not saved to the db here, assumes this has already been done. /// Note: the block header is not saved to the db here, assumes this has already been done.
pub fn save_block(&self, b: &Block) -> Result<(), Error> { pub fn save_block(&self, b: &Block) -> Result<(), Error> {
self.db self.db.put_ser(&to_key(BLOCK_PREFIX, b.hash())[..], b)?;
.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?;
Ok(()) Ok(())
} }
/// We maintain a "spent" index for each full block to allow the output_pos /// We maintain a "spent" index for each full block to allow the output_pos
/// to be easily reverted during rewind. /// to be easily reverted during rewind.
pub fn save_spent_index(&self, h: &Hash, spent: &Vec<CommitPos>) -> Result<(), Error> { pub fn save_spent_index(&self, h: &Hash, spent: &Vec<CommitPos>) -> Result<(), Error> {
self.db self.db.put_ser(&to_key(BLOCK_SPENT_PREFIX, h)[..], spent)?;
.put_ser(&to_key(BLOCK_SPENT_PREFIX, &mut h.to_vec())[..], spent)?;
Ok(()) Ok(())
} }
/// Migrate a block stored in the db by serializing it using the provided protocol version. /// Migrate a block stored in the db by serializing it using the provided protocol version.
/// Block may have been read using a previous protocol version but we do not actually care. /// Block may have been read using a previous protocol version but we do not actually care.
pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> { pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> {
self.db.put_ser_with_version( self.db
&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], .put_ser_with_version(&to_key(BLOCK_PREFIX, &mut b.hash())[..], b, version)?;
b,
version,
)?;
Ok(()) Ok(())
} }
@ -238,8 +227,7 @@ impl<'a> Batch<'a> {
/// Delete a full block. Does not delete any record associated with a block /// Delete a full block. Does not delete any record associated with a block
/// header. /// header.
pub fn delete_block(&self, bh: &Hash) -> Result<(), Error> { pub fn delete_block(&self, bh: &Hash) -> Result<(), Error> {
self.db self.db.delete(&to_key(BLOCK_PREFIX, bh)[..])?;
.delete(&to_key(BLOCK_PREFIX, &mut bh.to_vec())[..])?;
// Best effort at deleting associated data for this block. // Best effort at deleting associated data for this block.
// Not an error if these fail. // Not an error if these fail.
@ -257,7 +245,7 @@ impl<'a> Batch<'a> {
// Store the header itself indexed by hash. // Store the header itself indexed by hash.
self.db self.db
.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut hash.to_vec())[..], header)?; .put_ser(&to_key(BLOCK_HEADER_PREFIX, hash)[..], header)?;
Ok(()) Ok(())
} }
@ -269,29 +257,26 @@ impl<'a> Batch<'a> {
pos: u64, pos: u64,
height: u64, height: u64,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.db.put_ser( self.db
&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], .put_ser(&to_key(OUTPUT_POS_PREFIX, commit)[..], &(pos, height))
&(pos, height),
)
} }
/// Delete the output_pos index entry for a spent output. /// Delete the output_pos index entry for a spent output.
pub fn delete_output_pos_height(&self, commit: &Commitment) -> Result<(), Error> { pub fn delete_output_pos_height(&self, commit: &Commitment) -> Result<(), Error> {
self.db self.db.delete(&to_key(OUTPUT_POS_PREFIX, commit))
.delete(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()))
} }
/// When using the output_pos iterator we have access to the index keys but not the /// When using the output_pos iterator we have access to the index keys but not the
/// original commitment that the key is constructed from. So we need a way of comparing /// original commitment that the key is constructed from. So we need a way of comparing
/// a key with another commitment without reconstructing the commitment from the key bytes. /// a key with another commitment without reconstructing the commitment from the key bytes.
pub fn is_match_output_pos_key(&self, key: &[u8], commit: &Commitment) -> bool { pub fn is_match_output_pos_key(&self, key: &[u8], commit: &Commitment) -> bool {
let commit_key = to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()); let commit_key = to_key(OUTPUT_POS_PREFIX, commit);
commit_key == key commit_key == key
} }
/// Iterator over the output_pos index. /// Iterator over the output_pos index.
pub fn output_pos_iter(&self) -> Result<SerIterator<(u64, u64)>, Error> { pub fn output_pos_iter(&self) -> Result<SerIterator<(u64, u64)>, Error> {
let key = to_key(OUTPUT_POS_PREFIX, &mut "".to_string().into_bytes()); let key = to_key(OUTPUT_POS_PREFIX, "");
self.db.iter(&key) self.db.iter(&key)
} }
@ -308,8 +293,7 @@ impl<'a> Batch<'a> {
/// Get output_pos and block height from index. /// Get output_pos and block height from index.
pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> { pub fn get_output_pos_height(&self, commit: &Commitment) -> Result<Option<(u64, u64)>, Error> {
self.db self.db.get_ser(&to_key(OUTPUT_POS_PREFIX, commit))
.get_ser(&to_key(OUTPUT_POS_PREFIX, &mut commit.as_ref().to_vec()))
} }
/// Get the previous header. /// Get the previous header.
@ -319,41 +303,34 @@ impl<'a> Batch<'a> {
/// Get block header. /// Get block header.
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> { pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, h)), || {
self.db format!("BLOCK HEADER: {}", h)
.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())), })
|| format!("BLOCK HEADER: {}", h),
)
} }
/// Delete the block spent index. /// Delete the block spent index.
fn delete_spent_index(&self, bh: &Hash) -> Result<(), Error> { fn delete_spent_index(&self, bh: &Hash) -> Result<(), Error> {
// Clean up the legacy input bitmap as well. // Clean up the legacy input bitmap as well.
let _ = self let _ = self.db.delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, bh));
.db
.delete(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()));
self.db self.db.delete(&to_key(BLOCK_SPENT_PREFIX, bh))
.delete(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec()))
} }
/// Save block_sums for the block. /// Save block_sums for the block.
pub fn save_block_sums(&self, h: &Hash, sums: BlockSums) -> Result<(), Error> { pub fn save_block_sums(&self, h: &Hash, sums: BlockSums) -> Result<(), Error> {
self.db self.db.put_ser(&to_key(BLOCK_SUMS_PREFIX, h)[..], &sums)
.put_ser(&to_key(BLOCK_SUMS_PREFIX, &mut h.to_vec())[..], &sums)
} }
/// Get block_sums for the block. /// Get block_sums for the block.
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> { pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, h)), || {
self.db.get_ser(&to_key(BLOCK_SUMS_PREFIX, &mut h.to_vec())), format!("Block sums for block: {}", h)
|| format!("Block sums for block: {}", h), })
)
} }
/// Delete the block_sums for the block. /// Delete the block_sums for the block.
fn delete_block_sums(&self, bh: &Hash) -> Result<(), Error> { fn delete_block_sums(&self, bh: &Hash) -> Result<(), Error> {
self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())) self.db.delete(&to_key(BLOCK_SUMS_PREFIX, bh))
} }
/// Get the block input bitmap based on our spent index. /// Get the block input bitmap based on our spent index.
@ -371,10 +348,7 @@ impl<'a> Batch<'a> {
} }
fn get_legacy_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> { fn get_legacy_input_bitmap(&self, bh: &Hash) -> Result<Bitmap, Error> {
if let Ok(Some(bytes)) = self if let Ok(Some(bytes)) = self.db.get(&to_key(BLOCK_INPUT_BITMAP_PREFIX, bh)) {
.db
.get(&to_key(BLOCK_INPUT_BITMAP_PREFIX, &mut bh.to_vec()))
{
Ok(Bitmap::deserialize(&bytes)) Ok(Bitmap::deserialize(&bytes))
} else { } else {
Err(Error::NotFoundErr("legacy block input bitmap".to_string())) Err(Error::NotFoundErr("legacy block input bitmap".to_string()))
@ -384,11 +358,9 @@ impl<'a> Batch<'a> {
/// Get the "spent index" from the db for the specified block. /// Get the "spent index" from the db for the specified block.
/// If we need to rewind a block then we use this to "unspend" the spent outputs. /// If we need to rewind a block then we use this to "unspend" the spent outputs.
pub fn get_spent_index(&self, bh: &Hash) -> Result<Vec<CommitPos>, Error> { pub fn get_spent_index(&self, bh: &Hash) -> Result<Vec<CommitPos>, Error> {
option_to_not_found( option_to_not_found(self.db.get_ser(&to_key(BLOCK_SPENT_PREFIX, bh)), || {
self.db format!("spent index: {}", bh)
.get_ser(&to_key(BLOCK_SPENT_PREFIX, &mut bh.to_vec())), })
|| format!("spent index: {}", bh),
)
} }
/// Commits this batch. If it's a child batch, it will be merged with the /// Commits this batch. If it's a child batch, it will be merged with the
@ -407,7 +379,7 @@ impl<'a> Batch<'a> {
/// An iterator to all block in db /// An iterator to all block in db
pub fn blocks_iter(&self) -> Result<SerIterator<Block>, Error> { pub fn blocks_iter(&self) -> Result<SerIterator<Block>, Error> {
let key = to_key(BLOCK_PREFIX, &mut "".to_string().into_bytes()); let key = to_key(BLOCK_PREFIX, "");
self.db.iter(&key) self.db.iter(&key)
} }
} }

View file

@ -154,7 +154,7 @@ impl PeerStore {
) -> Result<Vec<PeerData>, Error> { ) -> Result<Vec<PeerData>, Error> {
let mut peers = self let mut peers = self
.db .db
.iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()))? .iter::<PeerData>(&to_key(PEER_PREFIX, ""))?
.map(|(_, v)| v) .map(|(_, v)| v)
.filter(|p| p.flags == state && p.capabilities.contains(cap)) .filter(|p| p.flags == state && p.capabilities.contains(cap))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@ -165,7 +165,7 @@ impl PeerStore {
/// List all known peers /// List all known peers
/// Used for /v1/peers/all api endpoint /// Used for /v1/peers/all api endpoint
pub fn all_peers(&self) -> Result<Vec<PeerData>, Error> { pub fn all_peers(&self) -> Result<Vec<PeerData>, Error> {
let key = to_key(PEER_PREFIX, &mut "".to_string().into_bytes()); let key = to_key(PEER_PREFIX, "");
Ok(self Ok(self
.db .db
.iter::<PeerData>(&key)? .iter::<PeerData>(&key)?
@ -221,5 +221,5 @@ impl PeerStore {
// Ignore the port unless ip is loopback address. // Ignore the port unless ip is loopback address.
fn peer_key(peer_addr: PeerAddr) -> Vec<u8> { fn peer_key(peer_addr: PeerAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &mut peer_addr.as_key().into_bytes()) to_key(PEER_PREFIX, &peer_addr.as_key())
} }

View file

@ -44,20 +44,22 @@ use byteorder::{BigEndian, WriteBytesExt};
pub use crate::lmdb::*; pub use crate::lmdb::*;
/// Build a db key from a prefix and a byte vector identifier. /// Build a db key from a prefix and a byte vector identifier.
pub fn to_key(prefix: u8, k: &mut Vec<u8>) -> Vec<u8> { pub fn to_key<K: AsRef<[u8]>>(prefix: u8, k: K) -> Vec<u8> {
let k = k.as_ref();
let mut res = Vec::with_capacity(k.len() + 2); let mut res = Vec::with_capacity(k.len() + 2);
res.push(prefix); res.push(prefix);
res.push(SEP); res.push(SEP);
res.append(k); res.extend_from_slice(k);
res res
} }
/// Build a db key from a prefix and a byte vector identifier and numeric identifier /// Build a db key from a prefix and a byte vector identifier and numeric identifier
pub fn to_key_u64(prefix: u8, k: &mut Vec<u8>, val: u64) -> Vec<u8> { pub fn to_key_u64<K: AsRef<[u8]>>(prefix: u8, k: K, val: u64) -> Vec<u8> {
let k = k.as_ref();
let mut res = Vec::with_capacity(k.len() + 10); let mut res = Vec::with_capacity(k.len() + 10);
res.push(prefix); res.push(prefix);
res.push(SEP); res.push(SEP);
res.append(k); res.extend_from_slice(k);
res.write_u64::<BigEndian>(val).unwrap(); res.write_u64::<BigEndian>(val).unwrap();
res res
} }

View file

@ -75,9 +75,9 @@ fn lmdb_allocate() -> Result<(), store::Error> {
for i in 0..WRITE_CHUNK_SIZE * 2 { for i in 0..WRITE_CHUNK_SIZE * 2 {
println!("Allocating chunk: {}", i); println!("Allocating chunk: {}", i);
let chunk = PhatChunkStruct::new(); let chunk = PhatChunkStruct::new();
let mut key_val = format!("phat_chunk_set_1_{}", i).as_bytes().to_vec(); let key_val = format!("phat_chunk_set_1_{}", i);
let batch = store.batch()?; let batch = store.batch()?;
let key = store::to_key(b'P', &mut key_val); let key = store::to_key(b'P', &key_val);
batch.put_ser(&key, &chunk)?; batch.put_ser(&key, &chunk)?;
batch.commit()?; batch.commit()?;
} }
@ -91,9 +91,9 @@ fn lmdb_allocate() -> Result<(), store::Error> {
for i in 0..WRITE_CHUNK_SIZE * 2 { for i in 0..WRITE_CHUNK_SIZE * 2 {
println!("Allocating chunk: {}", i); println!("Allocating chunk: {}", i);
let chunk = PhatChunkStruct::new(); let chunk = PhatChunkStruct::new();
let mut key_val = format!("phat_chunk_set_2_{}", i).as_bytes().to_vec(); let key_val = format!("phat_chunk_set_2_{}", i);
let batch = store.batch()?; let batch = store.batch()?;
let key = store::to_key(b'P', &mut key_val); let key = store::to_key(b'P', &key_val);
batch.put_ser(&key, &chunk)?; batch.put_ser(&key, &chunk)?;
batch.commit()?; batch.commit()?;
} }