2018-06-22 11:08:06 +03:00
|
|
|
// Copyright 2018 The Grin Developers
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
//! Storage of core types using LMDB.
|
|
|
|
|
|
|
|
use std::fs;
|
|
|
|
use std::marker;
|
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
|
|
use lmdb_zero as lmdb;
|
|
|
|
use lmdb_zero::traits::CreateCursor;
|
2018-07-01 01:36:38 +03:00
|
|
|
use lmdb_zero::LmdbResultExt;
|
2018-06-22 11:08:06 +03:00
|
|
|
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::core::ser;
|
2019-02-27 12:47:46 +03:00
|
|
|
use crate::util::{RwLock, RwLockReadGuard};
|
|
|
|
|
|
|
|
/// number of bytes to grow the database by when needed
pub const ALLOC_CHUNK_SIZE: usize = 134_217_728; //128 MB
/// Fraction of the map that may be used before a resize is triggered
const RESIZE_PERCENT: f32 = 0.9;
/// Want to ensure that each resize gives us at least this %
/// of total space free
const RESIZE_MIN_TARGET_PERCENT: f32 = 0.65;
|
2018-06-22 11:08:06 +03:00
|
|
|
|
|
|
|
/// Main error type for this lmdb
#[derive(Clone, Eq, PartialEq, Debug, Fail)]
pub enum Error {
	/// Couldn't find what we were looking for
	#[fail(display = "DB Not Found Error: {}", _0)]
	NotFoundErr(String),
	/// Wraps an underlying LMDB error
	#[fail(display = "LMDB error")]
	LmdbErr(lmdb::error::Error),
	/// Wraps a serialization error for Writeable or Readable
	#[fail(display = "Serialization Error")]
	SerErr(String),
}
|
|
|
|
|
|
|
|
impl From<lmdb::error::Error> for Error {
|
|
|
|
fn from(e: lmdb::error::Error) -> Error {
|
|
|
|
Error::LmdbErr(e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// unwraps the inner option by converting the none case to a not found error
|
2018-07-01 01:36:38 +03:00
|
|
|
pub fn option_to_not_found<T>(res: Result<Option<T>, Error>, field_name: &str) -> Result<T, Error> {
|
2018-06-22 11:08:06 +03:00
|
|
|
match res {
|
2018-07-01 01:36:38 +03:00
|
|
|
Ok(None) => Err(Error::NotFoundErr(field_name.to_owned())),
|
2018-06-22 11:08:06 +03:00
|
|
|
Ok(Some(o)) => Ok(o),
|
|
|
|
Err(e) => Err(e),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// LMDB-backed store facilitating data access and serialization. All writes
/// are done through a Batch abstraction providing atomicity.
pub struct Store {
	// Shared LMDB environment; transactions are scoped to this env.
	env: Arc<lmdb::Environment>,
	// Open database handle. Held behind a lock and wrapped in Option so it
	// can be dropped and reopened while the map is resized (see do_resize).
	db: RwLock<Option<Arc<lmdb::Database<'static>>>>,
	// Name of the named database within the environment.
	name: String,
}
|
|
|
|
|
|
|
|
impl Store {
	/// Create a new LMDB env under the provided directory.
	/// By default creates an environment named "lmdb".
	/// Be aware of transactional semantics in lmdb
	/// (transactions are per environment, not per database).
	pub fn new(
		root_path: &str,
		env_name: Option<&str>,
		db_name: Option<&str>,
		max_readers: Option<u32>,
	) -> Result<Store, Error> {
		// Both the env and the db default to the name "lmdb" when not given.
		let name = match env_name {
			Some(n) => n.to_owned(),
			None => "lmdb".to_owned(),
		};
		let db_name = match db_name {
			Some(n) => n.to_owned(),
			None => "lmdb".to_owned(),
		};
		// Environment files live under <root_path>/<env name>.
		let full_path = [root_path.to_owned(), name.clone()].join("/");
		fs::create_dir_all(&full_path)
			.expect("Unable to create directory 'db_root' to store chain_data");

		let mut env_builder = lmdb::EnvBuilder::new().unwrap();
		// Allow up to 8 named databases within the single environment.
		env_builder.set_maxdbs(8)?;

		if let Some(max_readers) = max_readers {
			env_builder.set_maxreaders(max_readers)?;
		}

		// SAFETY requirement comes from lmdb_zero: the path must outlive the
		// env and no other process may map it incompatibly. NOTLS ties read
		// transactions to handles rather than threads.
		let env = unsafe { env_builder.open(&full_path, lmdb::open::NOTLS, 0o600)? };

		debug!(
			"DB Mapsize for {} is {}",
			full_path,
			env.info().as_ref().unwrap().mapsize
		);
		let res = Store {
			env: Arc::new(env),
			db: RwLock::new(None),
			name: db_name,
		};

		// Open (creating if necessary) the named database immediately so the
		// store is usable as soon as new() returns.
		{
			let mut w = res.db.write();
			*w = Some(Arc::new(lmdb::Database::open(
				res.env.clone(),
				Some(&res.name),
				&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
			)?));
		}
		Ok(res)
	}

	/// Opens the database environment
	pub fn open(&self) -> Result<(), Error> {
		let mut w = self.db.write();
		*w = Some(Arc::new(lmdb::Database::open(
			self.env.clone(),
			Some(&self.name),
			&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
		)?));
		Ok(())
	}

	/// Determines whether the environment needs a resize based on a simple percentage threshold
	pub fn needs_resize(&self) -> Result<bool, Error> {
		let env_info = self.env.info()?;
		let stat = self.env.stat()?;

		// Bytes in use = page size * highest page number handed out so far.
		let size_used = stat.psize as usize * env_info.last_pgno;
		trace!("DB map size: {}", env_info.mapsize);
		trace!("Space used: {}", size_used);
		trace!("Space remaining: {}", env_info.mapsize - size_used);
		let resize_percent = RESIZE_PERCENT;
		trace!(
			"Percent used: {:.*} Percent threshold: {:.*}",
			4,
			size_used as f64 / env_info.mapsize as f64,
			4,
			resize_percent
		);

		// Also force a resize if the map has never been grown to at least one
		// allocation chunk (e.g. a fresh environment with the lmdb default).
		if size_used as f32 / env_info.mapsize as f32 > resize_percent
			|| env_info.mapsize < ALLOC_CHUNK_SIZE
		{
			trace!("Resize threshold met (percent-based)");
			Ok(true)
		} else {
			trace!("Resize threshold not met (percent-based)");
			Ok(false)
		}
	}

	/// Increments the database size by as many ALLOC_CHUNK_SIZES
	/// to give a minimum threshold of free space
	pub fn do_resize(&self) -> Result<(), Error> {
		let env_info = self.env.info()?;
		let stat = self.env.stat()?;
		let size_used = stat.psize as usize * env_info.last_pgno;

		// Grow in whole ALLOC_CHUNK_SIZE increments until usage falls below
		// RESIZE_MIN_TARGET_PERCENT of the new map size.
		let new_mapsize = if env_info.mapsize < ALLOC_CHUNK_SIZE {
			ALLOC_CHUNK_SIZE
		} else {
			let mut tot = env_info.mapsize;
			while size_used as f32 / tot as f32 > RESIZE_MIN_TARGET_PERCENT {
				tot += ALLOC_CHUNK_SIZE;
			}
			tot
		};

		// close
		// Drop the db handle while holding the write lock so no other access
		// can race the set_mapsize call below.
		let mut w = self.db.write();
		*w = None;

		// SAFETY requirement comes from lmdb_zero: no live transactions may
		// exist while the map is resized; the db handle was closed above.
		unsafe {
			self.env.set_mapsize(new_mapsize)?;
		}

		// Reopen the database under the enlarged map before releasing the lock.
		*w = Some(Arc::new(lmdb::Database::open(
			self.env.clone(),
			Some(&self.name),
			&lmdb::DatabaseOptions::new(lmdb::db::CREATE),
		)?));

		info!(
			"Resized database from {} to {}",
			env_info.mapsize, new_mapsize
		);
		Ok(())
	}

	/// Gets a value from the db, provided its key
	pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
		// Hold the read lock on the db handle for the duration of the read
		// so a concurrent resize cannot drop it out from under us.
		let db = self.db.read();
		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
		let access = txn.access();
		let res = access.get(&db.as_ref().unwrap(), key);
		// NOTFOUND from lmdb is mapped to Ok(None) by to_opt().
		res.map(|res: &[u8]| res.to_vec())
			.to_opt()
			.map_err(From::from)
	}

	/// Gets a `Readable` value from the db, provided its key. Encapsulates
	/// serialization.
	pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
		let db = self.db.read();
		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
		let access = txn.access();
		self.get_ser_access(key, &access, db)
	}

	// Shared lookup + deserialize helper used by both Store::get_ser and
	// Batch::get_ser; the caller supplies the accessor (read txn or batch txn)
	// and the read guard keeping the db handle alive.
	fn get_ser_access<T: ser::Readable>(
		&self,
		key: &[u8],
		access: &lmdb::ConstAccessor<'_>,
		db: RwLockReadGuard<'_, Option<Arc<lmdb::Database<'static>>>>,
	) -> Result<Option<T>, Error> {
		let res: lmdb::error::Result<&[u8]> = access.get(&db.as_ref().unwrap(), key);
		match res.to_opt() {
			// Deserialization failure is surfaced as SerErr rather than panic.
			Ok(Some(mut res)) => match ser::deserialize(&mut res) {
				Ok(res) => Ok(Some(res)),
				Err(e) => Err(Error::SerErr(format!("{}", e))),
			},
			Ok(None) => Ok(None),
			Err(e) => Err(From::from(e)),
		}
	}

	/// Whether the provided key exists
	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
		let db = self.db.read();
		let txn = lmdb::ReadTransaction::new(self.env.clone())?;
		let access = txn.access();
		// lmdb::Ignore skips copying the value out; we only need presence.
		let res: lmdb::error::Result<&lmdb::Ignore> = access.get(&db.as_ref().unwrap(), key);
		res.to_opt().map(|r| r.is_some()).map_err(From::from)
	}

	/// Produces an iterator of `Readable` types moving forward from the
	/// provided key.
	pub fn iter<T: ser::Readable>(&self, from: &[u8]) -> Result<SerIterator<T>, Error> {
		let db = self.db.read();
		// Both tx and cursor are kept in Arcs inside the iterator so they
		// outlive this call frame.
		let tx = Arc::new(lmdb::ReadTransaction::new(self.env.clone())?);
		let cursor = Arc::new(tx.cursor(db.as_ref().unwrap().clone()).unwrap());
		Ok(SerIterator {
			tx,
			cursor,
			seek: false,
			prefix: from.to_vec(),
			_marker: marker::PhantomData,
		})
	}

	/// Builds a new batch to be used with this store.
	pub fn batch(&self) -> Result<Batch<'_>, Error> {
		// check if the db needs resizing before returning the batch
		if self.needs_resize()? {
			self.do_resize()?;
		}
		let txn = lmdb::WriteTransaction::new(self.env.clone())?;
		Ok(Batch {
			store: self,
			tx: txn,
		})
	}
}
|
|
|
|
|
|
|
|
/// Batch to write multiple Writeables to db in an atomic manner.
pub struct Batch<'a> {
	// Owning store; used for reads and for the shared db handle.
	store: &'a Store,
	// Write transaction backing this batch; committed or dropped as a unit.
	tx: lmdb::WriteTransaction<'a>,
}
|
|
|
|
|
|
|
|
impl<'a> Batch<'a> {
	/// Writes a single key/value pair to the db
	pub fn put(&self, key: &[u8], value: &[u8]) -> Result<(), Error> {
		// Hold the db read guard so a concurrent resize cannot close the
		// handle mid-write.
		let db = self.store.db.read();
		self.tx
			.access()
			.put(&db.as_ref().unwrap(), key, value, lmdb::put::Flags::empty())?;
		Ok(())
	}

	/// Writes a single key and its `Writeable` value to the db. Encapsulates
	/// serialization.
	pub fn put_ser<W: ser::Writeable>(&self, key: &[u8], value: &W) -> Result<(), Error> {
		let ser_value = ser::ser_vec(value);
		match ser_value {
			Ok(data) => self.put(key, &data),
			// Serialization failures surface as SerErr, matching get_ser_access.
			Err(err) => Err(Error::SerErr(format!("{}", err))),
		}
	}

	/// gets a value from the db, provided its key
	// NOTE: delegates to Store::get, which opens a fresh read transaction,
	// so writes made in this batch but not yet committed are not visible here
	// (unlike get_ser below, which reads through the batch's own transaction).
	pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
		self.store.get(key)
	}

	/// Whether the provided key exists
	// NOTE: same visibility caveat as get() — uncommitted batch writes are
	// not seen, since this uses a separate read transaction.
	pub fn exists(&self, key: &[u8]) -> Result<bool, Error> {
		self.store.exists(key)
	}

	/// Produces an iterator of `Readable` types moving forward from the
	/// provided key.
	pub fn iter<T: ser::Readable>(&self, from: &[u8]) -> Result<SerIterator<T>, Error> {
		self.store.iter(from)
	}

	/// Gets a `Readable` value from the db, provided its key, taking the
	/// content of the current batch into account.
	pub fn get_ser<T: ser::Readable>(&self, key: &[u8]) -> Result<Option<T>, Error> {
		// Reads via this batch's own write transaction accessor, so values
		// put earlier in the batch are visible.
		let access = self.tx.access();
		let db = self.store.db.read();
		self.store.get_ser_access(key, &access, db)
	}

	/// Deletes a key/value pair from the db
	pub fn delete(&self, key: &[u8]) -> Result<(), Error> {
		let db = self.store.db.read();
		self.tx.access().del_key(&db.as_ref().unwrap(), key)?;
		Ok(())
	}

	/// Writes the batch to db
	pub fn commit(self) -> Result<(), Error> {
		// Consumes self; an uncommitted Batch is rolled back on drop by lmdb.
		self.tx.commit()?;
		Ok(())
	}

	/// Creates a child of this batch. It will be merged with its parent on
	/// commit, abandoned otherwise.
	pub fn child(&mut self) -> Result<Batch<'_>, Error> {
		Ok(Batch {
			store: self.store,
			tx: self.tx.child_tx()?,
		})
	}
}
|
|
|
|
|
|
|
|
/// An iterator that produces Readable instances back. Wraps the lower level
/// DBIterator and deserializes the returned values.
pub struct SerIterator<T>
where
	T: ser::Readable,
{
	// Read transaction kept alive for as long as the cursor is in use.
	tx: Arc<lmdb::ReadTransaction<'static>>,
	// Cursor over the underlying database.
	cursor: Arc<lmdb::Cursor<'static, 'static>>,
	// True once the initial seek to the prefix has been performed.
	seek: bool,
	// Key prefix delimiting the iteration range; iteration stops at the
	// first key that does not share it.
	prefix: Vec<u8>,
	// Marker tying the iterator to its deserialized item type.
	_marker: marker::PhantomData<T>,
}
|
|
|
|
|
|
|
|
impl<T> Iterator for SerIterator<T>
where
	T: ser::Readable,
{
	type Item = T;

	fn next(&mut self) -> Option<T> {
		let access = self.tx.access();
		// First call seeks to the first key >= prefix; subsequent calls step
		// the cursor forward. Arc::get_mut is safe to unwrap here because the
		// cursor Arc is never cloned out of this struct.
		let kv = if self.seek {
			Arc::get_mut(&mut self.cursor).unwrap().next(&access)
		} else {
			self.seek = true;
			Arc::get_mut(&mut self.cursor)
				.unwrap()
				.seek_range_k(&access, &self.prefix[..])
		};
		// Returns None (ending iteration) on error or when the key leaves
		// the prefix range.
		self.deser_if_prefix_match(kv)
	}
}
|
|
|
|
|
|
|
|
impl<T> SerIterator<T>
|
|
|
|
where
|
|
|
|
T: ser::Readable,
|
|
|
|
{
|
|
|
|
fn deser_if_prefix_match(&self, kv: Result<(&[u8], &[u8]), lmdb::Error>) -> Option<T> {
|
|
|
|
match kv {
|
|
|
|
Ok((k, v)) => {
|
|
|
|
let plen = self.prefix.len();
|
|
|
|
if plen == 0 || k[0..plen] == self.prefix[..] {
|
|
|
|
ser::deserialize(&mut &v[..]).ok()
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Err(_) => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|