Mirror of https://github.com/mimblewimble/grin.git
we have not used the rm_log for a while, getting rid of it (#1897)
This commit is contained in:
parent a8bf2eb126
commit 3b5a39dd42
5 changed files with 4 additions and 262 deletions
@@ -41,7 +41,6 @@ pub mod leaf_set;
 mod lmdb;
 pub mod pmmr;
 pub mod prune_list;
-pub mod rm_log;
 pub mod types;
 
 const SEP: u8 = ':' as u8;
@@ -1,148 +0,0 @@
-// Copyright 2018 The Grin Developers
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! The deprecated rm_log impl. Still used for migration
-//! from rm_log -> leaf_set on startup and fast sync.
-
-use std::fs::File;
-use std::io::{self, BufWriter, Write};
-
-use core::ser;
-use types::read_ordered_vec;
-
-/// Log file fully cached in memory containing all positions that should be
-/// eventually removed from the MMR append-only data file. Allows quick
-/// checking of whether a piece of data has been marked for deletion. When the
-/// log becomes too long, the MMR backend will actually remove chunks from the
-/// MMR data file and truncate the remove log.
-pub struct RemoveLog {
-    path: String,
-    /// Ordered vector of MMR positions that should get eventually removed.
-    pub removed: Vec<(u64, u32)>,
-    // Holds positions temporarily until flush is called.
-    removed_tmp: Vec<(u64, u32)>,
-    // Holds truncated removed temporarily until discarded or committed
-    removed_bak: Vec<(u64, u32)>,
-}
-
-impl RemoveLog {
-    /// Open the remove log file.
-    /// The content of the file will be read in memory for fast checking.
-    pub fn open(path: String) -> io::Result<RemoveLog> {
-        let removed = read_ordered_vec(path.clone(), 12)?;
-        Ok(RemoveLog {
-            path: path,
-            removed: removed,
-            removed_tmp: vec![],
-            removed_bak: vec![],
-        })
-    }
-
-    /// Rewinds the remove log back to the provided index.
-    /// We keep everything in the rm_log from that index and earlier.
-    /// In practice the index is a block height, so we rewind back to that block
-    /// keeping everything in the rm_log up to and including that block.
-    pub fn rewind(&mut self, idx: u32) -> io::Result<()> {
-        // backing it up before truncating (unless we already have a backup)
-        if self.removed_bak.is_empty() {
-            self.removed_bak = self.removed.clone();
-        }
-
-        if idx == 0 {
-            self.removed = vec![];
-            self.removed_tmp = vec![];
-        } else {
-            // retain rm_log entries up to and including those at the provided index
-            self.removed.retain(|&(_, x)| x <= idx);
-            self.removed_tmp.retain(|&(_, x)| x <= idx);
-        }
-        Ok(())
-    }
-
-    /// Append a set of new positions to the remove log. Adds those
-    /// positions both to the ordered in-memory set and to the file.
-    pub fn append(&mut self, elmts: Vec<u64>, index: u32) -> io::Result<()> {
-        for elmt in elmts {
-            match self.removed_tmp.binary_search(&(elmt, index)) {
-                Ok(_) => continue,
-                Err(idx) => {
-                    self.removed_tmp.insert(idx, (elmt, index));
-                }
-            }
-        }
-        Ok(())
-    }
-
-    /// Flush the positions to remove to file.
-    pub fn flush(&mut self) -> io::Result<()> {
-        for elmt in &self.removed_tmp {
-            match self.removed.binary_search(&elmt) {
-                Ok(_) => continue,
-                Err(idx) => {
-                    self.removed.insert(idx, *elmt);
-                }
-            }
-        }
-        let mut file = BufWriter::new(File::create(self.path.clone())?);
-        for elmt in &self.removed {
-            file.write_all(&ser::ser_vec(&elmt).unwrap()[..])?;
-        }
-        self.removed_tmp = vec![];
-        self.removed_bak = vec![];
-        file.flush()
-    }
-
-    /// Discard pending changes
-    pub fn discard(&mut self) {
-        if self.removed_bak.len() > 0 {
-            self.removed = self.removed_bak.clone();
-            self.removed_bak = vec![];
-        }
-        self.removed_tmp = vec![];
-    }
-
-    /// Whether the remove log currently includes the provided position.
-    pub fn includes(&self, elmt: u64) -> bool {
-        include_tuple(&self.removed, elmt) || include_tuple(&self.removed_tmp, elmt)
-    }
-
-    /// Number of positions stored in the remove log.
-    pub fn len(&self) -> usize {
-        self.removed.len()
-    }
-
-    /// Return vec of pos for removed elements before the provided cutoff index.
-    /// Useful for when we prune and compact an MMR.
-    pub fn removed_pre_cutoff(&self, cutoff_idx: u32) -> Vec<u64> {
-        self.removed
-            .iter()
-            .filter_map(
-                |&(pos, idx)| {
-                    if idx < cutoff_idx {
-                        Some(pos)
-                    } else {
-                        None
-                    }
-                },
-            ).collect()
-    }
-}
-
-fn include_tuple(v: &Vec<(u64, u32)>, e: u64) -> bool {
-    if let Err(pos) = v.binary_search(&(e, 0)) {
-        if pos < v.len() && v[pos].0 == e {
-            return true;
-        }
-    }
-    false
-}
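For context on what is being removed: a minimal usage sketch (not part of the commit) of how this RemoveLog API was typically driven, based on the doc comments above and the performance test deleted further down. The function name and file path here are illustrative only.

```rust
use grin_store::rm_log::RemoveLog;

fn rm_log_example() -> std::io::Result<()> {
    // Open (or create) the remove log; its contents are read fully into memory.
    let mut rm_log = RemoveLog::open("target/rm_log_example.bin".to_string())?;

    // Mark MMR positions as removed: pos 1, 2, 3 at block height 1, then pos 10, 11 at height 2.
    rm_log.append(vec![1, 2, 3], 1)?;
    rm_log.append(vec![10, 11], 2)?;

    // Nothing hits disk until flush() is called.
    rm_log.flush()?;
    assert!(rm_log.includes(2));
    assert_eq!(rm_log.len(), 5);

    // Rewind back to height 1: entries appended at heights > 1 are dropped.
    rm_log.rewind(1)?;
    rm_log.flush()?;
    assert_eq!(rm_log.len(), 3);

    Ok(())
}
```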
@@ -276,7 +276,7 @@ fn pmmr_reload() {
 
     // pos 4 is removed (via prune list)
     assert_eq!(backend.get_hash(4), None);
-    // pos 5 is removed (via rm_log)
+    // pos 5 is removed (via leaf_set)
     assert_eq!(backend.get_hash(5), None);
 
     // now check contents of the hash file
@@ -628,7 +628,7 @@ fn pmmr_compact_horizon() {
 
     // check we can read a hash by pos correctly from recreated backend
     // get_hash() and get_from_file() should return the same value
-    // and we only store leaves in the rm_log so pos 7 still has a hash in there
+    // and we only store leaves in the leaf_set so pos 7 still has a hash in there
     assert_eq!(backend.get_hash(7), Some(pos_7_hash));
     assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
@@ -1,109 +0,0 @@
-// Copyright 2018 The Grin Developers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-extern crate chrono;
-extern crate croaring;
-extern crate env_logger;
-extern crate grin_core as core;
-extern crate grin_store as store;
-
-use chrono::prelude::Utc;
-use std::fs;
-use std::time::{Duration, Instant};
-
-use store::rm_log::RemoveLog;
-
-pub fn as_millis(d: Duration) -> u128 {
-    d.as_secs() as u128 * 1_000 as u128 + (d.subsec_nanos() / (1_000 * 1_000)) as u128
-}
-
-#[test]
-fn test_rm_log_performance() {
-    let (mut rm_log, data_dir) = setup("rm_log_perf");
-
-    println!("Timing some common operations:");
-
-    // Add 100 chunks of 1,000 pos to the rm_log, syncing to disk after each chunk.
-    let now = Instant::now();
-    for x in 0..100 {
-        for y in 0..1000 {
-            let idx = x + 1;
-            let pos = (x * 1000) + y + 1;
-            rm_log.append(vec![pos], idx as u32).unwrap();
-        }
-        rm_log.flush().unwrap();
-    }
-    assert_eq!(rm_log.len(), 100_000);
-    println!(
-        "Adding 100 chunks of 1,000 pos to rm_log (syncing to disk) took {}ms",
-        as_millis(now.elapsed())
-    );
-
-    // Add another 900,000 pos to the rm_log (do not sync each block, too
-    // expensive). Together with the above, this simulates 1,000 blocks with 1,000 outputs each.
-    let now = Instant::now();
-    for x in 100..1_000 {
-        for y in 0..1_000 {
-            let pos = (x * 1_000) + y + 1;
-            rm_log.append(vec![pos], (x + 1) as u32).unwrap();
-        }
-        // Do not flush to disk each time (this gets very expensive).
-        // rm_log.flush().unwrap();
-    }
-    // assert_eq!(rm_log.len(), 1_000_000);
-    println!(
"Adding 990 chunks of 1,000 pos to rm_log (without syncing) took {}ms",
|
||||
-        as_millis(now.elapsed())
-    );
-
-    // Simulate looking up existence of a large number of pos in the UTXO set.
-    let now = Instant::now();
-    for x in 0..1_000_000 {
-        assert!(rm_log.includes(x + 1));
-    }
-    println!(
-        "Checking 1,000,000 inclusions in rm_log took {}ms",
-        as_millis(now.elapsed())
-    );
-
-    // Rewind pos in chunks of 1,000 to simulate rewinding over the same blocks.
-    let now = Instant::now();
-    let mut x = 1_000;
-    while x > 0 {
-        rm_log.rewind(x - 1).unwrap();
-        x = x - 1;
-    }
-    rm_log.flush().unwrap();
-    assert_eq!(rm_log.len(), 0);
-    println!(
-        "Rewinding 1,000 chunks of 1,000 pos from rm_log took {}ms",
-        as_millis(now.elapsed())
-    );
-
-    // panic!("stop here to display results");
-
-    teardown(data_dir);
-}
-
-fn setup(test_name: &str) -> (RemoveLog, String) {
-    let _ = env_logger::init();
-    let data_dir = format!("./target/{}-{}", test_name, Utc::now().timestamp());
-    fs::create_dir_all(data_dir.clone()).unwrap();
-    let rm_log = RemoveLog::open(format!("{}/{}", data_dir, "rm_log.bin")).unwrap();
-    (rm_log, data_dir)
-}
-
-fn teardown(data_dir: String) {
-    fs::remove_dir_all(data_dir).unwrap();
-}
@@ -20,7 +20,7 @@ use chrono::prelude::Utc;
 use croaring::Bitmap;
 use rand::Rng;
 
-// We can use "andnot" to rewind the rm_log easily by passing in a "bitmask" of
+// We can use "andnot" to rewind easily by passing in a "bitmask" of
 // all the subsequent pos we want to rewind.
 #[test]
 fn test_andnot_bitmap() {
@@ -33,7 +33,7 @@ fn test_andnot_bitmap() {
     assert_eq!(res.to_vec(), vec![1, 4]);
 }
 
-// Alternatively we can use "and" to rewind the rm_log easily by passing in a
+// Alternatively we can use "and" to rewind easily by passing in a
 // "bitmask" of all the pos we want to keep.
 #[test]
 fn test_and_bitmap() {
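The two comments above describe the bitmap-based rewind that replaces the rm_log. Below is a minimal sketch (not part of the commit) of both variants using croaring's Bitmap (Bitmap::of, andnot, and, to_vec); the concrete values are illustrative, chosen only to mirror the assertion shown in the hunk above, and are not taken from the actual test bodies.

```rust
use croaring::Bitmap;

fn bitmap_rewind_sketch() {
    // Positions currently marked as removed.
    let removed = Bitmap::of(&[1, 4, 5, 6]);

    // Rewind via "andnot": subtract every pos added after the rewind point.
    let added_after_rewind_point = Bitmap::of(&[5, 6]);
    let rewound = removed.andnot(&added_after_rewind_point);
    assert_eq!(rewound.to_vec(), vec![1, 4]);

    // Equivalent rewind via "and": keep only the pos known at the rewind point.
    let known_at_rewind_point = Bitmap::of(&[1, 2, 3, 4]);
    let rewound = removed.and(&known_at_rewind_point);
    assert_eq!(rewound.to_vec(), vec![1, 4]);
}
```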