Windows Compatibility Fixes #1 (#2535)

* initial changes for windows build and unit/integration tests

* rustfmt

* wallet+store tests

* rustfmt

* fix linux daemonize

* better encapsulate file rename

* rustfmt

* remove daemonize commands

* rustfmt

* remove server start/stop commands

* add ability to drop pmmr backend files explicitly for txhashset unzip

* rustfmt

* fix pmmr tests

* rustfmt
This commit is contained in:
Yeastplume 2019-02-09 21:14:27 +00:00 committed by GitHub
parent d8605a4d51
commit 15c7a270eb
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
17 changed files with 1415 additions and 1118 deletions

1143
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -25,9 +25,7 @@ chrono = "0.4.4"
clap = { version = "2.31", features = ["yaml"] }
rpassword = "2.0.0"
ctrlc = { version = "3.1", features = ["termination"] }
cursive = "0.9.0"
humansize = "1.1.0"
daemonize = "0.3"
serde = "1"
serde_json = "1"
log = "0.4"
@ -45,6 +43,11 @@ grin_servers = { path = "./servers", version = "1.0.1" }
grin_util = { path = "./util", version = "1.0.1" }
grin_wallet = { path = "./wallet", version = "1.0.1" }
[target.'cfg(windows)'.dependencies]
cursive = { version = "0.10.0", default-features = false, features = ["pancurses-backend"] }
[target.'cfg(unix)'.dependencies]
cursive = "0.9.0"
[build-dependencies]
built = "0.3"
reqwest = "0.9"

View file

@ -857,6 +857,14 @@ impl Chain {
}
let header = self.get_block_header(&h)?;
{
let mut txhashset_ref = self.txhashset.write();
// Drop file handles in underlying txhashset
txhashset_ref.release_backend_files();
}
// Rewrite hashset
txhashset::zip_write(self.db_root.clone(), txhashset_data, &header)?;
let mut txhashset =

View file

@ -18,7 +18,7 @@
use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR};
use crate::core::core::{
Block, BlockHeader, Input, Output, OutputIdentifier, TxKernel, TxKernelEntry,
};
@ -153,6 +153,15 @@ impl TxHashSet {
})
}
/// Close all backend file handles
/// Releases the open file handles (and associated mmaps) held by each of the
/// five PMMR backends (header, sync, output, range proof, kernel). Called
/// before `txhashset::zip_write` rewrites the txhashset files on disk —
/// on Windows an open handle would otherwise block the file replacement
/// (see the "windows throws an access error" note in the file backend).
pub fn release_backend_files(&mut self) {
self.header_pmmr_h.backend.release_files();
self.sync_pmmr_h.backend.release_files();
self.output_pmmr_h.backend.release_files();
self.rproof_pmmr_h.backend.release_files();
self.kernel_pmmr_h.backend.release_files();
}
/// Check if an output is unspent.
/// We look in the index to find the output MMR pos.
/// Then we check the entry in the output MMR and confirm the hash matches.

View file

@ -12,7 +12,7 @@ edition = "2018"
[dependencies]
blake2-rfc = "0.2"
byteorder = "1"
croaring = "=0.3"
croaring = "=0.3.8"
enum_primitive = "0.1"
failure = "0.1"
failure_derive = "0.1"

View file

@ -62,6 +62,9 @@ pub trait Backend<T: PMMRable> {
fastest way to be able to allow direct access to the file
fn get_data_file_path(&self) -> &Path;
/// Release underlying datafiles and locks
fn release_files(&mut self);
/// Also a bit of a hack...
/// Saves a snapshot of the rewound utxo file with the block hash as
/// filename suffix. We need this when sending a txhashset zip file to a

View file

@ -122,6 +122,8 @@ impl<T: PMMRable> Backend<T> for VecBackend<T> {
Path::new("")
}
/// No-op: the in-memory `VecBackend` holds no file handles to release.
fn release_files(&mut self) {}
fn dump_stats(&self) {}
}

View file

@ -13,6 +13,7 @@
// limitations under the License.
/// Grin server commands processing
#[cfg(not(target_os = "windows"))]
use std::env::current_dir;
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
@ -22,7 +23,6 @@ use std::time::Duration;
use clap::ArgMatches;
use ctrlc;
use daemonize::Daemonize;
use crate::config::GlobalConfig;
use crate::core::global;
@ -31,7 +31,7 @@ use crate::servers;
use crate::tui::ui;
/// wrap below to allow UI to clean up on stop
fn start_server(config: servers::ServerConfig) {
pub fn start_server(config: servers::ServerConfig) {
start_server_tui(config);
// Just kill process for now, otherwise the process
// hangs around until sigint because the API server
@ -157,29 +157,11 @@ pub fn server_command(
});
}*/
// start the server in the different run modes (interactive or daemon)
if let Some(a) = server_args {
match a.subcommand() {
("run", _) => {
start_server(server_config);
}
("start", _) => {
let daemonize = Daemonize::new()
.pid_file("/tmp/grin.pid")
.chown_pid_file(true)
.working_directory(current_dir().unwrap())
.privileged_action(move || {
start_server(server_config.clone());
loop {
thread::sleep(Duration::from_secs(60));
}
});
match daemonize.start() {
Ok(_) => info!("Grin server successfully started."),
Err(e) => error!("Error starting: {}", e),
}
}
("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),
("", _) => {
println!("Subcommand required, use 'grin help server' for details");
}

View file

@ -44,10 +44,6 @@ subcommands:
subcommands:
- config:
about: Generate a configuration grin-server.toml file in the current directory
- start:
about: Start the Grin server as a daemon
- stop:
about: Stop the Grin server daemon
- run:
about: Run the Grin server in this console
- client:

View file

@ -11,7 +11,7 @@ edition = "2018"
[dependencies]
byteorder = "1"
croaring = "=0.3"
croaring = "=0.3.8"
env_logger = "0.5"
libc = "0.2"
failure = "0.1"

View file

@ -73,11 +73,19 @@ pub fn new_named_env(path: String, name: String, max_readers: Option<u32>) -> lm
env_builder.set_maxdbs(8).unwrap();
// half a TB should give us plenty room, will be an issue on 32 bits
// (which we don't support anyway)
env_builder
.set_mapsize(549_755_813_888)
.unwrap_or_else(|e| {
panic!("Unable to allocate LMDB space: {:?}", e);
});
#[cfg(not(target_os = "windows"))]
env_builder.set_mapsize(5_368_709_120).unwrap_or_else(|e| {
panic!("Unable to allocate LMDB space: {:?}", e);
});
//TODO: This is a temporary value to support (beta) Windows builds
//Windows allocates the entire file at once, so this needs to
//be changed to allocate as little as possible and increase as needed
#[cfg(target_os = "windows")]
env_builder.set_mapsize(524_288_000).unwrap_or_else(|e| {
panic!("Unable to allocate LMDB space: {:?}", e);
});
if let Some(max_readers) = max_readers {
env_builder
.set_maxreaders(max_readers)

View file

@ -152,6 +152,12 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
self.data_file.path()
}
/// Release underlying data files
/// Drops the file handles (and mmaps) of both the data file and the hash
/// file so the on-disk files can be renamed or replaced while this backend
/// value stays alive.
fn release_files(&mut self) {
self.data_file.release();
self.hash_file.release();
}
fn snapshot(&self, header: &BlockHeader) -> Result<(), String> {
self.leaf_set
.snapshot(header)
@ -208,8 +214,8 @@ impl<T: PMMRable> PMMRBackend<T> {
Ok(PMMRBackend {
data_dir: data_dir.to_path_buf(),
prunable,
hash_file,
data_file,
hash_file: hash_file,
data_file: data_file,
leaf_set,
prune_list,
})
@ -334,20 +340,11 @@ impl<T: PMMRable> PMMRBackend<T> {
}
self.prune_list.flush()?;
}
// 4. Rename the compact copy of hash file and reopen it.
fs::rename(
tmp_prune_file_hash.clone(),
self.data_dir.join(PMMR_HASH_FILE),
)?;
self.hash_file = DataFile::open(self.data_dir.join(PMMR_HASH_FILE))?;
self.hash_file.replace(Path::new(&tmp_prune_file_hash))?;
// 5. Rename the compact copy of the data file and reopen it.
fs::rename(
tmp_prune_file_data.clone(),
self.data_dir.join(PMMR_DATA_FILE),
)?;
self.data_file = DataFile::open(self.data_dir.join(PMMR_DATA_FILE))?;
self.data_file.replace(Path::new(&tmp_prune_file_data))?;
// 6. Write the leaf_set to disk.
// Optimize the bitmap storage in the process.

View file

@ -101,6 +101,17 @@ where
self.file.path()
}
/// Replace underlying file with another, deleting original
/// Thin delegation to `AppendOnlyFile::replace`, which drops the current
/// handles, swaps the file at `with` into place, and re-opens/remaps.
pub fn replace(&mut self, with: &Path) -> io::Result<()> {
    // Return the delegate's Result directly instead of `?` followed by
    // `Ok(())` — identical behavior, idiomatic form.
    self.file.replace(with)
}
/// Drop underlying file handles
/// Delegates to `AppendOnlyFile::release`; the handles can be re-acquired
/// later (e.g. `replace` re-opens via `init`).
pub fn release(&mut self) {
self.file.release();
}
/// Write the file out to disk, pruning removed elements.
pub fn save_prune<F>(&self, target: &str, prune_offs: &[u64], prune_cb: F) -> io::Result<()>
where
@ -125,7 +136,7 @@ where
/// latter by truncating the underlying file and re-creating the mmap.
pub struct AppendOnlyFile {
path: PathBuf,
file: File,
file: Option<File>,
mmap: Option<memmap::Mmap>,
buffer_start: usize,
buffer: Vec<u8>,
@ -135,28 +146,36 @@ pub struct AppendOnlyFile {
impl AppendOnlyFile {
/// Open a file (existing or not) as append-only, backed by a mmap.
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<AppendOnlyFile> {
let file = OpenOptions::new()
.read(true)
.append(true)
.create(true)
.open(&path)?;
let mut aof = AppendOnlyFile {
file,
file: None,
path: path.as_ref().to_path_buf(),
mmap: None,
buffer_start: 0,
buffer: vec![],
buffer_start_bak: 0,
};
// If we have a non-empty file then mmap it.
let sz = aof.size();
if sz > 0 {
aof.buffer_start = sz as usize;
aof.mmap = Some(unsafe { memmap::Mmap::map(&aof.file)? });
}
aof.init()?;
Ok(aof)
}
/// (Re)init an underlying file and its associated memmap
/// Opens `self.path` for read/append (creating it if absent) and, when the
/// file is non-empty, memory-maps it and positions `buffer_start` at the
/// current end of file. The size check exists because a zero-length file
/// cannot be memory-mapped.
pub fn init(&mut self) -> io::Result<()> {
self.file = Some(
OpenOptions::new()
.read(true)
.append(true)
.create(true)
.open(self.path.clone())?,
);
// If we have a non-empty file then mmap it.
let sz = self.size();
if sz > 0 {
self.buffer_start = sz as usize;
// NOTE: mapping is `unsafe` because the underlying file could be
// mutated externally while mapped; this module relies on its own
// access discipline over the file.
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file.as_ref().unwrap())? });
}
Ok(())
}
/// Append data to the file. Until the append-only file is synced, data is
/// only written to memory.
pub fn append(&mut self, bytes: &mut [u8]) {
@ -193,21 +212,37 @@ impl AppendOnlyFile {
pub fn flush(&mut self) -> io::Result<()> {
if self.buffer_start_bak > 0 {
// Flushing a rewound state, we need to truncate via set_len() before applying.
self.file.set_len(self.buffer_start as u64)?;
// Drop and recreate, or windows throws an access error
self.mmap = None;
self.file = None;
{
let file = OpenOptions::new()
.read(true)
.create(true)
.write(true)
.open(&self.path)?;
file.set_len(self.buffer_start as u64)?;
}
let file = OpenOptions::new()
.read(true)
.create(true)
.append(true)
.open(&self.path)?;
self.file = Some(file);
self.buffer_start_bak = 0;
}
self.buffer_start += self.buffer.len();
self.file.write_all(&self.buffer[..])?;
self.file.sync_all()?;
self.file.as_mut().unwrap().write_all(&self.buffer[..])?;
self.file.as_mut().unwrap().sync_all()?;
self.buffer = vec![];
// Note: file must be non-empty to memory map it
if self.file.metadata()?.len() == 0 {
if self.file.as_ref().unwrap().metadata()?.len() == 0 {
self.mmap = None;
} else {
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file)? });
self.mmap = Some(unsafe { memmap::Mmap::map(&self.file.as_ref().unwrap())? });
}
Ok(())
@ -313,6 +348,23 @@ impl AppendOnlyFile {
}
}
/// Replace the underlying file with another file
/// deleting the original
/// Drops the mmap and file handle first (on Windows an open handle blocks
/// remove/rename — see the note in `flush`), then removes the original,
/// renames `with` into its place, and re-opens/remaps via `init`.
pub fn replace(&mut self, with: &Path) -> io::Result<()> {
self.mmap = None;
self.file = None;
fs::remove_file(&self.path)?;
fs::rename(with, &self.path)?;
self.init()?;
Ok(())
}
/// Release underlying file handles
/// Drops both the memmap and the `File` handle so the on-disk file can be
/// renamed/replaced externally; `init` re-acquires them.
pub fn release(&mut self) {
self.mmap = None;
self.file = None;
}
/// Current size of the file in bytes.
pub fn size(&self) -> u64 {
fs::metadata(&self.path).map(|md| md.len()).unwrap_or(0)

File diff suppressed because it is too large Load diff

View file

@ -80,7 +80,15 @@ where
fs::create_dir_all(&p)?;
}
}
let mut outfile = fs::File::create(&file_path)?;
//let mut outfile = fs::File::create(&file_path)?;
let res = fs::File::create(&file_path);
let mut outfile = match res {
Err(e) => {
error!("{:?}", e);
return Err(zip::result::ZipError::Io(e));
}
Ok(r) => r,
};
io::copy(&mut file, &mut outfile)?;
}

View file

@ -137,6 +137,10 @@ where
self.wallets.insert(addr.to_owned(), (tx, wallet));
}
/// Signal the proxy's message loop to terminate by clearing the shared
/// `running` flag. Relaxed ordering is used here — presumably sufficient
/// for a standalone stop flag with no associated data to publish.
pub fn stop(&mut self) {
self.running.store(false, Ordering::Relaxed);
}
/// Run the incoming message queue and respond more or less
/// synchronously
pub fn run(&mut self) -> Result<(), libwallet::Error> {

View file

@ -26,6 +26,7 @@ use grin_keychain as keychain;
use grin_util as util;
use grin_wallet as wallet;
use std::fs;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
@ -54,6 +55,7 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
wallet_proxy.add_wallet(wallet_dir, client.get_send_instance(), wallet.clone());
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -67,6 +69,9 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
@ -108,6 +113,7 @@ fn compare_wallet_restore(
}
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -164,6 +170,9 @@ fn compare_wallet_restore(
dest_accts.as_ref().unwrap().len()
);
wp_running.store(false, Ordering::Relaxed);
//thread::sleep(Duration::from_millis(1000));
Ok(())
}
@ -208,6 +217,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
wallet_proxy.add_wallet("wallet3", client3.get_send_instance(), wallet3.clone());
// Set the wallet proxy listener running
let wp_running = wallet_proxy.running.clone();
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
@ -327,17 +337,19 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
Ok(())
})?;
wp_running.store(false, Ordering::Relaxed);
Ok(())
}
fn perform_restore(test_dir: &str) -> Result<(), libwallet::Error> {
restore_wallet(test_dir, "wallet1")?;
restore_wallet(&format!("{}_r1", test_dir), "wallet1")?;
compare_wallet_restore(
test_dir,
"wallet1",
&ExtKeychain::derive_key_id(2, 0, 0, 0, 0),
)?;
restore_wallet(test_dir, "wallet2")?;
restore_wallet(&format!("{}_r2", test_dir), "wallet2")?;
compare_wallet_restore(
test_dir,
"wallet2",
@ -353,7 +365,7 @@ fn perform_restore(test_dir: &str) -> Result<(), libwallet::Error> {
"wallet2",
&ExtKeychain::derive_key_id(2, 2, 0, 0, 0),
)?;
restore_wallet(test_dir, "wallet3")?;
restore_wallet(&format!("{}_r3", test_dir), "wallet3")?;
compare_wallet_restore(
test_dir,
"wallet3",