Cuckoo-miner update - Multiple Plugin mining (#139)

* Add the ability to load multiple mining plugins in parallel via cuckoo-miner
* Update to the newest cuckoo-miner changes
* Revert default config value
* Update the PoW document with a link to a podcast on Cuckoo Cycle
Yeastplume 2017-09-26 18:58:56 +01:00 committed by Ignotus Peverell
parent 7d48e1da49
commit 53d9ca630c
14 changed files with 1028 additions and 789 deletions


@@ -46,7 +46,7 @@ impl ApiEndpoint for ChainApi {
 	fn get(&self, _: String) -> ApiResult<Tip> {
 		match self.chain.head() {
 			Ok(tip) => Ok(Tip::from_tip(tip)),
-			Err(e) => Err(Error::Internal(format!("{:?}", e)))
+			Err(e) => Err(Error::Internal(format!("{:?}", e))),
 		}
 	}
 }
@@ -70,12 +70,14 @@ impl ApiEndpoint for OutputApi {
 	fn get(&self, id: String) -> ApiResult<Output> {
 		debug!("GET output {}", id);
-		let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?;
+		let c = util::from_hex(id.clone()).map_err(|_| {
+			Error::Argument(format!("Not a valid commitment: {}", id))
+		})?;
 		let commit = Commitment::from_vec(c);
-		let out = self.chain.get_unspent(&commit)
-			.map_err(|_| Error::NotFound)?;
-		let header = self.chain.get_block_header_by_output_commit(&commit)
-			.map_err(|_| Error::NotFound)?;
+		let out = self.chain.get_unspent(&commit).map_err(|_| Error::NotFound)?;
+		let header = self.chain
+			.get_block_header_by_output_commit(&commit)
+			.map_err(|_| Error::NotFound)?;
 		Ok(Output::from_output(&out, &header))
@@ -90,7 +92,8 @@ pub struct PoolApi<T> {
 }
 impl<T> ApiEndpoint for PoolApi<T>
-	where T: pool::BlockChain + Clone + Send + Sync + 'static
+where
+	T: pool::BlockChain + Clone + Send + Sync + 'static,
 {
 	type ID = String;
 	type T = PoolInfo;
@@ -116,7 +119,9 @@ impl<T> ApiEndpoint for PoolApi<T>
 		})?;
 		let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
-			Error::Argument("Could not deserialize transaction, invalid format.".to_string())
+			Error::Argument(
+				"Could not deserialize transaction, invalid format.".to_string(),
+			)
 		})?;
 		let source = pool::TxSource {
@@ -148,20 +153,21 @@ pub struct TxWrapper {
 /// Start all server REST APIs. Just register all of them on a ApiServer
 /// instance and runs the corresponding HTTP server.
-pub fn start_rest_apis<T>(addr: String,
-                          chain: Arc<chain::Chain>,
-                          tx_pool: Arc<RwLock<pool::TransactionPool<T>>>)
-	where T: pool::BlockChain + Clone + Send + Sync + 'static
+pub fn start_rest_apis<T>(
+	addr: String,
+	chain: Arc<chain::Chain>,
+	tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+) where
+	T: pool::BlockChain + Clone + Send + Sync + 'static,
 {
 	thread::spawn(move || {
 		let mut apis = ApiServer::new("/v1".to_string());
-		apis.register_endpoint("/chain".to_string(),
-		                       ChainApi { chain: chain.clone() });
-		apis.register_endpoint("/chain/utxo".to_string(),
-		                       OutputApi {
-			                       chain: chain.clone(),
-		                       });
+		apis.register_endpoint("/chain".to_string(), ChainApi { chain: chain.clone() });
+		apis.register_endpoint(
+			"/chain/utxo".to_string(),
+			OutputApi { chain: chain.clone() },
+		);
 		apis.register_endpoint("/pool".to_string(), PoolApi { tx_pool: tx_pool });
 		apis.start(&addr[..]).unwrap_or_else(|e| {


@ -22,9 +22,7 @@ use std::fs::File;
use toml; use toml;
use grin::ServerConfig; use grin::ServerConfig;
use pow::types::MinerConfig; use pow::types::MinerConfig;
use types::{ConfigMembers, use types::{ConfigMembers, GlobalConfig, ConfigError};
GlobalConfig,
ConfigError};
/// The default file name to use when trying to derive /// The default file name to use when trying to derive
/// the config file location /// the config file location
@ -36,148 +34,159 @@ const GRIN_HOME: &'static str = ".grin";
impl Default for ConfigMembers { impl Default for ConfigMembers {
fn default() -> ConfigMembers { fn default() -> ConfigMembers {
ConfigMembers { ConfigMembers {
server: ServerConfig::default(), server: ServerConfig::default(),
mining: Some(MinerConfig::default()), mining: Some(MinerConfig::default()),
//wallet: Some(WalletConfig::default()), }
} }
}
} }
impl Default for GlobalConfig { impl Default for GlobalConfig {
fn default() -> GlobalConfig{ fn default() -> GlobalConfig {
GlobalConfig { GlobalConfig {
config_file_path: None, config_file_path: None,
using_config_file: false, using_config_file: false,
members: Some(ConfigMembers::default()) members: Some(ConfigMembers::default()),
} }
} }
} }
impl GlobalConfig { impl GlobalConfig {
/// Need to decide on rules where to read the config file from,
/// but will take a stab at logic for now
/// Need to decide on rules where to read the config file from, fn derive_config_location(&mut self) -> Result<(), ConfigError> {
/// but will take a stab at logic for now // First, check working directory
let mut config_path = env::current_dir().unwrap();
fn derive_config_location(&mut self) -> Result<(), ConfigError> { config_path.push(CONFIG_FILE_NAME);
//First, check working directory if config_path.exists() {
let mut config_path = env::current_dir().unwrap(); self.config_file_path = Some(config_path);
config_path.push(CONFIG_FILE_NAME); return Ok(());
if config_path.exists() { }
self.config_file_path = Some(config_path); // Next, look in directory of executable
return Ok(()) let mut config_path = env::current_exe().unwrap();
}
//Next, look in directory of executable
let mut config_path=env::current_exe().unwrap();
config_path.pop(); config_path.pop();
config_path.push(CONFIG_FILE_NAME); config_path.push(CONFIG_FILE_NAME);
if config_path.exists() { if config_path.exists() {
self.config_file_path = Some(config_path); self.config_file_path = Some(config_path);
return Ok(()) return Ok(());
} }
//Then look in {user_home}/.grin // Then look in {user_home}/.grin
let config_path = env::home_dir(); let config_path = env::home_dir();
if let Some(mut p) = config_path { if let Some(mut p) = config_path {
p.push(GRIN_HOME); p.push(GRIN_HOME);
p.push(CONFIG_FILE_NAME); p.push(CONFIG_FILE_NAME);
if p.exists() { if p.exists() {
self.config_file_path = Some(p); self.config_file_path = Some(p);
return Ok(()) return Ok(());
} }
} }
// Give up // Give up
Err(ConfigError::FileNotFoundError(String::from(""))) Err(ConfigError::FileNotFoundError(String::from("")))
} }
/// Takes the path to a config file, or if NONE, tries /// Takes the path to a config file, or if NONE, tries
/// to determine a config file based on rules in /// to determine a config file based on rules in
/// derive_config_location /// derive_config_location
pub fn new(file_path:Option<&str>) -> Result<GlobalConfig, ConfigError> { pub fn new(file_path: Option<&str>) -> Result<GlobalConfig, ConfigError> {
let mut return_value = GlobalConfig::default(); let mut return_value = GlobalConfig::default();
if let Some(fp) = file_path { if let Some(fp) = file_path {
return_value.config_file_path = Some(PathBuf::from(&fp)); return_value.config_file_path = Some(PathBuf::from(&fp));
} else { } else {
return_value.derive_config_location().unwrap(); return_value.derive_config_location().unwrap();
} }
//No attempt at a config file, just return defaults // No attempt at a config file, just return defaults
if let None = return_value.config_file_path { if let None = return_value.config_file_path {
return Ok(return_value); return Ok(return_value);
} }
//Config file path is given but not valid // Config file path is given but not valid
if !return_value.config_file_path.as_mut().unwrap().exists() { if !return_value.config_file_path.as_mut().unwrap().exists() {
return Err( return Err(ConfigError::FileNotFoundError(String::from(
ConfigError::FileNotFoundError(String::from(return_value.config_file_path.as_mut() return_value
.unwrap().to_str().unwrap().clone())) .config_file_path
); .as_mut()
} .unwrap()
.to_str()
.unwrap()
.clone(),
)));
}
//Try to parse the config file if it exists // Try to parse the config file if it exists
//explode if it does exist but something's wrong // explode if it does exist but something's wrong
//with it // with it
return_value.read_config() return_value.read_config()
} }
/// Read config /// Read config
pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> { pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> {
let mut file = File::open(self.config_file_path.as_mut().unwrap())?; let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
let mut contents = String::new(); let mut contents = String::new();
file.read_to_string(&mut contents)?; file.read_to_string(&mut contents)?;
let decoded:Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents); let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
match decoded { match decoded {
Ok(mut gc) => { Ok(mut gc) => {
//Put the struct back together, because the config // Put the struct back together, because the config
//file was flattened a bit // file was flattened a bit
gc.server.mining_config = gc.mining.clone(); gc.server.mining_config = gc.mining.clone();
self.using_config_file = true; self.using_config_file = true;
self.members = Some(gc); self.members = Some(gc);
return Ok(self) return Ok(self);
}, }
Err (e) => { Err(e) => {
return Err( return Err(ConfigError::ParseError(
ConfigError::ParseError(String::from(self.config_file_path.as_mut() String::from(
.unwrap().to_str().unwrap().clone()), self.config_file_path
String::from(format!("{}", e)) .as_mut()
) .unwrap()
); .to_str()
} .unwrap()
} .clone(),
} ),
String::from(format!("{}", e)),
));
}
}
}
/// Serialize config /// Serialize config
pub fn ser_config(&mut self) -> Result<String, ConfigError> { pub fn ser_config(&mut self) -> Result<String, ConfigError> {
let encoded:Result<String, toml::ser::Error> = toml::to_string(self.members.as_mut().unwrap()); let encoded: Result<String, toml::ser::Error> =
match encoded { toml::to_string(self.members.as_mut().unwrap());
Ok(enc) => { match encoded {
return Ok(enc) Ok(enc) => return Ok(enc),
}, Err(e) => {
Err (e) => { return Err(ConfigError::SerializationError(
return Err( String::from(format!("{}", e)),
ConfigError::SerializationError( ));
String::from(format!("{}", e)) }
) }
); }
}
}
}
/*pub fn wallet_enabled(&mut self) -> bool { /*pub fn wallet_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet; return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet;
}*/ }*/
/// Enable mining /// Enable mining
pub fn mining_enabled(&mut self) -> bool { pub fn mining_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining; return self.members
} .as_mut()
.unwrap()
.mining
.as_mut()
.unwrap()
.enable_mining;
}
} }
#[test] #[test]
fn test_read_config() { fn test_read_config() {
let toml_str = r#" let toml_str = r#"
#Section is optional, if not here or enable_server is false, will only run wallet #Section is optional, if not here or enable_server is false, will only run wallet
[server] [server]
enable_server = true enable_server = true
@ -202,9 +211,9 @@ fn test_read_config() {
"#; "#;
let mut decoded: GlobalConfig = toml::from_str(toml_str).unwrap(); let mut decoded: GlobalConfig = toml::from_str(toml_str).unwrap();
decoded.server.as_mut().unwrap().mining_config = decoded.mining; decoded.server.as_mut().unwrap().mining_config = decoded.mining;
println!("Decoded.server: {:?}", decoded.server); println!("Decoded.server: {:?}", decoded.server);
println!("Decoded wallet: {:?}", decoded.wallet); println!("Decoded wallet: {:?}", decoded.wallet);
panic!("panic"); panic!("panic");
} }
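
For orientation, here is a minimal usage sketch of the configuration API reformatted above (a sketch only, not part of the commit). `GlobalConfig::new`, `using_config_file`, `mining_enabled` and `ser_config` are taken from the code in this file; the `grin_config` crate alias and the `main` wrapper are assumptions.

```rust
extern crate grin_config as config;

use config::GlobalConfig;

fn main() {
	// Passing None triggers derive_config_location(): the working directory,
	// then the executable's directory, then {user_home}/.grin are checked.
	let mut global_config = GlobalConfig::new(None).unwrap_or_else(|e| {
		panic!("Error loading configuration: {}", e);
	});

	if global_config.using_config_file {
		println!(
			"Using configuration file at {:?}",
			global_config.config_file_path
		);
		// mining_enabled() digs into members.mining.enable_mining
		println!("Mining enabled: {}", global_config.mining_enabled());
	} else {
		println!("No config file found, using defaults.");
	}

	// The flattened [server]/[mining] sections can be serialized back to TOML.
	println!("{}", global_config.ser_config().unwrap());
}
```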


@@ -34,4 +34,4 @@ extern crate grin_pow as pow;
 pub mod config;
 pub mod types;
 pub use types::{GlobalConfig, ConfigMembers, ConfigError};


@ -24,45 +24,50 @@ use pow::types::MinerConfig;
/// Error type wrapping config errors. /// Error type wrapping config errors.
#[derive(Debug)] #[derive(Debug)]
pub enum ConfigError { pub enum ConfigError {
/// Error with parsing of config file /// Error with parsing of config file
ParseError (String, String), ParseError(String, String),
/// Error with fileIO while reading config file /// Error with fileIO while reading config file
FileIOError (String, String), FileIOError(String, String),
/// No file found /// No file found
FileNotFoundError (String), FileNotFoundError(String),
/// Error serializing config values /// Error serializing config values
SerializationError (String), SerializationError(String),
} }
impl fmt::Display for ConfigError { impl fmt::Display for ConfigError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
ConfigError::ParseError(ref file_name, ref message) => { ConfigError::ParseError(ref file_name, ref message) => {
write!(f, "Error parsing configuration file at {} - {}",file_name, message) write!(
} f,
ConfigError::FileIOError(ref file_name, ref message) => { "Error parsing configuration file at {} - {}",
write!(f, "{} {}", message, file_name) file_name,
} message
ConfigError::FileNotFoundError(ref file_name) => { )
write!(f, "Configuration file not found: {}", file_name) }
} ConfigError::FileIOError(ref file_name, ref message) => {
ConfigError::SerializationError(ref message) => { write!(f, "{} {}", message, file_name)
write!(f, "Error serializing configuration: {}", message) }
} ConfigError::FileNotFoundError(ref file_name) => {
} write!(f, "Configuration file not found: {}", file_name)
} }
ConfigError::SerializationError(ref message) => {
write!(f, "Error serializing configuration: {}", message)
}
}
}
} }
impl From<io::Error> for ConfigError { impl From<io::Error> for ConfigError {
fn from(error: io::Error) -> ConfigError { fn from(error: io::Error) -> ConfigError {
ConfigError::FileIOError( ConfigError::FileIOError(
String::from(""), String::from(""),
String::from(format!("Error loading config file: {}",error)), String::from(format!("Error loading config file: {}", error)),
) )
} }
} }
/// Going to hold all of the various configuration types /// Going to hold all of the various configuration types
@ -72,30 +77,28 @@ impl From<io::Error> for ConfigError {
/// as they tend to be quite nested in the code /// as they tend to be quite nested in the code
/// Most structs optional, as they may or may not /// Most structs optional, as they may or may not
/// be needed depending on what's being run /// be needed depending on what's being run
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct GlobalConfig { pub struct GlobalConfig {
///Keep track of the file we've read /// Keep track of the file we've read
pub config_file_path: Option<PathBuf>, pub config_file_path: Option<PathBuf>,
/// keep track of whether we're using /// keep track of whether we're using
/// a config file or just the defaults /// a config file or just the defaults
/// for each member /// for each member
pub using_config_file: bool, pub using_config_file: bool,
/// Global member config /// Global member config
pub members: Option<ConfigMembers>, pub members: Option<ConfigMembers>,
} }
/// Keeping an 'inner' structure here, as the top /// Keeping an 'inner' structure here, as the top
/// level GlobalConfigContainer options might want to keep /// level GlobalConfigContainer options might want to keep
/// internal state that we don't necessarily /// internal state that we don't necessarily
/// want serialised or deserialised /// want serialised or deserialised
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct ConfigMembers { pub struct ConfigMembers {
/// Server config /// Server config
pub server: ServerConfig, pub server: ServerConfig,
/// Mining config /// Mining config
pub mining: Option<MinerConfig>, pub mining: Option<MinerConfig>,
//removing wallet from here for now, //removing wallet from here for now,
//as its concerns are separate from the server's, really //as its concerns are separate from the server's, really
//given it needs to manage keys. It should probably //given it needs to manage keys. It should probably


@@ -56,7 +56,7 @@ from the build directory will run grin using the defaults in the grin.toml file,
 For the time being, it's recommended just to put the built version of grin on your path, e.g. via:
 ```
-export $PATH /path/to/grin/dir/target/grin:$PATH
+export PATH=/path/to/grin/dir/target/grin:$PATH
 ```
# Configuration # Configuration


@@ -1,8 +1,6 @@
 Grin's Proof-of-Work
 ====================
-[WIP and subject to review, may still contain errors]
 This document is meant to outline, at a level suitable for someone without prior knowledge,
 the algorithms and processes currently involved in Grin's Proof-of-Work system. We'll start
 with a general overview of cycles in a graph and the Cuckoo Cycle algorithm which forms the
@@ -16,8 +14,8 @@ Please note that Grin is currently under active development, and any and all of
 Grin's basic Proof-of-Work algorithm is called Cuckoo Cycle, which is specifically designed
 to be resistant to Bitcoin style hardware arms-races. It is primarily a memory bound algorithm,
-which, (at least in theory,) means that solution time is limited to the speed of a system's RAM
-rather than processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
+which, (at least in theory,) means that solution time is bound by memory bandwidth
+rather than raw processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
 most commodity hardware, and require far less energy than most other GPU, CPU or ASIC-bound
 proof of work algorithms.
@@ -26,11 +24,16 @@ can be found in [his github repository](https://github.com/tromp/cuckoo). The
 [white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf) is the best source of
 further technical details.
+There is also a [podcast with Mike from Monero Monitor](https://moneromonitor.com/episodes/2017-09-26-Episode-014.html)
+in which John Tromp talks at length about Cuckoo Cycle; recommended listening for anyone wanting
+more background on Cuckoo Cycle, including more technical detail, the history of the algorithm's development
+and some of the motivations behind it.
 ## Cycles in a Graph
 Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of N nodes
 and M edges. In plainer terms, a bipartite graph is one in which edges (i.e. lines connecting nodes)
-go only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
+travel only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
 one side of the graph is an array numbered with odd indices (up to the size of the graph), and the other is numbered with even
 indices. A node is simply a numbered 'space' on either side of the Cuckoo Table, and an Edge is a
 line connecting two nodes on opposite sides. The simple graph below denotes just such a graph,
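
As a rough illustration of the node and edge layout described above (a sketch only: the mixing function and graph size here are stand-ins, not Grin's actual siphash-based edge generation):

```rust
// Toy bipartite edge generation: each nonce is mapped to one even-indexed and
// one odd-indexed node, i.e. an edge between the two halves of the graph.
// A Cuckoo Cycle proof is a set of nonces whose edges form a cycle of a fixed
// length in this graph.
fn toy_edge(nonce: u64, num_nodes: u64) -> (u64, u64) {
	// Stand-in mixer; the real algorithm derives edge endpoints with siphash
	// keyed by the block header.
	let h = nonce.wrapping_mul(0x9E37_79B9_7F4A_7C15);
	let half = num_nodes / 2;
	let even = (h % half) * 2; // node on the even-indexed side
	let odd = ((h >> 32) % half) * 2 + 1; // node on the odd-indexed side
	(even, odd)
}

fn main() {
	for nonce in 0..4u64 {
		println!("nonce {} -> edge {:?}", nonce, toy_edge(nonce, 16));
	}
}
```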


@@ -34,7 +34,7 @@ seeding_type = "None"
 #UserTesting - For regular user testing, much lighter than production mode
 #Production - Full production cuckoo parameters
-mining_parameter_mode = "UserTesting"
+mining_parameter_mode = "Production"
 #7 = Bit flags for FULL_NODE, this structure needs to be changed
 #internally to make it more configurable
@@ -74,27 +74,10 @@ cuckoo_miner_async_mode = false
 #cuckoo_miner_plugin_dir = "target/debug/plugins"
-#if using cuckoo_miner, the implementation to use.. currently
-#just filters for this word in the filenames in the plugin
-#directory
-#Plugins currently included are:
-#"simple" : the basic cuckoo algorithm
-#"edgetrim" : an algorithm trading speed for a much lower memory footprint
-#"matrix" : fastest available CPU miner, with largest memory footprint
-#"tomato" : time-memory trade-off, low memory but very slow
-#Not included but verified working:
-#"cuda" : a GPU miner, which currently needs to be built and installed
-#separately from the cuckoo-miner repository. Instructions found there
-cuckoo_miner_plugin_type = "simple"
-#the list of parameters if you're using "edgetrim" or "matrix"
-#cuckoo_miner_parameter_list = {NUM_THREADS=4, NUM_TRIMS=7}
 #The amount of time, in seconds, to attempt to mine on a particular
 #header before stopping and re-collecting transactions from the pool
-attempt_time_per_block = 30
+attempt_time_per_block = 90
 #the wallet receiver to which coinbase rewards will be sent
@@ -106,3 +89,40 @@ burn_reward = true
 #testing value, optional
 #slow_down_in_millis = 30
+#########################################
+### CUCKOO MINER PLUGIN CONFIGURATION ###
+#########################################
+# These entries configure instances of cuckoo miner
+# plugins if the 'use_cuckoo_miner' value above is
+# set to 'true'.
+#
+# Multiple plugins can be specified (e.g. a CPU
+# miner and a GPU miner running in parallel). However,
+# if 'cuckoo_miner_async_mode' above is set to 'false', only
+# the first plugin specified will be used for mining
+# in single-threaded mode.
+# You'll likely get the best performance using a
+# single GPU and a single CPU plugin in parallel.
+#The fastest CPU algorithm, but consumes the most memory
+[[mining.cuckoo_miner_plugin_config]]
+type_filter = "mean_cpu"
+parameter_list = {NUM_THREADS=4, NUM_TRIMS=64}
+#note lean_cpu currently has a bug which prevents it from
+#working with threads > 1
+#[[mining.cuckoo_miner_plugin_config]]
+#type_filter = "lean_cpu"
+#parameter_list = {NUM_THREADS=1, NUM_TRIMS=7}
+#CUDA version of the lean miner
+#Can currently be used only in Production (30) mode
+#[[mining.cuckoo_miner_plugin_config]]
+#type_filter = "lean_cuda"
+#parameter_list = {}
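
In code, the same parallel-plugin setup corresponds roughly to building a list of plugin configs and handing it to the miner configuration, much as the test framework later in this commit does. A minimal sketch, assuming the `grin_pow` crate alias; the field names come from this commit, while the concrete values are illustrative:

```rust
extern crate grin_pow as pow;

use pow::types::{CuckooMinerPluginConfig, MinerConfig};

fn build_miner_config() -> MinerConfig {
	// First plugin: the fast CPU miner from the TOML example above.
	let mut mean_cpu = CuckooMinerPluginConfig::default();
	mean_cpu.type_filter = String::from("mean_cpu");

	// Second plugin, e.g. the CUDA lean miner. Both run in parallel only when
	// cuckoo_miner_async_mode is true; in sync mode the first entry is used.
	let mut lean_cuda = CuckooMinerPluginConfig::default();
	lean_cuda.type_filter = String::from("lean_cuda");

	MinerConfig {
		enable_mining: true,
		use_cuckoo_miner: true,
		cuckoo_miner_async_mode: Some(true),
		cuckoo_miner_plugin_dir: Some(String::from("target/debug/plugins")),
		cuckoo_miner_plugin_config: Some(vec![mean_cpu, lean_cuda]),
		..Default::default()
	}
}
```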


@@ -19,10 +19,10 @@ use rand::{self, Rng};
 use std::sync::{Arc, RwLock};
 use std::thread;
 use std;
-use std::{str};
+use std::str;
 use time;
-use adapters::{PoolToChainAdapter};
+use adapters::PoolToChainAdapter;
 use api;
 use core::consensus;
 use core::core;
@@ -34,9 +34,9 @@ use core::core::hash::{Hash, Hashed};
 use pow::MiningWorker;
 use pow::types::MinerConfig;
 use core::ser;
-use core::ser::{AsFixedBytes};
+use core::ser::AsFixedBytes;
-//use core::genesis;
+// use core::genesis;
 use chain;
 use secp;
@@ -62,7 +62,7 @@ pub struct HeaderPartWriter {
 	// Post nonce is currently variable length
 	// because of difficulty
 	pub post_nonce: Vec<u8>,
-	//which difficulty field we're on
+	// which difficulty field we're on
 	bytes_written: usize,
 	writing_pre: bool,
 }
@@ -79,7 +79,7 @@ impl Default for HeaderPartWriter {
 }
 impl HeaderPartWriter {
-	pub fn parts_as_hex_strings(&self)->(String, String) {
+	pub fn parts_as_hex_strings(&self) -> (String, String) {
 		(
 			String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
 			String::from(format!("{:02x}", self.post_nonce.iter().format(""))),
@@ -94,17 +94,21 @@ impl ser::Writer for HeaderPartWriter {
 	fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
 		if self.writing_pre {
-			for i in 0..bytes_in.len() {self.pre_nonce.push(bytes_in.as_ref()[i])};
+			for i in 0..bytes_in.len() {
+				self.pre_nonce.push(bytes_in.as_ref()[i])
+			}
-		} else if self.bytes_written!=0 {
+		} else if self.bytes_written != 0 {
-			for i in 0..bytes_in.len() {self.post_nonce.push(bytes_in.as_ref()[i])};
+			for i in 0..bytes_in.len() {
+				self.post_nonce.push(bytes_in.as_ref()[i])
+			}
 		}
-		self.bytes_written+=bytes_in.len();
+		self.bytes_written += bytes_in.len();
-		if self.bytes_written==PRE_NONCE_SIZE && self.writing_pre {
+		if self.bytes_written == PRE_NONCE_SIZE && self.writing_pre {
-			self.writing_pre=false;
+			self.writing_pre = false;
-			self.bytes_written=0;
+			self.bytes_written = 0;
 		}
 		Ok(())
@@ -116,18 +120,19 @@ pub struct Miner {
 	chain: Arc<chain::Chain>,
 	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
-	//Just to hold the port we're on, so this miner can be identified
-	//while watching debug output
+	// Just to hold the port we're on, so this miner can be identified
+	// while watching debug output
 	debug_output_id: String,
 }
 impl Miner {
 	/// Creates a new Miner. Needs references to the chain state and its
 	/// storage.
-	pub fn new(config: MinerConfig,
-	           chain_ref: Arc<chain::Chain>,
-	           tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
-	           -> Miner {
+	pub fn new(
+		config: MinerConfig,
+		chain_ref: Arc<chain::Chain>,
+		tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
+	) -> Miner {
 		Miner {
 			config: config,
 			chain: chain_ref,
@ -139,29 +144,33 @@ impl Miner {
/// Keeping this optional so setting in a separate funciton /// Keeping this optional so setting in a separate funciton
/// instead of in the new function /// instead of in the new function
pub fn set_debug_output_id(&mut self, debug_output_id: String){ pub fn set_debug_output_id(&mut self, debug_output_id: String) {
self.debug_output_id=debug_output_id; self.debug_output_id = debug_output_id;
} }
/// Inner part of the mining loop for cuckoo-miner asynch mode /// Inner part of the mining loop for cuckoo-miner async mode
pub fn inner_loop_async(&self, pub fn inner_loop_async(
plugin_miner:&mut PluginMiner, &self,
difficulty:Difficulty, plugin_miner: &mut PluginMiner,
b:&mut Block, difficulty: Difficulty,
cuckoo_size: u32, b: &mut Block,
head:&BlockHeader, cuckoo_size: u32,
latest_hash:&Hash, head: &BlockHeader,
attempt_time_per_block: u32) latest_hash: &Hash,
-> Option<Proof> { attempt_time_per_block: u32,
) -> Option<Proof> {
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.", debug!(
self.debug_output_id, "(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
cuckoo_size, self.debug_output_id,
attempt_time_per_block, cuckoo_size,
b.header.height, attempt_time_per_block,
b.header.difficulty); b.header.height,
b.header.difficulty
);
// look for a pow for at most 10 sec on the same block (to give a chance to new // look for a pow for at most attempt_time_per_block sec on the
// same block (to give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
// Will change this to something else at some point // Will change this to something else at some point
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
@ -170,7 +179,7 @@ impl Miner {
let stat_output_interval = 2; let stat_output_interval = 2;
let mut next_stat_output = time::get_time().sec + stat_output_interval; let mut next_stat_output = time::get_time().sec + stat_output_interval;
//Get parts of the header // Get parts of the header
let mut header_parts = HeaderPartWriter::default(); let mut header_parts = HeaderPartWriter::default();
ser::Writeable::write(&b.header, &mut header_parts).unwrap(); ser::Writeable::write(&b.header, &mut header_parts).unwrap();
let (pre, post) = header_parts.parts_as_hex_strings(); let (pre, post) = header_parts.parts_as_hex_strings();
@ -182,41 +191,54 @@ impl Miner {
let (pre, post) = header_parts.parts_as_hex_strings(); let (pre, post) = header_parts.parts_as_hex_strings();
println!("pre, post: {}, {}", pre, post);*/ println!("pre, post: {}, {}", pre, post);*/
//Start the miner working // Start the miner working
let miner = plugin_miner.get_consumable(); let miner = plugin_miner.get_consumable();
let job_handle=miner.notify(1, &pre, &post, difficulty.into_num()).unwrap(); let job_handle = miner.notify(1, &pre, &post, difficulty.into_num()).unwrap();
let mut sol=None; let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline { while head.hash() == *latest_hash && time::get_time().sec < deadline {
if let Some(s) = job_handle.get_solution() { if let Some(s) = job_handle.get_solution() {
sol = Some(Proof::new(s.solution_nonces.to_vec())); sol = Some(Proof::new(s.solution_nonces.to_vec()));
b.header.nonce=s.get_nonce_as_u64(); b.header.nonce = s.get_nonce_as_u64();
println!("Nonce: {}", b.header.nonce); // debug!("Nonce: {}", b.header.nonce);
break; break;
} }
if time::get_time().sec > next_stat_output { if time::get_time().sec > next_stat_output {
let stats = job_handle.get_stats(); let mut sps_total = 0.0;
if let Ok(stat_vec) = stats { for i in 0..plugin_miner.loaded_plugin_count() {
for s in stat_vec { let stats = job_handle.get_stats(i);
if s.last_start_time==0 { if let Ok(stat_vec) = stats {
continue; for s in stat_vec {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!(
"Mining: Plugin {} - Device {} ({}): Last Solution time: {}s; \
Solutions per second: {:.*} - Total Attempts: {}",
i,
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec,
s.iterations_completed
);
if last_hashes_per_sec.is_finite() {
sps_total += last_hashes_per_sec;
}
} }
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!("Mining on Device {} - {}: Last hash time: {} - Hashes per second: {:.*} - Total Attempts: {}",
s.device_id, s.device_name,
last_solution_time_secs, 3, last_hashes_per_sec,
s.iterations_completed);
} }
} debug!("Total solutions per second: {}", sps_total);
next_stat_output = time::get_time().sec + stat_output_interval; next_stat_output = time::get_time().sec + stat_output_interval;
}
} }
} }
if sol==None { if sol == None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...", debug!(
self.debug_output_id, "(Server ID: {}) No solution found after {} seconds, continuing...",
job_handle.get_hashes_since_last_call().unwrap()) self.debug_output_id,
attempt_time_per_block
);
} }
job_handle.stop_jobs(); job_handle.stop_jobs();
@ -224,83 +246,183 @@ impl Miner {
} }
/// The inner part of mining loop for synchronous mode /// The inner part of mining loop for cuckoo miner sync mode
pub fn inner_loop_sync<T: MiningWorker>(&self, pub fn inner_loop_sync_plugin(
miner:&mut T, &self,
b:&mut Block, plugin_miner: &mut PluginMiner,
cuckoo_size: u32, b: &mut Block,
head:&BlockHeader, cuckoo_size: u32,
attempt_time_per_block: u32, head: &BlockHeader,
latest_hash:&mut Hash) attempt_time_per_block: u32,
-> Option<Proof> { latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new // look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.", debug!(
self.debug_output_id, "(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
cuckoo_size, on block {} at difficulty {}.",
attempt_time_per_block, self.debug_output_id,
latest_hash, cuckoo_size,
b.header.difficulty); attempt_time_per_block,
latest_hash,
b.header.difficulty
);
let mut iter_count = 0; let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 { if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.", debug!(
self.debug_output_id, "(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.config.slow_down_in_millis.unwrap()); self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
);
} }
let mut sol=None; let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline { while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash(); let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) { if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff=proof.clone().to_difficulty(); let proof_diff = proof.clone().to_difficulty();
/*debug!("(Server ID: {}) Header difficulty is: {}, Proof difficulty is: {}",
self.debug_output_id,
b.header.difficulty,
proof_diff);*/
if proof_diff >= b.header.difficulty { if proof_diff >= b.header.difficulty {
sol = Some(proof); sol = Some(proof);
break; break;
} }
} }
if time::get_time().sec >= next_stat_check {
let stats_vec = plugin_miner.get_stats(0).unwrap();
for s in stats_vec.into_iter() {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
println!(
"Plugin 0 - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec
);
}
next_stat_check = time::get_time().sec + stat_check_interval;
}
b.header.nonce += 1; b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h; *latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1; iter_count += 1;
//Artificial slow down // Artificial slow down
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 { if self.config.slow_down_in_millis != None &&
thread::sleep(std::time::Duration::from_millis(self.config.slow_down_in_millis.unwrap())); self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
} }
} }
if sol==None { if sol == None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...", debug!(
self.debug_output_id, "(Server ID: {}) No solution found after {} iterations, continuing...",
iter_count) self.debug_output_id,
iter_count
)
} }
sol sol
} }
/// The inner part of mining loop for the internal miner
pub fn inner_loop_sync_internal<T: MiningWorker>(
&self,
miner: &mut T,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
debug!(
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
latest_hash,
b.header.difficulty
);
let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!(
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
);
}
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
if proof_diff >= b.header.difficulty {
sol = Some(proof);
break;
}
}
b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1;
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
}
}
if sol == None {
debug!(
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
)
}
sol
}
/// Starts the mining loop, building a new block on top of the existing /// Starts the mining loop, building a new block on top of the existing
/// chain anytime required and looking for PoW solution. /// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
miner_config:MinerConfig,
cuckoo_size:u32,
proof_size:usize) {
info!("(Server ID: {}) Starting miner loop.", self.debug_output_id); info!("(Server ID: {}) Starting miner loop.", self.debug_output_id);
let mut plugin_miner=None; let mut plugin_miner = None;
let mut miner=None; let mut miner = None;
if miner_config.use_cuckoo_miner { if miner_config.use_cuckoo_miner {
plugin_miner = Some(PluginMiner::new(consensus::EASINESS, cuckoo_size, proof_size)); plugin_miner = Some(PluginMiner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
plugin_miner.as_mut().unwrap().init(miner_config.clone()); plugin_miner.as_mut().unwrap().init(miner_config.clone());
} else { } else {
miner = Some(cuckoo::Miner::new(consensus::EASINESS, cuckoo_size, proof_size)); miner = Some(cuckoo::Miner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
} }
let mut coinbase = self.get_coinbase(); let mut coinbase = self.get_coinbase();
@ -311,45 +433,54 @@ impl Miner {
let mut latest_hash = self.chain.head().unwrap().last_block_h; let mut latest_hash = self.chain.head().unwrap().last_block_h;
let mut b = self.build_block(&head, coinbase.clone()); let mut b = self.build_block(&head, coinbase.clone());
let mut sol=None; let mut sol = None;
let mut use_async=false; let mut use_async = false;
if let Some(c)=self.config.cuckoo_miner_async_mode { if let Some(c) = self.config.cuckoo_miner_async_mode {
if c { if c {
use_async=true; use_async = true;
} }
} }
if let Some(mut p) = plugin_miner.as_mut() { if let Some(mut p) = plugin_miner.as_mut() {
if use_async { if use_async {
sol = self.inner_loop_async(&mut p, sol = self.inner_loop_async(
b.header.difficulty.clone(), &mut p,
&mut b, b.header.difficulty.clone(),
cuckoo_size, &mut b,
&head, cuckoo_size,
&latest_hash, &head,
miner_config.attempt_time_per_block); &latest_hash,
miner_config.attempt_time_per_block,
);
} else { } else {
sol = self.inner_loop_sync(p, sol = self.inner_loop_sync_plugin(
&mut b, p,
cuckoo_size, &mut b,
&head, cuckoo_size,
miner_config.attempt_time_per_block, &head,
&mut latest_hash); miner_config.attempt_time_per_block,
&mut latest_hash,
);
} }
} }
if let Some(mut m) = miner.as_mut() { if let Some(mut m) = miner.as_mut() {
sol = self.inner_loop_sync(m, sol = self.inner_loop_sync_internal(
&mut b, m,
cuckoo_size, &mut b,
&head, cuckoo_size,
miner_config.attempt_time_per_block, &head,
&mut latest_hash); miner_config.attempt_time_per_block,
&mut latest_hash,
);
} }
// if we found a solution, push our block out // if we found a solution, push our block out
if let Some(proof) = sol { if let Some(proof) = sol {
info!("(Server ID: {}) Found valid proof of work, adding block {}.", info!(
self.debug_output_id, b.hash()); "(Server ID: {}) Found valid proof of work, adding block {}.",
b.header.pow = proof; self.debug_output_id,
b.hash()
);
b.header.pow = proof;
let opts = if cuckoo_size < consensus::DEFAULT_SIZESHIFT as u32 { let opts = if cuckoo_size < consensus::DEFAULT_SIZESHIFT as u32 {
chain::EASY_POW chain::EASY_POW
} else { } else {
@ -357,8 +488,11 @@ impl Miner {
}; };
let res = self.chain.process_block(b, opts); let res = self.chain.process_block(b, opts);
if let Err(e) = res { if let Err(e) = res {
error!("(Server ID: {}) Error validating mined block: {:?}", error!(
self.debug_output_id, e); "(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
);
} else { } else {
coinbase = self.get_coinbase(); coinbase = self.get_coinbase();
} }
@ -368,10 +502,11 @@ impl Miner {
/// Builds a new block with the chain head as previous and eligible /// Builds a new block with the chain head as previous and eligible
/// transactions from the pool. /// transactions from the pool.
fn build_block(&self, fn build_block(
head: &core::BlockHeader, &self,
coinbase: (core::Output, core::TxKernel)) head: &core::BlockHeader,
-> core::Block { coinbase: (core::Output, core::TxKernel),
) -> core::Block {
let mut now_sec = time::get_time().sec; let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec; let head_sec = head.timestamp.to_timespec().sec;
if now_sec == head_sec { if now_sec == head_sec {
@ -381,15 +516,19 @@ impl Miner {
let diff_iter = self.chain.difficulty_iter(); let diff_iter = self.chain.difficulty_iter();
let difficulty = consensus::next_difficulty(diff_iter).unwrap(); let difficulty = consensus::next_difficulty(diff_iter).unwrap();
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(MAX_TX); let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
MAX_TX,
);
let txs = txs_box.iter().map(|tx| tx.as_ref()).collect(); let txs = txs_box.iter().map(|tx| tx.as_ref()).collect();
let (output, kernel) = coinbase; let (output, kernel) = coinbase;
let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap(); let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();
debug!("(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}", debug!(
self.debug_output_id, "(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}",
b.inputs.len(), self.debug_output_id,
b.outputs.len(), b.inputs.len(),
difficulty); b.outputs.len(),
difficulty
);
// making sure we're not spending time mining a useless block // making sure we're not spending time mining a useless block
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
@ -409,13 +548,18 @@ impl Miner {
let skey = secp::key::SecretKey::new(&secp_inst, &mut rng); let skey = secp::key::SecretKey::new(&secp_inst, &mut rng);
core::Block::reward_output(skey, &secp_inst).unwrap() core::Block::reward_output(skey, &secp_inst).unwrap()
} else { } else {
let url = format!("{}/v1/receive/coinbase", let url = format!(
self.config.wallet_receiver_url.as_str()); "{}/v1/receive/coinbase",
let request = WalletReceiveRequest::Coinbase(CbAmount{amount: consensus::REWARD}); self.config.wallet_receiver_url.as_str()
let res: CbData = api::client::post(url.as_str(), );
&request) let request = WalletReceiveRequest::Coinbase(CbAmount { amount: consensus::REWARD });
.expect(format!("(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?", let res: CbData = api::client::post(url.as_str(), &request).expect(
self.debug_output_id.as_str()).as_str()); format!(
"(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?",
self.debug_output_id
.as_str()
).as_str(),
);
let out_bin = util::from_hex(res.output).unwrap(); let out_bin = util::from_hex(res.output).unwrap();
let kern_bin = util::from_hex(res.kernel).unwrap(); let kern_bin = util::from_hex(res.kernel).unwrap();
let output = ser::deserialize(&mut &out_bin[..]).unwrap(); let output = ser::deserialize(&mut &out_bin[..]).unwrap();


@ -44,12 +44,12 @@ use wallet::WalletConfig;
/// Just removes all results from previous runs /// Just removes all results from previous runs
pub fn clean_all_output(test_name_dir:&str){ pub fn clean_all_output(test_name_dir: &str) {
let target_dir = format!("target/test_servers/{}", test_name_dir); let target_dir = format!("target/test_servers/{}", test_name_dir);
let result = fs::remove_dir_all(target_dir); let result = fs::remove_dir_all(target_dir);
if let Err(e) = result { if let Err(e) = result {
println!("{}",e); println!("{}", e);
} }
} }
/// Errors that can be returned by LocalServerContainer /// Errors that can be returned by LocalServerContainer
@ -58,59 +58,55 @@ pub fn clean_all_output(test_name_dir:&str){
pub enum Error { pub enum Error {
Internal(String), Internal(String),
Argument(String), Argument(String),
NotFound, NotFound,
} }
/// All-in-one server configuration struct, for convenience /// All-in-one server configuration struct, for convenience
/// ///
#[derive(Clone)] #[derive(Clone)]
pub struct LocalServerContainerConfig { pub struct LocalServerContainerConfig {
// user friendly name for the server, also denotes what dir
// the data files will appear in
pub name: String,
//user friendly name for the server, also denotes what dir // Base IP address
//the data files will appear in pub base_addr: String,
pub name: String,
//Base IP address // Port the server (p2p) is running on
pub base_addr: String, pub p2p_server_port: u16,
//Port the server (p2p) is running on // Port the API server is running on
pub p2p_server_port: u16, pub api_server_port: u16,
//Port the API server is running on // Port the wallet server is running on
pub api_server_port: u16, pub wallet_port: u16,
//Port the wallet server is running on // Whether we're going to mine
pub wallet_port: u16, pub start_miner: bool,
//Whether we're going to mine // time in millis by which to artifically slow down the mining loop
pub start_miner: bool, // in this container
pub miner_slowdown_in_millis: u64,
//time in millis by which to artifically slow down the mining loop // Whether we're going to run a wallet as well,
//in this container // can use same server instance as a validating node for convenience
pub miner_slowdown_in_millis: u64, pub start_wallet: bool,
//Whether we're going to run a wallet as well, // address of a server to use as a seed
//can use same server instance as a validating node for convenience pub seed_addr: String,
pub start_wallet: bool,
//address of a server to use as a seed // keep track of whether this server is supposed to be seeding
pub seed_addr: String, pub is_seeding: bool,
//keep track of whether this server is supposed to be seeding // Whether to burn mining rewards
pub is_seeding: bool, pub burn_mining_rewards: bool,
//Whether to burn mining rewards
pub burn_mining_rewards: bool,
//full address to send coinbase rewards to
pub coinbase_wallet_address: String,
//When running a wallet, the address to check inputs and send
//finalised transactions to,
pub wallet_validating_node_url:String,
// full address to send coinbase rewards to
pub coinbase_wallet_address: String,
// When running a wallet, the address to check inputs and send
// finalised transactions to,
pub wallet_validating_node_url: String,
} }
/// Default server config /// Default server config
@ -119,17 +115,17 @@ impl Default for LocalServerContainerConfig {
LocalServerContainerConfig { LocalServerContainerConfig {
name: String::from("test_host"), name: String::from("test_host"),
base_addr: String::from("127.0.0.1"), base_addr: String::from("127.0.0.1"),
p2p_server_port: 13414, p2p_server_port: 13414,
api_server_port: 13415, api_server_port: 13415,
wallet_port: 13416, wallet_port: 13416,
seed_addr: String::from(""), seed_addr: String::from(""),
is_seeding: false, is_seeding: false,
start_miner: false, start_miner: false,
start_wallet: false, start_wallet: false,
burn_mining_rewards: false, burn_mining_rewards: false,
coinbase_wallet_address: String::from(""), coinbase_wallet_address: String::from(""),
wallet_validating_node_url: String::from(""), wallet_validating_node_url: String::from(""),
miner_slowdown_in_millis: 0, miner_slowdown_in_millis: 0,
} }
} }
} }
@ -139,379 +135,392 @@ impl Default for LocalServerContainerConfig {
/// on a server, i.e. server, wallet in send or receive mode /// on a server, i.e. server, wallet in send or receive mode
pub struct LocalServerContainer { pub struct LocalServerContainer {
// Configuration
config: LocalServerContainerConfig,
//Configuration // Structure of references to the
config: LocalServerContainerConfig, // internal server data
pub p2p_server_stats: Option<grin::ServerStats>,
//Structure of references to the // The API server instance
//internal server data api_server: Option<api::ApiServer>,
pub p2p_server_stats: Option<grin::ServerStats>,
//The API server instance // whether the server is running
api_server: Option<api::ApiServer>, pub server_is_running: bool,
//whether the server is running // Whether the server is mining
pub server_is_running: bool, pub server_is_mining: bool,
//Whether the server is mining // Whether the server is also running a wallet
pub server_is_mining: bool, // Not used if running wallet without server
pub wallet_is_running: bool,
//Whether the server is also running a wallet // the list of peers to connect to
//Not used if running wallet without server pub peer_list: Vec<String>,
pub wallet_is_running: bool,
//the list of peers to connect to
pub peer_list: Vec<String>,
//base directory for the server instance
working_dir: String,
// base directory for the server instance
working_dir: String,
} }
impl LocalServerContainer { impl LocalServerContainer {
/// Create a new local server container with defaults, with the given name
/// all related files will be created in the directory
/// target/test_servers/{name}
/// Create a new local server container with defaults, with the given name pub fn new(config: LocalServerContainerConfig) -> Result<LocalServerContainer, Error> {
/// all related files will be created in the directory target/test_servers/{name} let working_dir = format!("target/test_servers/{}", config.name);
Ok(
(LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}),
)
}
pub fn new(config:LocalServerContainerConfig) -> Result<LocalServerContainer, Error> { pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats {
let working_dir = format!("target/test_servers/{}", config.name); let mut event_loop = reactor::Core::new().unwrap();
Ok((LocalServerContainer {
config:config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}))
}
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port);
{
let mut event_loop = reactor::Core::new().unwrap();
let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port); let mut seeding_type = grin::Seeding::None;
let mut seeds = Vec::new();
let mut seeding_type=grin::Seeding::None; if self.config.seed_addr.len() > 0 {
let mut seeds=Vec::new(); seeding_type = grin::Seeding::List;
seeds = vec![self.config.seed_addr.to_string()];
}
if self.config.seed_addr.len()>0{ let s = grin::Server::future(
seeding_type=grin::Seeding::List; grin::ServerConfig {
seeds=vec![self.config.seed_addr.to_string()]; api_http_addr: api_addr,
} db_root: format!("{}/.grin", self.working_dir),
p2p_config: Some(p2p::P2PConfig {
port: self.config.p2p_server_port,
..p2p::P2PConfig::default()
}),
seeds: Some(seeds),
seeding_type: seeding_type,
..Default::default()
},
&event_loop.handle(),
).unwrap();
let s = grin::Server::future( self.p2p_server_stats = Some(s.get_server_stats().unwrap());
grin::ServerConfig{
api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir),
p2p_config: Some(p2p::P2PConfig{port: self.config.p2p_server_port, ..p2p::P2PConfig::default()}),
seeds: Some(seeds),
seeding_type: seeding_type,
..Default::default()
}, &event_loop.handle()).unwrap();
self.p2p_server_stats = Some(s.get_server_stats().unwrap()); if self.config.start_wallet == true {
self.run_wallet(duration_in_seconds + 5);
// give a second to start wallet before continuing
thread::sleep(time::Duration::from_millis(1000));
}
if self.config.start_wallet == true{ let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
self.run_wallet(duration_in_seconds+5); let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
//give a second to start wallet before continuing plugin_config.type_filter = String::from("mean_cpu");
thread::sleep(time::Duration::from_millis(1000)); plugin_config_vec.push(plugin_config);
}
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: self.config.start_miner, enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards, burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false), cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
wallet_receiver_url : self.config.coinbase_wallet_address.clone(), wallet_receiver_url: self.config.coinbase_wallet_address.clone(),
slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()), slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()),
..Default::default() ..Default::default()
}; };
if self.config.start_miner == true { if self.config.start_miner == true {
println!("starting Miner on port {}", self.config.p2p_server_port); println!("starting Miner on port {}", self.config.p2p_server_port);
s.start_miner(miner_config); s.start_miner(miner_config);
} }
for p in &mut self.peer_list { for p in &mut self.peer_list {
println!("{} connecting to peer: {}", self.config.p2p_server_port, p); println!("{} connecting to peer: {}", self.config.p2p_server_port, p);
s.connect_peer(p.parse().unwrap()).unwrap(); s.connect_peer(p.parse().unwrap()).unwrap();
} }
let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds)); let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds));
event_loop.run(timeout).unwrap(); event_loop.run(timeout).unwrap();
if self.wallet_is_running{ if self.wallet_is_running {
self.stop_wallet(); self.stop_wallet();
} }
s.get_server_stats().unwrap() s.get_server_stats().unwrap()
} }
/// Starts a wallet daemon to receive and returns the /// Starts a wallet daemon to receive and returns the
/// listening server url /// listening server url
pub fn run_wallet(&mut self, _duration_in_seconds: u64) { pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
//URL on which to start the wallet listener (i.e. api server) // URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port); let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);
//Just use the name of the server for a seed for now // Just use the name of the server for a seed for now
let seed = format!("{}", self.config.name); let seed = format!("{}", self.config.name);
let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes()); let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());
let s = Secp256k1::new(); let s = Secp256k1::new();
let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes()).expect(
"Error deriving extended key from seed.",
);

println!(
"Starting the Grin wallet receiving daemon on {} ",
self.config.wallet_port
);
let mut wallet_config = WalletConfig::default(); let mut wallet_config = WalletConfig::default();
wallet_config.api_http_addr = format!("http://{}", url); wallet_config.api_http_addr = format!("http://{}", url);
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone(); wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir=self.working_dir.clone(); wallet_config.data_file_dir = self.working_dir.clone();
let mut api_server = api::ApiServer::new("/v1".to_string()); let mut api_server = api::ApiServer::new("/v1".to_string());
api_server.register_endpoint(
"/receive".to_string(),
wallet::WalletReceiver {
key: key,
config: wallet_config,
},
);
api_server.start(url).unwrap_or_else(|e| { api_server.start(url).unwrap_or_else(|e| {
println!("Failed to start Grin wallet receiver: {}.", e); println!("Failed to start Grin wallet receiver: {}.", e);
}); });
self.api_server = Some(api_server); self.api_server = Some(api_server);
self.wallet_is_running = true; self.wallet_is_running = true;
} }
/// Stops the running wallet server /// Stops the running wallet server
pub fn stop_wallet(&mut self){ pub fn stop_wallet(&mut self) {
let mut api_server = self.api_server.as_mut().unwrap(); let mut api_server = self.api_server.as_mut().unwrap();
api_server.stop(); api_server.stop();
} }
/// Adds a peer to this server to connect to upon running /// Adds a peer to this server to connect to upon running
pub fn add_peer(&mut self, addr: String) {
self.peer_list.push(addr);
}
} }
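For orientation, a minimal usage sketch (not part of this commit) of driving a single LocalServerContainer; it assumes LocalServerContainerConfig implements Default and keeps the field names used above.

    // Sketch only: run one container for 60 seconds with a miner and wallet attached.
    let mut server_config = LocalServerContainerConfig::default();
    server_config.name = String::from("test_server_0");
    server_config.p2p_server_port = 10001;
    server_config.api_server_port = 11001;
    server_config.wallet_port = 12001;
    server_config.start_miner = true;
    server_config.start_wallet = true;

    let mut container = LocalServerContainer::new(server_config).unwrap();
    container.add_peer(String::from("127.0.0.1:10002"));
    let _stats = container.run_server(60);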
/// Configuration values for container pool /// Configuration values for container pool
pub struct LocalServerContainerPoolConfig { pub struct LocalServerContainerPoolConfig {
//Base name to append to all the servers in this pool // Base name to append to all the servers in this pool
pub base_name: String, pub base_name: String,
//Base http address for all of the servers in this pool // Base http address for all of the servers in this pool
pub base_http_addr: String, pub base_http_addr: String,
//Base port server for all of the servers in this pool // Base port server for all of the servers in this pool
//Increment the number by 1 for each new server // Increment the number by 1 for each new server
pub base_p2p_port: u16, pub base_p2p_port: u16,
//Base api port for all of the servers in this pool // Base api port for all of the servers in this pool
//Increment this number by 1 for each new server // Increment this number by 1 for each new server
pub base_api_port: u16, pub base_api_port: u16,
// Base wallet port for this server
//
pub base_wallet_port: u16,
// How long the servers in the pool are going to run
pub run_length_in_seconds: u64,
} }
/// Default server config /// Default server config
/// ///
impl Default for LocalServerContainerPoolConfig { impl Default for LocalServerContainerPoolConfig {
fn default() -> LocalServerContainerPoolConfig { fn default() -> LocalServerContainerPoolConfig {
LocalServerContainerPoolConfig { LocalServerContainerPoolConfig {
base_name: String::from("test_pool"), base_name: String::from("test_pool"),
base_http_addr: String::from("127.0.0.1"), base_http_addr: String::from("127.0.0.1"),
base_p2p_port: 10000, base_p2p_port: 10000,
base_api_port: 11000, base_api_port: 11000,
base_wallet_port: 12000, base_wallet_port: 12000,
run_length_in_seconds: 30, run_length_in_seconds: 30,
} }
} }
} }
/// A convenience pool for running many servers simultaneously /// A convenience pool for running many servers simultaneously
/// without necessarily having to configure each one manually /// without necessarily having to configure each one manually
pub struct LocalServerContainerPool { pub struct LocalServerContainerPool {
//configuration // configuration
pub config: LocalServerContainerPoolConfig, pub config: LocalServerContainerPoolConfig,
//keep ahold of all the created servers thread-safely // keep ahold of all the created servers thread-safely
server_containers: Vec<LocalServerContainer>, server_containers: Vec<LocalServerContainer>,
//Keep track of what the last ports a server was opened on // Keep track of what the last ports a server was opened on
next_p2p_port: u16, next_p2p_port: u16,
next_api_port: u16, next_api_port: u16,
next_wallet_port: u16, next_wallet_port: u16,
// keep track of whether a seed exists, and pause a bit if so
is_seeding: bool,
} }
impl LocalServerContainerPool { impl LocalServerContainerPool {
pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool {
(LocalServerContainerPool {
next_api_port: config.base_api_port,
next_p2p_port: config.base_p2p_port,
next_wallet_port: config.base_wallet_port,
config: config,
server_containers: Vec::new(),
is_seeding: false,
})
}

/// adds a single server on the next available port
/// overriding passed-in values as necessary. Config object is an OUT value
/// with ports/addresses filled in
///
pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
// If we're calling it this way, need to override these
server_config.p2p_server_port = self.next_p2p_port;
server_config.api_server_port = self.next_api_port;
server_config.wallet_port = self.next_wallet_port;
server_config.name = String::from(format!(
"{}/{}-{}",
self.config.base_name,
self.config.base_name,
server_config.p2p_server_port
));

// Use self as coinbase wallet
server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr,
server_config.wallet_port
));

self.next_p2p_port += 1;
self.next_api_port += 1;
self.next_wallet_port += 1;

if server_config.is_seeding {
self.is_seeding = true;
}

let _server_address = format!(
"{}:{}",
server_config.base_addr,
server_config.p2p_server_port
);

let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
// self.server_containers.push(server_arc);

// Create a future that runs the server for however many seconds
// collect them all and run them in the run_all_servers
let _run_time = self.config.run_length_in_seconds;

self.server_containers.push(server_container);
}
/// adds n servers, ready to run /// adds n servers, ready to run
/// ///
/// ///
#[allow(dead_code)] #[allow(dead_code)]
pub fn create_servers(&mut self, number: u16){ pub fn create_servers(&mut self, number: u16) {
for _ in 0..number { for _ in 0..number {
//self.create_server(); // self.create_server();
} }
} }
/// runs all servers, and returns a vector of references to the servers /// runs all servers, and returns a vector of references to the servers
/// once they've all been run /// once they've all been run
/// ///
pub fn run_all_servers(self) -> Vec<grin::ServerStats>{ pub fn run_all_servers(self) -> Vec<grin::ServerStats> {
let run_length = self.config.run_length_in_seconds; let run_length = self.config.run_length_in_seconds;
let mut handles = vec![]; let mut handles = vec![];
// return handles to all of the servers, wrapped in mutexes, handles, etc // return handles to all of the servers, wrapped in mutexes, handles, etc
let return_containers = Arc::new(Mutex::new(Vec::new())); let return_containers = Arc::new(Mutex::new(Vec::new()));
let is_seeding = self.is_seeding.clone(); let is_seeding = self.is_seeding.clone();
for mut s in self.server_containers { for mut s in self.server_containers {
let return_container_ref = return_containers.clone(); let return_container_ref = return_containers.clone();
let handle=thread::spawn(move || { let handle = thread::spawn(move || {
if is_seeding && !s.config.is_seeding { if is_seeding && !s.config.is_seeding {
//there's a seed and we're not it, so hang around longer and give the seed // there's a seed and we're not it, so hang around longer and give the seed
//a chance to start // a chance to start
thread::sleep(time::Duration::from_millis(2000)); thread::sleep(time::Duration::from_millis(2000));
} }
let server_ref=s.run_server(run_length); let server_ref = s.run_server(run_length);
return_container_ref.lock().unwrap().push(server_ref); return_container_ref.lock().unwrap().push(server_ref);
}); });
//Not a big fan of sleeping hack here, but there appears to be a // Not a big fan of sleeping hack here, but there appears to be a
//concurrency issue when creating files in rocksdb that causes // concurrency issue when creating files in rocksdb that causes
//failure if we don't pause a bit before starting the next server // failure if we don't pause a bit before starting the next server
thread::sleep(time::Duration::from_millis(500)); thread::sleep(time::Duration::from_millis(500));
handles.push(handle); handles.push(handle);
} }
for handle in handles { for handle in handles {
match handle.join() { match handle.join() {
Ok(_) => {} Ok(_) => {}
Err(e) => { Err(e) => {
println!("Error starting server thread: {:?}", e); println!("Error starting server thread: {:?}", e);
panic!(e); panic!(e);
} }
} }
} }
//return a much simplified version of the results // return a much simplified version of the results
let return_vec=return_containers.lock().unwrap(); let return_vec = return_containers.lock().unwrap();
return_vec.clone() return_vec.clone()
} }
pub fn connect_all_peers(&mut self) {
/// just pull out all currently active servers, build a list,
/// and feed into all servers
let mut server_addresses: Vec<String> = Vec::new();
for s in &self.server_containers {
let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);
server_addresses.push(server_address);
}
for a in server_addresses {
for s in &mut self.server_containers {
if format!("{}:{}", s.config.base_addr, s.config.p2p_server_port) != a {
s.add_peer(a.clone());
}
}
}
}
}
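A companion sketch (not part of this commit) for the pool, mirroring how the simulation tests below use it; it assumes LocalServerContainerConfig and LocalServerContainerPoolConfig both implement Default.

    // Sketch only: three servers on consecutive ports, fully meshed, run for 60 seconds.
    let mut pool_config = LocalServerContainerPoolConfig::default();
    pool_config.base_name = String::from("peer_pool");
    pool_config.run_length_in_seconds = 60;

    let mut pool = LocalServerContainerPool::new(pool_config);
    let mut server_config = LocalServerContainerConfig::default();
    for _ in 0..3 {
        // the pool overrides ports and name on each call (OUT value)
        pool.create_server(&mut server_config);
    }
    pool.connect_all_peers();
    let stats = pool.run_all_servers();
    println!("collected stats from {} servers", stats.len());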
View file
@ -49,8 +49,8 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon
/// Block and mining into a wallet for a bit /// Block and mining into a wallet for a bit
#[test] #[test]
fn basic_genesis_mine() { fn basic_genesis_mine() {
let _ = env_logger::init(); let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "genesis_mine"; let test_name_dir = "genesis_mine";
framework::clean_all_output(test_name_dir); framework::clean_all_output(test_name_dir);
@ -80,8 +80,8 @@ fn basic_genesis_mine() {
/// messages they all end up connected. /// messages they all end up connected.
#[test] #[test]
fn simulate_seeding() { fn simulate_seeding() {
let _ = env_logger::init(); let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_seeding"; let test_name_dir = "simulate_seeding";
framework::clean_all_output(test_name_dir); framework::clean_all_output(test_name_dir);
@ -114,13 +114,13 @@ fn simulate_seeding() {
server_config.p2p_server_port server_config.p2p_server_port
)); ));
for _ in 0..4 { for _ in 0..4 {
pool.create_server(&mut server_config); pool.create_server(&mut server_config);
} }
pool.connect_all_peers(); pool.connect_all_peers();
let _ = pool.run_all_servers(); let _ = pool.run_all_servers();
} }
/// Create 1 server, start it mining, then connect 4 other peers mining and /// Create 1 server, start it mining, then connect 4 other peers mining and
@ -128,16 +128,14 @@ fn simulate_seeding() {
/// as a seed. Meant to test the evolution of mining difficulty with miners /// as a seed. Meant to test the evolution of mining difficulty with miners
/// running at /// running at
/// different rates /// different rates
// Just going to comment this out as an automatically run test for the time // Just going to comment this out as an automatically run test for the time
// being, // being,
// As it's more for actively testing and hurts CI a lot // As it's more for actively testing and hurts CI a lot
//#[test] //#[test]
#[allow(dead_code)] #[allow(dead_code)]
fn simulate_parallel_mining() { fn simulate_parallel_mining() {
let _ = env_logger::init(); let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_parallel_mining"; let test_name_dir = "simulate_parallel_mining";
// framework::clean_all_output(test_name_dir); // framework::clean_all_output(test_name_dir);
@ -178,7 +176,7 @@ fn simulate_parallel_mining() {
pool.connect_all_peers(); pool.connect_all_peers();
let _ = pool.run_all_servers(); let _ = pool.run_all_servers();
// Check mining difficulty here?, though I'd think it's more valuable // Check mining difficulty here?, though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target // to simply output it. Can at least see the evolution of the difficulty target
@ -193,7 +191,7 @@ fn simulate_parallel_mining() {
#[test] #[test]
fn a_simulate_block_propagation() { fn a_simulate_block_propagation() {
env_logger::init(); env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-prop"; let test_name_dir = "grin-prop";
framework::clean_all_output(test_name_dir); framework::clean_all_output(test_name_dir);
@ -201,13 +199,18 @@ fn a_simulate_block_propagation() {
let mut evtlp = reactor::Core::new().unwrap(); let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle(); let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: true, enable_mining: true,
burn_reward: true, burn_reward: true,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: None, cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default() ..Default::default()
}; };
@ -260,7 +263,7 @@ fn a_simulate_block_propagation() {
#[test] #[test]
fn simulate_full_sync() { fn simulate_full_sync() {
env_logger::init(); env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-sync"; let test_name_dir = "grin-sync";
framework::clean_all_output(test_name_dir); framework::clean_all_output(test_name_dir);
@ -268,13 +271,18 @@ fn simulate_full_sync() {
let mut evtlp = reactor::Core::new().unwrap(); let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle(); let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig { let miner_config = pow::types::MinerConfig {
enable_mining: true, enable_mining: true,
burn_reward: true, burn_reward: true,
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false), cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")), cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")), cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default() ..Default::default()
}; };
View file
@ -14,10 +14,15 @@ lazy_static = "~0.2.8"
serde = "~1.0.8" serde = "~1.0.8"
serde_derive = "~1.0.8" serde_derive = "~1.0.8"
cuckoo_miner = { git = "https://github.com/mimblewimble/cuckoo-miner", tag="grin_integration_8"}
grin_core = { path = "../core" } grin_core = { path = "../core" }
[dependencies.cuckoo_miner]
git = "https://github.com/mimblewimble/cuckoo-miner"
tag="grin_integration_9"
#path = "../../cuckoo-miner"
#uncomment this feature to turn off plugin builds
#features=["no-plugin-build"]
[dev_dependencies] [dev_dependencies]
grin_chain = { path = "../chain"} grin_chain = { path = "../chain"}
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" } secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
View file
@ -121,7 +121,7 @@ pub fn mine_genesis_block(miner_config:Option<types::MinerConfig>)->Option<core:
/// Runs a proof of work computation over the provided block using the provided Mining Worker, /// Runs a proof of work computation over the provided block using the provided Mining Worker,
/// until the required difficulty target is reached. May take a while for a low target... /// until the required difficulty target is reached. May take a while for a low target...
pub fn pow_size<T: MiningWorker + ?Sized>(miner:&mut T, bh: &mut BlockHeader, pub fn pow_size<T: MiningWorker + ?Sized>(miner:&mut T, bh: &mut BlockHeader,
diff: Difficulty, _: u32) -> Result<(), Error> { diff: Difficulty, _: u32) -> Result<(), Error> {
let start_nonce = bh.nonce; let start_nonce = bh.nonce;
// if we're in production mode, try the pre-mined solution first // if we're in production mode, try the pre-mined solution first
View file
@ -27,36 +27,35 @@ use core::global;
use core::core::Proof; use core::core::Proof;
use types::MinerConfig; use types::MinerConfig;
use std::sync::{Mutex}; use std::sync::Mutex;
use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
CuckooMinerDeviceStats, CuckooMinerError};

// For now, we're just going to keep a static reference around to the loaded
// config
// And not allow querying the plugin directory twice once a plugin has been
// selected
// This is to keep compatibility with multi-threaded testing, so that spawned
// testing threads don't try to load/unload the library while another thread is
// using it.
lazy_static!{ lazy_static!{
static ref LOADED_CONFIG: Mutex<Option<CuckooMinerConfig>> = Mutex::new(None); static ref LOADED_CONFIG: Mutex<Option<Vec<CuckooMinerConfig>>> = Mutex::new(None);
} }
/// plugin miner /// plugin miner
pub struct PluginMiner { pub struct PluginMiner {
/// the miner /// the miner
pub miner:Option<CuckooMiner>, pub miner: Option<CuckooMiner>,
last_solution: CuckooMinerSolution, last_solution: CuckooMinerSolution,
config: CuckooMinerConfig, config: Vec<CuckooMinerConfig>,
} }
impl Default for PluginMiner { impl Default for PluginMiner {
fn default() -> PluginMiner { fn default() -> PluginMiner {
PluginMiner { PluginMiner {
miner: None, miner: None,
config: CuckooMinerConfig::new(), config: Vec::new(),
last_solution: CuckooMinerSolution::new(), last_solution: CuckooMinerSolution::new(),
} }
} }
@ -64,86 +63,97 @@ impl Default for PluginMiner {
impl PluginMiner { impl PluginMiner {
/// Init the plugin miner /// Init the plugin miner
pub fn init(&mut self, miner_config: MinerConfig){ pub fn init(&mut self, miner_config: MinerConfig) {
//Get directory of executable // Get directory of executable
let mut exe_path=env::current_exe().unwrap(); let mut exe_path = env::current_exe().unwrap();
exe_path.pop(); exe_path.pop();
let exe_path=exe_path.to_str().unwrap(); let exe_path = exe_path.to_str().unwrap();
let plugin_install_path = match miner_config.cuckoo_miner_plugin_dir.clone() {
Some(s) => s,
None => String::from(format!("{}/plugins", exe_path)),
};

let mut plugin_vec_filters = Vec::new();
if let None = miner_config.cuckoo_miner_plugin_config {
plugin_vec_filters.push(String::from("simple"));
} else {
for p in miner_config.clone().cuckoo_miner_plugin_config.unwrap() {
plugin_vec_filters.push(p.type_filter);
}
}
//First, load and query the plugins in the given directory // First, load and query the plugins in the given directory
//These should all be stored in 'plugins' at the moment relative // These should all be stored in 'plugins' at the moment relative
//to the executable path, though they should appear somewhere else // to the executable path, though they should appear somewhere else
//when packaging is more//thought out // when packaging is more//thought out
let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap(); let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();
//Load from here instead // Load from here instead
if let Some(ref c) = *loaded_config_ref { if let Some(ref c) = *loaded_config_ref {
debug!("Not re-loading plugin or directory."); debug!("Not re-loading plugin or directory.");
//this will load the associated plugin // this will load the associated plugin
let result=CuckooMiner::new(c.clone()); let result = CuckooMiner::new(c.clone());
self.miner=Some(result.unwrap()); self.miner = Some(result.unwrap());
self.config = c.clone();
return; return;
} }
let mut plugin_manager = CuckooPluginManager::new().unwrap(); let mut plugin_manager = CuckooPluginManager::new().unwrap();
let result=plugin_manager.load_plugin_dir(plugin_install_path); let result = plugin_manager.load_plugin_dir(plugin_install_path);
if let Err(_) = result {
error!(
"Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins."
);
panic!("Unable to load plugin directory... Please check configuration values");
}

let sz = global::sizeshift();

let mut cuckoo_configs = Vec::new();
let mut index = 0;
for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift
let filter = format!("{}_{}", f, sz);

let caps = plugin_manager.get_available_plugins(&filter).unwrap();
// insert it into the miner configuration being created below
let mut config = CuckooMinerConfig::new();

info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
config.plugin_full_path = caps[0].full_path.clone();
if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
if let Some(lp) = l[index].parameter_list.clone() {
config.parameter_list = lp.clone();
}
}
cuckoo_configs.push(config);
index += 1;
}

// Store this config now, because we just want one instance
// of the plugin lib per invocation now
*loaded_config_ref = Some(cuckoo_configs.clone());

// this will load the associated plugin
let result = CuckooMiner::new(cuckoo_configs.clone());
if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e);
//error!("Accepted values are: {:?}", caps[0].parameters);
panic!("Unable to init mining plugin.");
}
self.config = cuckoo_configs.clone();
self.miner = Some(result.unwrap());
}
/// Get the miner /// Get the miner
pub fn get_consumable(&mut self)->CuckooMiner{ pub fn get_consumable(&mut self) -> CuckooMiner {
//this will load the associated plugin // this will load the associated plugin
let result=CuckooMiner::new(self.config.clone()); let result = CuckooMiner::new(self.config.clone());
if let Err(e) = result { if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e); error!("Error initializing mining plugin: {:?}", e);
panic!("Unable to init mining plugin."); panic!("Unable to init mining plugin.");
@ -151,29 +161,39 @@ impl PluginMiner {
result.unwrap() result.unwrap()
} }
/// Returns the number of mining plugins that have been loaded
pub fn loaded_plugin_count(&self) -> usize {
self.config.len()
}
/// Get stats
pub fn get_stats(&self, index:usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
self.miner.as_ref().unwrap().get_stats(index)
}
} }
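A small sketch (not part of this commit) of how the new per-plugin accessors might be used once init() has run; `plugin_miner` is assumed to be an initialised PluginMiner.

    // Sketch only: report device stats for every plugin that was loaded in parallel.
    for i in 0..plugin_miner.loaded_plugin_count() {
        match plugin_miner.get_stats(i) {
            Ok(stats) => println!("plugin {}: {} device(s) reporting", i, stats.len()),
            Err(e) => println!("plugin {}: stats unavailable ({:?})", i, e),
        }
    }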
impl MiningWorker for PluginMiner { impl MiningWorker for PluginMiner {
/// This will initialise a plugin according to what's currently /// This will initialise a plugin according to what's currently
/// included in CONSENSUS::TEST_SIZESHIFT, just using the edgetrim /// included in CONSENSUS::TEST_SIZESHIFT, just using the edgetrim
/// version of the miner for now, though this should become /// version of the miner for now, though this should become
/// configurable somehow /// configurable somehow
fn new(_ease: u32, _sizeshift: u32, _proof_size: usize) -> Self {
PluginMiner::default()
}
/// And simply calls the mine function of the loaded plugin /// And simply calls the mine function of the loaded plugin
/// returning whether a solution was found and the solution itself /// returning whether a solution was found and the solution itself
fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error> {
let result = self.miner
.as_mut()
.unwrap()
.mine(&header, &mut self.last_solution, 0)
.unwrap();
if result == true {
return Ok(Proof::new(self.last_solution.solution_nonces.to_vec()));
}
Err(Error::NoSolution)
}
} }
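And a sketch (not part of this commit) of driving the MiningWorker implementation directly; `miner` is assumed to be a mutable, initialised PluginMiner and `header_bytes` a serialised block header.

    // Sketch only: ask the loaded plugin(s) for a solution on this header.
    match miner.mine(&header_bytes) {
        Ok(proof) => println!("found a cuckoo cycle: {:?}", proof),
        Err(_) => println!("no solution found for this nonce"),
    }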
View file
@ -16,6 +16,25 @@
use std::collections::HashMap; use std::collections::HashMap;
/// CuckooMinerPlugin configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CuckooMinerPluginConfig {
///The type of plugin to load (i.e. filters on filename)
pub type_filter : String,
///Parameters for this plugin
pub parameter_list : Option<HashMap<String, u32>>,
}
impl Default for CuckooMinerPluginConfig {
fn default() -> CuckooMinerPluginConfig {
CuckooMinerPluginConfig {
type_filter : String::new(),
parameter_list : None,
}
}
}
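For illustration (not part of this commit), two plugin entries could be configured as below; the "NUM_THREADS" key is a hypothetical parameter name, while the "mean_cpu" and "simple" filters reuse values seen elsewhere in this diff.

    // Sketch only: two plugins mined in parallel, one with an explicit parameter list.
    use std::collections::HashMap;

    let mut params = HashMap::new();
    params.insert(String::from("NUM_THREADS"), 4);

    let plugin_configs = vec![
        CuckooMinerPluginConfig {
            type_filter: String::from("mean_cpu"),
            parameter_list: Some(params),
        },
        CuckooMinerPluginConfig {
            type_filter: String::from("simple"),
            parameter_list: None,
        },
    ];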
/// Mining configuration /// Mining configuration
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinerConfig { pub struct MinerConfig {
@ -28,19 +47,15 @@ pub struct MinerConfig {
/// Whether to use the async version of mining /// Whether to use the async version of mining
pub cuckoo_miner_async_mode: Option<bool>, pub cuckoo_miner_async_mode: Option<bool>,
/// The location in which cuckoo miner plugins are stored /// plugin dir
pub cuckoo_miner_plugin_dir: Option<String>, pub cuckoo_miner_plugin_dir: Option<String>,
/// The type of plugin to use (ends up filtering the filename) /// Cuckoo miner plugin configuration, one for each plugin
pub cuckoo_miner_plugin_type: Option<String>, pub cuckoo_miner_plugin_config: Option<Vec<CuckooMinerPluginConfig>>,
/// How long to wait before stopping the miner, recollecting transactions
/// and starting again
pub attempt_time_per_block: u32,
/// Base address to the HTTP wallet receiver /// Base address to the HTTP wallet receiver
pub wallet_receiver_url: String, pub wallet_receiver_url: String,
@ -52,7 +67,6 @@ pub struct MinerConfig {
/// a testing attribute for the time being that artifically slows down the /// a testing attribute for the time being that artifically slows down the
/// mining loop by adding a sleep to the thread /// mining loop by adding a sleep to the thread
pub slow_down_in_millis: Option<u64>, pub slow_down_in_millis: Option<u64>,
} }
impl Default for MinerConfig { impl Default for MinerConfig {
@ -62,8 +76,7 @@ impl Default for MinerConfig {
use_cuckoo_miner: false, use_cuckoo_miner: false,
cuckoo_miner_async_mode: None, cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: None, cuckoo_miner_plugin_dir: None,
cuckoo_miner_plugin_type: None, cuckoo_miner_plugin_config: None,
cuckoo_miner_parameter_list: None,
wallet_receiver_url: "http://localhost:13416".to_string(), wallet_receiver_url: "http://localhost:13416".to_string(),
burn_reward: false, burn_reward: false,
slow_down_in_millis: Some(0), slow_down_in_millis: Some(0),
@ -71,4 +84,3 @@ impl Default for MinerConfig {
} }
} }
} }
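Finally, a sketch (not part of this commit) of a MinerConfig that feeds the plugin list from the previous sketch into the cuckoo miner; the values mirror those used in the simulation tests.

    // Sketch only: enable cuckoo-miner mining with the plugin list built above.
    let miner_config = MinerConfig {
        enable_mining: true,
        use_cuckoo_miner: true,
        cuckoo_miner_async_mode: Some(false),
        cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
        cuckoo_miner_plugin_config: Some(plugin_configs),
        ..Default::default()
    };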