Cuckoo-miner update - Multiple Plugin mining (#139)

* Adding ability to load multiple mining plugins in parallel via cuckoo miner
* updating with newest cuckoo-miner changes
* revert default config value
* update pow document with link to podcast
Yeastplume 2017-09-26 18:58:56 +01:00 committed by Ignotus Peverell
parent 7d48e1da49
commit 53d9ca630c
14 changed files with 1028 additions and 789 deletions

View file

@ -46,7 +46,7 @@ impl ApiEndpoint for ChainApi {
fn get(&self, _: String) -> ApiResult<Tip> {
match self.chain.head() {
Ok(tip) => Ok(Tip::from_tip(tip)),
Err(e) => Err(Error::Internal(format!("{:?}", e)))
Err(e) => Err(Error::Internal(format!("{:?}", e))),
}
}
}
@ -70,12 +70,14 @@ impl ApiEndpoint for OutputApi {
fn get(&self, id: String) -> ApiResult<Output> {
debug!("GET output {}", id);
let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?;
let c = util::from_hex(id.clone()).map_err(|_| {
Error::Argument(format!("Not a valid commitment: {}", id))
})?;
let commit = Commitment::from_vec(c);
let out = self.chain.get_unspent(&commit)
.map_err(|_| Error::NotFound)?;
let header = self.chain.get_block_header_by_output_commit(&commit)
let out = self.chain.get_unspent(&commit).map_err(|_| Error::NotFound)?;
let header = self.chain
.get_block_header_by_output_commit(&commit)
.map_err(|_| Error::NotFound)?;
Ok(Output::from_output(&out, &header))
@ -90,7 +92,8 @@ pub struct PoolApi<T> {
}
impl<T> ApiEndpoint for PoolApi<T>
where T: pool::BlockChain + Clone + Send + Sync + 'static
where
T: pool::BlockChain + Clone + Send + Sync + 'static,
{
type ID = String;
type T = PoolInfo;
@ -116,7 +119,9 @@ impl<T> ApiEndpoint for PoolApi<T>
})?;
let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
Error::Argument("Could not deserialize transaction, invalid format.".to_string())
Error::Argument(
"Could not deserialize transaction, invalid format.".to_string(),
)
})?;
let source = pool::TxSource {
@ -148,20 +153,21 @@ pub struct TxWrapper {
/// Start all server REST APIs. Just register all of them on a ApiServer
/// instance and runs the corresponding HTTP server.
pub fn start_rest_apis<T>(addr: String,
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<T>>>)
where T: pool::BlockChain + Clone + Send + Sync + 'static
pub fn start_rest_apis<T>(
addr: String,
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
) where
T: pool::BlockChain + Clone + Send + Sync + 'static,
{
thread::spawn(move || {
let mut apis = ApiServer::new("/v1".to_string());
apis.register_endpoint("/chain".to_string(),
ChainApi { chain: chain.clone() });
apis.register_endpoint("/chain/utxo".to_string(),
OutputApi {
chain: chain.clone(),
});
apis.register_endpoint("/chain".to_string(), ChainApi { chain: chain.clone() });
apis.register_endpoint(
"/chain/utxo".to_string(),
OutputApi { chain: chain.clone() },
);
apis.register_endpoint("/pool".to_string(), PoolApi { tx_pool: tx_pool });
apis.start(&addr[..]).unwrap_or_else(|e| {
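
For context on the pattern being reformatted here: each handler above is registered on an ApiServer under a path and dispatched when that path is requested. The sketch below is a self-contained toy version of that register-and-dispatch idea using only the standard library; the trait, server struct and the /chain payload are simplified stand-ins, not the actual grin `api` crate types.

```
// Toy register-and-dispatch sketch; ToyApiServer and Endpoint are simplified
// stand-ins for the grin ApiServer / ApiEndpoint types, not the real API.
use std::collections::HashMap;

trait Endpoint {
    fn get(&self, id: &str) -> Result<String, String>;
}

struct ChainTip {
    height: u64,
}

impl Endpoint for ChainTip {
    fn get(&self, _id: &str) -> Result<String, String> {
        // pretend this is the serialized chain tip
        Ok(format!("{{\"height\": {}}}", self.height))
    }
}

struct ToyApiServer {
    base: String,
    endpoints: HashMap<String, Box<dyn Endpoint>>,
}

impl ToyApiServer {
    fn new(base: &str) -> ToyApiServer {
        ToyApiServer { base: base.to_string(), endpoints: HashMap::new() }
    }
    fn register_endpoint(&mut self, path: &str, e: Box<dyn Endpoint>) {
        self.endpoints.insert(path.to_string(), e);
    }
    fn handle(&self, path: &str) -> Result<String, String> {
        self.endpoints
            .get(path)
            .ok_or_else(|| "not found".to_string())
            .and_then(|e| e.get(""))
    }
}

fn main() {
    let mut apis = ToyApiServer::new("/v1");
    apis.register_endpoint("/chain", Box::new(ChainTip { height: 42 }));
    println!("GET {}/chain -> {:?}", apis.base, apis.handle("/chain"));
}
```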

View file

@ -22,9 +22,7 @@ use std::fs::File;
use toml;
use grin::ServerConfig;
use pow::types::MinerConfig;
use types::{ConfigMembers,
GlobalConfig,
ConfigError};
use types::{ConfigMembers, GlobalConfig, ConfigError};
/// The default file name to use when trying to derive
/// the config file location
@ -36,148 +34,159 @@ const GRIN_HOME: &'static str = ".grin";
impl Default for ConfigMembers {
fn default() -> ConfigMembers {
ConfigMembers {
server: ServerConfig::default(),
mining: Some(MinerConfig::default()),
//wallet: Some(WalletConfig::default()),
}
}
ConfigMembers {
server: ServerConfig::default(),
mining: Some(MinerConfig::default()),
}
}
}
impl Default for GlobalConfig {
fn default() -> GlobalConfig{
GlobalConfig {
config_file_path: None,
using_config_file: false,
members: Some(ConfigMembers::default())
}
}
fn default() -> GlobalConfig {
GlobalConfig {
config_file_path: None,
using_config_file: false,
members: Some(ConfigMembers::default()),
}
}
}
impl GlobalConfig {
/// Need to decide on rules where to read the config file from,
/// but will take a stab at logic for now
/// Need to decide on rules where to read the config file from,
/// but will take a stab at logic for now
fn derive_config_location(&mut self) -> Result<(), ConfigError> {
//First, check working directory
let mut config_path = env::current_dir().unwrap();
config_path.push(CONFIG_FILE_NAME);
if config_path.exists() {
self.config_file_path = Some(config_path);
return Ok(())
}
//Next, look in directory of executable
let mut config_path=env::current_exe().unwrap();
fn derive_config_location(&mut self) -> Result<(), ConfigError> {
// First, check working directory
let mut config_path = env::current_dir().unwrap();
config_path.push(CONFIG_FILE_NAME);
if config_path.exists() {
self.config_file_path = Some(config_path);
return Ok(());
}
// Next, look in directory of executable
let mut config_path = env::current_exe().unwrap();
config_path.pop();
config_path.push(CONFIG_FILE_NAME);
if config_path.exists() {
self.config_file_path = Some(config_path);
return Ok(())
}
//Then look in {user_home}/.grin
let config_path = env::home_dir();
if let Some(mut p) = config_path {
p.push(GRIN_HOME);
p.push(CONFIG_FILE_NAME);
if p.exists() {
self.config_file_path = Some(p);
return Ok(())
}
}
if config_path.exists() {
self.config_file_path = Some(config_path);
return Ok(());
}
// Then look in {user_home}/.grin
let config_path = env::home_dir();
if let Some(mut p) = config_path {
p.push(GRIN_HOME);
p.push(CONFIG_FILE_NAME);
if p.exists() {
self.config_file_path = Some(p);
return Ok(());
}
}
// Give up
Err(ConfigError::FileNotFoundError(String::from("")))
// Give up
Err(ConfigError::FileNotFoundError(String::from("")))
}
}
/// Takes the path to a config file, or if NONE, tries
/// to determine a config file based on rules in
/// derive_config_location
/// Takes the path to a config file, or if NONE, tries
/// to determine a config file based on rules in
/// derive_config_location
pub fn new(file_path:Option<&str>) -> Result<GlobalConfig, ConfigError> {
let mut return_value = GlobalConfig::default();
if let Some(fp) = file_path {
return_value.config_file_path = Some(PathBuf::from(&fp));
} else {
return_value.derive_config_location().unwrap();
}
pub fn new(file_path: Option<&str>) -> Result<GlobalConfig, ConfigError> {
let mut return_value = GlobalConfig::default();
if let Some(fp) = file_path {
return_value.config_file_path = Some(PathBuf::from(&fp));
} else {
return_value.derive_config_location().unwrap();
}
//No attempt at a config file, just return defaults
if let None = return_value.config_file_path {
return Ok(return_value);
}
// No attempt at a config file, just return defaults
if let None = return_value.config_file_path {
return Ok(return_value);
}
//Config file path is given but not valid
if !return_value.config_file_path.as_mut().unwrap().exists() {
return Err(
ConfigError::FileNotFoundError(String::from(return_value.config_file_path.as_mut()
.unwrap().to_str().unwrap().clone()))
);
}
// Config file path is given but not valid
if !return_value.config_file_path.as_mut().unwrap().exists() {
return Err(ConfigError::FileNotFoundError(String::from(
return_value
.config_file_path
.as_mut()
.unwrap()
.to_str()
.unwrap()
.clone(),
)));
}
//Try to parse the config file if it exists
//explode if it does exist but something's wrong
//with it
return_value.read_config()
}
// Try to parse the config file if it exists
// explode if it does exist but something's wrong
// with it
return_value.read_config()
}
/// Read config
pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> {
let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let decoded:Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
match decoded {
Ok(mut gc) => {
//Put the struct back together, because the config
//file was flattened a bit
gc.server.mining_config = gc.mining.clone();
self.using_config_file = true;
self.members = Some(gc);
return Ok(self)
},
Err (e) => {
return Err(
ConfigError::ParseError(String::from(self.config_file_path.as_mut()
.unwrap().to_str().unwrap().clone()),
String::from(format!("{}", e))
)
);
}
}
}
/// Read config
pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> {
let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let decoded: Result<ConfigMembers, toml::de::Error> = toml::from_str(&contents);
match decoded {
Ok(mut gc) => {
// Put the struct back together, because the config
// file was flattened a bit
gc.server.mining_config = gc.mining.clone();
self.using_config_file = true;
self.members = Some(gc);
return Ok(self);
}
Err(e) => {
return Err(ConfigError::ParseError(
String::from(
self.config_file_path
.as_mut()
.unwrap()
.to_str()
.unwrap()
.clone(),
),
String::from(format!("{}", e)),
));
}
}
}
/// Serialize config
pub fn ser_config(&mut self) -> Result<String, ConfigError> {
let encoded:Result<String, toml::ser::Error> = toml::to_string(self.members.as_mut().unwrap());
match encoded {
Ok(enc) => {
return Ok(enc)
},
Err (e) => {
return Err(
ConfigError::SerializationError(
String::from(format!("{}", e))
)
);
}
}
}
/// Serialize config
pub fn ser_config(&mut self) -> Result<String, ConfigError> {
let encoded: Result<String, toml::ser::Error> =
toml::to_string(self.members.as_mut().unwrap());
match encoded {
Ok(enc) => return Ok(enc),
Err(e) => {
return Err(ConfigError::SerializationError(
String::from(format!("{}", e)),
));
}
}
}
/*pub fn wallet_enabled(&mut self) -> bool {
/*pub fn wallet_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet;
}*/
/// Enable mining
pub fn mining_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining;
}
/// Enable mining
pub fn mining_enabled(&mut self) -> bool {
return self.members
.as_mut()
.unwrap()
.mining
.as_mut()
.unwrap()
.enable_mining;
}
}
#[test]
fn test_read_config() {
let toml_str = r#"
let toml_str = r#"
#Section is optional, if not here or enable_server is false, will only run wallet
[server]
enable_server = true
@ -202,9 +211,9 @@ fn test_read_config() {
"#;
let mut decoded: GlobalConfig = toml::from_str(toml_str).unwrap();
decoded.server.as_mut().unwrap().mining_config = decoded.mining;
println!("Decoded.server: {:?}", decoded.server);
println!("Decoded wallet: {:?}", decoded.wallet);
panic!("panic");
let mut decoded: GlobalConfig = toml::from_str(toml_str).unwrap();
decoded.server.as_mut().unwrap().mining_config = decoded.mining;
println!("Decoded.server: {:?}", decoded.server);
println!("Decoded wallet: {:?}", decoded.wallet);
panic!("panic");
}
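
The derive_config_location logic reformatted above searches a fixed set of locations in order. The standalone sketch below reproduces that lookup order (working directory, then the executable's directory, then {home}/.grin) using only the standard library; it illustrates the search order and is not the grin_config crate itself.

```
// Standalone sketch of the config lookup order implemented by
// derive_config_location: cwd, then the executable's directory, then
// {home}/.grin. Uses only std, not the grin_config crate.
use std::env;
use std::path::PathBuf;

const CONFIG_FILE_NAME: &str = "grin.toml";
const GRIN_HOME: &str = ".grin";

fn find_config_file() -> Option<PathBuf> {
    // 1. Current working directory
    if let Ok(mut p) = env::current_dir() {
        p.push(CONFIG_FILE_NAME);
        if p.exists() {
            return Some(p);
        }
    }
    // 2. Directory containing the running executable
    if let Ok(mut p) = env::current_exe() {
        p.pop();
        p.push(CONFIG_FILE_NAME);
        if p.exists() {
            return Some(p);
        }
    }
    // 3. {user_home}/.grin
    // (std::env::home_dir is deprecated in newer Rust but still works here)
    if let Some(mut p) = env::home_dir() {
        p.push(GRIN_HOME);
        p.push(CONFIG_FILE_NAME);
        if p.exists() {
            return Some(p);
        }
    }
    None
}

fn main() {
    match find_config_file() {
        Some(p) => println!("would read config from {}", p.display()),
        None => println!("no grin.toml found, falling back to defaults"),
    }
}
```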

View file

@ -34,4 +34,4 @@ extern crate grin_pow as pow;
pub mod config;
pub mod types;
pub use types::{GlobalConfig, ConfigMembers, ConfigError};
pub use types::{GlobalConfig, ConfigMembers, ConfigError};

View file

@ -24,45 +24,50 @@ use pow::types::MinerConfig;
/// Error type wrapping config errors.
#[derive(Debug)]
pub enum ConfigError {
/// Error with parsing of config file
ParseError (String, String),
/// Error with parsing of config file
ParseError(String, String),
/// Error with fileIO while reading config file
FileIOError (String, String),
/// Error with fileIO while reading config file
FileIOError(String, String),
/// No file found
FileNotFoundError (String),
/// No file found
FileNotFoundError(String),
/// Error serializing config values
SerializationError (String),
/// Error serializing config values
SerializationError(String),
}
impl fmt::Display for ConfigError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ConfigError::ParseError(ref file_name, ref message) => {
write!(f, "Error parsing configuration file at {} - {}",file_name, message)
}
ConfigError::FileIOError(ref file_name, ref message) => {
write!(f, "{} {}", message, file_name)
}
ConfigError::FileNotFoundError(ref file_name) => {
write!(f, "Configuration file not found: {}", file_name)
}
ConfigError::SerializationError(ref message) => {
write!(f, "Error serializing configuration: {}", message)
}
}
}
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ConfigError::ParseError(ref file_name, ref message) => {
write!(
f,
"Error parsing configuration file at {} - {}",
file_name,
message
)
}
ConfigError::FileIOError(ref file_name, ref message) => {
write!(f, "{} {}", message, file_name)
}
ConfigError::FileNotFoundError(ref file_name) => {
write!(f, "Configuration file not found: {}", file_name)
}
ConfigError::SerializationError(ref message) => {
write!(f, "Error serializing configuration: {}", message)
}
}
}
}
impl From<io::Error> for ConfigError {
fn from(error: io::Error) -> ConfigError {
ConfigError::FileIOError(
String::from(""),
String::from(format!("Error loading config file: {}",error)),
)
}
fn from(error: io::Error) -> ConfigError {
ConfigError::FileIOError(
String::from(""),
String::from(format!("Error loading config file: {}", error)),
)
}
}
/// Going to hold all of the various configuration types
@ -72,30 +77,28 @@ impl From<io::Error> for ConfigError {
/// as they tend to be quite nested in the code
/// Most structs optional, as they may or may not
/// be needed depending on what's being run
#[derive(Debug, Serialize, Deserialize)]
pub struct GlobalConfig {
///Keep track of the file we've read
pub config_file_path: Option<PathBuf>,
/// keep track of whether we're using
/// a config file or just the defaults
/// for each member
pub using_config_file: bool,
/// Global member config
pub members: Option<ConfigMembers>,
/// Keep track of the file we've read
pub config_file_path: Option<PathBuf>,
/// keep track of whether we're using
/// a config file or just the defaults
/// for each member
pub using_config_file: bool,
/// Global member config
pub members: Option<ConfigMembers>,
}
/// Keeping an 'inner' structure here, as the top
/// level GlobalConfigContainer options might want to keep
/// internal state that we don't necessarily
/// want serialised or deserialised
#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigMembers {
/// Server config
pub server: ServerConfig,
/// Mining config
pub mining: Option<MinerConfig>,
/// Server config
pub server: ServerConfig,
/// Mining config
pub mining: Option<MinerConfig>,
//removing wallet from here for now,
//as its concerns are separate from the server's, really
//given it needs to manage keys. It should probably

View file

@ -56,7 +56,7 @@ from the build directory will run grin using the defaults in the grin.toml file,
For the time being, it's recommended just to put the built version of grin on your path, e.g. via:
```
export $PATH /path/to/grin/dir/target/grin:$PATH
export PATH=/path/to/grin/dir/target/grin:$PATH
```
# Configuration

View file

@ -1,8 +1,6 @@
Grin's Proof-of-Work
====================
[WIP and subject to review, may still contain errors]
This document is meant to outline, at a level suitable for someone without prior knowledge,
the algorithms and processes currently involved in Grin's Proof-of-Work system. We'll start
with a general overview of cycles in a graph and the Cuckoo Cycle algorithm which forms the
@ -16,8 +14,8 @@ Please note that Grin is currently under active development, and any and all of
Grin's basic Proof-of-Work algorithm is called Cuckoo Cycle, which is specifically designed
to be resistant to Bitcoin style hardware arms-races. It is primarily a memory bound algorithm,
which, (at least in theory,) means that solution time is limited to the speed of a system's RAM
rather than processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
which (at least in theory) means that solution time is bound by memory bandwidth
rather than raw processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
most commodity hardware, and require far less energy than most other GPU, CPU or ASIC-bound
proof of work algorithms.
@ -26,11 +24,16 @@ can be found in [his github repository](https://github.com/tromp/cuckoo). The
[white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf) is the best source of
further technical details.
There is also a [podcast with Mike from Monero Monitor](https://moneromonitor.com/episodes/2017-09-26-Episode-014.html)
in which John Tromp talks at length about Cuckoo Cycle; recommended listening for anyone wanting
more background on Cuckoo Cycle, including more technical detail, the history of the algorithm's development
and some of the motivations behind it.
## Cycles in a Graph
Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of N nodes
and M edges. In plainer terms, a bipartite graph is one in which edges (i.e. lines connecting nodes)
go only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
travel only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
one side of the graph is an array numbered with odd indices (up to the size of the graph), and the other is numbered with even
indices. A node is simply a numbered 'space' on either side of the Cuckoo Table, and an Edge is a
line connecting two nodes on opposite sides. The simple graph below denotes just such a graph,
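
To make the bipartite structure concrete, here is a small, self-contained sketch in which every edge joins an even-indexed node to an odd-indexed one. The hash function is a deliberately trivial stand-in; the real algorithm derives edge endpoints from a siphash keyed by the block header.

```
// Toy illustration of the bipartite graph described above: each edge joins an
// even-indexed node to an odd-indexed node. The "hash" is a trivial stand-in
// for the keyed siphash used by the real Cuckoo Cycle algorithm.
fn toy_hash(seed: u64, i: u64) -> u64 {
    // deliberately simple mixing, for illustration only
    (seed ^ i).wrapping_mul(0x9E3779B97F4A7C15)
}

fn main() {
    let nodes_per_side: u64 = 8; // half the graph size N
    let num_edges: u64 = 8;      // M edges
    let seed: u64 = 0xDEADBEEF;

    for i in 0..num_edges {
        // even node on one side, odd node on the other
        let u = 2 * (toy_hash(seed, 2 * i) % nodes_per_side);
        let v = 2 * (toy_hash(seed, 2 * i + 1) % nodes_per_side) + 1;
        println!("edge {}: {} -- {}", i, u, v);
    }
}
```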

View file

@ -34,7 +34,7 @@ seeding_type = "None"
#UserTesting - For regular user testing, much lighter than production mode
#Production - Full production cuckoo parameters
mining_parameter_mode = "UserTesting"
mining_parameter_mode = "Production"
#7 = Bit flags for FULL_NODE, this structure needs to be changed
#internally to make it more configurable
@ -74,27 +74,10 @@ cuckoo_miner_async_mode = false
#cuckoo_miner_plugin_dir = "target/debug/plugins"
#if using cuckoo_miner, the implementation to use.. currently
#just filters for this word in the filenames in the plugin
#directory
#Plugins currently included are:
#"simple" : the basic cuckoo algorithm
#"edgetrim" : an algorithm trading speed for a much lower memory footprint
#"matrix" : fastest available CPU miner, with largest memory footprint
#"tomato" : Time memory-tradeoff... low memory but very slow
#Not included but verified working:
#"cuda" a gpu miner - which currently needs to bebuilt and installed
#separately from#the cuckoo-miner repository. Instructions found there
cuckoo_miner_plugin_type = "simple"
#the list of parameters if you're using "edgetrim or matrix"
#cuckoo_miner_parameter_list = {NUM_THREADS=4, NUM_TRIMS=7}
#The amount of time, in seconds, to attempt to mine on a particular
#header before stopping and re-collecting transactions from the pool
attempt_time_per_block = 30
attempt_time_per_block = 90
#the wallet receiver to which coinbase rewards will be sent
@ -106,3 +89,40 @@ burn_reward = true
#testing value, optional
#slow_down_in_millis = 30
#########################################
### CUCKOO MINER PLUGIN CONFIGURATION ###
#########################################
# These entries configure instances of cuckoo miner
# plugins if the 'use_cuckoo_miner' value above is
# set to 'true'.
#
# Multiple plugins can be specified, (e.g. a cpu
# miner and a gpu miner running in parallel). However,
# if 'use_async_mode' above is set to 'false', only
# the first plugin specified will be used for mining
# in single-threaded mode
# You'll likely get the best performance using a
# single GPU and single CPU plugin in parallel
#The fastest cpu algorithm, but consumes the most memory
[[mining.cuckoo_miner_plugin_config]]
type_filter = "mean_cpu"
parameter_list = {NUM_THREADS=4, NUM_TRIMS=64}
#note lean_cpu currently has a bug which prevents it from
#working with threads > 1
#[[mining.cuckoo_miner_plugin_config]]
#type_filter = "lean_cpu"
#parameter_list = {NUM_THREADS=1, NUM_TRIMS=7}
#CUDA version of lean miner
#Can currently be used only in Production (30) Mode
#[[mining.cuckoo_miner_plugin_config]]
#type_filter = "lean_cuda"
#parameter_list = {}
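
To illustrate how the `[[mining.cuckoo_miner_plugin_config]]` entries above might be consumed, here is a hedged sketch that parses a similar snippet with serde and the toml crate (the same crate config.rs uses). The struct shapes are simplified approximations of the real definitions in pow::types, so treat the exact field set as illustrative.

```
// Sketch of how [[mining.cuckoo_miner_plugin_config]] entries map onto Rust
// structs. These structs are simplified approximations, not the real
// pow::types::MinerConfig / CuckooMinerPluginConfig definitions.
// Requires the serde, serde_derive and toml crates.
#[macro_use]
extern crate serde_derive;
extern crate toml;

use std::collections::HashMap;

#[derive(Debug, Deserialize)]
struct PluginConfig {
    type_filter: String,
    #[serde(default)]
    parameter_list: HashMap<String, u32>,
}

#[derive(Debug, Deserialize)]
struct MiningSection {
    use_cuckoo_miner: bool,
    attempt_time_per_block: u32,
    cuckoo_miner_plugin_config: Option<Vec<PluginConfig>>,
}

#[derive(Debug, Deserialize)]
struct Config {
    mining: MiningSection,
}

fn main() {
    let toml_str = r#"
        [mining]
        use_cuckoo_miner = true
        attempt_time_per_block = 90

        [[mining.cuckoo_miner_plugin_config]]
        type_filter = "mean_cpu"
        parameter_list = { NUM_THREADS = 4, NUM_TRIMS = 64 }
    "#;

    let cfg: Config = toml::from_str(toml_str).unwrap();
    // With async mode off, only the first plugin entry would be used.
    println!("{:#?}", cfg.mining.cuckoo_miner_plugin_config);
}
```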

View file

@ -19,10 +19,10 @@ use rand::{self, Rng};
use std::sync::{Arc, RwLock};
use std::thread;
use std;
use std::{str};
use std::str;
use time;
use adapters::{PoolToChainAdapter};
use adapters::PoolToChainAdapter;
use api;
use core::consensus;
use core::core;
@ -34,9 +34,9 @@ use core::core::hash::{Hash, Hashed};
use pow::MiningWorker;
use pow::types::MinerConfig;
use core::ser;
use core::ser::{AsFixedBytes};
use core::ser::AsFixedBytes;
//use core::genesis;
// use core::genesis;
use chain;
use secp;
@ -62,7 +62,7 @@ pub struct HeaderPartWriter {
// Post nonce is currently variable length
// because of difficulty
pub post_nonce: Vec<u8>,
//which difficulty field we're on
// which difficulty field we're on
bytes_written: usize,
writing_pre: bool,
}
@ -79,7 +79,7 @@ impl Default for HeaderPartWriter {
}
impl HeaderPartWriter {
pub fn parts_as_hex_strings(&self)->(String, String) {
pub fn parts_as_hex_strings(&self) -> (String, String) {
(
String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
String::from(format!("{:02x}", self.post_nonce.iter().format(""))),
@ -94,17 +94,21 @@ impl ser::Writer for HeaderPartWriter {
fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
if self.writing_pre {
for i in 0..bytes_in.len() {self.pre_nonce.push(bytes_in.as_ref()[i])};
for i in 0..bytes_in.len() {
self.pre_nonce.push(bytes_in.as_ref()[i])
}
} else if self.bytes_written!=0 {
for i in 0..bytes_in.len() {self.post_nonce.push(bytes_in.as_ref()[i])};
} else if self.bytes_written != 0 {
for i in 0..bytes_in.len() {
self.post_nonce.push(bytes_in.as_ref()[i])
}
}
self.bytes_written+=bytes_in.len();
self.bytes_written += bytes_in.len();
if self.bytes_written==PRE_NONCE_SIZE && self.writing_pre {
self.writing_pre=false;
self.bytes_written=0;
if self.bytes_written == PRE_NONCE_SIZE && self.writing_pre {
self.writing_pre = false;
self.bytes_written = 0;
}
Ok(())
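
The write_fixed_bytes logic above splits the serialized header into the bytes before and after the nonce, so the plugin miner can try candidate nonces between the two parts. Below is a toy mirror of that splitting; PRE_NONCE_SIZE here is a placeholder value, not the real constant.

```
// Toy version of the pre/post-nonce split performed by HeaderPartWriter:
// bytes before the nonce go into `pre`, the nonce write itself is skipped,
// and everything after goes into `post`. PRE_NONCE_SIZE is a placeholder.
const PRE_NONCE_SIZE: usize = 16;

struct SplitWriter {
    pre: Vec<u8>,
    post: Vec<u8>,
    bytes_written: usize,
    writing_pre: bool,
}

impl SplitWriter {
    fn new() -> SplitWriter {
        SplitWriter { pre: vec![], post: vec![], bytes_written: 0, writing_pre: true }
    }

    fn write(&mut self, bytes: &[u8]) {
        if self.writing_pre {
            self.pre.extend_from_slice(bytes);
        } else if self.bytes_written != 0 {
            // bytes_written == 0 right after the switch means this write is
            // the nonce itself, which is left out of both parts
            self.post.extend_from_slice(bytes);
        }
        self.bytes_written += bytes.len();
        if self.bytes_written == PRE_NONCE_SIZE && self.writing_pre {
            self.writing_pre = false;
            self.bytes_written = 0;
        }
    }
}

fn main() {
    let mut w = SplitWriter::new();
    w.write(&[1u8; 16]); // pre-nonce header fields (16 bytes in this toy)
    w.write(&[0u8; 8]);  // the nonce: skipped
    w.write(&[2u8; 4]);  // post-nonce fields
    println!("pre: {} bytes, post: {} bytes", w.pre.len(), w.post.len());
}
```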
@ -116,18 +120,19 @@ pub struct Miner {
chain: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
//Just to hold the port we're on, so this miner can be identified
//while watching debug output
// Just to hold the port we're on, so this miner can be identified
// while watching debug output
debug_output_id: String,
}
impl Miner {
/// Creates a new Miner. Needs references to the chain state and its
/// storage.
pub fn new(config: MinerConfig,
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
-> Miner {
pub fn new(
config: MinerConfig,
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
) -> Miner {
Miner {
config: config,
chain: chain_ref,
@ -139,29 +144,33 @@ impl Miner {
/// Keeping this optional so it's set in a separate function
/// instead of in the new function
pub fn set_debug_output_id(&mut self, debug_output_id: String){
self.debug_output_id=debug_output_id;
pub fn set_debug_output_id(&mut self, debug_output_id: String) {
self.debug_output_id = debug_output_id;
}
/// Inner part of the mining loop for cuckoo-miner asynch mode
pub fn inner_loop_async(&self,
plugin_miner:&mut PluginMiner,
difficulty:Difficulty,
b:&mut Block,
cuckoo_size: u32,
head:&BlockHeader,
latest_hash:&Hash,
attempt_time_per_block: u32)
-> Option<Proof> {
/// Inner part of the mining loop for cuckoo-miner async mode
pub fn inner_loop_async(
&self,
plugin_miner: &mut PluginMiner,
difficulty: Difficulty,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
latest_hash: &Hash,
attempt_time_per_block: u32,
) -> Option<Proof> {
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
b.header.height,
b.header.difficulty);
debug!(
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
b.header.height,
b.header.difficulty
);
// look for a pow for at most 10 sec on the same block (to give a chance to new
// look for a pow for at most attempt_time_per_block sec on the
// same block (to give a chance to new
// transactions) and as long as the head hasn't changed
// Will change this to something else at some point
let deadline = time::get_time().sec + attempt_time_per_block as i64;
@ -170,7 +179,7 @@ impl Miner {
let stat_output_interval = 2;
let mut next_stat_output = time::get_time().sec + stat_output_interval;
//Get parts of the header
// Get parts of the header
let mut header_parts = HeaderPartWriter::default();
ser::Writeable::write(&b.header, &mut header_parts).unwrap();
let (pre, post) = header_parts.parts_as_hex_strings();
@ -182,41 +191,54 @@ impl Miner {
let (pre, post) = header_parts.parts_as_hex_strings();
println!("pre, post: {}, {}", pre, post);*/
//Start the miner working
// Start the miner working
let miner = plugin_miner.get_consumable();
let job_handle=miner.notify(1, &pre, &post, difficulty.into_num()).unwrap();
let job_handle = miner.notify(1, &pre, &post, difficulty.into_num()).unwrap();
let mut sol=None;
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
if let Some(s) = job_handle.get_solution() {
if let Some(s) = job_handle.get_solution() {
sol = Some(Proof::new(s.solution_nonces.to_vec()));
b.header.nonce=s.get_nonce_as_u64();
println!("Nonce: {}", b.header.nonce);
b.header.nonce = s.get_nonce_as_u64();
// debug!("Nonce: {}", b.header.nonce);
break;
}
if time::get_time().sec > next_stat_output {
let stats = job_handle.get_stats();
if let Ok(stat_vec) = stats {
for s in stat_vec {
if s.last_start_time==0 {
continue;
let mut sps_total = 0.0;
for i in 0..plugin_miner.loaded_plugin_count() {
let stats = job_handle.get_stats(i);
if let Ok(stat_vec) = stats {
for s in stat_vec {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!(
"Mining: Plugin {} - Device {} ({}): Last Solution time: {}s; \
Solutions per second: {:.*} - Total Attempts: {}",
i,
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec,
s.iterations_completed
);
if last_hashes_per_sec.is_finite() {
sps_total += last_hashes_per_sec;
}
}
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!("Mining on Device {} - {}: Last hash time: {} - Hashes per second: {:.*} - Total Attempts: {}",
s.device_id, s.device_name,
last_solution_time_secs, 3, last_hashes_per_sec,
s.iterations_completed);
}
}
next_stat_output = time::get_time().sec + stat_output_interval;
debug!("Total solutions per second: {}", sps_total);
next_stat_output = time::get_time().sec + stat_output_interval;
}
}
}
if sol==None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
job_handle.get_hashes_since_last_call().unwrap())
if sol == None {
debug!(
"(Server ID: {}) No solution found after {} seconds, continuing...",
self.debug_output_id,
attempt_time_per_block
);
}
job_handle.stop_jobs();
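
The async path above follows a notify / poll-for-solution / stop pattern against the plugin job handle. The self-contained sketch below mimics that flow with a std thread and channel; ToyJobHandle and its methods only imitate the cuckoo-miner job API and are not the real plugin interface.

```
// Minimal stand-in for the async flow in inner_loop_async: start a background
// job, poll it for a solution until a deadline, then stop it. ToyJobHandle is
// a channel-based imitation of the cuckoo-miner notify/get_solution/stop_jobs
// API, not the real interface.
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::{Duration, Instant};

struct ToyJobHandle {
    rx: mpsc::Receiver<u64>,
    stop: Arc<AtomicBool>,
}

impl ToyJobHandle {
    fn get_solution(&self) -> Option<u64> {
        self.rx.try_recv().ok()
    }
    fn stop_jobs(&self) {
        self.stop.store(true, Ordering::Relaxed);
    }
}

fn notify() -> ToyJobHandle {
    let (tx, rx) = mpsc::channel();
    let stop = Arc::new(AtomicBool::new(false));
    let stop_worker = stop.clone();
    thread::spawn(move || {
        let mut nonce = 0u64;
        while !stop_worker.load(Ordering::Relaxed) {
            nonce += 1;
            // pretend one in a million nonces yields a solution
            if nonce % 1_000_000 == 0 {
                let _ = tx.send(nonce);
            }
        }
    });
    ToyJobHandle { rx, stop }
}

fn main() {
    let attempt_time_per_block = Duration::from_secs(2);
    let deadline = Instant::now() + attempt_time_per_block;
    let job = notify();
    let mut sol = None;
    while Instant::now() < deadline {
        if let Some(nonce) = job.get_solution() {
            sol = Some(nonce);
            break;
        }
        thread::sleep(Duration::from_millis(10));
    }
    job.stop_jobs();
    println!("solution: {:?}", sol);
}
```
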
@ -224,83 +246,183 @@ impl Miner {
}
/// The inner part of mining loop for synchronous mode
pub fn inner_loop_sync<T: MiningWorker>(&self,
miner:&mut T,
b:&mut Block,
cuckoo_size: u32,
head:&BlockHeader,
attempt_time_per_block: u32,
latest_hash:&mut Hash)
-> Option<Proof> {
/// The inner part of mining loop for cuckoo miner sync mode
pub fn inner_loop_sync_plugin(
&self,
plugin_miner: &mut PluginMiner,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;
debug!("(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
latest_hash,
b.header.difficulty);
debug!(
"(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
latest_hash,
b.header.difficulty
);
let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap());
debug!(
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
);
}
let mut sol=None;
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff=proof.clone().to_difficulty();
/*debug!("(Server ID: {}) Header difficulty is: {}, Proof difficulty is: {}",
self.debug_output_id,
b.header.difficulty,
proof_diff);*/
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
if proof_diff >= b.header.difficulty {
sol = Some(proof);
break;
}
}
if time::get_time().sec >= next_stat_check {
let stats_vec = plugin_miner.get_stats(0).unwrap();
for s in stats_vec.into_iter() {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
println!(
"Plugin 0 - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
s.device_id,
s.device_name,
last_solution_time_secs,
3,
last_hashes_per_sec
);
}
next_stat_check = time::get_time().sec + stat_check_interval;
}
b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1;
//Artificial slow down
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
thread::sleep(std::time::Duration::from_millis(self.config.slow_down_in_millis.unwrap()));
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
}
}
if sol==None {
debug!("(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count)
if sol == None {
debug!(
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
)
}
sol
}
/// The inner part of mining loop for the internal miner
pub fn inner_loop_sync_internal<T: MiningWorker>(
&self,
miner: &mut T,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
debug!(
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,
latest_hash,
b.header.difficulty
);
let mut iter_count = 0;
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!(
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
);
}
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
if proof_diff >= b.header.difficulty {
sol = Some(proof);
break;
}
}
b.header.nonce += 1;
*latest_hash = self.chain.head().unwrap().last_block_h;
iter_count += 1;
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
}
}
if sol == None {
debug!(
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
)
}
sol
}
/// Starts the mining loop, building a new block on top of the existing
/// chain anytime required and looking for PoW solution.
pub fn run_loop(&self,
miner_config:MinerConfig,
cuckoo_size:u32,
proof_size:usize) {
pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
info!("(Server ID: {}) Starting miner loop.", self.debug_output_id);
let mut plugin_miner=None;
let mut miner=None;
if miner_config.use_cuckoo_miner {
plugin_miner = Some(PluginMiner::new(consensus::EASINESS, cuckoo_size, proof_size));
let mut plugin_miner = None;
let mut miner = None;
if miner_config.use_cuckoo_miner {
plugin_miner = Some(PluginMiner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
plugin_miner.as_mut().unwrap().init(miner_config.clone());
} else {
miner = Some(cuckoo::Miner::new(consensus::EASINESS, cuckoo_size, proof_size));
miner = Some(cuckoo::Miner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
}
let mut coinbase = self.get_coinbase();
@ -311,45 +433,54 @@ impl Miner {
let mut latest_hash = self.chain.head().unwrap().last_block_h;
let mut b = self.build_block(&head, coinbase.clone());
let mut sol=None;
let mut use_async=false;
if let Some(c)=self.config.cuckoo_miner_async_mode {
let mut sol = None;
let mut use_async = false;
if let Some(c) = self.config.cuckoo_miner_async_mode {
if c {
use_async=true;
use_async = true;
}
}
if let Some(mut p) = plugin_miner.as_mut() {
if use_async {
sol = self.inner_loop_async(&mut p,
b.header.difficulty.clone(),
&mut b,
cuckoo_size,
&head,
&latest_hash,
miner_config.attempt_time_per_block);
sol = self.inner_loop_async(
&mut p,
b.header.difficulty.clone(),
&mut b,
cuckoo_size,
&head,
&latest_hash,
miner_config.attempt_time_per_block,
);
} else {
sol = self.inner_loop_sync(p,
&mut b,
cuckoo_size,
&head,
miner_config.attempt_time_per_block,
&mut latest_hash);
sol = self.inner_loop_sync_plugin(
p,
&mut b,
cuckoo_size,
&head,
miner_config.attempt_time_per_block,
&mut latest_hash,
);
}
}
if let Some(mut m) = miner.as_mut() {
sol = self.inner_loop_sync(m,
&mut b,
cuckoo_size,
&head,
miner_config.attempt_time_per_block,
&mut latest_hash);
sol = self.inner_loop_sync_internal(
m,
&mut b,
cuckoo_size,
&head,
miner_config.attempt_time_per_block,
&mut latest_hash,
);
}
// if we found a solution, push our block out
if let Some(proof) = sol {
info!("(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id, b.hash());
b.header.pow = proof;
info!(
"(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
);
b.header.pow = proof;
let opts = if cuckoo_size < consensus::DEFAULT_SIZESHIFT as u32 {
chain::EASY_POW
} else {
@ -357,8 +488,11 @@ impl Miner {
};
let res = self.chain.process_block(b, opts);
if let Err(e) = res {
error!("(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id, e);
error!(
"(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
);
} else {
coinbase = self.get_coinbase();
}
@ -368,10 +502,11 @@ impl Miner {
/// Builds a new block with the chain head as previous and eligible
/// transactions from the pool.
fn build_block(&self,
head: &core::BlockHeader,
coinbase: (core::Output, core::TxKernel))
-> core::Block {
fn build_block(
&self,
head: &core::BlockHeader,
coinbase: (core::Output, core::TxKernel),
) -> core::Block {
let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec;
if now_sec == head_sec {
@ -381,15 +516,19 @@ impl Miner {
let diff_iter = self.chain.difficulty_iter();
let difficulty = consensus::next_difficulty(diff_iter).unwrap();
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(MAX_TX);
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
MAX_TX,
);
let txs = txs_box.iter().map(|tx| tx.as_ref()).collect();
let (output, kernel) = coinbase;
let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();
debug!("(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}",
self.debug_output_id,
b.inputs.len(),
b.outputs.len(),
difficulty);
debug!(
"(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}",
self.debug_output_id,
b.inputs.len(),
b.outputs.len(),
difficulty
);
// making sure we're not spending time mining a useless block
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
@ -409,13 +548,18 @@ impl Miner {
let skey = secp::key::SecretKey::new(&secp_inst, &mut rng);
core::Block::reward_output(skey, &secp_inst).unwrap()
} else {
let url = format!("{}/v1/receive/coinbase",
self.config.wallet_receiver_url.as_str());
let request = WalletReceiveRequest::Coinbase(CbAmount{amount: consensus::REWARD});
let res: CbData = api::client::post(url.as_str(),
&request)
.expect(format!("(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?",
self.debug_output_id.as_str()).as_str());
let url = format!(
"{}/v1/receive/coinbase",
self.config.wallet_receiver_url.as_str()
);
let request = WalletReceiveRequest::Coinbase(CbAmount { amount: consensus::REWARD });
let res: CbData = api::client::post(url.as_str(), &request).expect(
format!(
"(Server ID: {}) Wallet receiver unreachable, could not claim reward. Is it running?",
self.debug_output_id
.as_str()
).as_str(),
);
let out_bin = util::from_hex(res.output).unwrap();
let kern_bin = util::from_hex(res.kernel).unwrap();
let output = ser::deserialize(&mut &out_bin[..]).unwrap();

View file

@ -44,12 +44,12 @@ use wallet::WalletConfig;
/// Just removes all results from previous runs
pub fn clean_all_output(test_name_dir:&str){
let target_dir = format!("target/test_servers/{}", test_name_dir);
let result = fs::remove_dir_all(target_dir);
if let Err(e) = result {
println!("{}",e);
}
pub fn clean_all_output(test_name_dir: &str) {
let target_dir = format!("target/test_servers/{}", test_name_dir);
let result = fs::remove_dir_all(target_dir);
if let Err(e) = result {
println!("{}", e);
}
}
/// Errors that can be returned by LocalServerContainer
@ -58,59 +58,55 @@ pub fn clean_all_output(test_name_dir:&str){
pub enum Error {
Internal(String),
Argument(String),
NotFound,
NotFound,
}
/// All-in-one server configuration struct, for convenience
///
#[derive(Clone)]
pub struct LocalServerContainerConfig {
// user friendly name for the server, also denotes what dir
// the data files will appear in
pub name: String,
//user friendly name for the server, also denotes what dir
//the data files will appear in
pub name: String,
// Base IP address
pub base_addr: String,
//Base IP address
pub base_addr: String,
// Port the server (p2p) is running on
pub p2p_server_port: u16,
//Port the server (p2p) is running on
pub p2p_server_port: u16,
// Port the API server is running on
pub api_server_port: u16,
//Port the API server is running on
pub api_server_port: u16,
// Port the wallet server is running on
pub wallet_port: u16,
//Port the wallet server is running on
pub wallet_port: u16,
// Whether we're going to mine
pub start_miner: bool,
//Whether we're going to mine
pub start_miner: bool,
// time in millis by which to artificially slow down the mining loop
// in this container
pub miner_slowdown_in_millis: u64,
//time in millis by which to artifically slow down the mining loop
//in this container
pub miner_slowdown_in_millis: u64,
// Whether we're going to run a wallet as well,
// can use same server instance as a validating node for convenience
pub start_wallet: bool,
//Whether we're going to run a wallet as well,
//can use same server instance as a validating node for convenience
pub start_wallet: bool,
// address of a server to use as a seed
pub seed_addr: String,
//address of a server to use as a seed
pub seed_addr: String,
// keep track of whether this server is supposed to be seeding
pub is_seeding: bool,
//keep track of whether this server is supposed to be seeding
pub is_seeding: bool,
//Whether to burn mining rewards
pub burn_mining_rewards: bool,
//full address to send coinbase rewards to
pub coinbase_wallet_address: String,
//When running a wallet, the address to check inputs and send
//finalised transactions to,
pub wallet_validating_node_url:String,
// Whether to burn mining rewards
pub burn_mining_rewards: bool,
// full address to send coinbase rewards to
pub coinbase_wallet_address: String,
// When running a wallet, the address to check inputs and send
// finalised transactions to,
pub wallet_validating_node_url: String,
}
/// Default server config
@ -119,17 +115,17 @@ impl Default for LocalServerContainerConfig {
LocalServerContainerConfig {
name: String::from("test_host"),
base_addr: String::from("127.0.0.1"),
p2p_server_port: 13414,
api_server_port: 13415,
wallet_port: 13416,
seed_addr: String::from(""),
is_seeding: false,
start_miner: false,
start_wallet: false,
burn_mining_rewards: false,
coinbase_wallet_address: String::from(""),
wallet_validating_node_url: String::from(""),
miner_slowdown_in_millis: 0,
p2p_server_port: 13414,
api_server_port: 13415,
wallet_port: 13416,
seed_addr: String::from(""),
is_seeding: false,
start_miner: false,
start_wallet: false,
burn_mining_rewards: false,
coinbase_wallet_address: String::from(""),
wallet_validating_node_url: String::from(""),
miner_slowdown_in_millis: 0,
}
}
}
@ -139,379 +135,392 @@ impl Default for LocalServerContainerConfig {
/// on a server, i.e. server, wallet in send or receive mode
pub struct LocalServerContainer {
// Configuration
config: LocalServerContainerConfig,
//Configuration
config: LocalServerContainerConfig,
// Structure of references to the
// internal server data
pub p2p_server_stats: Option<grin::ServerStats>,
//Structure of references to the
//internal server data
pub p2p_server_stats: Option<grin::ServerStats>,
// The API server instance
api_server: Option<api::ApiServer>,
//The API server instance
api_server: Option<api::ApiServer>,
// whether the server is running
pub server_is_running: bool,
//whether the server is running
pub server_is_running: bool,
// Whether the server is mining
pub server_is_mining: bool,
//Whether the server is mining
pub server_is_mining: bool,
// Whether the server is also running a wallet
// Not used if running wallet without server
pub wallet_is_running: bool,
//Whether the server is also running a wallet
//Not used if running wallet without server
pub wallet_is_running: bool,
//the list of peers to connect to
pub peer_list: Vec<String>,
//base directory for the server instance
working_dir: String,
// the list of peers to connect to
pub peer_list: Vec<String>,
// base directory for the server instance
working_dir: String,
}
impl LocalServerContainer {
/// Create a new local server container with defaults, with the given name
/// all related files will be created in the directory
/// target/test_servers/{name}
/// Create a new local server container with defaults, with the given name
/// all related files will be created in the directory target/test_servers/{name}
pub fn new(config: LocalServerContainerConfig) -> Result<LocalServerContainer, Error> {
let working_dir = format!("target/test_servers/{}", config.name);
Ok(
(LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}),
)
}
pub fn new(config:LocalServerContainerConfig) -> Result<LocalServerContainer, Error> {
let working_dir = format!("target/test_servers/{}", config.name);
Ok((LocalServerContainer {
config:config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}))
}
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats {
let mut event_loop = reactor::Core::new().unwrap();
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats
{
let mut event_loop = reactor::Core::new().unwrap();
let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port);
let api_addr = format!("{}:{}", self.config.base_addr, self.config.api_server_port);
let mut seeding_type = grin::Seeding::None;
let mut seeds = Vec::new();
let mut seeding_type=grin::Seeding::None;
let mut seeds=Vec::new();
if self.config.seed_addr.len() > 0 {
seeding_type = grin::Seeding::List;
seeds = vec![self.config.seed_addr.to_string()];
}
if self.config.seed_addr.len()>0{
seeding_type=grin::Seeding::List;
seeds=vec![self.config.seed_addr.to_string()];
}
let s = grin::Server::future(
grin::ServerConfig {
api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir),
p2p_config: Some(p2p::P2PConfig {
port: self.config.p2p_server_port,
..p2p::P2PConfig::default()
}),
seeds: Some(seeds),
seeding_type: seeding_type,
..Default::default()
},
&event_loop.handle(),
).unwrap();
let s = grin::Server::future(
grin::ServerConfig{
api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir),
p2p_config: Some(p2p::P2PConfig{port: self.config.p2p_server_port, ..p2p::P2PConfig::default()}),
seeds: Some(seeds),
seeding_type: seeding_type,
..Default::default()
}, &event_loop.handle()).unwrap();
self.p2p_server_stats = Some(s.get_server_stats().unwrap());
self.p2p_server_stats = Some(s.get_server_stats().unwrap());
if self.config.start_wallet == true {
self.run_wallet(duration_in_seconds + 5);
// give a second to start wallet before continuing
thread::sleep(time::Duration::from_millis(1000));
}
if self.config.start_wallet == true{
self.run_wallet(duration_in_seconds+5);
//give a second to start wallet before continuing
thread::sleep(time::Duration::from_millis(1000));
}
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig {
enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_type: Some(String::from("simple")),
wallet_receiver_url : self.config.coinbase_wallet_address.clone(),
slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()),
..Default::default()
};
let miner_config = pow::types::MinerConfig {
enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_config: Some(plugin_config_vec),
wallet_receiver_url: self.config.coinbase_wallet_address.clone(),
slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()),
..Default::default()
};
if self.config.start_miner == true {
println!("starting Miner on port {}", self.config.p2p_server_port);
s.start_miner(miner_config);
}
if self.config.start_miner == true {
println!("starting Miner on port {}", self.config.p2p_server_port);
s.start_miner(miner_config);
}
for p in &mut self.peer_list {
println!("{} connecting to peer: {}", self.config.p2p_server_port, p);
s.connect_peer(p.parse().unwrap()).unwrap();
}
for p in &mut self.peer_list {
println!("{} connecting to peer: {}", self.config.p2p_server_port, p);
s.connect_peer(p.parse().unwrap()).unwrap();
}
let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds));
let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds));
event_loop.run(timeout).unwrap();
event_loop.run(timeout).unwrap();
if self.wallet_is_running{
self.stop_wallet();
}
if self.wallet_is_running {
self.stop_wallet();
}
s.get_server_stats().unwrap()
s.get_server_stats().unwrap()
}
}
/// Starts a wallet daemon to receive and returns the
/// listening server url
/// Starts a wallet daemon to receive and returns the
/// listening server url
pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
//URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);
// URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);
//Just use the name of the server for a seed for now
let seed = format!("{}", self.config.name);
// Just use the name of the server for a seed for now
let seed = format!("{}", self.config.name);
let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());
let seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());
let s = Secp256k1::new();
let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes())
.expect("Error deriving extended key from seed.");
let s = Secp256k1::new();
let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes()).expect(
"Error deriving extended key from seed.",
);
println!("Starting the Grin wallet receiving daemon on {} ", self.config.wallet_port );
println!(
"Starting the Grin wallet receiving daemon on {} ",
self.config.wallet_port
);
let mut wallet_config = WalletConfig::default();
let mut wallet_config = WalletConfig::default();
wallet_config.api_http_addr = format!("http://{}", url);
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir=self.working_dir.clone();
wallet_config.api_http_addr = format!("http://{}", url);
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = self.working_dir.clone();
let mut api_server = api::ApiServer::new("/v1".to_string());
let mut api_server = api::ApiServer::new("/v1".to_string());
api_server.register_endpoint("/receive".to_string(), wallet::WalletReceiver {
key: key,
config: wallet_config,
});
api_server.register_endpoint(
"/receive".to_string(),
wallet::WalletReceiver {
key: key,
config: wallet_config,
},
);
api_server.start(url).unwrap_or_else(|e| {
println!("Failed to start Grin wallet receiver: {}.", e);
println!("Failed to start Grin wallet receiver: {}.", e);
});
self.api_server = Some(api_server);
self.wallet_is_running = true;
self.api_server = Some(api_server);
self.wallet_is_running = true;
}
}
/// Stops the running wallet server
/// Stops the running wallet server
pub fn stop_wallet(&mut self){
let mut api_server = self.api_server.as_mut().unwrap();
api_server.stop();
}
pub fn stop_wallet(&mut self) {
let mut api_server = self.api_server.as_mut().unwrap();
api_server.stop();
}
/// Adds a peer to this server to connect to upon running
pub fn add_peer(&mut self, addr:String){
self.peer_list.push(addr);
}
/// Adds a peer to this server to connect to upon running
pub fn add_peer(&mut self, addr: String) {
self.peer_list.push(addr);
}
}
/// Configuration values for container pool
pub struct LocalServerContainerPoolConfig {
//Base name to append to all the servers in this pool
pub base_name: String,
// Base name to append to all the servers in this pool
pub base_name: String,
//Base http address for all of the servers in this pool
pub base_http_addr: String,
// Base http address for all of the servers in this pool
pub base_http_addr: String,
//Base port server for all of the servers in this pool
//Increment the number by 1 for each new server
pub base_p2p_port: u16,
// Base port server for all of the servers in this pool
// Increment the number by 1 for each new server
pub base_p2p_port: u16,
//Base api port for all of the servers in this pool
//Increment this number by 1 for each new server
pub base_api_port: u16,
//Base wallet port for this server
//
pub base_wallet_port: u16,
//How long the servers in the pool are going to run
pub run_length_in_seconds: u64,
// Base api port for all of the servers in this pool
// Increment this number by 1 for each new server
pub base_api_port: u16,
// Base wallet port for this server
//
pub base_wallet_port: u16,
// How long the servers in the pool are going to run
pub run_length_in_seconds: u64,
}
/// Default server config
///
impl Default for LocalServerContainerPoolConfig {
fn default() -> LocalServerContainerPoolConfig {
LocalServerContainerPoolConfig {
base_name: String::from("test_pool"),
base_http_addr: String::from("127.0.0.1"),
base_p2p_port: 10000,
base_api_port: 11000,
base_wallet_port: 12000,
run_length_in_seconds: 30,
}
}
fn default() -> LocalServerContainerPoolConfig {
LocalServerContainerPoolConfig {
base_name: String::from("test_pool"),
base_http_addr: String::from("127.0.0.1"),
base_p2p_port: 10000,
base_api_port: 11000,
base_wallet_port: 12000,
run_length_in_seconds: 30,
}
}
}
/// A convenience pool for running many servers simultaneously
/// without necessarily having to configure each one manually
pub struct LocalServerContainerPool {
//configuration
pub config: LocalServerContainerPoolConfig,
// configuration
pub config: LocalServerContainerPoolConfig,
//keep ahold of all the created servers thread-safely
server_containers: Vec<LocalServerContainer>,
// keep ahold of all the created servers thread-safely
server_containers: Vec<LocalServerContainer>,
//Keep track of what the last ports a server was opened on
next_p2p_port: u16,
// Keep track of what the last ports a server was opened on
next_p2p_port: u16,
next_api_port: u16,
next_api_port: u16,
next_wallet_port: u16,
//keep track of whether a seed exists, and pause a bit if so
is_seeding: bool,
next_wallet_port: u16,
// keep track of whether a seed exists, and pause a bit if so
is_seeding: bool,
}
impl LocalServerContainerPool {
pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool {
(LocalServerContainerPool {
next_api_port: config.base_api_port,
next_p2p_port: config.base_p2p_port,
next_wallet_port: config.base_wallet_port,
config: config,
server_containers: Vec::new(),
is_seeding: false,
})
}
pub fn new(config: LocalServerContainerPoolConfig)->LocalServerContainerPool{
(LocalServerContainerPool{
next_api_port: config.base_api_port,
next_p2p_port: config.base_p2p_port,
next_wallet_port: config.base_wallet_port,
config: config,
server_containers: Vec::new(),
is_seeding: false,
/// adds a single server on the next available port
/// overriding passed-in values as necessary. Config object is an OUT value
/// with
/// ports/addresses filled in
///
})
}
pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
/// adds a single server on the next available port
/// overriding passed-in values as necessary. Config object is an OUT value with
/// ports/addresses filled in
///
// If we're calling it this way, need to override these
server_config.p2p_server_port = self.next_p2p_port;
server_config.api_server_port = self.next_api_port;
server_config.wallet_port = self.next_wallet_port;
pub fn create_server(&mut self, server_config:&mut LocalServerContainerConfig)
{
//If we're calling it this way, need to override these
server_config.p2p_server_port=self.next_p2p_port;
server_config.api_server_port=self.next_api_port;
server_config.wallet_port=self.next_wallet_port;
server_config.name=String::from(format!("{}/{}-{}",
self.config.base_name,
self.config.base_name,
server_config.p2p_server_port));
server_config.name = String::from(format!(
"{}/{}-{}",
self.config.base_name,
self.config.base_name,
server_config.p2p_server_port
));
//Use self as coinbase wallet
server_config.coinbase_wallet_address=String::from(format!("http://{}:{}",
server_config.base_addr,
server_config.wallet_port));
// Use self as coinbase wallet
server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr,
server_config.wallet_port
));
self.next_p2p_port+=1;
self.next_api_port+=1;
self.next_wallet_port+=1;
self.next_p2p_port += 1;
self.next_api_port += 1;
self.next_wallet_port += 1;
if server_config.is_seeding {
self.is_seeding=true;
}
if server_config.is_seeding {
self.is_seeding = true;
}
let _server_address = format!("{}:{}",
server_config.base_addr,
server_config.p2p_server_port);
let _server_address = format!(
"{}:{}",
server_config.base_addr,
server_config.p2p_server_port
);
let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
// self.server_containers.push(server_arc);

// Create a future that runs the server for however many seconds
// collect them all and run them in the run_all_servers
let _run_time = self.config.run_length_in_seconds;

self.server_containers.push(server_container);
}
/// adds n servers, ready to run
///
///
#[allow(dead_code)]
pub fn create_servers(&mut self, number: u16) {
for _ in 0..number {
// self.create_server();
}
}
/// runs all servers, and returns a vector of references to the servers
/// once they've all been run
///
pub fn run_all_servers(self) -> Vec<grin::ServerStats> {
let run_length = self.config.run_length_in_seconds;
let mut handles = vec![];

// return handles to all of the servers, wrapped in mutexes, handles, etc
let return_containers = Arc::new(Mutex::new(Vec::new()));

let is_seeding = self.is_seeding.clone();
for mut s in self.server_containers {
let return_container_ref = return_containers.clone();
let handle = thread::spawn(move || {
if is_seeding && !s.config.is_seeding {
// there's a seed and we're not it, so hang around longer and give the seed
// a chance to start
thread::sleep(time::Duration::from_millis(2000));
}
let server_ref = s.run_server(run_length);
return_container_ref.lock().unwrap().push(server_ref);
});
// Not a big fan of sleeping hack here, but there appears to be a
// concurrency issue when creating files in rocksdb that causes
// failure if we don't pause a bit before starting the next server
thread::sleep(time::Duration::from_millis(500));
handles.push(handle);
}
for handle in handles {
match handle.join() {
Ok(_) => {}
Err(e) => {
println!("Error starting server thread: {:?}", e);
panic!(e);
}
}
}
// return a much simplified version of the results
let return_vec = return_containers.lock().unwrap();
return_vec.clone()
}
/// just pull out all currently active servers, build a list,
/// and feed into all servers
pub fn connect_all_peers(&mut self) {
let mut server_addresses: Vec<String> = Vec::new();
for s in &self.server_containers {
let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);
server_addresses.push(server_address);
}
for a in server_addresses {
for s in &mut self.server_containers {
if format!("{}:{}", s.config.base_addr, s.config.p2p_server_port) != a {
s.add_peer(a.clone());
}
}
}
}
}
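For orientation, here is a minimal sketch of how a test might drive this pool, mirroring the calls used in the simulation tests further down. The config construction (and the assumption that both config structs implement Default) is illustrative only.

// Illustrative sketch only: drives the pool the same way the simulation
// tests below do. Assumes both config structs implement Default and that
// the chosen base ports are free.
fn run_small_pool_sketch() {
let mut pool_config = LocalServerContainerPoolConfig::default();
pool_config.base_name = String::from("pool_sketch");
pool_config.base_p2p_port = 10000;
pool_config.base_api_port = 11000;
pool_config.base_wallet_port = 12000;
pool_config.run_length_in_seconds = 30;

let mut pool = LocalServerContainerPool::new(pool_config);

// One mutable config is reused; create_server treats it as an OUT value
// and fills in the ports/addresses for each server it creates.
let mut server_config = LocalServerContainerConfig::default();
for _ in 0..4 {
pool.create_server(&mut server_config);
}

// Tell every server about every other, then run them all for the
// configured number of seconds and collect their stats.
pool.connect_all_peers();
let _stats = pool.run_all_servers();
}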

View file

@ -49,8 +49,8 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon
/// Block and mining into a wallet for a bit
#[test]
fn basic_genesis_mine() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "genesis_mine";
framework::clean_all_output(test_name_dir);
@ -80,8 +80,8 @@ fn basic_genesis_mine() {
/// messages they all end up connected.
#[test]
fn simulate_seeding() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_seeding";
framework::clean_all_output(test_name_dir);
@ -114,13 +114,13 @@ fn simulate_seeding() {
server_config.p2p_server_port
));
for _ in 0..4 {
pool.create_server(&mut server_config);
}
pool.connect_all_peers();
let _ = pool.run_all_servers();
}
/// Create 1 server, start it mining, then connect 4 other peers mining and
@ -128,16 +128,14 @@ fn simulate_seeding() {
/// as a seed. Meant to test the evolution of mining difficulty with miners
/// running at different rates
// Just going to comment this out as an automatically run test for the
// time being, as it's more for actively testing and hurts CI a lot
//#[test]
#[allow(dead_code)]
fn simulate_parallel_mining() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_parallel_mining";
// framework::clean_all_output(test_name_dir);
@ -178,7 +176,7 @@ fn simulate_parallel_mining() {
pool.connect_all_peers();
let _ = pool.run_all_servers();
// Check mining difficulty here? Though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target
@ -193,7 +191,7 @@ fn simulate_parallel_mining() {
#[test]
fn a_simulate_block_propagation() {
env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-prop";
framework::clean_all_output(test_name_dir);
@ -201,13 +199,18 @@ fn a_simulate_block_propagation() {
let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig {
enable_mining: true,
burn_reward: true,
use_cuckoo_miner: false,
cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default()
};
@ -260,7 +263,7 @@ fn a_simulate_block_propagation() {
#[test]
fn simulate_full_sync() {
env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-sync";
framework::clean_all_output(test_name_dir);
@ -268,13 +271,18 @@ fn simulate_full_sync() {
let mut evtlp = reactor::Core::new().unwrap();
let handle = evtlp.handle();
let mut plugin_config = pow::types::CuckooMinerPluginConfig::default();
let mut plugin_config_vec: Vec<pow::types::CuckooMinerPluginConfig> = Vec::new();
plugin_config.type_filter = String::from("mean_cpu");
plugin_config_vec.push(plugin_config);
let miner_config = pow::types::MinerConfig {
enable_mining: true,
burn_reward: true,
use_cuckoo_miner: false,
cuckoo_miner_async_mode: Some(false),
cuckoo_miner_plugin_dir: Some(String::from("../target/debug/deps")),
cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default()
};

View file

@ -14,10 +14,15 @@ lazy_static = "~0.2.8"
serde = "~1.0.8"
serde_derive = "~1.0.8"
grin_core = { path = "../core" }

[dependencies.cuckoo_miner]
git = "https://github.com/mimblewimble/cuckoo-miner"
tag = "grin_integration_9"
#path = "../../cuckoo-miner"
#uncomment this feature to turn off plugin builds
#features=["no-plugin-build"]
[dev_dependencies]
grin_chain = { path = "../chain"}
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }

View file

@ -121,7 +121,7 @@ pub fn mine_genesis_block(miner_config:Option<types::MinerConfig>)->Option<core:
/// Runs a proof of work computation over the provided block using the provided Mining Worker,
/// until the required difficulty target is reached. May take a while for a low target...
pub fn pow_size<T: MiningWorker + ?Sized>(miner:&mut T, bh: &mut BlockHeader,
diff: Difficulty, _: u32) -> Result<(), Error> {
let start_nonce = bh.nonce;
// if we're in production mode, try the pre-mined solution first

View file

@ -27,36 +27,35 @@ use core::global;
use core::core::Proof;
use types::MinerConfig;
use std::sync::Mutex;

use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
CuckooMinerDeviceStats, CuckooMinerError};
// For now, we're just going to keep a static reference around to the loaded
// config, and not allow querying the plugin directory twice once a plugin
// has been selected. This is to keep compatibility with multi-threaded
// testing, so that spawned testing threads don't try to load/unload the
// library while another thread is using it.
lazy_static!{
static ref LOADED_CONFIG: Mutex<Option<Vec<CuckooMinerConfig>>> = Mutex::new(None);
}
/// plugin miner
pub struct PluginMiner {
/// the miner
pub miner: Option<CuckooMiner>,
last_solution: CuckooMinerSolution,
config: Vec<CuckooMinerConfig>,
}
impl Default for PluginMiner {
fn default() -> PluginMiner {
PluginMiner {
miner: None,
config: Vec::new(),
last_solution: CuckooMinerSolution::new(),
}
}
@ -64,86 +63,97 @@ impl Default for PluginMiner {
impl PluginMiner {
/// Init the plugin miner
pub fn init(&mut self, miner_config: MinerConfig) {
// Get directory of executable
let mut exe_path = env::current_exe().unwrap();
exe_path.pop();
let exe_path = exe_path.to_str().unwrap();

let plugin_install_path = match miner_config.cuckoo_miner_plugin_dir.clone() {
Some(s) => s,
None => String::from(format!("{}/plugins", exe_path)),
};
let mut plugin_vec_filters = Vec::new();
if let None = miner_config.cuckoo_miner_plugin_config {
plugin_vec_filters.push(String::from("simple"));
} else {
for p in miner_config.clone().cuckoo_miner_plugin_config.unwrap() {
plugin_vec_filters.push(p.type_filter);
}
}
// First, load and query the plugins in the given directory
// These should all be stored in 'plugins' at the moment relative
// to the executable path, though they should appear somewhere else
// when packaging is more thought out
let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();
// Load from here instead
if let Some(ref c) = *loaded_config_ref {
debug!("Not re-loading plugin or directory.");
// this will load the associated plugin
let result = CuckooMiner::new(c.clone());
self.miner = Some(result.unwrap());
self.config = c.clone();
return;
}
let mut plugin_manager = CuckooPluginManager::new().unwrap();
let result = plugin_manager.load_plugin_dir(plugin_install_path);
if let Err(_) = result {
error!("Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins.");
error!(
"Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins."
);
panic!("Unable to load plugin directory... Please check configuration values");
}
let sz = global::sizeshift();
let mut cuckoo_configs = Vec::new();
let mut index = 0;
for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift
let filter = format!("{}_{}", f, sz);

let caps = plugin_manager.get_available_plugins(&filter).unwrap();
// insert it into the miner configuration being created below
let mut config = CuckooMinerConfig::new();

info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
config.plugin_full_path = caps[0].full_path.clone();
if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
if let Some(lp) = l[index].parameter_list.clone() {
config.parameter_list = lp.clone();
}
}
cuckoo_configs.push(config);
index += 1;
}
// Store this config now, because we just want one instance
// of the plugin lib per invocation now
*loaded_config_ref = Some(cuckoo_configs.clone());

// this will load the associated plugin
let result = CuckooMiner::new(cuckoo_configs.clone());
if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e);
error!("Accepted values are: {:?}", caps[0].parameters);
//error!("Accepted values are: {:?}", caps[0].parameters);
panic!("Unable to init mining plugin.");
}
self.config = cuckoo_configs.clone();
self.miner = Some(result.unwrap());
}
/// Get the miner
pub fn get_consumable(&mut self) -> CuckooMiner {

// this will load the associated plugin
let result = CuckooMiner::new(self.config.clone());
if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e);
panic!("Unable to init mining plugin.");
@ -151,29 +161,39 @@ impl PluginMiner {
result.unwrap()
}
/// Returns the number of mining plugins that have been loaded
pub fn loaded_plugin_count(&self) -> usize {
self.config.len()
}
/// Get stats
pub fn get_stats(&self, index: usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
self.miner.as_ref().unwrap().get_stats(index)
}
}
impl MiningWorker for PluginMiner {
/// This will initialise a plugin according to what's currently
/// included in CONSENSUS::TEST_SIZESHIFT, just using the edgetrim
/// version of the miner for now, though this should become
/// configurable somehow
fn new(_ease: u32, _sizeshift: u32, _proof_size: usize) -> Self {
PluginMiner::default()
}
/// And simply calls the mine function of the loaded plugin
/// returning whether a solution was found and the solution itself
fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error> {
let result = self.miner
.as_mut()
.unwrap()
.mine(&header, &mut self.last_solution, 0)
.unwrap();
if result == true {
return Ok(Proof::new(self.last_solution.solution_nonces.to_vec()));
}
Err(Error::NoSolution)
}
}
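With multiple plugins loaded, stats are addressed per plugin by index. Below is a minimal sketch of how a caller might poll them once init() has run; the log wording is illustrative, the method signatures come from the code above.

// Illustrative sketch: poll per-plugin device stats from an initialised PluginMiner.
fn log_plugin_stats(miner: &PluginMiner) {
for index in 0..miner.loaded_plugin_count() {
match miner.get_stats(index) {
// The fields of CuckooMinerDeviceStats are plugin-defined; just count entries here
Ok(stats_vec) => info!("plugin {}: {} device(s) reporting stats", index, stats_vec.len()),
Err(e) => error!("plugin {}: could not read stats: {:?}", index, e),
}
}
}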

View file

@ -16,6 +16,25 @@
use std::collections::HashMap;
/// CuckooMinerPlugin configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CuckooMinerPluginConfig {
/// The type of plugin to load (i.e. filters on filename)
pub type_filter: String,
/// Parameters for this plugin
pub parameter_list: Option<HashMap<String, u32>>,
}
impl Default for CuckooMinerPluginConfig {
fn default() -> CuckooMinerPluginConfig {
CuckooMinerPluginConfig {
type_filter: String::new(),
parameter_list: None,
}
}
}
/// Mining configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MinerConfig {
@ -28,19 +47,15 @@ pub struct MinerConfig {
/// Whether to use the async version of mining
pub cuckoo_miner_async_mode: Option<bool>,
/// The location in which cuckoo miner plugins are stored
pub cuckoo_miner_plugin_dir: Option<String>,
/// Cuckoo miner plugin configuration, one for each plugin
pub cuckoo_miner_plugin_config: Option<Vec<CuckooMinerPluginConfig>>,
/// How long to wait before stopping the miner, recollecting transactions
/// and starting again
pub attempt_time_per_block: u32,
/// Base address to the HTTP wallet receiver
pub wallet_receiver_url: String,
@ -52,7 +67,6 @@ pub struct MinerConfig {
/// a testing attribute for the time being that artificially slows down the
/// mining loop by adding a sleep to the thread
pub slow_down_in_millis: Option<u64>,
}
impl Default for MinerConfig {
@ -62,8 +76,7 @@ impl Default for MinerConfig {
use_cuckoo_miner: false,
cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: None,
cuckoo_miner_plugin_config: None,
wallet_receiver_url: "http://localhost:13416".to_string(),
burn_reward: false,
slow_down_in_millis: Some(0),
@ -71,4 +84,3 @@ impl Default for MinerConfig {
}
}
}
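To tie the new fields together, here is a sketch of a MinerConfig that loads two plugins, following the pattern the simulation tests above use. The "lean_cpu" filter and the NUM_THREADS parameter name/value are illustrative assumptions rather than documented values.

// Illustrative sketch: a MinerConfig that loads two cuckoo-miner plugins,
// mirroring the pattern used in the simulation tests above. The "lean_cpu"
// filter and the NUM_THREADS parameter are assumptions.
fn example_multi_plugin_config() -> MinerConfig {
use std::collections::HashMap;

let mut plugin_config_vec: Vec<CuckooMinerPluginConfig> = Vec::new();

let mut mean_plugin = CuckooMinerPluginConfig::default();
mean_plugin.type_filter = String::from("mean_cpu");
plugin_config_vec.push(mean_plugin);

let mut lean_plugin = CuckooMinerPluginConfig::default();
lean_plugin.type_filter = String::from("lean_cpu");
let mut params: HashMap<String, u32> = HashMap::new();
params.insert(String::from("NUM_THREADS"), 4);
lean_plugin.parameter_list = Some(params);
plugin_config_vec.push(lean_plugin);

MinerConfig {
enable_mining: true,
use_cuckoo_miner: true,
cuckoo_miner_plugin_dir: Some(String::from("target/debug/deps")),
cuckoo_miner_plugin_config: Some(plugin_config_vec),
..Default::default()
}
}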