// Copyright 2023 The Grim Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{fs, thread};
use std::path::PathBuf;
use std::sync::{Arc, RwLock, RwLockReadGuard};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

use futures::channel::oneshot;
use grin_chain::SyncStatus;
use grin_core::global;
use grin_core::global::ChainTypes;
use grin_servers::{Server, ServerStats, StratumServerConfig, StratumStats};
use grin_servers::common::types::Error;
use jni::sys::{jboolean, jstring};
use lazy_static::lazy_static;

use crate::node::NodeConfig;
use crate::node::stratum::{StratumStopState, StratumServer};

lazy_static! {
    /// Static thread-aware state of [`Node`] to be updated from another thread.
    static ref NODE_STATE: Arc<Node> = Arc::new(Node::default());
}

/// Provides [`Server`] control, holds current status and statistics.
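///
/// A minimal usage sketch (illustrative only, based on the methods of this type):
///
/// ```ignore
/// Node::start();
/// if Node::is_running() {
///     println!("{}", Node::get_sync_status_text());
/// }
/// Node::stop(false);
/// ```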
pub struct Node {
    /// The node [`Server`] statistics for UI.
    stats: Arc<RwLock<Option<ServerStats>>>,
    /// Stratum server statistics.
    stratum_stats: Arc<grin_util::RwLock<StratumStats>>,
    /// Stratum server stop state.
    stratum_stop_state: Arc<StratumStopState>,
    /// Running API server address.
    api_addr: Arc<RwLock<Option<String>>>,
    /// Running P2P server port.
    p2p_port: Arc<RwLock<Option<u16>>>,
    /// Indicator if server is starting.
    starting: AtomicBool,
    /// Thread flag to stop the server and start it again.
    restart_needed: AtomicBool,
    /// Thread flag to stop the server.
    stop_needed: AtomicBool,
    /// Flag to check if app exit is needed after server stop.
    exit_after_stop: AtomicBool,
    /// Thread flag to start stratum server.
    start_stratum_needed: AtomicBool,
    /// Error on [`Server`] start.
    init_error: Option<Error>
}

impl Default for Node {
    fn default() -> Self {
        Self {
            stats: Arc::new(RwLock::new(None)),
            stratum_stats: Arc::new(grin_util::RwLock::new(StratumStats::default())),
            stratum_stop_state: Arc::new(StratumStopState::default()),
            api_addr: Arc::new(RwLock::new(None)),
            p2p_port: Arc::new(RwLock::new(None)),
            starting: AtomicBool::new(false),
            restart_needed: AtomicBool::new(false),
            stop_needed: AtomicBool::new(false),
            exit_after_stop: AtomicBool::new(false),
            start_stratum_needed: AtomicBool::new(false),
            init_error: None
        }
    }
}

impl Node {
    /// Delay for the server thread between stats updates.
    pub const STATS_UPDATE_DELAY: Duration = Duration::from_millis(250);

    /// Stop the [`Server`] and set the exit flag if the app should close after the stop.
    pub fn stop(exit_after_stop: bool) {
        NODE_STATE.stop_needed.store(true, Ordering::Relaxed);
        NODE_STATE.exit_after_stop.store(exit_after_stop, Ordering::Relaxed);
    }

    /// Start the node.
    pub fn start() {
        if !Self::is_running() {
            Self::start_server_thread();
        }
    }

    /// Restart the node.
    pub fn restart() {
        if Self::is_running() {
            NODE_STATE.restart_needed.store(true, Ordering::Relaxed);
        } else {
            Node::start();
        }
    }

    /// Get API server address if node is running.
    pub fn get_api_addr() -> Option<String> {
        let r_api_addr = NODE_STATE.api_addr.read().unwrap();
        r_api_addr.clone()
    }

    /// Get P2P server port if node is running.
    pub fn get_p2p_port() -> Option<u16> {
        let r_p2p_port = NODE_STATE.p2p_port.read().unwrap();
        *r_p2p_port
    }

    /// Request to start stratum server.
    pub fn start_stratum() {
        NODE_STATE.start_stratum_needed.store(true, Ordering::Relaxed);
    }

    /// Check if stratum server is starting.
    pub fn is_stratum_starting() -> bool {
        NODE_STATE.start_stratum_needed.load(Ordering::Relaxed)
    }

    /// Get stratum server statistics.
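    ///
    /// A read sketch (illustrative; the `is_running` field is assumed from grin's `StratumStats`):
    ///
    /// ```ignore
    /// let stratum_running = Node::get_stratum_stats().is_running;
    /// ```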
    pub fn get_stratum_stats() -> grin_util::RwLockReadGuard<'static, StratumStats> {
        NODE_STATE.stratum_stats.read()
    }

    /// Stop stratum server.
    pub fn stop_stratum() {
        NODE_STATE.stratum_stop_state.stop()
    }

    /// Check if stratum server is stopping.
    pub fn is_stratum_stopping() -> bool {
        NODE_STATE.stratum_stop_state.is_stopped()
    }

    /// Check if node is starting.
    pub fn is_starting() -> bool {
        NODE_STATE.starting.load(Ordering::Relaxed)
    }

    /// Check if node is running.
    pub fn is_running() -> bool {
        Self::get_sync_status().is_some()
    }

    /// Check if node is stopping.
    pub fn is_stopping() -> bool {
        NODE_STATE.stop_needed.load(Ordering::Relaxed)
    }

    /// Check if node is restarting.
    pub fn is_restarting() -> bool {
        NODE_STATE.restart_needed.load(Ordering::Relaxed)
    }

    /// Get node [`Server`] statistics.
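    ///
    /// A read sketch (illustrative):
    ///
    /// ```ignore
    /// if let Some(stats) = Node::get_stats().as_ref() {
    ///     let sync_status = stats.sync_status;
    /// }
    /// ```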
    pub fn get_stats() -> RwLockReadGuard<'static, Option<ServerStats>> {
        NODE_STATE.stats.read().unwrap()
    }

    /// Get synchronization status, empty when [`Server`] is not running.
    pub fn get_sync_status() -> Option<SyncStatus> {
        // Return Shutdown status when node is stopping.
        if Self::is_stopping() {
            return Some(SyncStatus::Shutdown);
        }

        // Return Initial status when node is starting or restarting.
        if Self::is_starting() || Self::is_restarting() {
            return Some(SyncStatus::Initial);
        }

        let stats = Self::get_stats();
        // Return sync status when server is running (stats are not empty).
        if stats.is_some() {
            return Some(stats.as_ref().unwrap().sync_status);
        }
        None
    }

    /// Start the [`Server`] in a separate thread to update the state with stats and handle status flags.
    fn start_server_thread() {
        thread::spawn(move || {
            NODE_STATE.starting.store(true, Ordering::Relaxed);

            // Start the server.
            match start_node_server() {
                Ok(mut server) => {
                    let mut first_start = true;
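                    // Main server loop: handle restart and stop requests, start the
                    // stratum server on demand and refresh stats on every iteration.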
                    loop {
                        if Self::is_restarting() {
                            // Stop the server.
                            server.stop();

                            // Reset stratum stats.
                            {
                                let mut w_stratum_stats = NODE_STATE.stratum_stats.write();
                                *w_stratum_stats = StratumStats::default();
                            }

                            // Create new server.
                            match start_node_server() {
                                Ok(s) => {
                                    server = s;
                                    NODE_STATE.restart_needed.store(false, Ordering::Relaxed);
                                }
                                Err(e) => {
                                    Self::on_start_error(&e);
                                    break;
                                }
                            }
                        } else if Self::is_stopping() {
                            // Stop the server.
                            server.stop();
                            // Clean stats and statuses.
                            Self::on_thread_stop();
                            // Exit thread loop.
                            break;
                        }

                        // Start stratum mining server if requested.
                        let stratum_start_requested = Self::is_stratum_starting();
                        if stratum_start_requested {
                            let (s_ip, s_port) = NodeConfig::get_stratum_address();
                            if NodeConfig::is_stratum_port_available(&s_ip, &s_port) {
                                let stratum_config = server
                                    .config
                                    .stratum_mining_config
                                    .clone()
                                    .unwrap();
                                start_stratum_mining_server(&server, stratum_config);
                            }
                        }

                        // Update server stats.
                        if let Ok(stats) = server.get_server_stats() {
                            {
                                let mut w_stats = NODE_STATE.stats.write().unwrap();
                                *w_stats = Some(stats.clone());
                            }

                            if first_start {
                                NODE_STATE.starting.store(false, Ordering::Relaxed);
                                first_start = false;
                            }
                        }

                        if stratum_start_requested {
                            NODE_STATE.start_stratum_needed.store(false, Ordering::Relaxed);
                        }

                        thread::sleep(Self::STATS_UPDATE_DELAY);
                    }
                }
                Err(e) => {
                    Self::on_start_error(&e);
                }
            }
        });
    }

    /// Reset stats and statuses on [`Server`] thread stop.
    fn on_thread_stop() {
        NODE_STATE.starting.store(false, Ordering::Relaxed);
        NODE_STATE.restart_needed.store(false, Ordering::Relaxed);
        NODE_STATE.start_stratum_needed.store(false, Ordering::Relaxed);
        NODE_STATE.stop_needed.store(false, Ordering::Relaxed);

        // Reset stratum stats.
        {
            let mut w_stratum_stats = NODE_STATE.stratum_stats.write();
            *w_stratum_stats = StratumStats::default();
        }
        // Clean server stats.
        {
            let mut w_stats = NODE_STATE.stats.write().unwrap();
            *w_stats = None;
        }
        // Clean launched API server address.
        {
            let mut w_api_addr = NODE_STATE.api_addr.write().unwrap();
            *w_api_addr = None;
        }
        // Clean launched P2P server port.
        {
            let mut w_p2p_port = NODE_STATE.p2p_port.write().unwrap();
            *w_p2p_port = None;
        }
    }

    /// Handle node [`Server`] error on start.
    fn on_start_error(e: &Error) {
        Self::on_thread_stop();

        //TODO: Create error
        // NODE_STATE.init_error = Some(e);

        // // Clean-up server data on data init error.
        // // TODO: Ask user to clean-up data
        // let clean_server_and_recreate = || -> Server {
        //     let mut db_path = PathBuf::from(&server_config.db_root);
        //     db_path.push("grin.lock");
        //     fs::remove_file(db_path).unwrap();
        //
        //     // Remove chain data on server start error
        //     let dirs_to_remove: Vec<&str> = vec!["header", "lmdb", "txhashset"];
        //     for dir in dirs_to_remove {
        //         let mut path = PathBuf::from(&server_config.db_root);
        //         path.push(dir);
        //         fs::remove_dir_all(path).unwrap();
        //     }
        //
        //     // Recreate server
        //     let api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>) =
        //         Box::leak(Box::new(oneshot::channel::<()>()));
        //     server_result = Server::new(server_config.clone(), None, api_chan);
        //     server_result.unwrap()
        // };

        // Show error on server init error.
        // TODO: Ask user to clean-up data
        let show_error = |err: String| {
            println!("Node server creation error:\n{}", err);
            //TODO: Don't panic, maybe.
            panic!("{}", err);
        };

        //TODO: Better error handling
        match e {
            Error::Store(e) => {
                //TODO: Set err to ask user to clean data
                panic!("{}", e);
                //(clean_server_and_recreate)()
            }
            Error::Chain(e) => {
                //TODO: Set err to ask user to clean data
                panic!("{}", e);
                //(clean_server_and_recreate)()
            }
            //TODO: Handle P2P error (Show config error msg)
            Error::P2P(ref e) => {
                (show_error)("P2P error".to_string());
            }
            //TODO: Handle API error (Show config error msg)
            Error::API(ref e) => {
                (show_error)(e.to_string());
            }
            //TODO: Seems like another node instance running?
            Error::IOError(ref e) => {
                (show_error)(e.to_string());
            }
            //TODO: Show config error msg
            Error::Configuration(ref e) => {
                (show_error)(e.to_string());
            }
            //TODO: Unknown error
            _ => {
                (show_error)("Unknown error".to_string());
            }
        }
    }

    /// Get synchronization status i18n text.
    pub fn get_sync_status_text() -> String {
        if Node::is_stopping() {
            return t!("sync_status.shutdown");
        }

        if Node::is_starting() {
            return t!("sync_status.initial");
        }

        if Node::is_restarting() {
            return t!("sync_status.node_restarting");
        }

        let sync_status = Self::get_sync_status();

        if sync_status.is_none() {
            return t!("sync_status.node_down");
        }

        match sync_status.unwrap() {
            SyncStatus::Initial => t!("sync_status.initial"),
            SyncStatus::NoSync => t!("sync_status.no_sync"),
            SyncStatus::AwaitingPeers(_) => t!("sync_status.awaiting_peers"),
            SyncStatus::HeaderSync {
                sync_head,
                highest_height,
                ..
            } => {
                if highest_height == 0 {
                    t!("sync_status.header_sync")
                } else {
                    let percent = sync_head.height * 100 / highest_height;
                    t!("sync_status.header_sync_percent", "percent" => percent)
                }
            }
            SyncStatus::TxHashsetPibd {
                aborted: _,
                errored: _,
                completed_leaves,
                leaves_required,
                completed_to_height: _,
                required_height: _,
            } => {
                if completed_leaves == 0 {
                    t!("sync_status.tx_hashset_pibd")
                } else {
                    let percent = completed_leaves * 100 / leaves_required;
                    t!("sync_status.tx_hashset_pibd_percent", "percent" => percent)
                }
            }
            SyncStatus::TxHashsetDownload(stat) => {
                if stat.total_size > 0 {
                    let percent = stat.downloaded_size * 100 / stat.total_size;
                    t!("sync_status.tx_hashset_download_percent", "percent" => percent)
                } else {
                    t!("sync_status.tx_hashset_download")
                }
            }
            SyncStatus::TxHashsetSetup {
                headers,
                headers_total,
                kernel_pos,
                kernel_pos_total,
            } => {
                if headers.is_some() && headers_total.is_some() {
                    let h = headers.unwrap();
                    let ht = headers_total.unwrap();
                    let percent = h * 100 / ht;
                    t!("sync_status.tx_hashset_setup_history", "percent" => percent)
                } else if kernel_pos.is_some() && kernel_pos_total.is_some() {
                    let k = kernel_pos.unwrap();
                    let kt = kernel_pos_total.unwrap();
                    let percent = k * 100 / kt;
                    t!("sync_status.tx_hashset_setup_position", "percent" => percent)
                } else {
                    t!("sync_status.tx_hashset_setup")
                }
            }
            SyncStatus::TxHashsetRangeProofsValidation {
                rproofs,
                rproofs_total,
            } => {
                let r_percent = if rproofs_total > 0 {
                    (rproofs * 100) / rproofs_total
                } else {
                    0
                };
                t!("sync_status.tx_hashset_range_proofs_validation", "percent" => r_percent)
            }
            SyncStatus::TxHashsetKernelsValidation {
                kernels,
                kernels_total,
            } => {
                let k_percent = if kernels_total > 0 {
                    (kernels * 100) / kernels_total
                } else {
                    0
                };
                t!("sync_status.tx_hashset_kernels_validation", "percent" => k_percent)
            }
            SyncStatus::TxHashsetSave | SyncStatus::TxHashsetDone => {
                t!("sync_status.tx_hashset_save")
            }
            SyncStatus::BodySync {
                current_height,
                highest_height,
            } => {
                if highest_height == 0 {
                    t!("sync_status.body_sync")
                } else {
                    let percent = current_height * 100 / highest_height;
                    t!("sync_status.body_sync_percent", "percent" => percent)
                }
            }
            SyncStatus::Shutdown => t!("sync_status.shutdown"),
        }
    }
}

/// Start the node [`Server`].
fn start_node_server() -> Result<Server, Error> {
    // Get saved server config.
    let config = NodeConfig::node_server_config();
    let server_config = config.server.clone();

    // Remove temporary files directory.
    {
        let mut tmp_dir = PathBuf::from(&server_config.db_root);
        tmp_dir = tmp_dir.parent().unwrap().to_path_buf();
        tmp_dir.push("tmp");
        if tmp_dir.exists() {
            match fs::remove_dir_all(tmp_dir) {
                Ok(_) => {}
                Err(_) => { println!("Cannot remove tmp dir") }
            }
        }
    }

    // Initialize our global chain_type, feature flags (NRD kernel support currently),
    // accept_fee_base, and future_time_limit.
    // These are read via global and not read from config beyond this point.
    if !global::GLOBAL_CHAIN_TYPE.is_init() {
        global::init_global_chain_type(config.server.chain_type);
    } else {
        global::set_global_chain_type(config.server.chain_type);
        global::set_local_chain_type(config.server.chain_type);
    }

    if !global::GLOBAL_NRD_FEATURE_ENABLED.is_init() {
        match global::get_chain_type() {
            ChainTypes::Mainnet => {
                global::init_global_nrd_enabled(false);
            }
            _ => {
                global::init_global_nrd_enabled(true);
            }
        }
    } else {
        match global::get_chain_type() {
            ChainTypes::Mainnet => {
                global::set_global_nrd_enabled(false);
            }
            _ => {
                global::set_global_nrd_enabled(true);
            }
        }
    }

    let afb = config.server.pool_config.accept_fee_base;
    if !global::GLOBAL_ACCEPT_FEE_BASE.is_init() {
        global::init_global_accept_fee_base(afb);
    } else {
        global::set_global_accept_fee_base(afb);
    }

    let future_time_limit = config.server.future_time_limit;
    if !global::GLOBAL_FUTURE_TIME_LIMIT.is_init() {
        global::init_global_future_time_limit(future_time_limit);
    } else {
        global::set_global_future_time_limit(future_time_limit);
    }
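
    // Leak the API shutdown channel to get the 'static lifetime required by Server::new.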
    let api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>) =
        Box::leak(Box::new(oneshot::channel::<()>()));

    // Set launching API server address from config to state.
    {
        let mut w_api_addr = NODE_STATE.api_addr.write().unwrap();
        *w_api_addr = Some(config.server.api_http_addr);
    }

    // Set launching P2P server port from config to state.
    {
        let mut w_p2p_port = NODE_STATE.p2p_port.write().unwrap();
        *w_p2p_port = Some(config.server.p2p_config.port);
    }

    // Put flag to start stratum server if autorun is enabled.
    if NodeConfig::is_stratum_autorun_enabled() {
        NODE_STATE.start_stratum_needed.store(true, Ordering::Relaxed);
    }

    Server::new(server_config, None, api_chan)
}

/// Start stratum mining server on a separate thread.
pub fn start_stratum_mining_server(server: &Server, config: StratumServerConfig) {
    let proof_size = global::proofsize();
    let sync_state = server.sync_state.clone();

    let mut stratum_server = StratumServer::new(
        config,
        server.chain.clone(),
        server.tx_pool.clone(),
        NODE_STATE.stratum_stats.clone(),
    );
    let stop_state = NODE_STATE.stratum_stop_state.clone();
    stop_state.reset();
    let _ = thread::Builder::new()
        .name("stratum_server".to_string())
        .spawn(move || {
            stratum_server.run_loop(proof_size, sync_state, stop_state);
        });
}
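
// JNI bindings below use the Java_<package>_<class>_<method> symbol naming
// convention expected by the Android runtime.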

#[allow(dead_code)]
#[cfg(target_os = "android")]
#[allow(non_snake_case)]
#[no_mangle]
/// Get sync status text for Android notification from [`NODE_STATE`] in Java string format.
pub extern "C" fn Java_mw_gri_android_BackgroundService_getSyncStatusText(
    _env: jni::JNIEnv,
    _class: jni::objects::JObject,
    _activity: jni::objects::JObject,
) -> jstring {
    let status_text = Node::get_sync_status_text();
    let j_text = _env.new_string(status_text);
    return j_text.unwrap().into_raw();
}

#[allow(dead_code)]
#[cfg(target_os = "android")]
#[allow(non_snake_case)]
#[no_mangle]
/// Get sync title for Android notification in Java string format.
pub extern "C" fn Java_mw_gri_android_BackgroundService_getSyncTitle(
    _env: jni::JNIEnv,
    _class: jni::objects::JObject,
    _activity: jni::objects::JObject,
) -> jstring {
    let j_text = _env.new_string(t!("network.node"));
    return j_text.unwrap().into_raw();
}

#[allow(dead_code)]
#[cfg(target_os = "android")]
#[allow(non_snake_case)]
#[no_mangle]
/// Check if app exit is needed after node stop to finish the Android app in the background.
pub extern "C" fn Java_mw_gri_android_BackgroundService_exitAppAfterNodeStop(
    _env: jni::JNIEnv,
    _class: jni::objects::JObject,
    _activity: jni::objects::JObject,
) -> jboolean {
    let exit_needed = !Node::is_running() && NODE_STATE.exit_after_stop.load(Ordering::Relaxed);
    return exit_needed as jboolean;
}