fix: split state validation status into kernel and rproof updates. (#3096)

* fix: split state validation status into kernel and rproof updates. And fix sync status for these two states

* fix: show correct number of leaves for pruned MMR as well as unpruned

* docs: better docs for kernel/range proof validation

* fix: ordering of kernel and rproofs validation in TUI

* fix: typo in rangeproofs api and comments
This commit is contained in:
Joseph Goulden 2019-11-17 21:12:10 +00:00 committed by hashmap
parent 739a190352
commit 6d864a813c
10 changed files with 195 additions and 199 deletions

View file

@ -113,16 +113,19 @@ fn sync_status_to_api(sync_status: SyncStatus) -> (String, Option<serde_json::Va
"txhashset_download".to_string(),
Some(json!({ "downloaded_size": downloaded_size, "total_size": total_size })),
),
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
SyncStatus::TxHashsetRangeProofsValidation {
rproofs,
rproof_total,
rproofs_total,
} => (
"txhashset_validation".to_string(),
Some(
json!({ "kernels": kernels, "kernel_total": kernel_total ,"rproofs": rproofs, "rproof_total": rproof_total }),
),
"txhashset_rangeproofs_validation".to_string(),
Some(json!({ "rproofs": rproofs, "rproofs_total": rproofs_total })),
),
SyncStatus::TxHashsetKernelsValidation {
kernels,
kernels_total,
} => (
"txhashset_kernels_validation".to_string(),
Some(json!({ "kernels": kernels, "kernels_total": kernels_total })),
),
SyncStatus::BodySync {
current_height,

View file

@ -1280,7 +1280,7 @@ impl<'a> Extension<'a> {
TxKernel::batch_sig_verify(&tx_kernels)?;
kern_count += tx_kernels.len() as u64;
tx_kernels.clear();
status.on_validation(kern_count, total_kernels, 0, 0);
status.on_validation_kernels(kern_count, total_kernels);
debug!(
"txhashset: verify_kernel_signatures: verified {} signatures",
kern_count,
@ -1305,7 +1305,8 @@ impl<'a> Extension<'a> {
let mut proofs: Vec<RangeProof> = Vec::with_capacity(1_000);
let mut proof_count = 0;
let total_rproofs = pmmr::n_leaves(self.output_pmmr.unpruned_size());
let total_rproofs = self.output_pmmr.n_unpruned_leaves();
for pos in self.output_pmmr.leaf_pos_iter() {
let output = self.output_pmmr.get_data(pos);
let proof = self.rproof_pmmr.get_data(pos);
@ -1331,10 +1332,9 @@ impl<'a> Extension<'a> {
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
if proof_count % 1_000 == 0 {
status.on_validation(0, 0, proof_count, total_rproofs);
if proof_count % 1_000 == 0 {
status.on_validation_rproofs(proof_count, total_rproofs);
}
}
}

View file

@ -65,12 +65,15 @@ pub enum SyncStatus {
},
/// Setting up before validation
TxHashsetSetup,
/// Validating the full state
TxHashsetValidation {
/// Validating the kernels
TxHashsetKernelsValidation {
kernels: u64,
kernel_total: u64,
kernels_total: u64,
},
/// Validating the range proofs
TxHashsetRangeProofsValidation {
rproofs: u64,
rproof_total: u64,
rproofs_total: u64,
},
/// Finalizing the new state
TxHashsetSave,
@ -155,43 +158,18 @@ impl TxHashsetWriteStatus for SyncState {
self.update(SyncStatus::TxHashsetSetup);
}
fn on_validation(&self, vkernels: u64, vkernel_total: u64, vrproofs: u64, vrproof_total: u64) {
let mut status = self.current.write();
match *status {
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
rproofs,
rproof_total,
} => {
let ks = if vkernels > 0 { vkernels } else { kernels };
let kt = if vkernel_total > 0 {
vkernel_total
} else {
kernel_total
};
let rps = if vrproofs > 0 { vrproofs } else { rproofs };
let rpt = if vrproof_total > 0 {
vrproof_total
} else {
rproof_total
};
*status = SyncStatus::TxHashsetValidation {
kernels: ks,
kernel_total: kt,
rproofs: rps,
rproof_total: rpt,
};
}
_ => {
*status = SyncStatus::TxHashsetValidation {
kernels: 0,
kernel_total: 0,
rproofs: 0,
rproof_total: 0,
}
}
}
/// Record progress of kernel signature validation during state sync.
fn on_validation_kernels(&self, kernels: u64, kernels_total: u64) {
    // Build the new status first, then publish it in one call.
    let status = SyncStatus::TxHashsetKernelsValidation {
        kernels,
        kernels_total,
    };
    self.update(status);
}
/// Record progress of range proof validation during state sync.
fn on_validation_rproofs(&self, rproofs: u64, rproofs_total: u64) {
    // Build the new status first, then publish it in one call.
    let status = SyncStatus::TxHashsetRangeProofsValidation {
        rproofs,
        rproofs_total,
    };
    self.update(status);
}
fn on_save(&self) {
@ -315,8 +293,10 @@ pub trait ChainAdapter {
pub trait TxHashsetWriteStatus {
/// First setup of the txhashset
fn on_setup(&self);
/// Starting validation
fn on_validation(&self, kernels: u64, kernel_total: u64, rproofs: u64, rproof_total: u64);
/// Starting kernel validation
fn on_validation_kernels(&self, kernels: u64, kernel_total: u64);
/// Starting rproof validation
fn on_validation_rproofs(&self, rproofs: u64, rproof_total: u64);
/// Starting to save the txhashset and related data
fn on_save(&self);
/// Done writing a new txhashset
@ -328,7 +308,8 @@ pub struct NoStatus;
impl TxHashsetWriteStatus for NoStatus {
fn on_setup(&self) {}
fn on_validation(&self, _ks: u64, _kts: u64, _rs: u64, _rt: u64) {}
fn on_validation_kernels(&self, _ks: u64, _kts: u64) {}
fn on_validation_rproofs(&self, _rs: u64, _rt: u64) {}
fn on_save(&self) {}
fn on_done(&self) {}
}

View file

@ -55,6 +55,9 @@ pub trait Backend<T: PMMRable> {
/// Iterator over current (unpruned, unremoved) leaf positions.
fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_>;
/// Number of unpruned leaves.
fn n_unpruned_leaves(&self) -> u64;
/// Remove Hash by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a
/// given index (practically the index is the height of a block that

View file

@ -79,6 +79,11 @@ where
self.backend.leaf_pos_iter()
}
/// Number of unpruned leaves in the MMR (delegates to the backend).
pub fn n_unpruned_leaves(&self) -> u64 {
    self.backend.n_unpruned_leaves()
}
/// Returns a vec of the peaks of this MMR.
pub fn peaks(&self) -> Vec<Hash> {
let peaks_pos = peaks(self.last_pos);
@ -490,7 +495,6 @@ pub fn peak_map_height(mut pos: u64) -> (u64, u64) {
/// The height of a node in a full binary tree from its postorder traversal
/// index. This function is the base on which all others, as well as the MMR,
/// are built.
pub fn bintree_postorder_height(num: u64) -> u64 {
if num == 0 {
return 0;

View file

@ -113,6 +113,10 @@ impl<T: PMMRable> Backend<T> for VecBackend<T> {
unimplemented!()
}
// Not needed by this in-memory test backend; panics if called.
fn n_unpruned_leaves(&self) -> u64 {
    unimplemented!()
}
fn remove(&mut self, position: u64) -> Result<(), String> {
self.remove_list.push(position);
Ok(())

View file

@ -192,7 +192,8 @@ impl SyncRunner {
match self.sync_state.status() {
SyncStatus::TxHashsetDownload { .. }
| SyncStatus::TxHashsetSetup
| SyncStatus::TxHashsetValidation { .. }
| SyncStatus::TxHashsetRangeProofsValidation { .. }
| SyncStatus::TxHashsetKernelsValidation { .. }
| SyncStatus::TxHashsetSave
| SyncStatus::TxHashsetDone => check_state_sync = true,
_ => {

View file

@ -31,6 +31,109 @@ const NANO_TO_MILLIS: f64 = 1.0 / 1_000_000.0;
/// Renders the node's current sync/running status as a single line in the TUI.
pub struct TUIStatusView;

impl TUIStatusView {
    /// Maps a `SyncStatus` to the one-line progress message shown to the user.
    ///
    /// State sync is presented as 7 steps: 1) header download, 2) chain-state
    /// download, 3) validation setup, 4) range proof validation, 5) kernel
    /// validation, 6) saving/finalizing the state, 7) block body download.
    /// Note: `TxHashsetSave` and `TxHashsetDone` both report step 6/7 — "done"
    /// is the completed form of the same step.
    fn update_sync_status(sync_status: SyncStatus) -> String {
        match sync_status {
            SyncStatus::Initial => "Initializing".to_string(),
            SyncStatus::NoSync => "Running".to_string(),
            SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
            SyncStatus::HeaderSync {
                current_height,
                highest_height,
            } => {
                // Guard against divide-by-zero before the chain tip is known.
                let percent = if highest_height == 0 {
                    0
                } else {
                    current_height * 100 / highest_height
                };
                format!("Sync step 1/7: Downloading headers: {}%", percent)
            }
            SyncStatus::TxHashsetDownload {
                start_time,
                prev_update_time,
                update_time: _,
                prev_downloaded_size,
                downloaded_size,
                total_size,
            } => {
                if total_size > 0 {
                    // total_size > 0 is already guaranteed here, so the percent
                    // computation cannot divide by zero.
                    let percent = downloaded_size * 100 / total_size;
                    // Transfer rate over the window since the previous update.
                    let start = prev_update_time.timestamp_nanos();
                    let fin = Utc::now().timestamp_nanos();
                    let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
                    format!("Sync step 2/7: Downloading {}(MB) chain state for state sync: {}% at {:.1?}(kB/s)",
                            total_size / 1_000_000,
                            percent,
                            if dur_ms > 1.0f64 { (downloaded_size - prev_downloaded_size) as f64 / dur_ms } else { 0f64 },
                    )
                } else {
                    // Size unknown: remote peer has not started streaming yet.
                    let start = start_time.timestamp_millis();
                    let fin = Utc::now().timestamp_millis();
                    let dur_secs = (fin - start) / 1000;
                    format!("Sync step 2/7: Downloading chain state for state sync. Waiting remote peer to start: {}s",
                            dur_secs,
                    )
                }
            }
            SyncStatus::TxHashsetSetup => {
                "Sync step 3/7: Preparing chain state for validation".to_string()
            }
            SyncStatus::TxHashsetRangeProofsValidation {
                rproofs,
                rproofs_total,
            } => {
                let r_percent = if rproofs_total > 0 {
                    (rproofs * 100) / rproofs_total
                } else {
                    0
                };
                format!(
                    "Sync step 4/7: Validating chain state - range proofs: {}%",
                    r_percent
                )
            }
            SyncStatus::TxHashsetKernelsValidation {
                kernels,
                kernels_total,
            } => {
                let k_percent = if kernels_total > 0 {
                    (kernels * 100) / kernels_total
                } else {
                    0
                };
                format!(
                    "Sync step 5/7: Validating chain state - kernels: {}%",
                    k_percent
                )
            }
            SyncStatus::TxHashsetSave => {
                "Sync step 6/7: Finalizing chain state for state sync".to_string()
            }
            SyncStatus::TxHashsetDone => {
                "Sync step 6/7: Finalized chain state for state sync".to_string()
            }
            SyncStatus::BodySync {
                current_height,
                highest_height,
            } => {
                // Guard against divide-by-zero before the chain tip is known.
                let percent = if highest_height == 0 {
                    0
                } else {
                    current_height * 100 / highest_height
                };
                format!("Sync step 7/7: Downloading blocks: {}%", percent)
            }
            SyncStatus::Shutdown => "Shutting down, closing connections".to_string(),
        }
    }
}
impl TUIStatusListener for TUIStatusView {
/// Create basic status view
fn create() -> Box<dyn View> {
@ -143,137 +246,9 @@ impl TUIStatusListener for TUIStatusView {
Box::new(basic_status_view.with_id(VIEW_BASIC_STATUS))
}
/// update
fn update(c: &mut Cursive, stats: &ServerStats) {
//find and update here as needed
let basic_status = {
match stats.sync_status {
SyncStatus::Initial => "Initializing".to_string(),
SyncStatus::NoSync => "Running".to_string(),
SyncStatus::AwaitingPeers(_) => "Waiting for peers".to_string(),
SyncStatus::HeaderSync {
current_height,
highest_height,
} => {
let percent = if highest_height == 0 {
0
} else {
current_height * 100 / highest_height
};
format!("Downloading headers: {}%, step 1/4", percent)
}
SyncStatus::TxHashsetDownload {
start_time,
prev_update_time,
update_time: _,
prev_downloaded_size,
downloaded_size,
total_size,
} => {
if total_size > 0 {
let percent = if total_size > 0 {
downloaded_size * 100 / total_size
} else {
0
};
let start = prev_update_time.timestamp_nanos();
let fin = Utc::now().timestamp_nanos();
let dur_ms = (fin - start) as f64 * NANO_TO_MILLIS;
let basic_status = TUIStatusView::update_sync_status(stats.sync_status);
format!("Downloading {}(MB) chain state for state sync: {}% at {:.1?}(kB/s), step 2/4",
total_size / 1_000_000,
percent,
if dur_ms > 1.0f64 { (downloaded_size - prev_downloaded_size) as f64 / dur_ms as f64 } else { 0f64 },
)
} else {
let start = start_time.timestamp_millis();
let fin = Utc::now().timestamp_millis();
let dur_secs = (fin - start) / 1000;
format!("Downloading chain state for state sync. Waiting remote peer to start: {}s, step 2/4",
dur_secs,
)
}
}
SyncStatus::TxHashsetSetup => {
"Preparing chain state for validation, step 3/4".to_string()
}
SyncStatus::TxHashsetValidation {
kernels,
kernel_total,
rproofs,
rproof_total,
} => {
// 10% of overall progress is attributed to kernel validation
// 90% to range proofs (which are much longer)
let mut percent = if kernel_total > 0 {
kernels * 10 / kernel_total
} else {
0
};
percent += if rproof_total > 0 {
rproofs * 90 / rproof_total
} else {
0
};
format!("Validating chain state: {}%, step 3/4", percent)
}
SyncStatus::TxHashsetSave => {
"Finalizing chain state for state sync, step 3/4".to_string()
}
SyncStatus::TxHashsetDone => {
"Finalized chain state for state sync, step 3/4".to_string()
}
SyncStatus::BodySync {
current_height,
highest_height,
} => {
let percent = if highest_height == 0 {
0
} else {
current_height * 100 / highest_height
};
format!("Downloading blocks: {}%, step 4/4", percent)
}
SyncStatus::Shutdown => "Shutting down, closing connections".to_string(),
}
};
/*let basic_mining_config_status = {
if stats.mining_stats.is_enabled {
"Configured as mining node"
} else {
"Configured as validating node only (not mining)"
}
};
let (basic_mining_status, basic_network_info) = {
if stats.mining_stats.is_enabled {
if stats.is_syncing {
(
"Mining Status: Paused while syncing".to_string(),
" ".to_string(),
)
} else if stats.mining_stats.combined_gps == 0.0 {
(
"Mining Status: Starting miner and awaiting first solution...".to_string(),
" ".to_string(),
)
} else {
(
format!(
"Mining Status: Mining at height {} at {:.*} GPS",
stats.mining_stats.block_height, 4, stats.mining_stats.combined_gps
),
format!(
"Cuckoo {} - Network Difficulty {}",
stats.mining_stats.edge_bits,
stats.mining_stats.network_difficulty.to_string()
),
)
}
} else {
(" ".to_string(), " ".to_string())
}
};*/
c.call_on_id("basic_current_status", |t: &mut TextView| {
t.set_content(basic_status);
});
@ -321,14 +296,25 @@ impl TUIStatusListener for TUIStatusView {
c.call_on_id("stem_pool_kernels", |t: &mut TextView| {
t.set_content(stats.tx_stats.stem_pool_kernels.to_string());
});
/*c.call_on_id("basic_mining_config_status", |t: &mut TextView| {
t.set_content(basic_mining_config_status);
});
c.call_on_id("basic_mining_status", |t: &mut TextView| {
t.set_content(basic_mining_status);
});
c.call_on_id("basic_network_info", |t: &mut TextView| {
t.set_content(basic_network_info);
});*/
}
}
#[test]
fn test_status_txhashset_kernels() {
    // 201 of 5000 kernels validated -> 201 * 100 / 5000 = 4 (integer division).
    let status = SyncStatus::TxHashsetKernelsValidation {
        kernels: 201,
        kernels_total: 5000,
    };
    let basic_status = TUIStatusView::update_sync_status(status);
    // `assert!(cond, expr)` with a bare non-literal message is deprecated and a
    // hard error in Rust 2021; route the message through a format string.
    assert!(basic_status.contains("4%"), "{}", basic_status);
}
#[test]
fn test_status_txhashset_rproofs() {
    // 643 of 1000 range proofs validated -> 643 * 100 / 1000 = 64.
    let status = SyncStatus::TxHashsetRangeProofsValidation {
        rproofs: 643,
        rproofs_total: 1000,
    };
    let basic_status = TUIStatusView::update_sync_status(status);
    // `assert!(cond, expr)` with a bare non-literal message is deprecated and a
    // hard error in Rust 2021; route the message through a format string.
    assert!(basic_status.contains("64%"), "{}", basic_status);
}

View file

@ -140,6 +140,14 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
}
}
/// Count of leaves that have not been pruned away.
fn n_unpruned_leaves(&self) -> u64 {
    // Non-prunable backend: every leaf is still present.
    if !self.prunable {
        return pmmr::n_leaves(self.unpruned_size());
    }
    // Prunable backend: the leaf set tracks exactly the surviving leaves.
    self.leaf_set.len() as u64
}
fn data_as_temp_file(&self) -> Result<File, String> {
self.data_file
.as_temp_file()

View file

@ -34,7 +34,7 @@ fn pmmr_append() {
{
let mut backend = store::pmmr::PMMRBackend::new(
data_dir.to_string(),
true,
false,
false,
ProtocolVersion(1),
None,
@ -53,6 +53,7 @@ fn pmmr_append() {
// Note: 1-indexed PMMR API
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.n_unpruned_leaves(), 4);
assert_eq!(pmmr.get_data(1), Some(elems[0]));
assert_eq!(pmmr.get_data(2), Some(elems[1]));
@ -88,6 +89,8 @@ fn pmmr_append() {
// Note: 1-indexed PMMR API
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.n_unpruned_leaves(), 9);
// First pair of leaves.
assert_eq!(pmmr.get_data(1), Some(elems[0]));
assert_eq!(pmmr.get_data(2), Some(elems[1]));
@ -132,6 +135,7 @@ fn pmmr_compact_leaf_sibling() {
let mmr_size = load(0, &elems[..], &mut backend);
backend.sync().unwrap();
assert_eq!(backend.n_unpruned_leaves(), 19);
// On far left of the MMR -
// pos 1 and 2 are leaves (and siblings)
// the parent is pos 3
@ -159,6 +163,8 @@ fn pmmr_compact_leaf_sibling() {
{
let pmmr = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.n_unpruned_leaves(), 17);
// check that pos 1 is "removed"
assert_eq!(pmmr.get_hash(1), None);