move all PIBD-related constants into pibd_params modules (#3711)

Yeastplume 2022-06-03 12:05:36 +01:00 committed by GitHub
parent 41f3aafb9a
commit a441b78891
4 changed files with 61 additions and 10 deletions

chain/src/lib.rs

@@ -38,6 +38,7 @@ use grin_util as util;
 mod chain;
 mod error;
 pub mod linked_list;
+pub mod pibd_params;
 pub mod pipe;
 pub mod store;
 pub mod txhashset;

chain/src/pibd_params.rs (new file, 45 lines)

@@ -0,0 +1,45 @@
+// Copyright 2022 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Set of static definitions for all parameters related to PIBD and desegmentation.
+//! Note these are for experimentation via compilation, not meant to be exposed as
+//! configuration parameters anywhere.
+
+/// Bitmap segment height assumed for requests and segment calculation
+pub const BITMAP_SEGMENT_HEIGHT: u8 = 9;
+
+/// Output segment height assumed for requests and segment calculation
+pub const OUTPUT_SEGMENT_HEIGHT: u8 = 11;
+
+/// Rangeproof segment height assumed for requests and segment calculation
+pub const RANGEPROOF_SEGMENT_HEIGHT: u8 = 11;
+
+/// Kernel segment height assumed for requests and segment calculation
+pub const KERNEL_SEGMENT_HEIGHT: u8 = 11;
+
+/// Maximum number of received segments to cache (across all trees) before we stop requesting others
+pub const MAX_CACHED_SEGMENTS: usize = 15;
+
+/// How long the state sync should wait after requesting a segment from a peer before
+/// deciding the segment isn't going to arrive. The syncer will then re-request the segment
+pub const SEGMENT_REQUEST_TIMEOUT_SECS: i64 = 60;
+
+/// Number of simultaneous requests for segments we should make. Note this is currently
+/// divisible by 3 to try and evenly spread requests among the 3 main MMRs (bitmap segments
+/// will always be requested first)
+pub const SEGMENT_REQUEST_COUNT: usize = 15;
+
+/// If the syncer hasn't seen a max work peer that supports PIBD in this number of seconds,
+/// give up and revert back to the txhashset.zip download method
+pub const TXHASHSET_ZIP_FALLBACK_TIME_SECS: i64 = 60;
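For context on what these heights mean (an aside, not part of the commit): a segment of height h spans 2^h PMMR leaves, so OUTPUT_SEGMENT_HEIGHT = 11 yields 2048 outputs per requested segment. A minimal Rust sketch of the resulting segment count, assuming that 2^height relationship:

// Sketch only: a segment of height `h` spans 2^h leaves, so an MMR with
// `leaf_count` leaves splits into ceil(leaf_count / 2^h) segments.
fn segment_count(leaf_count: u64, segment_height: u8) -> u64 {
    let leaves_per_segment = 1u64 << segment_height;
    (leaf_count + leaves_per_segment - 1) / leaves_per_segment
}

fn main() {
    // With OUTPUT_SEGMENT_HEIGHT = 11: ceil(10_000_000 / 2048) = 4883 segments.
    assert_eq!(segment_count(10_000_000, 11), 4883);
}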

chain/src/txhashset/desegmenter.rs

@@ -30,6 +30,7 @@ use crate::util::secp::pedersen::RangeProof;
 use crate::util::{RwLock, StopState};
 use crate::SyncState;
+use crate::pibd_params;
 use crate::store;
 use crate::txhashset;
@@ -86,10 +87,10 @@ impl Desegmenter {
 store,
 genesis,
 bitmap_accumulator: BitmapAccumulator::new(),
-default_bitmap_segment_height: 9,
-default_output_segment_height: 11,
-default_rangeproof_segment_height: 11,
-default_kernel_segment_height: 11,
+default_bitmap_segment_height: pibd_params::BITMAP_SEGMENT_HEIGHT,
+default_output_segment_height: pibd_params::OUTPUT_SEGMENT_HEIGHT,
+default_rangeproof_segment_height: pibd_params::RANGEPROOF_SEGMENT_HEIGHT,
+default_kernel_segment_height: pibd_params::KERNEL_SEGMENT_HEIGHT,
 bitmap_segment_cache: vec![],
 output_segment_cache: vec![],
 rangeproof_segment_cache: vec![],
@@ -98,7 +99,7 @@ impl Desegmenter {
 bitmap_mmr_leaf_count: 0,
 bitmap_mmr_size: 0,
-max_cached_segments: 15,
+max_cached_segments: pibd_params::MAX_CACHED_SEGMENTS,
 bitmap_cache: None,
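As an aside, the MAX_CACHED_SEGMENTS cap bounds memory during sync: segments received out of order wait in these per-tree caches until they can be applied. A hypothetical sketch of the gating (illustrative names, not grin's actual API):

// Hypothetical sketch (illustrative names, not grin's actual API) of how a
// shared cap like MAX_CACHED_SEGMENTS gates further segment requests.
const MAX_CACHED_SEGMENTS: usize = 15;

#[derive(Default)]
struct SegmentCaches {
    bitmap: Vec<u64>,
    output: Vec<u64>,
    rangeproof: Vec<u64>,
    kernel: Vec<u64>,
}

impl SegmentCaches {
    /// Total segments buffered across all trees.
    fn cached(&self) -> usize {
        self.bitmap.len() + self.output.len() + self.rangeproof.len() + self.kernel.len()
    }

    /// Only request more segments while the shared cache has room; cached
    /// segments free up as they are validated and applied in order.
    fn can_request_more(&self) -> bool {
        self.cached() < MAX_CACHED_SEGMENTS
    }
}

fn main() {
    let caches = SegmentCaches::default();
    assert!(caches.can_request_more());
}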

servers/src/grin/sync/state_sync.rs

@@ -16,7 +16,7 @@ use chrono::prelude::{DateTime, Utc};
 use chrono::Duration;
 use std::sync::Arc;
-use crate::chain::{self, SyncState, SyncStatus};
+use crate::chain::{self, pibd_params, SyncState, SyncStatus};
 use crate::core::core::{hash::Hashed, pmmr::segment::SegmentType};
 use crate::core::global;
 use crate::core::pow::Difficulty;
@@ -260,7 +260,8 @@ impl StateSync {
 // Remove stale requests; if we haven't received the segment within a minute, re-request
 // TODO: verify timing
-self.sync_state.remove_stale_pibd_requests(60);
+self.sync_state
+	.remove_stale_pibd_requests(pibd_params::SEGMENT_REQUEST_TIMEOUT_SECS);
 // Apply segments... TODO: figure out how this should be called, might
 // need to be a separate thread.
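The idea behind the timeout, sketched below with illustrative names (not the actual SyncState internals): each outstanding request carries a timestamp, and anything older than SEGMENT_REQUEST_TIMEOUT_SECS is dropped so the segment can be re-requested, possibly from a different peer.

use chrono::{DateTime, Duration, Utc};
use std::collections::HashMap;

// Hypothetical sketch of the pruning performed by remove_stale_pibd_requests;
// the tracker and its fields are illustrative, not grin's actual types.
struct RequestTracker {
    /// segment identifier -> time the request was sent
    outstanding: HashMap<String, DateTime<Utc>>,
}

impl RequestTracker {
    /// Forget requests older than `timeout_secs` so the syncer re-requests
    /// those segments, possibly from different peers.
    fn remove_stale(&mut self, timeout_secs: i64) {
        let cutoff = Utc::now() - Duration::seconds(timeout_secs);
        self.outstanding.retain(|_, sent| *sent > cutoff);
    }
}

fn main() {
    let mut tracker = RequestTracker { outstanding: HashMap::new() };
    tracker.outstanding.insert("output_segment_0".into(), Utc::now());
    tracker.remove_stale(60); // SEGMENT_REQUEST_TIMEOUT_SECS
    assert_eq!(tracker.outstanding.len(), 1); // fresh request survives
}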
@@ -286,7 +287,7 @@ impl StateSync {
 // Figure out the next segments we need
 // (15 is divisible by 3, to try and evenly spread the requests among the 3
 // main pmmrs. Bitmap segments will always be requested first)
-next_segment_ids = d.next_desired_segments(15);
+next_segment_ids = d.next_desired_segments(pibd_params::SEGMENT_REQUEST_COUNT);
 }
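A hypothetical sketch of that "bitmap first, then share the budget" planning; the real logic lives in the Desegmenter's next_desired_segments, and the names below are illustrative:

// Hypothetical request planning: the output bitmap defines which leaves
// exist, so it is fetched before the three data MMRs share the budget.
const SEGMENT_REQUEST_COUNT: usize = 15;

#[derive(Clone, Copy, Debug, PartialEq)]
enum SegType { Bitmap, Output, RangeProof, Kernel }

fn plan_requests(bitmap_done: bool, missing: &[(SegType, u64)]) -> Vec<(SegType, u64)> {
    missing
        .iter()
        // until the bitmap is complete, only bitmap segments qualify
        .filter(|(t, _)| bitmap_done || *t == SegType::Bitmap)
        .take(SEGMENT_REQUEST_COUNT) // 15: divisible by 3 for an even split
        .copied()
        .collect()
}

fn main() {
    let missing = vec![(SegType::Bitmap, 0), (SegType::Output, 0), (SegType::Kernel, 0)];
    // while the bitmap is incomplete, only bitmap segments are requested
    assert_eq!(plan_requests(false, &missing).len(), 1);
}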
// For each segment, pick a desirable peer and send message
@@ -320,9 +321,12 @@ impl StateSync {
 if let None = self.earliest_zero_pibd_peer_time {
 self.set_earliest_zero_pibd_peer_time(Some(Utc::now()));
 }
-if self.earliest_zero_pibd_peer_time.unwrap() + Duration::seconds(60) < Utc::now() {
+if self.earliest_zero_pibd_peer_time.unwrap()
+	+ Duration::seconds(pibd_params::TXHASHSET_ZIP_FALLBACK_TIME_SECS)
+	< Utc::now()
+{
 // random abort test
-info!("No PIBD-enabled max-difficulty peers for the past minute - Aborting PIBD and falling back to TxHashset.zip download");
+info!("No PIBD-enabled max-difficulty peers for the past {} seconds - Aborting PIBD and falling back to TxHashset.zip download", pibd_params::TXHASHSET_ZIP_FALLBACK_TIME_SECS);
 self.sync_state
 .update_pibd_progress(true, true, 0, 1, &archive_header);
 self.sync_state
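The fallback logic above follows a simple timer pattern; a hypothetical, self-contained sketch (names are illustrative, not the actual StateSync fields):

use chrono::{DateTime, Duration, Utc};

// Record the first moment no usable PIBD peer is seen, and abandon PIBD for
// the txhashset.zip download once the fallback window elapses.
const TXHASHSET_ZIP_FALLBACK_TIME_SECS: i64 = 60;

struct FallbackTimer {
    earliest_zero_pibd_peer_time: Option<DateTime<Utc>>,
}

impl FallbackTimer {
    /// Call each sync tick with the current count of PIBD-capable max-work
    /// peers; returns true once PIBD should be abandoned.
    fn should_fall_back(&mut self, pibd_peer_count: usize) -> bool {
        if pibd_peer_count > 0 {
            self.earliest_zero_pibd_peer_time = None; // peer found; reset
            return false;
        }
        let start = *self.earliest_zero_pibd_peer_time.get_or_insert_with(Utc::now);
        start + Duration::seconds(TXHASHSET_ZIP_FALLBACK_TIME_SECS) < Utc::now()
    }
}

fn main() {
    let mut timer = FallbackTimer { earliest_zero_pibd_peer_time: None };
    assert!(!timer.should_fall_back(0)); // window just started
}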