Mirror of https://github.com/mimblewimble/grin.git, synced 2025-01-21 11:31:08 +03:00
Split wallet server queries into multiple queries + restore performance (#1013)
* mods to speed up restore a bit
* mods to speed up restore a bit
* performance improvements to wallet restore and split large server queries into multiple
parent 59664181e4
commit 93b648fbc0

2 changed files with 109 additions and 41 deletions
First changed file (refresh_missing_block_hashes, refresh_output_state):

@@ -82,7 +82,7 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
         wallet_outputs.len(),
     );
 
-    let mut id_params: Vec<String> = wallet_outputs
+    let id_params: Vec<String> = wallet_outputs
         .keys()
         .map(|commit| {
             let id = util::to_hex(commit.as_ref().to_vec());
@@ -92,9 +92,31 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
 
     let tip = get_tip_from_node(config)?;
 
+    let max_ids_in_query = 1000;
+    let mut current_index = 0;
+
+    let mut api_blocks: HashMap<pedersen::Commitment, api::BlockHeaderInfo> = HashMap::new();
+    let mut api_merkle_proofs: HashMap<pedersen::Commitment, MerkleProofWrapper> = HashMap::new();
+
+    // Split up into separate requests, to avoid hitting http limits
+    loop {
+        let q = id_params.clone();
+        let mut cur_params: Vec<String> = q.into_iter()
+            .skip(current_index)
+            .take(max_ids_in_query)
+            .collect();
+
+        if cur_params.len() == 0 {
+            break;
+        }
+
+        debug!(LOGGER, "Splitting query: {} ids", cur_params.len());
+
+        current_index = current_index + cur_params.len();
+
     let height_params = format!("start_height={}&end_height={}", 0, tip.height,);
     let mut query_params = vec![height_params];
-    query_params.append(&mut id_params);
+    query_params.append(&mut cur_params);
 
     let url = format!(
         "{}/v1/chain/outputs/byheight?{}",
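The hunk above caps each node query at max_ids_in_query = 1000 commitment ids and repeatedly skips past the ids already sent, so one oversized request becomes several small ones. A minimal standalone sketch of that splitting idea (the helper names, use of chunks(), and node address are illustrative assumptions, not the wallet's actual client code):

// Illustrative only: build one URL per batch of at most `max_ids_in_query`
// query parameters, mirroring the skip/take loop in the diff above.
fn build_url(base: &str, params: &[String]) -> String {
    // hypothetical endpoint string; the diff queries /v1/chain/outputs/byheight and /byids
    format!("{}/v1/chain/outputs/byids?{}", base, params.join("&"))
}

fn batched_urls(base: &str, id_params: &[String], max_ids_in_query: usize) -> Vec<String> {
    id_params
        .chunks(max_ids_in_query) // same effect as skip(current_index).take(max_ids_in_query)
        .map(|chunk| build_url(base, chunk))
        .collect()
}

fn main() {
    let ids: Vec<String> = (0..2500).map(|i| format!("id={:04x}", i)).collect();
    let urls = batched_urls("http://127.0.0.1:13413", &ids, 1000);
    assert_eq!(urls.len(), 3); // 2500 ids split into requests of 1000, 1000 and 500
    for url in &urls {
        println!("query string length: {} bytes", url.len());
    }
}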
@@ -103,8 +125,6 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
     );
     debug!(LOGGER, "{:?}", url);
 
-    let mut api_blocks: HashMap<pedersen::Commitment, api::BlockHeaderInfo> = HashMap::new();
-    let mut api_merkle_proofs: HashMap<pedersen::Commitment, MerkleProofWrapper> = HashMap::new();
     match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {
         Ok(blocks) => for block in blocks {
             for out in block.outputs {
@@ -121,6 +141,7 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
                 return Err(e).context(ErrorKind::Node)?;
             }
         }
+    }
 
     // now for each commit, find the output in the wallet and
     // the corresponding api output (if it exists)
@@ -177,7 +198,25 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
     // build a map of api outputs by commit so we can look them up efficiently
     let mut api_outputs: HashMap<pedersen::Commitment, api::Output> = HashMap::new();
 
-    let query_string = query_params.join("&");
+    let max_ids_in_query = 1000;
+    let mut current_index = 0;
+
+    // Split up into separate requests, to avoid hitting http limits
+    loop {
+        let q = query_params.clone();
+        let cur_params: Vec<String> = q.into_iter()
+            .skip(current_index)
+            .take(max_ids_in_query)
+            .collect();
+
+        if cur_params.len() == 0 {
+            break;
+        }
+
+        debug!(LOGGER, "Splitting query: {} ids", cur_params.len());
+
+        current_index = current_index + cur_params.len();
+        let query_string = cur_params.join("&");
 
     let url = format!(
         "{}/v1/chain/outputs/byids?{}",
@@ -191,10 +230,14 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
         Err(e) => {
             // if we got anything other than 200 back from server, don't attempt to refresh
             // the wallet data after
+            error!(
+                LOGGER,
+                "Error sending wallet refresh request to server: {:?}", e
+            );
             return Err(e).context(ErrorKind::Node)?;
         }
     };
+    }
     // now for each commit, find the output in the wallet and
     // the corresponding api output (if it exists)
     // and refresh it in-place in the wallet.

Second changed file (find_outputs_with_key, restore):
@@ -103,6 +103,7 @@ fn find_outputs_with_key(
     config: &WalletConfig,
     keychain: &Keychain,
     outputs: Vec<api::OutputPrintable>,
+    found_key_index: &mut Vec<u32>,
 ) -> Vec<
     (
         pedersen::Commitment,
@@ -151,7 +152,24 @@ fn find_outputs_with_key(
         }
         // we have a match, now check through our key iterations to find a partial match
         let mut found = false;
-        for i in 1..max_derivations {
+        let mut start_index = 1;
+
+        // TODO: This assumption only holds with current wallet software assuming
+        // wallet doesn't go back and re-use gaps in its key index, ie. every
+        // new key index produced is always greater than the previous max key index
+        if let Some(m) = found_key_index.iter().max() {
+            start_index = *m as usize + 1;
+        }
+
+        for i in start_index..max_derivations {
+            // much faster than calling EC functions for each found key
+            // Shouldn't be needed if assumtion about wallet key 'gaps' above
+            // holds.. otherwise this is a good optimisation.. perhaps
+            // provide a command line switch
+            /*if found_key_index.contains(&(i as u32)) {
+                continue;
+            }*/
             let key_id = &keychain.derive_key_id(i as u32).unwrap();
             if !message.compare_bf_first_8(key_id) {
                 continue;
@@ -171,7 +189,7 @@ fn find_outputs_with_key(
                 LOGGER,
                 "Output found: {:?}, key_index: {:?}", output.commit, i,
             );
+            found_key_index.push(i as u32);
             // add it to result set here
             let commit_id = output.commit.0;
 
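Together these two hunks form the restore speed-up: every derivation index that matches an output is pushed into found_key_index, and the scan for the next output resumes just past the highest claimed index instead of from 1. This is only valid under the TODO's assumption that the wallet never re-uses gaps in its key index. A self-contained sketch of that bookkeeping (names other than found_key_index are illustrative):

// Resume the derivation scan just past the highest key index already claimed.
fn next_start_index(found_key_index: &[u32]) -> usize {
    match found_key_index.iter().max() {
        Some(m) => *m as usize + 1, // skip everything at or below the max claimed index
        None => 1,                  // nothing claimed yet: start from the first derivation
    }
}

fn main() {
    let mut found_key_index: Vec<u32> = vec![];
    assert_eq!(next_start_index(&found_key_index), 1);

    // pretend derivations 3 and 7 matched wallet outputs
    found_key_index.push(3);
    found_key_index.push(7);
    assert_eq!(next_start_index(&found_key_index), 8);
}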
@@ -242,6 +260,9 @@ pub fn restore(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error>
 
     let batch_size = 1000;
     let mut start_index = 1;
+    // Keep a set of keys we've already claimed (cause it's far faster than
+    // deriving a key for each one)
+    let mut found_key_index: Vec<u32> = vec![];
     // this will start here, then lower as outputs are found, moving backwards on
     // the chain
     loop {
@@ -255,8 +276,12 @@ pub fn restore(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error>
         );
 
         let _ = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
-            let result_vec =
-                find_outputs_with_key(config, keychain, output_listing.outputs.clone());
+            let result_vec = find_outputs_with_key(
+                config,
+                keychain,
+                output_listing.outputs.clone(),
+                &mut found_key_index,
+            );
             if result_vec.len() > 0 {
                 for output in result_vec.clone() {
                     let root_key_id = keychain.root_key_id();
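Note how the two files connect: restore() creates found_key_index once, before its batch loop, and passes it by mutable reference into find_outputs_with_key() for every batch of outputs it fetches, so key indices claimed while scanning earlier batches raise the starting derivation index for all later batches instead of re-deriving and re-checking the same keys each time.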