GET /v1/chain/validate (#832)

chain.validate() now takes skip_rproofs=true|false
Antioch Peverell 2018-03-21 08:28:05 -04:00 committed by GitHub
parent f0a3479ea3
commit c595a4b35c
5 changed files with 73 additions and 73 deletions


@@ -405,6 +405,20 @@ impl Handler for ChainHandler {
     }
 }
 
+/// Chain validation handler.
+/// GET /v1/chain/validate
+pub struct ChainValidationHandler {
+    pub chain: Weak<chain::Chain>,
+}
+
+impl Handler for ChainValidationHandler {
+    fn handle(&self, _req: &mut Request) -> IronResult<Response> {
+        // TODO - read skip_rproofs from query params
+        w(&self.chain).validate(true).unwrap();
+        Ok(Response::with((status::Ok, "{}")))
+    }
+}
+
 /// Chain compaction handler. Trigger a compaction of the chain state to regain
 /// storage space.
 /// GET /v1/chain/compact
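The new ChainValidationHandler hard-codes skip_rproofs to true, leaving the query-string plumbing as a TODO. A minimal sketch of what that parsing could look like, operating on the raw query string of the request URL (parse_skip_rproofs is a hypothetical helper, not part of this commit):

    // Hypothetical helper: read skip_rproofs from a raw query string such as
    // "skip_rproofs=false", defaulting to true when absent or malformed.
    fn parse_skip_rproofs(query: Option<&str>) -> bool {
        query
            .and_then(|q| {
                q.split('&')
                    .find(|kv| kv.starts_with("skip_rproofs="))
                    .and_then(|kv| kv.splitn(2, '=').nth(1))
                    .and_then(|v| v.parse::<bool>().ok())
            })
            .unwrap_or(true)
    }

With something like this in place, handle() could call w(&self.chain).validate(parse_skip_rproofs(query)), keeping true as the default so the cheap check remains the endpoint's out-of-the-box behavior.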
@@ -636,6 +650,9 @@ pub fn start_rest_apis<T>(
     let chain_compact_handler = ChainCompactHandler {
         chain: chain.clone(),
     };
+    let chain_validation_handler = ChainValidationHandler {
+        chain: chain.clone(),
+    };
     let status_handler = StatusHandler {
         chain: chain.clone(),
         peers: peers.clone(),
@@ -667,6 +684,7 @@ pub fn start_rest_apis<T>(
         "get blocks".to_string(),
         "get chain".to_string(),
         "get chain/compact".to_string(),
+        "get chain/validate".to_string(),
         "get chain/outputs".to_string(),
         "get status".to_string(),
         "get txhashset/roots".to_string(),
@@ -688,6 +706,7 @@ pub fn start_rest_apis<T>(
         blocks: get "/blocks/*" => block_handler,
         chain_tip: get "/chain" => chain_tip_handler,
         chain_compact: get "/chain/compact" => chain_compact_handler,
+        chain_validate: get "/chain/validate" => chain_validation_handler,
         chain_outputs: get "/chain/outputs/*" => output_handler,
         status: get "/status" => status_handler,
         txhashset_roots: get "/txhashset/*" => txhashset_handler,
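Once the route is registered, the endpoint can be exercised with any HTTP client. A bare-bones smoke test using only the Rust standard library (this assumes a local node with the API listening on 127.0.0.1:13413, the testnet default at the time; adjust to your node's api_http_addr):

    use std::io::{Read, Write};
    use std::net::TcpStream;

    fn main() -> std::io::Result<()> {
        // Plain HTTP/1.0 so the node closes the connection when done.
        let mut stream = TcpStream::connect("127.0.0.1:13413")?;
        stream.write_all(b"GET /v1/chain/validate HTTP/1.0\r\nHost: 127.0.0.1\r\n\r\n")?;
        let mut response = String::new();
        stream.read_to_string(&mut response)?;
        // A chain that validates cleanly answers 200 OK with an empty JSON body: {}
        println!("{}", response);
        Ok(())
    }

Note that the handler unwraps the validation result, so a chain state that fails validation currently surfaces as a panic in the handler rather than an error response.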


@@ -412,7 +412,7 @@ impl Chain {
     }
 
     /// Validate the current chain state.
-    pub fn validate(&self) -> Result<(), Error> {
+    pub fn validate(&self, skip_rproofs: bool) -> Result<(), Error> {
         let header = self.store.head_header()?;
         let mut txhashset = self.txhashset.write().unwrap();
@@ -423,7 +423,7 @@ impl Chain {
         // Force rollback first as this is a "read-only" extension.
         txhashset::extending(&mut txhashset, |extension| {
             extension.force_rollback();
-            extension.validate(&header)
+            extension.validate(&header, skip_rproofs)
         })
     }
@@ -532,7 +532,7 @@ impl Chain {
         let mut txhashset =
             txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), None)?;
         txhashset::extending(&mut txhashset, |extension| {
-            extension.validate(&header)?;
+            extension.validate(&header, false)?;
             // TODO validate kernels and their sums with Outputs
             extension.rebuild_index()?;
             Ok(())
@@ -572,7 +572,7 @@ impl Chain {
         // First check we can successfully validate the full chain state.
         // If we cannot then do not attempt to compact.
         // This should not be required long term - but doing this for debug purposes.
-        self.validate()?;
+        self.validate(true)?;
 
         // Now compact the txhashset via the extension.
         {
@@ -588,7 +588,7 @@ impl Chain {
         // Now check we can still successfully validate the chain state after
         // compacting.
-        self.validate()?;
+        self.validate(true)?;
 
         // we need to be careful here in testing as 20 blocks is not that long
         // in wall clock time
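The call sites above pick the flag deliberately: the txhashset rebuild path validates in full, while the debug-oriented checks wrapped around compaction skip the expensive pass. From a caller's point of view (a sketch, with Chain and Error as in chain.rs above):

    // Sketch: how a caller chooses the depth of validation.
    fn check_chain(chain: &Chain) -> Result<(), Error> {
        // Cheaper structural check: rewinds and verifies the txhashset
        // against the header and the kernel/output commitment sums,
        // skipping rangeproofs.
        chain.validate(true)?;

        // Full check: additionally verifies the rangeproof of every
        // output in the txhashset, which dominates validation cost.
        chain.validate(false)
    }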


@@ -31,7 +31,7 @@ use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdenti
 use core::core::pmmr::{self, MerkleProof, PMMR};
 use core::global;
 use core::core::hash::{Hash, Hashed};
-use core::ser::{self, PMMRIndexHashable, PMMRable};
+use core::ser::{PMMRIndexHashable, PMMRable};
 use grin_store;
 use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
@@ -554,7 +554,7 @@ impl<'a> Extension<'a> {
     /// Validate the txhashset state against the provided block header.
     /// Rewinds to that pos for the header first so we see a consistent
     /// view of the world.
-    pub fn validate(&mut self, header: &BlockHeader) -> Result<(), Error> {
+    pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
         // first rewind to the provided header
         &self.rewind(header)?;
@@ -599,9 +599,11 @@ impl<'a> Extension<'a> {
             }
         }
 
-        // now verify the rangeproof for each output included in the sum above
-        // this is an expensive operation
-        self.verify_rangeproofs()?;
+        // now verify the rangeproof for each output in the sum above
+        // this is an expensive operation (only verified if requested)
+        if !skip_rproofs {
+            self.verify_rangeproofs()?;
+        }
 
         Ok(())
     }
@@ -679,40 +681,24 @@ impl<'a> Extension<'a> {
     fn sum_kernels(&self, kernel_offset: Option<Commitment>) -> Result<Commitment, Error> {
         let now = Instant::now();
 
-        // make sure we have the right count of kernels using the MMR, the storage
-        // file may have a few more
-        let mmr_sz = self.kernel_pmmr.unpruned_size();
-        let count = pmmr::n_leaves(mmr_sz);
+        let mut commitments = vec![];
+        if let Some(offset) = kernel_offset {
+            commitments.push(offset);
+        }
 
-        let mut kernel_file = File::open(self.kernel_pmmr.data_file_path())?;
-        let first: TxKernel = ser::deserialize(&mut kernel_file)?;
-        first.verify()?;
-        let mut sum_kernel = first.excess;
+        for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
+            if pmmr::is_leaf(n) {
+                if let Some((_, Some(kernel))) = self.kernel_pmmr.get(n, true) {
+                    kernel.verify()?;
+                    commitments.push(kernel.excess.clone());
+                }
+            }
+        }
 
         let secp = static_secp_instance();
-        let mut kern_count = 1;
-        loop {
-            match ser::deserialize::<TxKernel>(&mut kernel_file) {
-                Ok(kernel) => {
-                    kernel.verify()?;
-                    let secp = secp.lock().unwrap();
-                    sum_kernel = secp.commit_sum(vec![sum_kernel, kernel.excess], vec![])?;
-                    kern_count += 1;
-                    if kern_count == count {
-                        break;
-                    }
-                }
-                Err(_) => break,
-            }
-        }
-
-        // now apply the kernel offset if we have one
-        {
-            let secp = secp.lock().unwrap();
-            if let Some(kernel_offset) = kernel_offset {
-                sum_kernel = secp.commit_sum(vec![sum_kernel, kernel_offset], vec![])?;
-            }
-        }
+        let secp = secp.lock().unwrap();
+        let kern_count = commitments.len();
+        let sum_kernel = secp.commit_sum(commitments, vec![])?;
 
         debug!(
             LOGGER,
@@ -730,13 +716,12 @@ impl<'a> Extension<'a> {
         let mut proof_count = 0;
         for n in 1..self.output_pmmr.unpruned_size() + 1 {
             if pmmr::is_leaf(n) {
-                if let Some((_, output)) = self.output_pmmr.get(n, true) {
-                    let out = output.expect("not a leaf node");
-                    match self.rproof_pmmr.get(n, true) {
-                        Some((_, Some(rp))) => out.to_output(rp).verify_proof()?,
-                        _res => {
-                            return Err(Error::OutputNotFound);
-                        }
-                    }
+                if let Some((_, Some(out))) = self.output_pmmr.get(n, true) {
+                    if let Some((_, Some(rp))) = self.rproof_pmmr.get(n, true) {
+                        out.to_output(rp).verify_proof()?;
+                    } else {
+                        // TODO - rangeproof not found
+                        return Err(Error::OutputNotFound);
+                    }
                     proof_count += 1;
                 }
@@ -756,33 +741,29 @@ impl<'a> Extension<'a> {
     fn sum_outputs(&self) -> Result<Commitment, Error> {
         let now = Instant::now();
-        let mut sum_output = None;
-        let mut output_count = 0;
-        let secp = static_secp_instance();
+        let mut commitments = vec![];
         for n in 1..self.output_pmmr.unpruned_size() + 1 {
             if pmmr::is_leaf(n) {
-                if let Some((_, output)) = self.output_pmmr.get(n, true) {
-                    let out = output.expect("not a leaf node");
-                    let commit = out.commit.clone();
-                    if let None = sum_output {
-                        sum_output = Some(commit);
-                    } else {
-                        let secp = secp.lock().unwrap();
-                        sum_output =
-                            Some(secp.commit_sum(vec![sum_output.unwrap(), commit], vec![])?);
-                    }
-                    output_count += 1;
+                if let Some((_, Some(out))) = self.output_pmmr.get(n, true) {
+                    commitments.push(out.commit.clone());
                 }
             }
         }
+
+        let secp = static_secp_instance();
+        let secp = secp.lock().unwrap();
+        let commit_count = commitments.len();
+        let sum_output = secp.commit_sum(commitments, vec![])?;
+
         debug!(
             LOGGER,
             "Summed {} Outputs, pmmr size {}, took {}s",
-            output_count,
+            commit_count,
             self.output_pmmr.unpruned_size(),
             now.elapsed().as_secs(),
         );
-        Ok(sum_output.unwrap())
+        Ok(sum_output)
     }
 }
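With this change sum_kernels and sum_outputs share one shape: gather every leaf commitment into a Vec, then hand the whole vector to a single commit_sum call under one lock, instead of folding two commitments at a time with a lock acquisition per iteration. Schematically, with integers standing in for Pedersen commitments (the real code combines them with secp.commit_sum):

    // Before (schematic): fold one element at a time, re-locking per step.
    fn sum_fold(values: &[u64]) -> u64 {
        let mut sum = 0;
        for v in values {
            sum += v; // real code: secp.commit_sum(vec![sum, *v], vec![])? per step
        }
        sum
    }

    // After (schematic): collect everything, then combine in one call.
    fn sum_batch(values: &[u64]) -> u64 {
        values.iter().sum() // real code: one secp.commit_sum(commitments, vec![])?
    }

This also simplifies the kernel offset handling: the offset becomes just the first commitment pushed onto the vector, so the separate offset-summing block disappears.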


@@ -118,13 +118,13 @@ fn data_files() {
                 .expect("previous block pmmr file data doesn't exist");
 
             println!("Cur_pmmr_md: {:?}", cur_pmmr_md);
-            chain.validate().unwrap();
+            chain.validate(false).unwrap();
        }
     }
 
     // Now reload the chain, should have valid indices
     {
         let chain = reload_chain(chain_dir);
-        chain.validate().unwrap();
+        chain.validate(false).unwrap();
     }
 }


@@ -112,7 +112,7 @@ fn mine_empty_chain() {
         let header_by_height = chain.get_header_by_height(n).unwrap();
         assert_eq!(header_by_height.hash(), bhash);
 
-        chain.validate().unwrap();
+        chain.validate(false).unwrap();
     }
 }
@@ -295,7 +295,7 @@ fn spend_in_fork_and_compact() {
     chain
         .process_block(next.clone(), chain::Options::SKIP_POW)
         .unwrap();
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
 
     println!("tx 1 processed, should have 6 outputs or 396 bytes in file, first skipped");
@@ -311,7 +311,7 @@ fn spend_in_fork_and_compact() {
     let next = prepare_block_tx(&kc, &prev_main, &chain, 9, vec![&tx2]);
     let prev_main = next.header.clone();
     chain.process_block(next, chain::Options::SKIP_POW).unwrap();
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
 
     println!("tx 2 processed");
     /* panic!("Stop"); */
@@ -326,7 +326,7 @@ fn spend_in_fork_and_compact() {
     chain
         .process_block(fork_next, chain::Options::SKIP_POW)
         .unwrap();
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
 
     // check state
     let head = chain.head_header().unwrap();
@@ -349,7 +349,7 @@ fn spend_in_fork_and_compact() {
     chain
         .process_block(fork_next, chain::Options::SKIP_POW)
         .unwrap();
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
 
     // check state
     let head = chain.head_header().unwrap();
@@ -374,9 +374,9 @@ fn spend_in_fork_and_compact() {
         chain.process_block(next, chain::Options::SKIP_POW).unwrap();
     }
 
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
     chain.compact().unwrap();
-    chain.validate().unwrap();
+    chain.validate(false).unwrap();
 }
 
 fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {