bench-cli: Support JSON output (#10771)

* Add dependencies

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Make benchmark results serializable

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Add `--json[-file]` options

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Enable JSON output

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Use specific serde version

Polkadot does not compile otherwise.

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Review comments

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Review comment: fs::write

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
This commit is contained in:
Oliver Tale-Yazdi
2022-02-02 15:25:13 +01:00
committed by GitHub
parent 46f4396657
commit 99fae0cd57
6 changed files with 84 additions and 46 deletions
@@ -35,7 +35,7 @@ use sp_externalities::Extensions;
use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
use sp_state_machine::StateMachine;
use std::{fmt::Debug, sync::Arc, time};
use std::{fmt::Debug, fs, sync::Arc, time};
// This takes multiple benchmark batches and combines all the results where the pallet, instance,
// and benchmark are the same.
@@ -357,55 +357,61 @@ impl BenchmarkCmd {
// are together.
let batches: Vec<BenchmarkBatchSplitResults> = combine_batches(batches, batches_db);
// Create the weights.rs file.
if let Some(output_path) = &self.output {
crate::writer::write_results(&batches, &storage_info, output_path, self)?;
}
// Jsonify the result and write it to a file or stdout if desired.
if !self.jsonify(&batches)? {
// Print the summary only if `jsonify` did not write to stdout.
self.print_summary(&batches, &storage_info)
}
Ok(())
}
/// Serializes the benchmark batches to pretty-printed JSON and writes the
/// result either to stdout (`--json`) or to a file (`--json-file <path>`).
///
/// Returns `Ok(true)` iff the JSON was written to stdout, so the caller can
/// skip printing the human-readable summary in that case; `Ok(false)` when
/// nothing was printed to stdout (no JSON requested, or written to a file).
fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<bool> {
	// Only do the (potentially large) serialization when JSON output was
	// actually requested on the command line.
	if self.json_output || self.json_file.is_some() {
		let json = serde_json::to_string_pretty(batches)
			.map_err(|e| format!("Serializing into JSON: {:?}", e))?;

		if let Some(path) = &self.json_file {
			fs::write(path, json)?;
		} else {
			// JSON goes to stdout; tell the caller to suppress the summary.
			println!("{}", json);
			return Ok(true)
		}
	}

	Ok(false)
}
/// Prints the results as human-readable summary without raw timing data.
fn print_summary(
&self,
batches: &Vec<BenchmarkBatchSplitResults>,
storage_info: &Vec<StorageInfo>,
) {
for batch in batches.into_iter() {
// Print benchmark metadata
println!(
"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}",
String::from_utf8(batch.pallet).expect("Encoded from String; qed"),
String::from_utf8(batch.benchmark).expect("Encoded from String; qed"),
self.lowest_range_values,
self.highest_range_values,
self.steps,
self.repeat,
);
"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}",
String::from_utf8(batch.pallet.clone()).expect("Encoded from String; qed"),
String::from_utf8(batch.benchmark.clone()).expect("Encoded from String; qed"),
self.lowest_range_values,
self.highest_range_values,
self.steps,
self.repeat,
);
// Skip raw data + analysis if there are no results
if batch.time_results.is_empty() {
continue
}
if self.raw_data {
// Print the table header
batch.time_results[0]
.components
.iter()
.for_each(|param| print!("{:?},", param.0));
print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n");
// Print the values
batch.time_results.iter().for_each(|result| {
let parameters = &result.components;
parameters.iter().for_each(|param| print!("{:?},", param.1));
// Print extrinsic time and storage root time
print!(
"{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n",
result.extrinsic_time,
result.storage_root_time,
result.reads,
result.repeat_reads,
result.writes,
result.repeat_writes,
result.proof_size,
);
});
println!();
}
if !self.no_storage_info {
let mut comments: Vec<String> = Default::default();
crate::writer::add_storage_comments(
@@ -460,8 +466,6 @@ impl BenchmarkCmd {
println!("");
}
}
Ok(())
}
}
@@ -19,7 +19,7 @@ mod command;
mod writer;
use sc_cli::{ExecutionStrategy, WasmExecutionMethod};
use std::fmt::Debug;
use std::{fmt::Debug, path::PathBuf};
// Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be
// used like crate names with `_`
@@ -60,9 +60,13 @@ pub struct BenchmarkCmd {
#[clap(long, default_value = "1")]
pub external_repeat: u32,
/// Print the raw results.
#[clap(long = "raw")]
pub raw_data: bool,
/// Print the raw results in JSON format.
#[clap(long = "json")]
pub json_output: bool,
/// Write the raw results in JSON format into the given file.
#[clap(long, conflicts_with = "json-output")]
pub json_file: Option<PathBuf>,
/// Don't print the median-slopes linear regression analysis.
#[clap(long)]
@@ -74,15 +78,15 @@ pub struct BenchmarkCmd {
/// Output the benchmarks to a Rust file at the given path.
#[clap(long)]
pub output: Option<std::path::PathBuf>,
pub output: Option<PathBuf>,
/// Add a header file to your outputted benchmarks
#[clap(long)]
pub header: Option<std::path::PathBuf>,
pub header: Option<PathBuf>,
/// Path to Handlebars template file used for outputting benchmark results. (Optional)
#[clap(long)]
pub template: Option<std::path::PathBuf>,
pub template: Option<PathBuf>,
/// Which analysis function to use when outputting benchmarks:
/// * min-squares (default)