Make Benchmark Output Analysis Function Configurable (#8228)

* Integrate `output-analysis`

* fix test

* use default

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* Update frame/system/src/weights.rs

* cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs --output-analysis=max

* Update frame/system/src/weights.rs

* don't discard value_dist and model

* feedback

Co-authored-by: Parity Benchmarking Bot <admin@parity.io>
Shawn Tabrizi
2021-03-02 11:56:51 -04:00
committed by GitHub
parent 7c1dd95e50
commit cdc59db5b7
5 changed files with 131 additions and 31 deletions
@@ -72,6 +72,13 @@ pub struct BenchmarkCmd {
 	#[structopt(long)]
 	pub template: Option<std::path::PathBuf>,

+	/// Which analysis function to use when outputting benchmarks:
+	/// * min-squares (default)
+	/// * median-slopes
+	/// * max (max of min-squares and median-slopes for each value)
+	#[structopt(long)]
+	pub output_analysis: Option<String>,
+
 	/// Set the heap pages while running benchmarks.
 	#[structopt(long)]
 	pub heap_pages: Option<u64>,
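The flag arrives as a plain `Option<String>` and is only converted to an `AnalysisChoice` later via `try_into` (see the `write_results` hunk below). The `frame_benchmarking` side is not part of this diff, so the following is just a sketch of what the enum and conversion could look like: the variant names follow the doc comment above, while the accepted strings and the error message are assumptions.

```rust
use core::convert::TryFrom;

// Sketch only: AnalysisChoice actually lives in frame_benchmarking and its
// definition is not shown in this diff. The string spellings below are
// inferred from the doc comment on `output_analysis`.
#[derive(Debug, Clone, Copy)]
pub enum AnalysisChoice {
	/// Fit a linear regression with least squares (the documented default).
	MinSquares,
	/// Use the median of the per-component slopes.
	MedianSlopes,
	/// Take the max of min-squares and median-slopes for each value.
	Max,
}

impl TryFrom<Option<String>> for AnalysisChoice {
	type Error = &'static str;

	fn try_from(s: Option<String>) -> Result<Self, Self::Error> {
		match s.as_deref() {
			// No flag given: fall back to the documented default.
			None => Ok(AnalysisChoice::MinSquares),
			Some("min-squares") => Ok(AnalysisChoice::MinSquares),
			Some("median-slopes") => Ok(AnalysisChoice::MedianSlopes),
			Some("max") => Ok(AnalysisChoice::Max),
			Some(_) => Err("invalid analysis string"),
		}
	}
}
```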
@@ -20,12 +20,13 @@
 use std::collections::HashMap;
 use std::fs;
 use std::path::PathBuf;
+use core::convert::TryInto;
 use serde::Serialize;
 use inflector::Inflector;
 use crate::BenchmarkCmd;
-use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, RegressionModel};
+use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, AnalysisChoice, RegressionModel};
 use sp_runtime::traits::Zero;

 const VERSION: &'static str = env!("CARGO_PKG_VERSION");
@@ -71,6 +72,7 @@ struct CmdData {
 	wasm_execution: String,
 	chain: String,
 	db_cache: u32,
+	analysis_choice: String,
 }

 // This encodes the component name and whether that component is used.
@@ -104,7 +106,10 @@ fn io_error(s: &str) -> std::io::Error {
 // p1 -> [b1, b2, b3]
 // p2 -> [b1, b2]
 // ```
-fn map_results(batches: &[BenchmarkBatch]) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
+fn map_results(
+	batches: &[BenchmarkBatch],
+	analysis_choice: &AnalysisChoice,
+) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
 	// Skip if batches is empty.
 	if batches.is_empty() { return Err(io_error("empty batches")) }
@@ -118,7 +123,7 @@ fn map_results(batches: &[BenchmarkBatch]) -> Result<HashMap<(String, String), V
 		let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap();
 		let instance_string = String::from_utf8(batch.instance.clone()).unwrap();
-		let benchmark_data = get_benchmark_data(batch);
+		let benchmark_data = get_benchmark_data(batch, analysis_choice);
 		pallet_benchmarks.push(benchmark_data);

 		// Check if this is the end of the iterator
@@ -150,11 +155,23 @@ fn extract_errors(model: &Option<RegressionModel>) -> impl Iterator<Item=u128> +
 }

 // Analyze and return the relevant results for a given benchmark.
-fn get_benchmark_data(batch: &BenchmarkBatch) -> BenchmarkData {
+fn get_benchmark_data(
+	batch: &BenchmarkBatch,
+	analysis_choice: &AnalysisChoice,
+) -> BenchmarkData {
 	// Analyze benchmarks to get the linear regression.
-	let extrinsic_time = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime).unwrap();
-	let reads = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads).unwrap();
-	let writes = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes).unwrap();
+	let analysis_function = match analysis_choice {
+		AnalysisChoice::MinSquares => Analysis::min_squares_iqr,
+		AnalysisChoice::MedianSlopes => Analysis::median_slopes,
+		AnalysisChoice::Max => Analysis::max,
+	};
+
+	let extrinsic_time = analysis_function(&batch.results, BenchmarkSelector::ExtrinsicTime)
+		.expect("analysis function should return an extrinsic time for valid inputs");
+	let reads = analysis_function(&batch.results, BenchmarkSelector::Reads)
+		.expect("analysis function should return the number of reads for valid inputs");
+	let writes = analysis_function(&batch.results, BenchmarkSelector::Writes)
+		.expect("analysis function should return the number of writes for valid inputs");

 	// Analysis data may include components that are not used; this filters out anything whose value is zero.
 	let mut used_components = Vec::new();
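For this `match` to compile, `Analysis::min_squares_iqr`, `Analysis::median_slopes`, and `Analysis::max` must share one signature, so every arm coerces to the same plain `fn` pointer, which is then called three times below. A minimal, self-contained illustration of the pattern, with stand-in types (`Choice` and `Stats` are invented for this sketch, not Substrate types):

```rust
// When every match arm names an associated function with the same signature,
// the match itself evaluates to a plain `fn` pointer; no trait objects or
// generics are needed.
#[derive(Clone, Copy)]
enum Choice { Min, Median, Max }

struct Stats;

impl Stats {
	fn min(xs: &[u64]) -> Option<u64> { xs.iter().copied().min() }
	fn median(xs: &[u64]) -> Option<u64> {
		let mut v = xs.to_vec();
		v.sort_unstable();
		v.get(v.len() / 2).copied()
	}
	fn max(xs: &[u64]) -> Option<u64> { xs.iter().copied().max() }
}

fn main() {
	let data = [3u64, 1, 4, 1, 5];
	// Resolve the choice once into a function pointer...
	let f: fn(&[u64]) -> Option<u64> = match Choice::Median {
		Choice::Min => Stats::min,
		Choice::Median => Stats::median,
		Choice::Max => Stats::max,
	};
	// ...then call the pointer as often as needed.
	assert_eq!(f(&data), Some(3));
}
```

Resolving the choice once this way keeps `get_benchmark_data` free of generics and trait objects.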
@@ -255,6 +272,11 @@ pub fn write_results(
 	// Full CLI args passed to trigger the benchmark.
 	let args = std::env::args().collect::<Vec<String>>();

+	// Which analysis function should be used when outputting benchmarks
+	let analysis_choice: AnalysisChoice = cmd.output_analysis.clone()
+		.try_into()
+		.map_err(|e| io_error(e))?;
+
 	// Capture individual args
 	let cmd_data = CmdData {
 		steps: cmd.steps.clone(),
@@ -265,6 +287,7 @@ pub fn write_results(
 		wasm_execution: cmd.wasm_method.to_string(),
 		chain: format!("{:?}", cmd.shared_params.chain),
 		db_cache: cmd.database_cache_size,
+		analysis_choice: format!("{:?}", analysis_choice),
 	};

 	// New Handlebars instance with helpers.
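Because `analysis_choice` is stored via `format!("{:?}", ...)`, the generated weights file records which analysis produced it. Assuming a derived `Debug` like the sketch earlier, the stored string is just the variant name:

```rust
// Assumes the #[derive(Debug)] AnalysisChoice sketched above.
fn main() {
	assert_eq!(format!("{:?}", AnalysisChoice::Max), "Max");
	// Running without --output-analysis would record "MinSquares".
}
```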
@@ -275,7 +298,7 @@ pub fn write_results(
 	handlebars.register_escape_fn(|s| -> String { s.to_string() });

 	// Organize results by pallet into a JSON map
-	let all_results = map_results(batches)?;
+	let all_results = map_results(batches, &analysis_choice)?;

 	for ((pallet, instance), results) in all_results.iter() {
 		let mut file_path = path.clone();
 		// If a user only specified a directory...
@@ -455,7 +478,7 @@ mod test {
 			test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
 			test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
 			test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
-		]).unwrap();
+		], &AnalysisChoice::default()).unwrap();

 		let first_benchmark = &mapped_results.get(
 			&("first_pallet".to_string(), "instance".to_string())
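The updated test passes `AnalysisChoice::default()`. The `Default` impl is not shown in this diff, but given the doc comment lists min-squares as the default, it would presumably be:

```rust
// Assumption: consistent with the "min-squares (default)" doc comment on the
// --output-analysis flag; the actual impl in frame_benchmarking is not shown.
impl Default for AnalysisChoice {
	fn default() -> Self {
		AnalysisChoice::MinSquares
	}
}
```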