Make Benchmark Output Analysis Function Configurable (#8228)

* Integrate `output-analysis`

* fix test

* use default

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* Update frame/system/src/weights.rs

* cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs --output-analysis=max

* Update frame/system/src/weights.rs

* don't discard `value_dists` and `model`

* feedback

Co-authored-by: Parity Benchmarking Bot <admin@parity.io>
Shawn Tabrizi
2021-03-02 11:56:51 -04:00
committed by GitHub
parent 7c1dd95e50
commit cdc59db5b7
5 changed files with 131 additions and 31 deletions
@@ -18,6 +18,7 @@
//! Tools for analyzing the benchmark results.
use std::collections::BTreeMap;
use core::convert::TryFrom;
use linregress::{FormulaRegressionBuilder, RegressionDataBuilder};
use crate::BenchmarkResults;
@@ -31,6 +32,7 @@ pub struct Analysis {
	pub model: Option<RegressionModel>,
}
#[derive(Clone, Copy)]
pub enum BenchmarkSelector {
	ExtrinsicTime,
	StorageRootTime,
@@ -38,6 +40,40 @@ pub enum BenchmarkSelector {
	Reads,
	Writes,
}
/// The analysis function to apply to the benchmark results.
#[derive(Debug)]
pub enum AnalysisChoice {
	/// Use minimum squares regression for analyzing the benchmarking results.
	MinSquares,
	/// Use median slopes for analyzing the benchmarking results.
	MedianSlopes,
	/// Use the maximum of the values produced by the other analysis functions for the benchmarking results.
	Max,
}
impl Default for AnalysisChoice {
	fn default() -> Self {
		AnalysisChoice::MinSquares
	}
}
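For orientation, a hedged sketch (the `analyze` helper is hypothetical, not part of this commit) of how the configured variant would dispatch to the matching analysis function on `Analysis`:

// Hypothetical helper: route the configured AnalysisChoice to the
// corresponding analysis function.
fn analyze(
	choice: &AnalysisChoice,
	results: &Vec<BenchmarkResults>,
	selector: BenchmarkSelector,
) -> Option<Analysis> {
	match choice {
		AnalysisChoice::MinSquares => Analysis::min_squares_iqr(results, selector),
		AnalysisChoice::MedianSlopes => Analysis::median_slopes(results, selector),
		AnalysisChoice::Max => Analysis::max(results, selector),
	}
}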
impl TryFrom<Option<String>> for AnalysisChoice {
	type Error = &'static str;

	fn try_from(s: Option<String>) -> Result<Self, Self::Error> {
		match s {
			None => Ok(AnalysisChoice::default()),
			Some(i) => {
				match &i[..] {
					"min-squares" | "min_squares" => Ok(AnalysisChoice::MinSquares),
					"median-slopes" | "median_slopes" => Ok(AnalysisChoice::MedianSlopes),
					"max" => Ok(AnalysisChoice::Max),
					_ => Err("invalid analysis string"),
				}
			}
		}
	}
}
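A quick sanity check of the conversion (the demo function is hypothetical; it assumes the items above are in scope): a missing flag falls back to the default, both spellings of a name parse, and unknown names are rejected.

// Hypothetical demo exercising the TryFrom conversion above.
fn demo_parse() {
	let missing: Option<String> = None;
	assert!(matches!(AnalysisChoice::try_from(missing), Ok(AnalysisChoice::MinSquares)));
	assert!(matches!(
		AnalysisChoice::try_from(Some("median_slopes".to_string())),
		Ok(AnalysisChoice::MedianSlopes)
	));
	assert!(AnalysisChoice::try_from(Some("mean".to_string())).is_err());
}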
impl Analysis {
	// Useful for when there are no components, and we just need a median value of the benchmark results.
	// Note: We choose the median value because it is more robust to outliers.
@@ -215,6 +251,39 @@ impl Analysis {
			model: Some(model),
		})
	}
	/// Take the maximum of the median-slopes and min-squares analyses, component by component.
	pub fn max(r: &Vec<BenchmarkResults>, selector: BenchmarkSelector) -> Option<Self> {
		let median_slopes = Self::median_slopes(r, selector);
		let min_squares = Self::min_squares_iqr(r, selector);

		if median_slopes.is_none() || min_squares.is_none() {
			return None;
		}

		let median_slopes = median_slopes.unwrap();
		let min_squares = min_squares.unwrap();

		let base = median_slopes.base.max(min_squares.base);
		let slopes = median_slopes.slopes.into_iter()
			.zip(min_squares.slopes.into_iter())
			.map(|(a, b): (u128, u128)| a.max(b))
			.collect::<Vec<u128>>();
		// The components should always be in the same order.
		median_slopes.names.iter()
			.zip(min_squares.names.iter())
			.for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order"));
		let names = median_slopes.names;
		// Keep the value distributions and regression model from the min-squares analysis.
		let value_dists = min_squares.value_dists;
		let model = min_squares.model;

		Some(Self {
			base,
			slopes,
			names,
			value_dists,
			model,
		})
	}
}
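For intuition, a small self-contained sketch (numbers invented for illustration) of the per-component combination that `max` performs:

// Illustrative only: pretend median-slopes and min-squares produced these
// base weights and per-component slopes for the same extrinsic.
fn demo_max() {
	let (ms_base, ms_slopes): (u128, Vec<u128>) = (100, vec![5, 9]);
	let (sq_base, sq_slopes): (u128, Vec<u128>) = (120, vec![7, 3]);

	// `max` keeps the worst (largest) estimate of every part of the weight formula.
	let base = ms_base.max(sq_base);
	let slopes: Vec<u128> = ms_slopes.into_iter()
		.zip(sq_slopes.into_iter())
		.map(|(a, b)| a.max(b))
		.collect();

	assert_eq!(base, 120);
	assert_eq!(slopes, vec![7, 9]);
}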
fn ms(mut nanos: u128) -> String {