FRAME Weights with Storage Metadata (#9471)

* weights with metadata

* fix

* fix contract test

* skip metadata tag

* special handling for `frame_system`

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* add skip metadata to contracts

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix contract test

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_democracy --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/democracy/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* expose component information

* fix test generation

* refactor list benchmarks

* move component selection out of runtime

* add benchmark verification

* missing feature

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_democracy --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/democracy/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* add internal repeats

* update weights with internal repeats

* fix warning

* return error with pov

* try without tracking

* Revert "return error with pov"

This reverts commit 44c36cbbd3c6818f36f377e3e291f1df156e40f7.

* Revert "try without tracking"

This reverts commit f401c44aebff2232389d8d307b20924891e5d77d.

* Revert "Revert "try without tracking""

This reverts commit 4b4e05929802ad3e8154e107359447634e5fb21b.

* state without tracking

* fix build

* temp test

* split db and timing benchmarks

* extend db results?

* default repeat is internal

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_democracy --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/democracy/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix warning

* bump linked hash map

* use linked hash map for storage tracker

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_democracy --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/democracy/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* remove conflicting short command

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_democracy --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/democracy/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* missed one linked hashmap

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_bounties --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/bounties/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* new weights with latest changes

* Update frame/benchmarking/src/utils.rs

Co-authored-by: Parity Benchmarking Bot <admin@parity.io>
This commit was authored by Shawn Tabrizi on 2021-08-07 18:05:12 +02:00 and committed by GitHub.
parent 38db14089b
commit 0489c1768c
44 changed files with 3956 additions and 2182 deletions
@@ -18,7 +18,8 @@
use crate::BenchmarkCmd;
use codec::{Decode, Encode};
use frame_benchmarking::{
Analysis, BenchmarkBatch, BenchmarkList, BenchmarkResults, BenchmarkSelector,
Analysis, BenchmarkBatch, BenchmarkBatchSplitResults, BenchmarkList, BenchmarkParameter,
BenchmarkResults, BenchmarkSelector,
};
use frame_support::traits::StorageInfo;
use linked_hash_map::LinkedHashMap;
@@ -38,14 +39,18 @@ use std::{fmt::Debug, sync::Arc, time};
// This takes multiple benchmark batches and combines all the results where the pallet, instance,
// and benchmark are the same.
fn combine_batches(batches: Vec<BenchmarkBatch>) -> Vec<BenchmarkBatch> {
if batches.is_empty() {
return batches
fn combine_batches(
time_batches: Vec<BenchmarkBatch>,
db_batches: Vec<BenchmarkBatch>,
) -> Vec<BenchmarkBatchSplitResults> {
if time_batches.is_empty() && db_batches.is_empty() {
return Default::default()
}
let mut all_benchmarks = LinkedHashMap::<_, Vec<BenchmarkResults>>::new();
let mut all_benchmarks =
LinkedHashMap::<_, (Vec<BenchmarkResults>, Vec<BenchmarkResults>)>::new();
batches
db_batches
.into_iter()
.for_each(|BenchmarkBatch { pallet, instance, benchmark, results }| {
// We use this key to uniquely identify a benchmark among batches.
@@ -53,21 +58,31 @@ fn combine_batches(batches: Vec<BenchmarkBatch>) -> Vec<BenchmarkBatch> {
match all_benchmarks.get_mut(&key) {
// We already have this benchmark, so we extend the results.
Some(x) => x.extend(results),
Some(x) => x.1.extend(results),
// New benchmark, so we add a new entry with the initial results.
None => {
all_benchmarks.insert(key, results);
all_benchmarks.insert(key, (Vec::new(), results));
},
}
});
time_batches
.into_iter()
.for_each(|BenchmarkBatch { pallet, instance, benchmark, results }| {
// We use this key to uniquely identify a benchmark among batches.
let key = (pallet, instance, benchmark);
match all_benchmarks.get_mut(&key) {
// We already have this benchmark, so we extend the results.
Some(x) => x.0.extend(results),
None => panic!("all benchmark keys should have been populated by db batches"),
}
});
all_benchmarks
.into_iter()
.map(|((pallet, instance, benchmark), results)| BenchmarkBatch {
pallet,
instance,
benchmark,
results,
.map(|((pallet, instance, benchmark), (time_results, db_results))| {
BenchmarkBatchSplitResults { pallet, instance, benchmark, time_results, db_results }
})
.collect::<Vec<_>>()
}
@@ -110,7 +125,14 @@ impl BenchmarkCmd {
let genesis_storage = spec.build_storage()?;
let mut changes = Default::default();
let cache_size = Some(self.database_cache_size as usize);
let state = BenchmarkingState::<BB>::new(genesis_storage, cache_size, self.record_proof)?;
let state_with_tracking = BenchmarkingState::<BB>::new(
genesis_storage.clone(),
cache_size,
self.record_proof,
true,
)?;
let state_without_tracking =
BenchmarkingState::<BB>::new(genesis_storage, cache_size, self.record_proof, false)?;
let executor = NativeExecutor::<ExecDispatch>::new(
wasm_method,
self.heap_pages,
@@ -129,15 +151,16 @@ impl BenchmarkCmd {
};
// Get Benchmark List
let state = &state_without_tracking;
let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
&state,
state,
None,
&mut changes,
&executor,
"Benchmark_benchmark_metadata",
&(self.extra).encode(),
extensions(),
&sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?,
&sp_state_machine::backend::BackendRuntimeCode::new(state).runtime_code()?,
sp_core::testing::TaskExecutor::new(),
)
.execute(strategy.into())
@@ -147,35 +170,81 @@ impl BenchmarkCmd {
<(Vec<BenchmarkList>, Vec<StorageInfo>) as Decode>::decode(&mut &result[..])
.map_err(|e| format!("Failed to decode benchmark metadata: {:?}", e))?;
if self.list {
list_benchmark(pallet, extrinsic, list);
return Ok(())
}
// Use the benchmark list and the user input to determine the set of benchmarks to run.
let mut benchmarks_to_run = Vec::new();
for item in list {
if pallet == &item.pallet[..] || pallet == &b"*"[..] {
if &pallet[..] == &b"*"[..] || &extrinsic[..] == &b"*"[..] {
for benchmark in item.benchmarks {
benchmarks_to_run.push((item.pallet.clone(), benchmark));
list.iter()
.filter(|item| pallet.is_empty() || pallet == &b"*"[..] || pallet == &item.pallet[..])
.for_each(|item| {
for benchmark in &item.benchmarks {
if extrinsic.is_empty() ||
&extrinsic[..] == &b"*"[..] ||
extrinsic == benchmark.name
{
benchmarks_to_run.push((
item.pallet.clone(),
benchmark.name.clone(),
benchmark.components.clone(),
))
}
} else {
benchmarks_to_run.push((pallet.to_vec(), extrinsic.to_vec()));
}
}
});
if benchmarks_to_run.is_empty() {
return Err("No benchmarks found which match your input.".into())
}
if self.list {
// List benchmarks instead of running them
list_benchmark(benchmarks_to_run);
return Ok(())
}
// Run the benchmarks
let mut batches = Vec::new();
let mut batches_db = Vec::new();
let mut timer = time::SystemTime::now();
for (pallet, extrinsic) in benchmarks_to_run {
for s in 0..self.steps {
for r in 0..self.repeat {
// This should run only a single instance of a benchmark for `pallet` and
// `extrinsic`. All loops happen above.
let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
&state,
for (pallet, extrinsic, components) in benchmarks_to_run {
let all_components = if components.is_empty() {
vec![Default::default()]
} else {
let mut all_components = Vec::new();
for (idx, (name, low, high)) in components.iter().enumerate() {
let lowest = self.lowest_range_values.get(idx).cloned().unwrap_or(*low);
let highest = self.highest_range_values.get(idx).cloned().unwrap_or(*high);
let diff = highest - lowest;
// Create up to `STEPS` steps for that component between high and low.
let step_size = (diff / self.steps).max(1);
let num_of_steps = diff / step_size + 1;
for s in 0..num_of_steps {
// This is the value we will be testing for component `name`
let component_value = lowest + step_size * s;
// Select the max value for all the other components.
let c: Vec<(BenchmarkParameter, u32)> = components
.iter()
.enumerate()
.map(|(idx, (n, _, h))| {
if n == name {
(*n, component_value)
} else {
(*n, *self.highest_range_values.get(idx).unwrap_or(h))
}
})
.collect();
all_components.push(c);
}
}
all_components
};
for (s, selected_components) in all_components.iter().enumerate() {
// First we run a verification
if !self.no_verify {
// Dont use these results since verification code will add overhead
let state = &state_without_tracking;
let _results = StateMachine::<_, _, NumberFor<BB>, _>::new(
state,
None,
&mut changes,
&executor,
@@ -183,16 +252,73 @@ impl BenchmarkCmd {
&(
&pallet.clone(),
&extrinsic.clone(),
self.lowest_range_values.clone(),
self.highest_range_values.clone(),
(s, self.steps),
(r, self.repeat),
!self.no_verify,
self.extra,
&selected_components.clone(),
true, // run verification code
1, // no need to do internal repeats
)
.encode(),
extensions(),
&sp_state_machine::backend::BackendRuntimeCode::new(&state)
&sp_state_machine::backend::BackendRuntimeCode::new(state)
.runtime_code()?,
sp_core::testing::TaskExecutor::new(),
)
.execute(strategy.into())
.map_err(|e| {
format!("Error executing and verifying runtime benchmark: {:?}", e)
})?;
}
// Do one loop of DB tracking.
{
let state = &state_with_tracking;
let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
state, // todo remove tracking
None,
&mut changes,
&executor,
"Benchmark_dispatch_benchmark",
&(
&pallet.clone(),
&extrinsic.clone(),
&selected_components.clone(),
false, // dont run verification code for final values
self.repeat,
)
.encode(),
extensions(),
&sp_state_machine::backend::BackendRuntimeCode::new(state)
.runtime_code()?,
sp_core::testing::TaskExecutor::new(),
)
.execute(strategy.into())
.map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?;
let batch =
<std::result::Result<Vec<BenchmarkBatch>, String> as Decode>::decode(
&mut &result[..],
)
.map_err(|e| format!("Failed to decode benchmark results: {:?}", e))??;
batches_db.extend(batch);
}
// Finally run a bunch of loops to get extrinsic timing information.
for r in 0..self.external_repeat {
let state = &state_without_tracking;
let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
state, // todo remove tracking
None,
&mut changes,
&executor,
"Benchmark_dispatch_benchmark",
&(
&pallet.clone(),
&extrinsic.clone(),
&selected_components.clone(),
false, // dont run verification code for final values
self.repeat,
)
.encode(),
extensions(),
&sp_state_machine::backend::BackendRuntimeCode::new(state)
.runtime_code()?,
sp_core::testing::TaskExecutor::new(),
)
@@ -217,10 +343,10 @@ impl BenchmarkCmd {
.expect("Encoded from String; qed"),
String::from_utf8(extrinsic.clone())
.expect("Encoded from String; qed"),
s,
s, // todo show step
self.steps,
r,
self.repeat,
self.external_repeat,
);
}
}
@@ -230,7 +356,7 @@ impl BenchmarkCmd {
// Combine all of the benchmark results, so that benchmarks of the same pallet/function
// are together.
let batches = combine_batches(batches);
let batches: Vec<BenchmarkBatchSplitResults> = combine_batches(batches, batches_db);
if let Some(output_path) = &self.output {
crate::writer::write_results(&batches, &storage_info, output_path, self)?;
@@ -249,17 +375,20 @@ impl BenchmarkCmd {
);
// Skip raw data + analysis if there are no results
if batch.results.is_empty() {
if batch.time_results.is_empty() {
continue
}
if self.raw_data {
// Print the table header
batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0));
batch.time_results[0]
.components
.iter()
.for_each(|param| print!("{:?},", param.0));
print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n");
// Print the values
batch.results.iter().for_each(|result| {
batch.time_results.iter().for_each(|result| {
let parameters = &result.components;
parameters.iter().for_each(|param| print!("{:?},", param.1));
// Print extrinsic time and storage root time
@@ -282,17 +411,17 @@ impl BenchmarkCmd {
if !self.no_median_slopes {
println!("Median Slopes Analysis\n========");
if let Some(analysis) =
Analysis::median_slopes(&batch.results, BenchmarkSelector::ExtrinsicTime)
Analysis::median_slopes(&batch.time_results, BenchmarkSelector::ExtrinsicTime)
{
println!("-- Extrinsic Time --\n{}", analysis);
}
if let Some(analysis) =
Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads)
Analysis::median_slopes(&batch.db_results, BenchmarkSelector::Reads)
{
println!("Reads = {:?}", analysis);
}
if let Some(analysis) =
Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes)
Analysis::median_slopes(&batch.db_results, BenchmarkSelector::Writes)
{
println!("Writes = {:?}", analysis);
}
@@ -300,17 +429,17 @@ impl BenchmarkCmd {
if !self.no_min_squares {
println!("Min Squares Analysis\n========");
if let Some(analysis) =
Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime)
Analysis::min_squares_iqr(&batch.time_results, BenchmarkSelector::ExtrinsicTime)
{
println!("-- Extrinsic Time --\n{}", analysis);
}
if let Some(analysis) =
Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads)
Analysis::min_squares_iqr(&batch.db_results, BenchmarkSelector::Reads)
{
println!("Reads = {:?}", analysis);
}
if let Some(analysis) =
Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes)
Analysis::min_squares_iqr(&batch.db_results, BenchmarkSelector::Writes)
{
println!("Writes = {:?}", analysis);
}
@@ -335,39 +464,9 @@ impl CliConfiguration for BenchmarkCmd {
}
/// List the benchmarks available in the runtime, in a CSV friendly format.
///
/// If `pallet_input` and `extrinsic_input` is empty, we list everything.
///
/// If `pallet_input` is present, we only list the benchmarks for that pallet.
///
/// If `extrinsic_input` is `*`, we will hide the individual benchmarks for each pallet, and just
/// show a single line for each available pallet.
fn list_benchmark(pallet_input: &[u8], extrinsic_input: &[u8], list: Vec<BenchmarkList>) {
let filtered_list = list
.into_iter()
.filter(|item| pallet_input.is_empty() || pallet_input == &item.pallet)
.collect::<Vec<_>>();
if filtered_list.is_empty() {
println!("Pallet not found.");
return
}
fn list_benchmark(benchmarks_to_run: Vec<(Vec<u8>, Vec<u8>, Vec<(BenchmarkParameter, u32, u32)>)>) {
println!("pallet, benchmark");
for item in filtered_list {
let pallet_string =
String::from_utf8(item.pallet.clone()).expect("Encoded from String; qed");
if extrinsic_input == &b"*"[..] {
println!("{}, *", pallet_string)
} else {
for benchmark in item.benchmarks {
println!(
"{}, {}",
pallet_string,
String::from_utf8(benchmark).expect("Encoded from String; qed"),
);
}
}
for (pallet, extrinsic, _components) in benchmarks_to_run {
println!("{}, {}", String::from_utf8_lossy(&pallet), String::from_utf8_lossy(&extrinsic));
}
}
@@ -50,10 +50,16 @@ pub struct BenchmarkCmd {
#[structopt(long = "high", use_delimiter = true)]
pub highest_range_values: Vec<u32>,
/// Select how many repetitions of this benchmark should run.
/// Select how many repetitions of this benchmark should run from within the wasm.
#[structopt(short, long, default_value = "1")]
pub repeat: u32,
/// Select how many repetitions of this benchmark should run from the client.
///
/// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory.
#[structopt(long, default_value = "1")]
pub external_repeat: u32,
/// Print the raw results.
#[structopt(long = "raw")]
pub raw_data: bool,
@@ -130,11 +136,9 @@ pub struct BenchmarkCmd {
#[structopt(long = "db-cache", value_name = "MiB", default_value = "128")]
pub database_cache_size: u32,
/// List the benchmarks available.
/// List the benchmarks that match your query rather than running them.
///
/// * If nothing else is specified, all pallets and benchmarks will be listed.
/// * If the `pallet` argument is passed, then we will only list benchmarks for that pallet.
/// * If the `extrinsic` argument is set to `*`, we will hide the individual benchmarks.
/// When nothing is provided, we list all benchmarks.
#[structopt(long)]
pub list: bool,
}
@@ -29,7 +29,8 @@ use serde::Serialize;
use crate::BenchmarkCmd;
use frame_benchmarking::{
Analysis, AnalysisChoice, BenchmarkBatch, BenchmarkResults, BenchmarkSelector, RegressionModel,
Analysis, AnalysisChoice, BenchmarkBatchSplitResults, BenchmarkResults, BenchmarkSelector,
RegressionModel,
};
use frame_support::traits::StorageInfo;
use sp_core::hexdisplay::HexDisplay;
@@ -114,7 +115,7 @@ fn io_error(s: &str) -> std::io::Error {
// p2 -> [b1, b2]
// ```
fn map_results(
batches: &[BenchmarkBatch],
batches: &[BenchmarkBatchSplitResults],
storage_info: &[StorageInfo],
analysis_choice: &AnalysisChoice,
) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
@@ -129,7 +130,7 @@ fn map_results(
let mut batches_iter = batches.iter().peekable();
while let Some(batch) = batches_iter.next() {
// Skip if there are no results
if batch.results.is_empty() {
if batch.time_results.is_empty() {
continue
}
@@ -166,7 +167,7 @@ fn extract_errors(model: &Option<RegressionModel>) -> impl Iterator<Item = u128>
// Analyze and return the relevant results for a given benchmark.
fn get_benchmark_data(
batch: &BenchmarkBatch,
batch: &BenchmarkBatchSplitResults,
storage_info: &[StorageInfo],
analysis_choice: &AnalysisChoice,
) -> BenchmarkData {
@@ -180,11 +181,11 @@ fn get_benchmark_data(
AnalysisChoice::Max => Analysis::max,
};
let extrinsic_time = analysis_function(&batch.results, BenchmarkSelector::ExtrinsicTime)
let extrinsic_time = analysis_function(&batch.time_results, BenchmarkSelector::ExtrinsicTime)
.expect("analysis function should return an extrinsic time for valid inputs");
let reads = analysis_function(&batch.results, BenchmarkSelector::Reads)
let reads = analysis_function(&batch.db_results, BenchmarkSelector::Reads)
.expect("analysis function should return the number of reads for valid inputs");
let writes = analysis_function(&batch.results, BenchmarkSelector::Writes)
let writes = analysis_function(&batch.db_results, BenchmarkSelector::Writes)
.expect("analysis function should return the number of writes for valid inputs");
// Analysis data may include components that are not used, this filters out anything whose value is zero.
@@ -238,7 +239,7 @@ fn get_benchmark_data(
});
// This puts a marker on any component which is entirely unused in the weight formula.
let components = batch.results[0]
let components = batch.time_results[0]
.components
.iter()
.map(|(name, _)| -> Component {
@@ -249,7 +250,7 @@ fn get_benchmark_data(
.collect::<Vec<_>>();
// We add additional comments showing which storage items were touched.
add_storage_comments(&mut comments, &batch.results, storage_info);
add_storage_comments(&mut comments, &batch.db_results, storage_info);
BenchmarkData {
name: String::from_utf8(batch.benchmark.clone()).unwrap(),
@@ -266,7 +267,7 @@ fn get_benchmark_data(
// Create weight file from benchmark data and Handlebars template.
pub fn write_results(
batches: &[BenchmarkBatch],
batches: &[BenchmarkBatchSplitResults],
storage_info: &[StorageInfo],
path: &PathBuf,
cmd: &BenchmarkCmd,
@@ -360,10 +361,21 @@ fn add_storage_comments(
results: &[BenchmarkResults],
storage_info: &[StorageInfo],
) {
let storage_info_map = storage_info
let mut storage_info_map = storage_info
.iter()
.map(|info| (info.prefix.clone(), info))
.collect::<HashMap<_, _>>();
// Special hack to show `Skipped Metadata`
let skip_storage_info = StorageInfo {
pallet_name: b"Skipped".to_vec(),
storage_name: b"Metadata".to_vec(),
prefix: b"Skipped Metadata".to_vec(),
max_values: None,
max_size: None,
};
storage_info_map.insert(skip_storage_info.prefix.clone(), &skip_storage_info);
// This tracks the keys we already identified, so we only generate a single comment.
let mut identified = HashSet::<Vec<u8>>::new();
@@ -489,7 +501,7 @@ where
#[cfg(test)]
mod test {
use super::*;
use frame_benchmarking::{BenchmarkBatch, BenchmarkParameter, BenchmarkResults};
use frame_benchmarking::{BenchmarkBatchSplitResults, BenchmarkParameter, BenchmarkResults};
fn test_data(
pallet: &[u8],
@@ -497,7 +509,7 @@ mod test {
param: BenchmarkParameter,
base: u32,
slope: u32,
) -> BenchmarkBatch {
) -> BenchmarkBatchSplitResults {
let mut results = Vec::new();
for i in 0..5 {
results.push(BenchmarkResults {
@@ -513,11 +525,12 @@ mod test {
})
}
return BenchmarkBatch {
return BenchmarkBatchSplitResults {
pallet: [pallet.to_vec(), b"_pallet".to_vec()].concat(),
instance: b"instance".to_vec(),
benchmark: [benchmark.to_vec(), b"_benchmark".to_vec()].concat(),
results,
time_results: results.clone(),
db_results: results,
}
}