Include StorageInfo in Benchmarking Pipeline (#9090)

* extend storageinfo

* extend_storage_info

* use vec

* add storage info to pipeline

* get read and written keys

* undo storageinfo move

* refactor keytracker

* return read / write count

* playing with key matching

* add basic `StorageInfo` constructor

* add whitelisted to returned info

* fix some test stuff

* pipe comments into benchmark data

* add_storage_comments

* add comments to template

* track only storage prefix

* Update frame/benchmarking/src/lib.rs

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix test

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* remove test logs

* add temp benchmark script

* Apply suggestions from code review

Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>

* remove keytracker and use trackedstoragekey

* add comment for unknown keys

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_timestamp --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/timestamp/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* remove duplicate comments with unknown keys

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_timestamp --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/timestamp/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* refactor bench tracker, and fix results

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix child tries in new tracker

* extra newline

* fix unused warning

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_timestamp --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/timestamp/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix master merge

* storage info usage refactor

* remove now unused

* fix refactor

* use a vec for prefix

* fix tests

* also update writer to use vec

* disable read and written keys for now

* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* Update frame/system/src/weights.rs

* fix test

* Delete weights.rs

* reset weights

Co-authored-by: Parity Bot <admin@parity.io>
Co-authored-by: Guillaume Thiolliere <gui.thiolliere@gmail.com>
This commit is contained in:
Shawn Tabrizi
2021-07-07 18:06:06 -04:00
committed by GitHub
parent e0ad91ed95
commit b42b8fc5fb
28 changed files with 552 additions and 185 deletions
@@ -15,10 +15,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use crate::BenchmarkCmd;
use codec::{Decode, Encode};
use frame_benchmarking::{Analysis, BenchmarkBatch, BenchmarkSelector};
use frame_support::traits::StorageInfo;
use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result};
use sc_client_db::BenchmarkingState;
use sc_executor::NativeExecutor;
@@ -31,7 +31,7 @@ use sp_keystore::{
SyncCryptoStorePtr, KeystoreExt,
testing::KeyStore,
};
use std::fmt::Debug;
use std::{fmt::Debug, sync::Arc};
impl BenchmarkCmd {
/// Runs the command and benchmarks the chain.
@@ -98,13 +98,16 @@ impl BenchmarkCmd {
.execute(strategy.into())
.map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?;
let results = <std::result::Result<Vec<BenchmarkBatch>, String> as Decode>::decode(&mut &result[..])
let results = <std::result::Result<
(Vec<BenchmarkBatch>, Vec<StorageInfo>),
String,
> as Decode>::decode(&mut &result[..])
.map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?;
match results {
Ok(batches) => {
Ok((batches, storage_info)) => {
if let Some(output_path) = &self.output {
crate::writer::write_results(&batches, output_path, self)?;
crate::writer::write_results(&batches, &storage_info, output_path, self)?;
}
for batch in batches.into_iter() {
@@ -129,6 +132,7 @@ impl BenchmarkCmd {
print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n");
// Print the values
batch.results.iter().for_each(|result| {
let parameters = &result.components;
parameters.iter().for_each(|param| print!("{:?},", param.1));
// Print extrinsic time and storage root time
@@ -20,6 +20,9 @@ use sp_std::marker::PhantomData;
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> {{pallet}}::WeightInfo for WeightInfo<T> {
{{~#each benchmarks as |benchmark|}}
{{~#each benchmark.comments as |comment|}}
// {{comment}}
{{~/each}}
fn {{benchmark.name~}}
(
{{~#each benchmark.components as |c| ~}}
@@ -17,7 +17,7 @@
// Outputs benchmark results to Rust files that can be ingested by the runtime.
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use core::convert::TryInto;
@@ -26,8 +26,12 @@ use serde::Serialize;
use inflector::Inflector;
use crate::BenchmarkCmd;
use frame_benchmarking::{BenchmarkBatch, BenchmarkSelector, Analysis, AnalysisChoice, RegressionModel};
use frame_benchmarking::{
BenchmarkBatch, BenchmarkSelector, Analysis, AnalysisChoice, RegressionModel, BenchmarkResults,
};
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::traits::Zero;
use frame_support::traits::StorageInfo;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
const TEMPLATE: &str = include_str!("./template.hbs");
@@ -59,6 +63,7 @@ struct BenchmarkData {
component_weight: Vec<ComponentSlope>,
component_reads: Vec<ComponentSlope>,
component_writes: Vec<ComponentSlope>,
comments: Vec<String>,
}
// This forwards some specific metadata from the `BenchmarkCmd`
@@ -108,6 +113,7 @@ fn io_error(s: &str) -> std::io::Error {
// ```
fn map_results(
batches: &[BenchmarkBatch],
storage_info: &[StorageInfo],
analysis_choice: &AnalysisChoice,
) -> Result<HashMap<(String, String), Vec<BenchmarkData>>, std::io::Error> {
// Skip if batches is empty.
@@ -123,7 +129,7 @@ fn map_results(
let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap();
let instance_string = String::from_utf8(batch.instance.clone()).unwrap();
let benchmark_data = get_benchmark_data(batch, analysis_choice);
let benchmark_data = get_benchmark_data(batch, storage_info, analysis_choice);
pallet_benchmarks.push(benchmark_data);
// Check if this is the end of the iterator
@@ -157,8 +163,12 @@ fn extract_errors(model: &Option<RegressionModel>) -> impl Iterator<Item=u128> +
// Analyze and return the relevant results for a given benchmark.
fn get_benchmark_data(
batch: &BenchmarkBatch,
storage_info: &[StorageInfo],
analysis_choice: &AnalysisChoice,
) -> BenchmarkData {
// You can use this to put any additional comments with the benchmarking output.
let mut comments = Vec::<String>::new();
// Analyze benchmarks to get the linear regression.
let analysis_function = match analysis_choice {
AnalysisChoice::MinSquares => Analysis::min_squares_iqr,
@@ -229,6 +239,9 @@ fn get_benchmark_data(
})
.collect::<Vec<_>>();
// We add additional comments showing which storage items were touched.
add_storage_comments(&mut comments, &batch.results, storage_info);
BenchmarkData {
name: String::from_utf8(batch.benchmark.clone()).unwrap(),
components,
@@ -238,12 +251,14 @@ fn get_benchmark_data(
component_weight: used_extrinsic_time,
component_reads: used_reads,
component_writes: used_writes,
comments,
}
}
// Create weight file from benchmark data and Handlebars template.
pub fn write_results(
batches: &[BenchmarkBatch],
storage_info: &[StorageInfo],
path: &PathBuf,
cmd: &BenchmarkCmd,
) -> Result<(), std::io::Error> {
@@ -298,7 +313,7 @@ pub fn write_results(
handlebars.register_escape_fn(|s| -> String { s.to_string() });
// Organize results by pallet into a JSON map
let all_results = map_results(batches, &analysis_choice)?;
let all_results = map_results(batches, storage_info, &analysis_choice)?;
for ((pallet, instance), results) in all_results.iter() {
let mut file_path = path.clone();
// If a user only specified a directory...
@@ -332,6 +347,57 @@ pub fn write_results(
Ok(())
}
// This function looks at the keys touched during the benchmark, and the storage info we collected
// from the pallets, and creates comments with information about the storage keys touched during
// each benchmark.
fn add_storage_comments(
    comments: &mut Vec<String>,
    results: &[BenchmarkResults],
    storage_info: &[StorageInfo],
) {
    // Index the pallet-provided storage info by its key prefix so touched keys can be
    // resolved back to a `Pallet::StorageItem` name.
    let storage_info_map = storage_info.iter().map(|info| (info.prefix.clone(), info))
        .collect::<HashMap<_, _>>();
    // This tracks the keys we already identified, so we only generate a single comment
    // per storage item even if it is touched by multiple benchmark runs.
    let mut identified = HashSet::<Vec<u8>>::new();
    // NOTE: the original iterated `results.clone()`, which only clones the `&[_]`
    // reference (a no-op); iterate the slice directly instead.
    for result in results {
        for (key, reads, writes, whitelisted) in &result.keys {
            // Skip keys which are whitelisted: they are excluded from the weight accounting.
            if *whitelisted { continue; }
            // A storage prefix is `twox128(pallet) ++ twox128(storage)` = 32 bytes;
            // shorter well-known keys (e.g. `:code:`) are taken whole.
            // TODO confirm: assumes no tracked prefix shorter than the full key collides.
            let prefix_length = key.len().min(32);
            let prefix = key[0..prefix_length].to_vec();
            // `insert` returns `false` when the prefix was already present, so we do a
            // single hash lookup instead of a `contains` followed by `insert`.
            if !identified.insert(prefix.clone()) {
                // Skip adding comments for keys we already identified.
                continue;
            }
            match storage_info_map.get(&prefix) {
                Some(key_info) => {
                    // Known storage item: name it `Pallet StorageItem (r:.. w:..)`.
                    let comment = format!(
                        "Storage: {} {} (r:{} w:{})",
                        String::from_utf8(key_info.pallet_name.clone()).expect("encoded from string"),
                        String::from_utf8(key_info.storage_name.clone()).expect("encoded from string"),
                        reads,
                        writes,
                    );
                    comments.push(comment)
                },
                None => {
                    // Unknown key (no matching pallet storage info): show the raw hex key.
                    let comment = format!(
                        "Storage: unknown [0x{}] (r:{} w:{})",
                        HexDisplay::from(key),
                        reads,
                        writes,
                    );
                    comments.push(comment)
                }
            }
        }
    }
}
// Add an underscore after every 3rd character, i.e. a separator for large numbers.
fn underscore<Number>(i: Number) -> String
where Number: std::string::ToString
@@ -422,6 +488,7 @@ mod test {
writes: (base + slope * i).into(),
repeat_writes: 0,
proof_size: 0,
keys: vec![],
}
)
}
@@ -475,11 +542,15 @@ mod test {
#[test]
fn map_results_works() {
let mapped_results = map_results(&[
test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
], &AnalysisChoice::default()).unwrap();
let mapped_results = map_results(
&[
test_data(b"first", b"first", BenchmarkParameter::a, 10, 3),
test_data(b"first", b"second", BenchmarkParameter::b, 9, 2),
test_data(b"second", b"first", BenchmarkParameter::c, 3, 4),
],
&[],
&AnalysisChoice::default(),
).unwrap();
let first_benchmark = &mapped_results.get(
&("first_pallet".to_string(), "instance".to_string())