Consolidate frame benchmarking into a frame crate (#4977)

This PR cleans up some of the FRAME benchmarking functionality:
- Move the CLI into `frame-benchmarking-cli`. No FRAME-related CLI should
exist in the default Substrate CLI.
- Move all traits and types related to FRAME benchmarking into the
`frame-benchmarking` crate. FRAME types should be isolated in FRAME.
This commit is contained in:
Bastian Köcher
2020-02-19 10:22:36 +01:00
committed by GitHub
parent e50f610907
commit b4ebd41c21
25 changed files with 484 additions and 329 deletions
@@ -0,0 +1,17 @@
# Manifest for the standalone FRAME benchmarking CLI crate,
# split out of the default Substrate CLI.
[package]
name = "frame-benchmarking-cli"
version = "2.0.0"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
license = "GPL-3.0"
[dependencies]
# Benchmarking primitives (e.g. `BenchmarkResults`) that this CLI decodes.
frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" }
# Node service/CLI plumbing used by `BenchmarkCmd::init`/`run`.
sc-service = { version = "0.8.0", path = "../../../client/service" }
sc-cli = { version = "0.8.0", path = "../../../client/cli" }
# `StateMachine` for calling into the runtime.
sc-client = { version = "0.8.0", path = "../../../client" }
# Provides `BenchmarkingState`, the throwaway DB backend for benchmarks.
sc-client-db = { version = "0.8.0", path = "../../../client/db" }
# Native/Wasm runtime executor.
sc-executor = { version = "0.8.0", path = "../../../client/executor" }
sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" }
# CLI argument parsing (derive-based).
structopt = "0.3.8"
# SCALE codec for encoding the call arguments and decoding the results.
codec = { version = "1.1.2", package = "parity-scale-codec" }
@@ -0,0 +1,152 @@
// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use sp_runtime::{BuildStorage, traits::{Block as BlockT, Header as HeaderT, NumberFor}};
use sc_client::StateMachine;
use sc_cli::{ExecutionStrategy, WasmExecutionMethod};
use sc_client_db::BenchmarkingState;
use sc_service::{RuntimeGenesis, ChainSpecExtension};
use sc_executor::{NativeExecutor, NativeExecutionDispatch};
use std::fmt::Debug;
use codec::{Encode, Decode};
use frame_benchmarking::BenchmarkResults;
/// The `benchmark` command used to benchmark FRAME Pallets.
///
/// The field-level `structopt` attributes define the CLI interface; the
/// selected pallet/extrinsic and the step/repeat counts are SCALE-encoded
/// and passed to the runtime's `Benchmark_dispatch_benchmark` entry point
/// (see `BenchmarkCmd::run`).
#[derive(Debug, structopt::StructOpt, Clone)]
pub struct BenchmarkCmd {
/// Select a FRAME Pallet to benchmark.
#[structopt(short, long)]
pub pallet: String,
/// Select an extrinsic to benchmark.
#[structopt(short, long)]
pub extrinsic: String,
/// Select how many samples we should take across the variable components.
#[structopt(short, long, default_value = "1")]
pub steps: u32,
/// Select how many repetitions of this benchmark should run.
#[structopt(short, long, default_value = "1")]
pub repeat: u32,
// Standard shared CLI parameters (chain spec, base path, logging, ...),
// flattened into this command's argument list.
#[allow(missing_docs)]
#[structopt(flatten)]
pub shared_params: sc_cli::SharedParams,
/// The execution strategy that should be used for benchmarks
// When not given on the command line, `run` falls back to
// `ExecutionStrategy::Native`.
#[structopt(
long = "execution",
value_name = "STRATEGY",
possible_values = &ExecutionStrategy::variants(),
case_insensitive = true,
)]
pub execution: Option<ExecutionStrategy>,
/// Method for executing Wasm runtime code.
#[structopt(
long = "wasm-execution",
value_name = "METHOD",
possible_values = &WasmExecutionMethod::enabled_variants(),
case_insensitive = true,
default_value = "Interpreted"
)]
pub wasm_method: WasmExecutionMethod,
}
impl BenchmarkCmd {
    /// Parse CLI arguments and initialize the given `config`.
    ///
    /// Resolves the chain spec through `spec_factory` and forces an
    /// in-memory keystore, since benchmarking never needs persistent keys.
    pub fn init<G, E>(
        &self,
        config: &mut sc_service::config::Configuration<G, E>,
        spec_factory: impl FnOnce(&str) -> Result<Option<sc_service::ChainSpec<G, E>>, String>,
        version: &sc_cli::VersionInfo,
    ) -> sc_cli::error::Result<()> where
        G: sc_service::RuntimeGenesis,
        E: sc_service::ChainSpecExtension,
    {
        sc_cli::init_config(config, &self.shared_params, version, spec_factory)?;
        // make sure to configure keystore
        sc_cli::fill_config_keystore_in_memory(config).map_err(Into::into)
    }

    /// Runs the command and benchmarks the chain.
    ///
    /// Builds a fresh `BenchmarkingState` from the chain spec's genesis
    /// storage, invokes the runtime's `Benchmark_dispatch_benchmark` entry
    /// point with the SCALE-encoded `(pallet, extrinsic, steps, repeat)`
    /// selection, decodes the returned `Option<Vec<BenchmarkResults>>` and
    /// prints the samples as CSV on stdout (status messages go to stderr).
    pub fn run<G, E, BB, ExecDispatch>(
        self,
        config: sc_service::Configuration<G, E>,
    ) -> sc_cli::error::Result<()>
    where
        G: RuntimeGenesis,
        E: ChainSpecExtension,
        BB: BlockT + Debug,
        <<<BB as BlockT>::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug,
        <BB as BlockT>::Hash: std::str::FromStr,
        ExecDispatch: NativeExecutionDispatch + 'static,
    {
        // `init` has run by now, so the chain spec is guaranteed to be set.
        let spec = config.chain_spec.expect("chain_spec is always Some");
        let wasm_method = self.wasm_method.into();
        // Default to native execution when no strategy was given on the CLI.
        let strategy = self.execution.unwrap_or(ExecutionStrategy::Native);

        let genesis_storage = spec.build_storage()?;
        let mut changes = Default::default();
        let state = BenchmarkingState::<BB>::new(genesis_storage)?;
        let executor = NativeExecutor::<ExecDispatch>::new(
            wasm_method,
            None, // heap pages
        );

        // Call straight into the runtime API; the result is the SCALE
        // encoding of `Option<Vec<BenchmarkResults>>`.
        let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
            &state,
            None,
            &mut changes,
            &executor,
            "Benchmark_dispatch_benchmark",
            &(&self.pallet, &self.extrinsic, self.steps, self.repeat).encode(),
            Default::default(),
        )
        .execute(strategy.into())
        .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?;

        // NOTE(review): decode failures are silently treated as "no results";
        // kept as-is to preserve the existing best-effort behavior.
        let results = <Option<Vec<BenchmarkResults>> as Decode>::decode(&mut &result[..])
            .unwrap_or(None);

        match results {
            // Guard against `Some(vec![])`: the previous code indexed
            // `results[0]` unconditionally and would panic on an empty vector.
            Some(results) if !results.is_empty() => {
                // Print benchmark metadata
                println!(
                    "Pallet: {:?}, Extrinsic: {:?}, Steps: {:?}, Repeat: {:?}",
                    self.pallet,
                    self.extrinsic,
                    self.steps,
                    self.repeat,
                );
                // Print the table header: one column per component, then time.
                // All samples share the same component set, so the first
                // result's parameter names label every row.
                results[0].0.iter().for_each(|param| print!("{:?},", param.0));
                println!("time");
                // Print one CSV row per sample: component values, then time.
                results.iter().for_each(|result| {
                    result.0.iter().for_each(|param| print!("{:?},", param.1));
                    println!("{:?}", result.1);
                });
                eprintln!("Done.");
            }
            _ => eprintln!("No Results."),
        }

        Ok(())
    }
}