mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 19:17:58 +00:00
* Starting * closer * Compiles! * comments * Create separate mock * Remove changes to test env * Fix step calculation * Add host function * Add runtime api * compiles * Update to use offchain timestamp * Gives a result * added some CLI wip * make generic * Update instance * Remove CLI stuff * Remove last cli stuff * undo more changes * Update benchmarks * Update Cargo.lock * remove test * Move loop out of runtime * Benchmarking externalities * Benchmarking state * Implemented commit * Make CLI work, move loop back into runtime * Wipe resets to genesis * Speedup benchmarks * Use enum to select extrinsic within pallet * CLI controls which module and extrinsic to call * Select a pallet with cli * Add steps and repeats to cli * Output as CSV format * Introduce benchmark pallet * Append bench * Use Results * fix merge * Clear Identity benchmark * Bench request judgment and cancel request * Add final benchmarks * Fix CSV output * Start cleaning up for PR * Bump numbers in `wasmtime` integration tests. * More docs * Add rocksdb feature to bench * Fix formatting issues * Add test feature to bench * Add test feature to bench * Add rocksdb feature flag * Update bench.rs Co-authored-by: Arkadiy Paronyan <arkady.paronyan@gmail.com> Co-authored-by: Gavin Wood <github@gavwood.com>
This commit is contained in:
@@ -685,6 +685,18 @@ pub fn print(print: impl traits::Printable) {
|
||||
print.print();
|
||||
}
|
||||
|
||||
/// An alphabet of possible parameters to use for benchmarking.
|
||||
#[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum BenchmarkParameter {
|
||||
A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z,
|
||||
}
|
||||
|
||||
/// Results from running benchmarks on a FRAME pallet.
///
/// Contains duration of the function call in nanoseconds along with the benchmark parameters
/// used for that benchmark result.
///
/// Layout: `(components, elapsed_ns)` — the first element is the concrete
/// `(parameter, value)` assignment used for the run, the second the measured
/// duration in nanoseconds.
pub type BenchmarkResults = (Vec<(BenchmarkParameter, u32)>, u128);
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -24,6 +24,7 @@ use std::fmt::Display;
|
||||
#[cfg(feature = "std")]
|
||||
use serde::{Serialize, Deserialize, de::DeserializeOwned};
|
||||
use sp_core::{self, Hasher, Blake2Hasher, TypeId, RuntimeDebug};
|
||||
use crate::BenchmarkParameter;
|
||||
use crate::codec::{Codec, Encode, Decode};
|
||||
use crate::transaction_validity::{
|
||||
ValidTransaction, TransactionValidity, TransactionValidityError, UnknownTransaction,
|
||||
@@ -1328,6 +1329,26 @@ pub trait BlockIdTo<Block: self::Block> {
|
||||
) -> Result<Option<NumberFor<Block>>, Self::Error>;
|
||||
}
|
||||
|
||||
/// The pallet benchmarking trait.
///
/// `T` is the per-run result type produced by the benchmark (see
/// `BenchmarkResults` elsewhere in this crate for the expected shape —
/// presumed, TODO confirm against implementors).
pub trait Benchmarking<T> {
    /// Run the benchmarks for this pallet.
    ///
    /// Parameters
    /// - `extrinsic`: The name of extrinsic function you want to benchmark encoded as bytes.
    /// - `steps`: The number of sample points you want to take across the range of parameters.
    /// - `repeat`: The number of times you want to repeat a benchmark.
    ///
    /// Returns the collected per-run results on success, or a static error
    /// string describing why the benchmark could not be executed.
    fn run_benchmark(extrinsic: Vec<u8>, steps: u32, repeat: u32) -> Result<Vec<T>, &'static str>;
}
|
||||
|
||||
/// The required setup for creating a benchmark.
///
/// Implemented once per benchmarkable extrinsic: `components` declares the
/// parameter space to explore, and `instance` prepares one concrete run of
/// the benchmark from a chosen point in that space.
pub trait BenchmarkingSetup<T, Call, RawOrigin> {
    /// Return the components and their ranges which should be tested in this benchmark.
    ///
    /// Each tuple pairs a parameter with two `u32` bounds — presumably the
    /// low/high ends of the range sampled by the runner; TODO confirm range
    /// inclusivity against the benchmark driver.
    fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>;

    /// Set up the storage, and prepare a call and caller to test in a single run of the benchmark.
    ///
    /// `components` holds the concrete value selected for each parameter in
    /// this run. On success returns the call to dispatch together with the
    /// origin to dispatch it from; on failure returns a static error string.
    fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result<(Call, RawOrigin), &'static str>;
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
Reference in New Issue
Block a user