feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
@@ -0,0 +1,121 @@
[package]
name = "frame-benchmarking-cli"
version = "32.0.0"
authors.workspace = true
edition.workspace = true
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
description = "CLI for benchmarking FRAME"
readme = "README.md"
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
Inflector = { workspace = true }
array-bytes = { workspace = true, default-features = true }
chrono = { workspace = true }
clap = { features = ["derive"], workspace = true }
codec = { workspace = true, default-features = true }
comfy-table = { workspace = true }
cumulus-client-teyrchain-inherent = { workspace = true, default-features = true }
cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
env_filter = { workspace = true }
frame-benchmarking = { workspace = true, default-features = true }
frame-storage-access-test-runtime = { workspace = true, default-features = true }
frame-support = { workspace = true, default-features = true }
frame-system = { workspace = true, default-features = true }
gethostname = { workspace = true }
handlebars = { workspace = true }
itertools = { workspace = true }
linked-hash-map = { workspace = true }
log = { workspace = true, default-features = true }
pezkuwi-primitives = { workspace = true, default-features = true }
pezkuwi-teyrchain-primitives = { workspace = true, default-features = true }
rand = { features = ["small_rng"], workspace = true, default-features = true }
rand_pcg = { workspace = true }
sc-block-builder = { workspace = true, default-features = true }
sc-chain-spec = { workspace = true }
sc-cli = { workspace = true, default-features = false }
sc-client-api = { workspace = true, default-features = true }
sc-client-db = { workspace = true, default-features = false }
sc-executor = { workspace = true, default-features = true }
sc-executor-common = { workspace = true }
sc-executor-wasmtime = { workspace = true }
sc-runtime-utilities = { workspace = true, default-features = true }
sc-service = { workspace = true, default-features = false }
sc-sysinfo = { workspace = true, default-features = true }
serde = { workspace = true, default-features = true }
serde_json = { workspace = true, default-features = true }
sp-api = { workspace = true, default-features = true }
sp-block-builder = { workspace = true, default-features = true }
sp-blockchain = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-database = { workspace = true, default-features = true }
sp-externalities = { workspace = true, default-features = true }
sp-genesis-builder = { workspace = true, default-features = true }
sp-inherents = { workspace = true, default-features = true }
sp-io = { workspace = true, default-features = true }
sp-keystore = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-runtime-interface = { workspace = true, default-features = true }
sp-state-machine = { workspace = true, default-features = true }
sp-storage = { workspace = true, default-features = true }
sp-timestamp = { workspace = true, default-features = true }
sp-transaction-pool = { workspace = true, default-features = true }
sp-trie = { workspace = true, default-features = true }
sp-version = { workspace = true, default-features = true }
sp-wasm-interface = { workspace = true, default-features = true }
subxt = { workspace = true, features = ["native"] }
subxt-signer = { workspace = true, features = ["unstable-eth"] }
thiserror = { workspace = true }
thousands = { workspace = true }
[dev-dependencies]
cumulus-test-runtime = { workspace = true, default-features = true }
substrate-test-runtime = { workspace = true, default-features = true }
zagros-runtime = { workspace = true, default-features = true }
[features]
default = []
runtime-benchmarks = [
"cumulus-client-teyrchain-inherent/runtime-benchmarks",
"cumulus-primitives-proof-size-hostfunction/runtime-benchmarks",
"cumulus-test-runtime/runtime-benchmarks",
"frame-benchmarking/runtime-benchmarks",
"frame-storage-access-test-runtime/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"pezkuwi-primitives/runtime-benchmarks",
"pezkuwi-teyrchain-primitives/runtime-benchmarks",
"sc-block-builder/runtime-benchmarks",
"sc-chain-spec/runtime-benchmarks",
"sc-cli/runtime-benchmarks",
"sc-client-api/runtime-benchmarks",
"sc-client-db/runtime-benchmarks",
"sc-executor-wasmtime/runtime-benchmarks",
"sc-executor/runtime-benchmarks",
"sc-runtime-utilities/runtime-benchmarks",
"sc-service/runtime-benchmarks",
"sc-sysinfo/runtime-benchmarks",
"sp-api/runtime-benchmarks",
"sp-block-builder/runtime-benchmarks",
"sp-blockchain/runtime-benchmarks",
"sp-genesis-builder/runtime-benchmarks",
"sp-inherents/runtime-benchmarks",
"sp-io/runtime-benchmarks",
"sp-runtime-interface/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-state-machine/runtime-benchmarks",
"sp-timestamp/runtime-benchmarks",
"sp-transaction-pool/runtime-benchmarks",
"sp-trie/runtime-benchmarks",
"sp-version/runtime-benchmarks",
"substrate-test-runtime/runtime-benchmarks",
"zagros-runtime/runtime-benchmarks",
]
rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"]
@@ -0,0 +1,98 @@
# The FRAME Benchmarking CLI
This crate contains commands to benchmark various aspects of Substrate and the hardware.
The goal is to have a comprehensive suite of benchmarks that cover all aspects of Substrate and the hardware that it's
running on.
There exist fundamentally two ways to use this crate. A node-integrated CLI version, and a freestanding CLI. If you are
only interested in pallet benchmarking, then skip ahead to the [Freestanding CLI](#freestanding-cli).
# Node Integrated CLI
Mostly all Substrate nodes will expose some commands for benchmarking. You can refer to the `staging-node-cli` crate as
an example on how to integrate those. Note that for solely benchmarking pallets, the freestanding CLI is more suitable.
## Usage
Here we invoke the root command on the `staging-node-cli`. Most Substrate nodes should have a similar output, depending
on their integration of these commands.
```sh
$ cargo run -p staging-node-cli --profile=production --features=runtime-benchmarks -- benchmark
Sub-commands concerned with benchmarking.
USAGE:
substrate benchmark <SUBCOMMAND>
OPTIONS:
-h, --help Print help information
-V, --version Print version information
SUBCOMMANDS:
block Benchmark the execution time of historic blocks
machine Command to benchmark the hardware.
overhead Benchmark the execution overhead per-block and per-extrinsic
pallet Benchmark the extrinsic weight of FRAME Pallets
storage Benchmark the storage speed of a chain snapshot
```
All examples use the `production` profile for correctness which makes the compilation *very* slow; for testing you can
use `--release`.
For the final results the `production` profile and reference hardware should be used, otherwise the results are not
comparable.
# Freestanding CLI
The freestanding CLI is a standalone tool that does not rely on any node integration. It can be used to benchmark pallets of
any FRAME runtime that does not utilize 3rd party host functions.
It currently only supports pallet benchmarking, since the other commands still rely on a node.
## Installation
Installing from local source repository:
```sh
cargo install --locked --path substrate/utils/frame/omni-bencher --profile=production
```
## Usage
The exposed pallet sub-command is identical to the node-integrated CLI's. The only difference is that it needs to be prefixed
with a `v1` to ensure drop-in compatibility.
First we need to ensure that there is a runtime available. As example we will build the zagros runtime:
```sh
cargo build -p zagros-runtime --profile production --features runtime-benchmarks
```
Now the benchmarking can be started with:
```sh
frame-omni-bencher v1 \
benchmark pallet \
  --runtime target/production/wbuild/zagros-runtime/zagros-runtime.compact.compressed.wasm \
--pallet "pallet_balances" --extrinsic ""
```
For the exact arguments of the `pallet` command, please refer to the [pallet] sub-module.
# Commands
The sub-commands of both CLIs have the same semantics and are documented in their respective sub-modules:
- [block] Compare the weight of a historic block to its actual resource usage
- [machine] Gauges the speed of the hardware
- [overhead] Creates weight files for the *Block*- and *Extrinsic*-base weights
- [pallet] Creates weight files for a Pallet
- [storage] Creates weight files for *Read* and *Write* storage operations
License: Apache-2.0
<!-- LINKS -->
[pallet]: ../../../frame/benchmarking/README.md
[machine]: src/machine/README.md
[storage]: src/storage/README.md
[overhead]: src/overhead/README.md
[block]: src/block/README.md
@@ -0,0 +1,35 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::env;
/// Exposes build environment variables to the rust code.
///
/// - The build profile as `build_profile`
/// - The optimization level as `build_opt_level`
pub fn main() {
if let Ok(opt_level) = env::var("OPT_LEVEL") {
println!("cargo:rustc-cfg=build_opt_level={:?}", opt_level);
} else {
println!("cargo:rustc-cfg=build_opt_level={:?}", "unknown");
}
if let Ok(profile) = env::var("PROFILE") {
println!("cargo:rustc-cfg=build_profile={:?}", profile);
} else {
println!("cargo:rustc-cfg=build_profile={:?}", "unknown");
}
}
@@ -0,0 +1,113 @@
# The `benchmark block` command
The whole benchmarking process in Substrate aims to predict the resource usage of an unexecuted block. This command
measures how accurate this prediction was by executing a block and comparing the predicted weight to its actual resource
usage. It can be used to measure the accuracy of the pallet benchmarking.
In the following it will be explained once for PezkuwiChain and once for Substrate.
## PezkuwiChain # 1
<sup>(Also works for Kusama, zagros and pezkuwichain)</sup>
Suppose you either have a synced PezkuwiChain node or downloaded a snapshot from [Polkachu]. This example uses a pruned
ParityDB snapshot from the 2022-4-19 with the last block being 9939462. For pruned snapshots you need to know the number
of the last block (to be improved [here]). Pruned snapshots normally store the last 256 blocks, archive nodes can use
any block range.
In this example we will benchmark just the last 10 blocks:
```sh
cargo run --profile=production -- benchmark block --from 9939453 --to 9939462 --db paritydb
```
Output:
```pre
Block 9939453 with 2 tx used 4.57% of its weight ( 26,458,801 of 579,047,053 ns)
Block 9939454 with 3 tx used 4.80% of its weight ( 28,335,826 of 590,414,831 ns)
Block 9939455 with 2 tx used 4.76% of its weight ( 27,889,567 of 586,484,595 ns)
Block 9939456 with 2 tx used 4.65% of its weight ( 27,101,306 of 582,789,723 ns)
Block 9939457 with 2 tx used 4.62% of its weight ( 26,908,882 of 582,789,723 ns)
Block 9939458 with 2 tx used 4.78% of its weight ( 28,211,440 of 590,179,467 ns)
Block 9939459 with 4 tx used 4.78% of its weight ( 27,866,077 of 583,260,451 ns)
Block 9939460 with 3 tx used 4.72% of its weight ( 27,845,836 of 590,462,629 ns)
Block 9939461 with 2 tx used 4.58% of its weight ( 26,685,119 of 582,789,723 ns)
Block 9939462 with 2 tx used 4.60% of its weight ( 26,840,938 of 583,697,101 ns)
```
### Output Interpretation
<sup>(Only results from reference hardware are relevant)</sup>
Each block is executed multiple times and the results are averaged. The percent number is the interesting part and
indicates how much weight was used as compared to how much was predicted. The closer to 100% this is without exceeding
100%, the better. If it exceeds 100%, the block is marked with "**OVER WEIGHT!**" to make such blocks easier to spot. This is not good
since then the benchmarking under-estimated the weight. This would mean that an honest validator would possibly not be
able to keep up with importing blocks since users did not pay for enough weight. If that happens the validator could lag
behind the chain and get slashed for missing deadlines. It is therefore important to investigate any overweight blocks.
In this example you can see an unexpected result; only < 5% of the weight was used! The measured blocks can be executed
much faster than predicted. This means that the benchmarking process massively over-estimated the execution time. Since
they are off by so much, it is an issue [`pezkuwi#5192`].
The ideal range for these results would be 85-100%.
## PezkuwiChain # 2
Let's take a more interesting example where the blocks use more of their predicted weight. Every day when validators pay
out rewards, the blocks are nearly full. Using an archive node here is the easiest.
The PezkuwiChain blocks TODO-TODO for example contain large batch transactions for staking payout.
```sh
cargo run --profile=production -- benchmark block --from TODO --to TODO --db paritydb
```
```pre
TODO
```
## Substrate
It is also possible to try the procedure in Substrate, although it's a bit boring.
First you need to create some blocks with either a local or dev chain. This example will use the standard development
spec. Pick a non existing directory where the chain data will be stored, eg `/tmp/dev`.
```sh
cargo run --profile=production -- --dev -d /tmp/dev
```
You should see after some seconds that it started to produce blocks:
```pre
✨ Imported #1 (0x801d…9189)
```
You can now kill the node with `Ctrl+C`. Then measure how long it takes to execute these blocks:
```sh
cargo run --profile=production -- benchmark block --from 1 --to 1 --dev -d /tmp/dev --pruning archive
```
This will benchmark the first block. If you killed the node at a later point, you can measure multiple blocks.
```pre
Block 1 with 1 tx used 72.04% of its weight ( 4,945,664 of 6,864,702 ns)
```
In this example the block used ~72% of its weight. The benchmarking therefore over-estimated the effort to execute the
block. Since this block is empty, it's not very interesting.
## Arguments
- `--from` Number of the first block to measure (inclusive).
- `--to` Number of the last block to measure (inclusive).
- `--repeat` How often each block should be measured.
- [`--db`]
- [`--pruning`]
License: Apache-2.0
<!-- LINKS -->
[Polkachu]: https://polkachu.com/snapshots
[here]: https://github.com/paritytech/substrate/issues/11141
[pezkuwi#5192]: https://github.com/paritytech/polkadot/issues/5192
[`--db`]: ../shared/README.md#arguments
[`--pruning`]: ../shared/README.md#arguments
@@ -0,0 +1,184 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the core benchmarking logic.
use codec::DecodeAll;
use frame_support::weights::constants::WEIGHT_REF_TIME_PER_NANOS;
use frame_system::ConsumedWeight;
use sc_block_builder::BlockBuilderApi;
use sc_cli::{Error, Result};
use sc_client_api::{
Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider,
};
use sp_api::{ApiExt, Core, ProvideRuntimeApi};
use sp_blockchain::Error::RuntimeApiError;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
DigestItem, OpaqueExtrinsic,
};
use sp_storage::StorageKey;
use clap::Args;
use log::{info, warn};
use serde::Serialize;
use std::{fmt::Debug, marker::PhantomData, sync::Arc, time::Instant};
use thousands::Separable;
use crate::shared::{StatSelect, Stats};
/// Log target for printing block weight info.
const LOG_TARGET: &'static str = "benchmark::block::weight";
/// Parameters for modifying the benchmark behaviour.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct BenchmarkParams {
/// Number of the first block to consider.
#[arg(long)]
pub from: u32,
/// Last block number to consider.
#[arg(long)]
pub to: u32,
/// Number of times that the benchmark should be repeated for each block.
#[arg(long, default_value_t = 10)]
pub repeat: u32,
}
/// Convenience closure for the [`Benchmark::run()`] function.
pub struct Benchmark<Block, BA, C> {
client: Arc<C>,
params: BenchmarkParams,
_p: PhantomData<(Block, BA, C)>,
}
/// Helper for nano seconds.
type NanoSeconds = u64;
impl<Block, BA, C> Benchmark<Block, BA, C>
where
Block: BlockT<Extrinsic = OpaqueExtrinsic>,
BA: ClientBackend<Block>,
C: ProvideRuntimeApi<Block>
+ StorageProvider<Block, BA>
+ UsageProvider<Block>
+ BlockBackend<Block>
+ HeaderBackend<Block>,
C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
/// Returns a new [`Self`] from the arguments.
pub fn new(client: Arc<C>, params: BenchmarkParams) -> Self {
Self { client, params, _p: PhantomData }
}
/// Benchmark the execution speed of historic blocks and log the results.
pub fn run(&self) -> Result<()> {
if self.params.from == 0 {
return Err("Cannot benchmark the genesis block".into());
}
for i in self.params.from..=self.params.to {
let block_num = BlockId::Number(i.into());
let hash = self.client.expect_block_hash_from_id(&block_num)?;
let consumed = self.consumed_weight(hash)?;
let block = self.client.block(hash)?.ok_or(format!("Block {} not found", block_num))?;
let block = self.unsealed(block.block);
let took = self.measure_block(&block, *block.header().parent_hash())?;
self.log_weight(i, block.extrinsics().len(), consumed, took);
}
Ok(())
}
/// Return the average *execution* aka. *import* time of the block.
fn measure_block(&self, block: &Block, parent_hash: Block::Hash) -> Result<NanoSeconds> {
let mut record = Vec::<NanoSeconds>::default();
// Interesting part here:
// Execute the block multiple times and collect stats about its execution time.
for _ in 0..self.params.repeat {
let block = block.clone();
let runtime_api = self.client.runtime_api();
let start = Instant::now();
runtime_api
.execute_block(parent_hash, block.into())
.map_err(|e| Error::Client(RuntimeApiError(e)))?;
record.push(start.elapsed().as_nanos() as NanoSeconds);
}
let took = Stats::new(&record)?.select(StatSelect::Average);
Ok(took)
}
/// Returns the total nanoseconds of a [`frame_system::ConsumedWeight`] for a block number.
///
/// This is the post-dispatch corrected weight and is only available
/// after executing the block.
fn consumed_weight(&self, block_hash: Block::Hash) -> Result<NanoSeconds> {
// Hard-coded key for System::BlockWeight. It could also be passed in as argument
// for the benchmark, but I think this should work as well.
let hash = array_bytes::hex2bytes(
"26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96",
)?;
let key = StorageKey(hash);
let mut raw_weight = &self
.client
.storage(block_hash, &key)?
.ok_or(format!("Could not find System::BlockWeight for block: {}", block_hash))?
.0[..];
let weight = ConsumedWeight::decode_all(&mut raw_weight)?;
// Should be divisible, but still use floats in case we ever change that.
Ok((weight.total().ref_time() as f64 / WEIGHT_REF_TIME_PER_NANOS as f64).floor()
as NanoSeconds)
}
/// Prints the weight info of a block to the console.
fn log_weight(&self, num: u32, num_ext: usize, consumed: NanoSeconds, took: NanoSeconds) {
// The ratio of weight that the block used vs what it consumed.
// This should in general not exceed 100% (minus outliers).
let percent = (took as f64 / consumed as f64) * 100.0;
let msg = format!(
"Block {} with {: >5} tx used {: >6.2}% of its weight ({: >14} of {: >14} ns)",
num,
num_ext,
percent,
took.separate_with_commas(),
consumed.separate_with_commas()
);
if took <= consumed {
info!(target: LOG_TARGET, "{}", msg);
} else {
warn!(target: LOG_TARGET, "{} - OVER WEIGHT!", msg);
}
}
/// Removes the consensus seal from the block.
fn unsealed(&self, block: Block) -> Block {
let (mut header, exts) = block.deconstruct();
header.digest_mut().logs.retain(|item| !matches!(item, DigestItem::Seal(_, _)));
Block::new(header, exts)
}
}
@@ -0,0 +1,116 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the [`BlockCmd`] as entry point for the CLI to execute
//! the *block* benchmark.
use sc_block_builder::BlockBuilderApi;
use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams};
use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider};
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_blockchain::HeaderBackend;
use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic};
use clap::Parser;
use std::{fmt::Debug, sync::Arc};
use super::bench::{Benchmark, BenchmarkParams};
/// Benchmark the execution time of historic blocks.
///
/// This can be used to verify that blocks do not use more weight than they consumed
/// in their `WeightInfo`. Example:
///
/// Let's say you are on a Substrate chain and want to verify that the first 3 blocks
/// did not use more weight than declared which would otherwise be an issue.
/// To test this with a dev node, first create one with a temp directory:
///
/// $ substrate --dev -d /tmp/my-dev --wasm-execution compiled
///
/// And wait some time to let it produce 3 blocks. Then benchmark them with:
///
/// $ substrate benchmark-block --from 1 --to 3 --dev -d /tmp/my-dev
/// --wasm-execution compiled --pruning archive
///
/// The output will be similar to this:
///
/// Block 1 with 1 tx used 77.34% of its weight ( 5,308,964 of 6,864,645 ns)
/// Block 2 with 1 tx used 77.99% of its weight ( 5,353,992 of 6,864,645 ns)
/// Block 3 with 1 tx used 75.91% of its weight ( 5,305,938 of 6,989,645 ns)
///
/// The percent number is important and indicates how much weight
/// was used as compared to the consumed weight.
/// This number should be below 100% for reference hardware.
#[derive(Debug, Parser)]
pub struct BlockCmd {
#[allow(missing_docs)]
#[clap(flatten)]
pub shared_params: SharedParams,
#[allow(missing_docs)]
#[clap(flatten)]
pub import_params: ImportParams,
#[allow(missing_docs)]
#[clap(flatten)]
pub params: BenchmarkParams,
/// Enable the Trie cache.
///
/// This should only be used for performance analysis and not for final results.
#[arg(long)]
pub enable_trie_cache: bool,
}
impl BlockCmd {
/// Benchmark the execution time of historic blocks and compare it to their consumed weight.
///
/// Output will be printed to console.
pub fn run<Block, BA, C>(&self, client: Arc<C>) -> Result<()>
where
Block: BlockT<Extrinsic = OpaqueExtrinsic>,
BA: ClientBackend<Block>,
C: BlockBackend<Block>
+ ProvideRuntimeApi<Block>
+ StorageProvider<Block, BA>
+ UsageProvider<Block>
+ HeaderBackend<Block>,
C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
// Put everything in the benchmark type to have the generic types handy.
Benchmark::new(client, self.params.clone()).run()
}
}
// Boilerplate
impl CliConfiguration for BlockCmd {
fn shared_params(&self) -> &SharedParams {
&self.shared_params
}
fn import_params(&self) -> Option<&ImportParams> {
Some(&self.import_params)
}
fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
if self.enable_trie_cache {
Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default())
} else {
Ok(None)
}
}
}
@@ -0,0 +1,24 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Crate to benchmark the execution time of historic blocks
//! and compare it to their consumed weight.
mod bench;
mod cmd;
pub use cmd::BlockCmd;
@@ -0,0 +1,234 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the core benchmarking logic.
use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder, BuiltBlock};
use sc_cli::{Error, Result};
use sc_client_api::UsageProvider;
use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi};
use sp_blockchain::{
ApplyExtrinsicFailed::Validity,
Error::{ApplyExtrinsicFailed, RuntimeApiError},
};
use sp_runtime::{
traits::Block as BlockT,
transaction_validity::{InvalidTransaction, TransactionValidityError},
Digest, DigestItem, OpaqueExtrinsic,
};
use super::ExtrinsicBuilder;
use crate::shared::{StatSelect, Stats};
use clap::Args;
use codec::Encode;
use log::info;
use serde::Serialize;
use sp_trie::proof_size_extension::ProofSizeExt;
use std::{marker::PhantomData, sync::Arc, time::Instant};
/// Parameters to configure an *overhead* benchmark.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct BenchmarkParams {
/// Rounds of warmups before measuring.
#[arg(long, default_value_t = 10)]
pub warmup: u32,
/// How many times the benchmark should be repeated.
#[arg(long, default_value_t = 100)]
pub repeat: u32,
/// Maximal number of extrinsics that should be put into a block.
///
/// Only useful for debugging.
#[arg(long)]
pub max_ext_per_block: Option<u32>,
}
/// The results of multiple runs in nano seconds.
pub(crate) type BenchRecord = Vec<u64>;
/// Holds all objects needed to run the *overhead* benchmarks.
pub(crate) struct Benchmark<Block, C> {
client: Arc<C>,
params: BenchmarkParams,
inherent_data: sp_inherents::InherentData,
digest_items: Vec<DigestItem>,
record_proof: bool,
_p: PhantomData<Block>,
}
impl<Block, C> Benchmark<Block, C>
where
Block: BlockT<Extrinsic = OpaqueExtrinsic>,
C: ProvideRuntimeApi<Block>
+ CallApiAt<Block>
+ UsageProvider<Block>
+ sp_blockchain::HeaderBackend<Block>,
C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
{
/// Create a new [`Self`] from the arguments.
pub fn new(
client: Arc<C>,
params: BenchmarkParams,
inherent_data: sp_inherents::InherentData,
digest_items: Vec<DigestItem>,
record_proof: bool,
) -> Self {
Self { client, params, inherent_data, digest_items, record_proof, _p: PhantomData }
}
/// Benchmark a block with only inherents.
///
/// Returns the Ref time stats and the proof size.
pub fn bench_block(&self) -> Result<(Stats, u64)> {
let (block, _, proof_size) = self.build_block(None)?;
let record = self.measure_block(&block)?;
Ok((Stats::new(&record)?, proof_size))
}
/// Benchmark the time of an extrinsic in a full block.
///
/// First benchmarks an empty block, analogous to `bench_block` and use it as baseline.
/// Then benchmarks a full block built with the given `ext_builder` and subtracts the baseline
/// from the result.
/// This is necessary to account for the time the inherents use. Returns ref time stats and the
/// proof size.
pub fn bench_extrinsic(&self, ext_builder: &dyn ExtrinsicBuilder) -> Result<(Stats, u64)> {
let (block, _, base_proof_size) = self.build_block(None)?;
let base = self.measure_block(&block)?;
let base_time = Stats::new(&base)?.select(StatSelect::Average);
let (block, num_ext, proof_size) = self.build_block(Some(ext_builder))?;
let num_ext = num_ext.ok_or_else(|| Error::Input("Block was empty".into()))?;
let mut records = self.measure_block(&block)?;
for r in &mut records {
// Subtract the base time.
*r = r.saturating_sub(base_time);
// Divide by the number of extrinsics in the block.
*r = ((*r as f64) / (num_ext as f64)).ceil() as u64;
}
Ok((Stats::new(&records)?, proof_size.saturating_sub(base_proof_size)))
}
/// Builds a block with some optional extrinsics.
///
/// Returns the block and the number of extrinsics in the block
/// that are not inherents together with the proof size.
/// Returns a block with only inherents if `ext_builder` is `None`.
fn build_block(
&self,
ext_builder: Option<&dyn ExtrinsicBuilder>,
) -> Result<(Block, Option<u64>, u64)> {
let chain = self.client.usage_info().chain;
let mut builder = BlockBuilderBuilder::new(&*self.client)
.on_parent_block(chain.best_hash)
.with_parent_block_number(chain.best_number)
.with_inherent_digests(Digest { logs: self.digest_items.clone() })
.with_proof_recording(self.record_proof)
.build()?;
// Create and insert the inherents.
let inherents = builder.create_inherents(self.inherent_data.clone())?;
for inherent in inherents {
builder.push(inherent)?;
}
let num_ext = match ext_builder {
Some(ext_builder) => {
// Put as many extrinsics into the block as possible and count them.
info!("Building block, this takes some time...");
let mut num_ext = 0;
for nonce in 0..self.max_ext_per_block() {
let ext = ext_builder.build(nonce)?;
match builder.push(ext.clone()) {
Ok(()) => {},
Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid(
InvalidTransaction::ExhaustsResources,
)))) => break, // Block is full
Err(e) => return Err(Error::Client(e)),
}
num_ext += 1;
}
if num_ext == 0 {
return Err("A Block must hold at least one extrinsic".into());
}
info!("Extrinsics per block: {}", num_ext);
Some(num_ext)
},
None => None,
};
let BuiltBlock { block, proof, .. } = builder.build()?;
Ok((
block,
num_ext,
proof
.map(|p| p.encoded_size())
.unwrap_or(0)
.try_into()
.map_err(|_| "Proof size is too large".to_string())?,
))
}
	/// Measures the time that it takes to execute a block or an extrinsic.
	fn measure_block(&self, block: &Block) -> Result<BenchRecord> {
		let mut record = BenchRecord::new();
		let genesis = self.client.info().genesis_hash;
		// Executes the block once (with the runtime API instantiated at the genesis
		// block) and returns the elapsed wall-clock time in nanoseconds.
		let measure_block = || -> Result<u128> {
			let block = block.clone();
			let mut runtime_api = self.client.runtime_api();
			if self.record_proof {
				runtime_api.record_proof();
				let recorder = runtime_api
					.proof_recorder()
					.expect("Proof recording is enabled in the line above; qed.");
				// Expose the proof size to the runtime via a host extension.
				runtime_api.register_extension(ProofSizeExt::new(recorder));
			}
			let start = Instant::now();
			runtime_api
				.execute_block(genesis, block.into())
				.map_err(|e| Error::Client(RuntimeApiError(e)))?;
			Ok(start.elapsed().as_nanos())
		};
		// Warmup rounds are executed but their timings are discarded.
		info!("Running {} warmups...", self.params.warmup);
		for _ in 0..self.params.warmup {
			measure_block()?;
		}
		info!("Executing block {} times", self.params.repeat);
		// Interesting part here:
		// Execute a block multiple times and record each execution time.
		for _ in 0..self.params.repeat {
			let elapsed = measure_block()?;
			record.push(elapsed as u64);
		}
		Ok(record)
	}
fn max_ext_per_block(&self) -> u32 {
self.params.max_ext_per_block.unwrap_or(u32::MAX)
}
}
@@ -0,0 +1,152 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_block_builder::BlockBuilderApi;
use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams};
use sc_client_api::UsageProvider;
use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi};
use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic};
use clap::{Args, Parser};
use log::info;
use serde::Serialize;
use std::{fmt::Debug, sync::Arc};
use super::{
bench::{Benchmark, BenchmarkParams},
extrinsic_factory::ExtrinsicFactory,
};
/// Benchmark the execution time of different extrinsics.
///
/// This is calculated by filling a block with a specific extrinsic and executing the block.
/// The result time is then divided by the number of extrinsics in that block.
///
/// NOTE: The BlockExecutionWeight is ignored in this case since it
/// is very small compared to the total block execution time.
#[derive(Debug, Parser)]
pub struct ExtrinsicCmd {
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: SharedParams,
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub import_params: ImportParams,
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub params: ExtrinsicParams,
}
/// The params for the [`ExtrinsicCmd`].
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct ExtrinsicParams {
	// Shared benchmark parameters (repeats, warmups, ...), see `BenchmarkParams`.
	#[clap(flatten)]
	pub bench: BenchmarkParams,
	/// List all available pallets and extrinsics.
	///
	/// The format is CSV with header `pallet, extrinsic`.
	#[arg(long)]
	pub list: bool,
	/// Pallet name of the extrinsic to benchmark.
	#[arg(long, value_name = "PALLET", required_unless_present = "list")]
	pub pallet: Option<String>,
	/// Extrinsic to benchmark.
	#[arg(long, value_name = "EXTRINSIC", required_unless_present = "list")]
	pub extrinsic: Option<String>,
	/// Enable the Trie cache.
	///
	/// This should only be used for performance analysis and not for final results.
	#[arg(long)]
	pub enable_trie_cache: bool,
}
impl ExtrinsicCmd {
	/// Benchmark the execution time of a specific type of extrinsic.
	///
	/// The output will be printed to console.
	pub fn run<Block, C>(
		&self,
		client: Arc<C>,
		inherent_data: sp_inherents::InherentData,
		digest_items: Vec<DigestItem>,
		ext_factory: &ExtrinsicFactory,
	) -> Result<()>
	where
		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
		C: ProvideRuntimeApi<Block>
			+ CallApiAt<Block>
			+ UsageProvider<Block>
			+ sp_blockchain::HeaderBackend<Block>,
		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	{
		// `--list` only prints the available builders and exits.
		if self.params.list {
			let names: Vec<String> = ext_factory.0.iter().map(|builder| builder.name()).collect();
			info!(
				"Listing available extrinsics ({}):\npallet, extrinsic\n{}",
				names.len(),
				names.join("\n")
			);
			return Ok(());
		}
		// Resolve the builder for the requested pallet/extrinsic pair.
		let pallet = self.params.pallet.clone().unwrap_or_default();
		let extrinsic = self.params.extrinsic.clone().unwrap_or_default();
		let Some(builder) = ext_factory.try_get(&pallet, &extrinsic) else {
			return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into());
		};
		// Run the benchmark and report the raw statistics.
		let benchmark =
			Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items, false);
		let stats = benchmark.bench_extrinsic(builder)?;
		info!(
			"Executing a {}::{} extrinsic takes[ns]:\n{:?}",
			builder.pallet(),
			builder.extrinsic(),
			stats
		);
		Ok(())
	}
}
// Boilerplate
impl CliConfiguration for ExtrinsicCmd {
fn shared_params(&self) -> &SharedParams {
&self.shared_params
}
fn import_params(&self) -> Option<&ImportParams> {
Some(&self.import_params)
}
fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
if self.params.enable_trie_cache {
Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default())
} else {
Ok(None)
}
}
}
@@ -0,0 +1,70 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Provides the [`ExtrinsicFactory`] and the [`ExtrinsicBuilder`] types.
//! Is used by the *overhead* and *extrinsic* benchmarks to build extrinsics.
use sp_runtime::OpaqueExtrinsic;
/// Helper to manage [`ExtrinsicBuilder`] instances.
///
/// Holds one boxed builder per supported pallet/extrinsic combination.
#[derive(Default)]
pub struct ExtrinsicFactory(pub Vec<Box<dyn ExtrinsicBuilder>>);
impl ExtrinsicFactory {
	/// Returns a builder for a pallet and extrinsic name.
	///
	/// Is case in-sensitive.
	pub fn try_get(&self, pallet: &str, extrinsic: &str) -> Option<&dyn ExtrinsicBuilder> {
		// Normalize the lookup keys; builders report lowercase names already.
		let wanted_pallet = pallet.to_lowercase();
		let wanted_extrinsic = extrinsic.to_lowercase();
		self.0.iter().find_map(|builder| {
			(builder.pallet() == wanted_pallet && builder.extrinsic() == wanted_extrinsic)
				.then(|| builder.as_ref())
		})
	}
}
/// Used by the benchmark to build signed extrinsics.
///
/// The built extrinsics only need to be valid in the first block
/// whose parent block is the genesis block.
/// This assumption simplifies the generation of the extrinsics.
/// The signer should be one of the pre-funded dev accounts.
pub trait ExtrinsicBuilder {
	/// Name of the pallet this builder is for.
	///
	/// Should be all lowercase.
	fn pallet(&self) -> &str;
	/// Name of the extrinsic this builder is for.
	///
	/// Should be all lowercase.
	fn extrinsic(&self) -> &str;
	/// Builds an extrinsic.
	///
	/// Will be called multiple times with increasing nonces.
	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str>;
}
impl dyn ExtrinsicBuilder + '_ {
	/// Name of this builder in CSV format: `pallet, extrinsic`.
	pub fn name(&self) -> String {
		// Same output as `format!("{}, {}", ..)`, expressed as a slice join.
		[self.pallet(), self.extrinsic()].join(", ")
	}
}
@@ -0,0 +1,27 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Benchmark the time it takes to execute a specific extrinsic.
//! This is a generalization of the *overhead* benchmark which can only measure `System::Remark`
//! extrinsics.
// Core benchmarking logic: block construction and execution timing.
pub mod bench;
// The `benchmark extrinsic` CLI command definition.
pub mod cmd;
// The `ExtrinsicFactory`/`ExtrinsicBuilder` abstractions for creating extrinsics.
pub mod extrinsic_factory;
pub use cmd::ExtrinsicCmd;
pub use extrinsic_factory::{ExtrinsicBuilder, ExtrinsicFactory};
@@ -0,0 +1,124 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the root [`BenchmarkCmd`] command and exports its sub-commands.
// One module per `benchmark` sub-command.
mod block;
mod extrinsic;
mod machine;
mod overhead;
mod pallet;
mod shared;
mod storage;
// Re-export the public entry points of the sub-commands.
pub use block::BlockCmd;
pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory};
pub use machine::{MachineCmd, SUBSTRATE_REFERENCE_HARDWARE};
pub use overhead::{
	remark_builder::{DynamicRemarkBuilder, SubstrateRemarkBuilder},
	OpaqueBlock, OverheadCmd,
};
pub use pallet::PalletCmd;
pub use sc_service::BasePath;
pub use storage::StorageCmd;
use sc_cli::{CliConfiguration, DatabaseParams, ImportParams, PruningParams, Result, SharedParams};
/// The root `benchmarking` command.
///
/// Has no effect itself besides printing a help menu of the sub-commands.
#[derive(Debug, clap::Subcommand)]
pub enum BenchmarkCmd {
	// Each variant wraps the full CLI args of its sub-command.
	// (Plain `//` comments on purpose: `///` would alter the clap help text.)
	Pallet(PalletCmd),
	Storage(StorageCmd),
	Overhead(OverheadCmd),
	Block(BlockCmd),
	Machine(MachineCmd),
	Extrinsic(ExtrinsicCmd),
}
/// Unwraps a [`BenchmarkCmd`] into its concrete sub-command.
///
/// Binds the inner command value to `$cmd` and evaluates `$code` with it,
/// regardless of which variant `$self` holds.
macro_rules! unwrap_cmd {
	{
		$self:expr,
		$cmd:ident,
		$code:expr
	} => {
		match $self {
			BenchmarkCmd::Pallet($cmd) => $code,
			BenchmarkCmd::Storage($cmd) => $code,
			BenchmarkCmd::Overhead($cmd) => $code,
			BenchmarkCmd::Block($cmd) => $code,
			BenchmarkCmd::Machine($cmd) => $code,
			BenchmarkCmd::Extrinsic($cmd) => $code,
		}
	}
}
/// Forward the [`CliConfiguration`] trait implementation.
///
/// Each time a sub-command exposes a new config option, it must be added here.
impl CliConfiguration for BenchmarkCmd {
	fn shared_params(&self) -> &SharedParams {
		unwrap_cmd!(self, cmd, cmd.shared_params())
	}
	fn import_params(&self) -> Option<&ImportParams> {
		unwrap_cmd!(self, cmd, cmd.import_params())
	}
	fn database_params(&self) -> Option<&DatabaseParams> {
		unwrap_cmd!(self, cmd, cmd.database_params())
	}
	fn base_path(&self) -> Result<Option<BasePath>> {
		// If the base path was not provided, benchmark command shall use temporary path. Otherwise
		// we may end up using shared path, which may be inappropriate for benchmarking.
		match unwrap_cmd!(self, cmd, cmd.base_path()) {
			Ok(None) => BasePath::new_temp_dir().map(Some).map_err(Into::into),
			other => other,
		}
	}
	fn pruning_params(&self) -> Option<&PruningParams> {
		unwrap_cmd!(self, cmd, cmd.pruning_params())
	}
	fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
		unwrap_cmd!(self, cmd, cmd.trie_cache_maximum_size())
	}
	fn chain_id(&self, is_dev: bool) -> Result<String> {
		unwrap_cmd!(self, cmd, cmd.chain_id(is_dev))
	}
}
@@ -0,0 +1,83 @@
# The `benchmark machine` command
Different Substrate chains can have different hardware requirements.
It is therefore important to be able to quickly gauge if a piece of hardware fits a chains' requirements.
The `benchmark machine` command achieves this by measuring key metrics and making them comparable.
Invoking the command looks like this:
```sh
cargo run --profile=production -- benchmark machine --dev
```
## Output
The output on reference hardware:
```pre
+----------+----------------+---------------+--------------+-------------------+
| Category | Function | Score | Minimum | Result |
+----------+----------------+---------------+--------------+-------------------+
| CPU | BLAKE2-256 | 1023.00 MiB/s | 1.00 GiB/s | ✅ Pass ( 99.4 %) |
+----------+----------------+---------------+--------------+-------------------+
| CPU | SR25519-Verify | 665.13 KiB/s | 666.00 KiB/s | ✅ Pass ( 99.9 %) |
+----------+----------------+---------------+--------------+-------------------+
| Memory | Copy | 14.39 GiB/s | 14.32 GiB/s | ✅ Pass (100.4 %) |
+----------+----------------+---------------+--------------+-------------------+
| Disk | Seq Write | 457.00 MiB/s | 450.00 MiB/s | ✅ Pass (101.6 %) |
+----------+----------------+---------------+--------------+-------------------+
| Disk | Rnd Write | 190.00 MiB/s | 200.00 MiB/s | ✅ Pass ( 95.0 %) |
+----------+----------------+---------------+--------------+-------------------+
```
The *score* is the average result of each benchmark. It always adheres to "higher is better".
The *category* indicates which part of the hardware was benchmarked:
- **CPU** Processor intensive task
- **Memory** RAM intensive task
- **Disk** Hard drive intensive task
The *function* is the concrete benchmark that was run:
- **BLAKE2-256** The throughput of the [Blake2-256] cryptographic hashing function with 32 KiB input. The [blake2_256
function] is used in many places in Substrate. The throughput of a hash function strongly depends on the input size,
therefore we settled to use a fixed input size for comparable results.
- **SR25519 Verify** Sr25519 is an optimized version of the [Curve25519] signature scheme. Signature verification is
used by Substrate when verifying extrinsics and blocks.
- **Copy** The throughput of copying memory from one place in the RAM to another.
- **Seq Write** The throughput of writing data to the storage location sequentially. It is important that the same disk
is used that will later-on be used to store the chain data.
- **Rnd Write** The throughput of writing data to the storage location in a random order. This is normally much slower
than the sequential write.
The *score* needs to reach the *minimum* in order to pass the benchmark. This can be reduced with the `--tolerance`
flag.
The *result* indicates whether a specific benchmark was passed by the machine or not. The percent number is the relative
score reached to the *minimum* that is needed. The `--tolerance` flag is taken into account for this decision. For
example a benchmark that passes even with 95% since the *tolerance* was set to 10% would look like this: `✅ Pass ( 95.0
%)`.
## Interpretation
Ideally all results show a `Pass` and the program exits with code 0. Currently some of the benchmarks can fail even on
reference hardware; they are still being improved to make them more deterministic.
Make sure to run nothing else on the machine when benchmarking it.
You can re-run them multiple times to get more reliable results.
## Arguments
- `--tolerance` A percent number to reduce the *minimum* requirement. This should be used to ignore outliers of the
benchmarks. The default value is 10%.
- `--verify-duration` How long the verification benchmark should run.
- `--disk-duration` How long the *read* and *write* benchmarks should run each.
- `--allow-fail` Always exit the program with code 0.
- `--chain` / `--dev` Specify the chain config to use. This will be used to compare the results with the requirements of
the chain (WIP).
- [`--base-path`]
License: Apache-2.0
<!-- LINKS -->
[Blake2-256]: https://www.blake2.net/
[blake2_256 function]: https://docs.rs/sp-crypto-hashing/latest/sp_crypto_hashing/fn.blake2_256.html
[Curve25519]: https://en.wikipedia.org/wiki/Curve25519
[`--base-path`]: ../shared/README.md#arguments
@@ -0,0 +1,85 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains types to define hardware requirements.
use sc_sysinfo::Requirements;
use std::sync::LazyLock;
/// The hardware requirements as measured on reference hardware.
///
/// These values are provided by Parity, however it is possible
/// to use your own requirements if you are running a custom chain.
///
/// Deserialized once on first access from the bundled `reference_hardware.json`.
pub static SUBSTRATE_REFERENCE_HARDWARE: LazyLock<Requirements> = LazyLock::new(|| {
	let raw = include_bytes!("reference_hardware.json").as_slice();
	serde_json::from_slice(raw).expect("Hardcoded data is known good; qed")
});
#[cfg(test)]
mod tests {
	use super::*;
	use sc_sysinfo::{Metric, Requirement, Requirements, Throughput};
	/// `SUBSTRATE_REFERENCE_HARDWARE` can be decoded.
	#[test]
	fn json_static_data() {
		// Round-trip through serde to ensure encode and decode agree.
		let raw = serde_json::to_string(&*SUBSTRATE_REFERENCE_HARDWARE).unwrap();
		let decoded: Requirements = serde_json::from_str(&raw).unwrap();
		assert_eq!(decoded, SUBSTRATE_REFERENCE_HARDWARE.clone());
	}
	/// The hard-coded values are correct.
	#[test]
	fn json_static_data_is_correct() {
		// Must stay in sync with `reference_hardware.json`.
		assert_eq!(
			*SUBSTRATE_REFERENCE_HARDWARE,
			Requirements(vec![
				Requirement {
					metric: Metric::Blake2256,
					minimum: Throughput::from_mibs(1000.00),
					validator_only: false
				},
				Requirement {
					metric: Metric::Blake2256Parallel { num_cores: 8 },
					minimum: Throughput::from_mibs(1000.00),
					validator_only: true,
				},
				Requirement {
					metric: Metric::Sr25519Verify,
					minimum: Throughput::from_kibs(637.619999744),
					validator_only: false
				},
				Requirement {
					metric: Metric::MemCopy,
					minimum: Throughput::from_gibs(11.4925205078125003),
					validator_only: false,
				},
				Requirement {
					metric: Metric::DiskSeqWrite,
					minimum: Throughput::from_mibs(950.0),
					validator_only: false,
				},
				Requirement {
					metric: Metric::DiskRndWrite,
					minimum: Throughput::from_mibs(420.0),
					validator_only: false
				},
			])
		);
	}
}
@@ -0,0 +1,242 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the [`MachineCmd`] as entry point for the node
//! and the core benchmarking logic.
pub mod hardware;
use std::{boxed::Box, fs, path::Path};
use clap::Parser;
use comfy_table::{Row, Table};
use log::{error, info, warn};
use sc_cli::{CliConfiguration, Result, SharedParams};
use sc_service::Configuration;
use sc_sysinfo::{
benchmark_cpu, benchmark_cpu_parallelism, benchmark_disk_random_writes,
benchmark_disk_sequential_writes, benchmark_memory, benchmark_sr25519_verify, ExecutionLimit,
Metric, Requirement, Requirements, Throughput,
};
use crate::shared::check_build_profile;
pub use hardware::SUBSTRATE_REFERENCE_HARDWARE;
/// Command to benchmark the hardware.
///
/// Runs multiple benchmarks and prints their output to console.
/// Can be used to gauge if the hardware is fast enough to keep up with a chain's requirements.
/// This command must be integrated by the client since the client can set compiler flags
/// which influence the results.
///
/// You can use the `--base-path` flag to set a location for the disk benchmarks.
#[derive(Debug, Parser)]
pub struct MachineCmd {
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: SharedParams,
	/// Do not return an error if any check fails.
	///
	/// Should only be used for debugging.
	#[arg(long)]
	pub allow_fail: bool,
	/// Set a fault tolerance for passing a requirement.
	///
	/// 10% means that the test would pass even when only 90% score was achieved.
	/// Can be used to mitigate outliers of the benchmarks.
	#[arg(long, default_value_t = 10.0, value_name = "PERCENT")]
	pub tolerance: f64,
	/// Time limit for the verification benchmark.
	#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
	pub verify_duration: f32,
	/// Time limit for the hash function benchmark.
	#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
	pub hash_duration: f32,
	/// Time limit for the memory benchmark.
	#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
	pub memory_duration: f32,
	/// Time limit for each disk benchmark.
	#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
	pub disk_duration: f32,
}
/// Helper for the result of a concrete benchmark.
struct BenchResult {
	/// Did the hardware pass the benchmark?
	passed: bool,
	/// The absolute score that was achieved.
	score: Throughput,
	/// The score relative to the minimal required score.
	///
	/// A value above 1.0 means the minimum was exceeded; `run_benchmark`
	/// sanity-checks it to lie within (0.01, 100).
	rel_score: f64,
}
/// Errors that can be returned by this command.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
	#[error("One of the benchmarks had a score that was lower than its requirement")]
	UnmetRequirement,
	#[error("The build profile is unfit for benchmarking: {0}")]
	BadBuildProfile(String),
	#[error("Benchmark results are off by at least factor 100")]
	BadResults,
}
impl MachineCmd {
	/// Execute the benchmark and print the results.
	///
	/// Fails when a benchmark does not meet its requirement,
	/// unless `--allow-fail` was given.
	pub fn run(&self, cfg: &Configuration, requirements: Requirements) -> Result<()> {
		self.validate_args()?;
		// Ensure that the dir exists since the node is not started to take care of it.
		let dir = cfg.database.path().ok_or("No DB directory provided")?;
		fs::create_dir_all(dir)?;
		info!("Running machine benchmarks...");
		let mut results = Vec::new();
		// Run every benchmark of the requirements list, in order.
		for requirement in &requirements.0 {
			let result = self.run_benchmark(requirement, &dir)?;
			results.push(result);
		}
		self.print_summary(requirements, results)
	}
	/// Benchmarks a specific metric of the hardware and judges the resulting score.
	fn run_benchmark(&self, requirement: &Requirement, dir: &Path) -> Result<BenchResult> {
		// Dispatch the concrete function from `sc-sysinfo`.
		let score = self.measure(&requirement.metric, dir)?;
		let rel_score = score.as_bytes() / requirement.minimum.as_bytes();
		// Sanity check if the result is off by factor >100x.
		if rel_score >= 100.0 || rel_score <= 0.01 {
			self.check_failed(Error::BadResults)?;
		}
		// The tolerance lowers the bar: with 10% tolerance a 90% score still passes.
		let passed = rel_score >= (1.0 - (self.tolerance / 100.0));
		Ok(BenchResult { passed, score, rel_score })
	}
	/// Measures a metric of the hardware.
	fn measure(&self, metric: &Metric, dir: &Path) -> Result<Throughput> {
		// Each benchmark runs at most for its configured time limit.
		let verify_limit = ExecutionLimit::from_secs_f32(self.verify_duration);
		let disk_limit = ExecutionLimit::from_secs_f32(self.disk_duration);
		let hash_limit = ExecutionLimit::from_secs_f32(self.hash_duration);
		let memory_limit = ExecutionLimit::from_secs_f32(self.memory_duration);
		let score = match metric {
			Metric::Blake2256 => benchmark_cpu(hash_limit),
			Metric::Blake2256Parallel { num_cores } =>
				benchmark_cpu_parallelism(hash_limit, *num_cores),
			Metric::Sr25519Verify => benchmark_sr25519_verify(verify_limit),
			Metric::MemCopy => benchmark_memory(memory_limit),
			// The disk benchmarks need a concrete directory to write into.
			Metric::DiskSeqWrite => benchmark_disk_sequential_writes(disk_limit, dir)?,
			Metric::DiskRndWrite => benchmark_disk_random_writes(disk_limit, dir)?,
		};
		Ok(score)
	}
	/// Prints a human-readable summary.
	fn print_summary(&self, requirements: Requirements, results: Vec<BenchResult>) -> Result<()> {
		// Use a table for nicer console output.
		let mut table = Table::new();
		table.set_header(["Category", "Function", "Score", "Minimum", "Result"]);
		// Count how many passed and how many failed.
		let (mut passed, mut failed) = (0, 0);
		for (requirement, result) in requirements.0.iter().zip(results.iter()) {
			if result.passed {
				passed += 1
			} else {
				failed += 1
			}
			table.add_row(result.to_row(requirement));
		}
		// Print the table and a summary.
		info!(
			"\n{}\nFrom {} benchmarks in total, {} passed and {} failed ({:.0?}% fault tolerance).",
			table,
			passed + failed,
			passed,
			failed,
			self.tolerance
		);
		// Print the final result.
		if failed != 0 {
			info!("The hardware fails to meet the requirements");
			self.check_failed(Error::UnmetRequirement)?;
		} else {
			info!("The hardware meets the requirements ");
		}
		// Check that the results were not created by a bad build profile.
		if let Err(err) = check_build_profile() {
			self.check_failed(Error::BadBuildProfile(err))?;
		}
		Ok(())
	}
	/// Returns `Ok` if [`self.allow_fail`] is set and otherwise the error argument.
	fn check_failed(&self, e: Error) -> Result<()> {
		if !self.allow_fail {
			error!("Failing since --allow-fail is not set");
			Err(sc_cli::Error::Application(Box::new(e)))
		} else {
			warn!("Ignoring error since --allow-fail is set: {:?}", e);
			Ok(())
		}
	}
	/// Validates the CLI arguments.
	fn validate_args(&self) -> Result<()> {
		// The tolerance is a percentage and must lie within [0, 100].
		if self.tolerance > 100.0 || self.tolerance < 0.0 {
			return Err("The --tolerance argument is out of range".into());
		}
		Ok(())
	}
}
impl BenchResult {
/// Format [`Self`] as row that can be printed in a table.
fn to_row(&self, req: &Requirement) -> Row {
let passed = if self.passed { "✅ Pass" } else { "❌ Fail" };
vec![
req.metric.category().into(),
req.metric.name().into(),
format!("{}", self.score),
format!("{}", req.minimum),
format!("{} ({: >5.1?} %)", passed, self.rel_score * 100.0),
]
.into()
}
}
// Boilerplate
impl CliConfiguration for MachineCmd {
	// Only the shared params are forwarded; `MachineCmd` defines no
	// import/database params of its own.
	fn shared_params(&self) -> &SharedParams {
		&self.shared_params
	}
}
@@ -0,0 +1,27 @@
[
{
"metric": "Blake2256",
"minimum": 1000.00
},
{
"metric": {"Blake2256Parallel":{"num_cores":8}},
"minimum": 1000.00,
"validator_only": true
},
{
"metric": "Sr25519Verify",
"minimum": 0.622675781
},
{
"metric": "MemCopy",
"minimum": 11768.341
},
{
"metric": "DiskSeqWrite",
"minimum": 950.0
},
{
"metric": "DiskRndWrite",
"minimum": 420.0
}
]
@@ -0,0 +1,146 @@
# The `benchmark overhead` command
Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead". This is necessary
since the weight that is calculated by the pallet benchmarks does not include this overhead. The exact overhead can
vary per Substrate chain and needs to be calculated per chain. This command calculates the exact values of these
overhead weights for any Substrate chain that supports it.
## How does it work?
The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`]. Both are executed
sequentially when invoking the command.
## BlockExecutionWeight
The block execution weight is defined as the weight that it takes to execute an *empty block*. It is measured by
constructing an empty block and measuring its executing time. The result are written to a `block_weights.rs` file which
is created from a template. The file will contain the concrete weight value and various statistics about the
measurements. For example:
```rust
/// Time to execute an empty block.
/// Calculated by multiplying the *Average* with `1` and adding `0`.
///
/// Stats [NS]:
/// Min, Max: 3_508_416, 3_680_498
/// Average: 3_532_484
/// Median: 3_522_111
/// Std-Dev: 27070.23
///
/// Percentiles [NS]:
/// 99th: 3_631_863
/// 95th: 3_595_674
/// 75th: 3_526_435
pub const BlockExecutionWeight: Weight =
Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(3_532_484), 0);
```
In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute
*any* block. This constant weight is therefore added to each block to ensure that Substrate budgets enough time to
execute it.
## ExtrinsicBaseWeight
The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic. An *empty* extrinsic
is also called a *NO-OP*. It does nothing and is the equivalent of the empty block from above. The benchmark now
constructs a block which is filled with only NO-OP extrinsics. This block is then executed many times and the weights
are measured. The result is divided by the number of extrinsics in that block and the results are written to
`extrinsic_weights.rs`.
The relevant section in the output file looks like this:
```rust
/// Time to execute a NO-OP extrinsic, for example `System::remark`.
/// Calculated by multiplying the *Average* with `1` and adding `0`.
///
/// Stats [NS]:
/// Min, Max: 67_561, 69_855
/// Average: 67_745
/// Median: 67_701
/// Std-Dev: 264.68
///
/// Percentiles [NS]:
/// 99th: 68_758
/// 95th: 67_843
/// 75th: 67_749
pub const ExtrinsicBaseWeight: Weight =
Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul(67_745), 0);
```
In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to
execute *any* extrinsic. This constant weight is therefore added to each extrinsic to ensure that Substrate budgets
enough time to execute it.
## Invocation
The base command looks like this (for debugging you can use `--release`):
```sh
cargo run --profile=production -- benchmark overhead --dev
```
Output:
```pre
# BlockExecutionWeight
Running 10 warmups...
Executing block 100 times
Per-block execution overhead [ns]:
Total: 353248430
Min: 3508416, Max: 3680498
Average: 3532484, Median: 3522111, Stddev: 27070.23
Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435
Writing weights to "block_weights.rs"
# Setup
Building block, this takes some time...
Extrinsics per block: 12000
# ExtrinsicBaseWeight
Running 10 warmups...
Executing block 100 times
Per-extrinsic execution overhead [ns]:
Total: 6774590
Min: 67561, Max: 69855
Average: 67745, Median: 67701, Stddev: 264.68
Percentiles 99th, 95th, 75th: 68758, 67843, 67749
Writing weights to "extrinsic_weights.rs"
```
The complete command for PezkuwiChain looks like this:
```sh
cargo run --profile=production -- benchmark overhead --chain=pezkuwi-dev --wasm-execution=compiled --weight-path=runtime/pezkuwi/constants/src/weights/
```
This will overwrite the
[block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs)
and
[extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs)
files in the PezkuwiChain runtime directory. You can try the same for *pezkuwichain* and to see that the results slightly differ.
👉 It is paramount to use `--profile=production` and `--wasm-execution=compiled` as the results are otherwise useless.
## Output Interpretation
Lower is better. The less weight the execution overhead needs, the better. Since the weights of the overhead is charged
per extrinsic and per block, a larger weight results in less extrinsics per block. Minimizing this is important to have
a large transaction throughput.
## Arguments
- `--chain` / `--dev` Set the chain specification.
- `--weight-path` Set the output directory or file to write the weights to.
- `--repeat` Set the repetitions of both benchmarks.
- `--warmup` Set the rounds of warmup before measuring.
- `--wasm-execution` Should be set to `compiled` for correct results.
- [`--mul`](../shared/README.md#arguments)
- [`--add`](../shared/README.md#arguments)
- [`--metric`](../shared/README.md#arguments)
- [`--weight-path`](../shared/README.md#arguments)
- [`--header`](../shared/README.md#arguments)
License: Apache-2.0
<!-- LINKS -->
[`ExtrinsicBaseWeight`]:
https://github.com/paritytech/substrate/blob/580ebae17fa30082604f1c9720f6f4a1cfe95b50/frame/support/src/weights/extrinsic_weights.rs#L26
[`BlockExecutionWeight`]:
https://github.com/paritytech/substrate/blob/580ebae17fa30082604f1c9720f6f4a1cfe95b50/frame/support/src/weights/block_weights.rs#L26
[System::Remark]:
https://github.com/paritytech/substrate/blob/580ebae17fa30082604f1c9720f6f4a1cfe95b50/frame/system/src/lib.rs#L382
@@ -0,0 +1,783 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the [`OverheadCmd`] as entry point for the CLI to execute
//! the *overhead* benchmarks.
use crate::{
extrinsic::{
bench::{Benchmark, BenchmarkParams as ExtrinsicBenchmarkParams},
ExtrinsicBuilder,
},
overhead::{
command::ChainType::{Relaychain, Teyrchain, Unknown},
fake_runtime_api,
remark_builder::SubstrateRemarkBuilder,
template::TemplateData,
},
shared::{
genesis_state,
genesis_state::{GenesisStateHandler, SpecGenesisSource},
HostInfoParams, WeightParams,
},
};
use clap::{error::ErrorKind, Args, CommandFactory, Parser};
use codec::{Decode, Encode};
use cumulus_client_teyrchain_inherent::MockValidationDataInherentDataProvider;
use fake_runtime_api::RuntimeApi as FakeRuntimeApi;
use frame_support::Deserialize;
use genesis_state::WARN_SPEC_GENESIS_CTOR;
use log::info;
use pezkuwi_teyrchain_primitives::primitives::Id as ParaId;
use sc_block_builder::BlockBuilderApi;
use sc_chain_spec::{ChainSpec, ChainSpecExtension, GenesisBlockBuilder};
use sc_cli::{CliConfiguration, Database, ImportParams, Result, SharedParams};
use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider};
use sc_client_db::{BlocksPruning, DatabaseSettings};
use sc_executor::WasmExecutor;
use sc_runtime_utilities::fetch_latest_metadata_from_code_blob;
use sc_service::{new_client, new_db_backend, BasePath, ClientConfig, TFullClient, TaskManager};
use serde::Serialize;
use serde_json::{json, Value};
use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi};
use sp_blockchain::HeaderBackend;
use sp_core::H256;
use sp_inherents::{InherentData, InherentDataProvider};
use sp_runtime::{
generic,
traits::{BlakeTwo256, Block as BlockT},
DigestItem, OpaqueExtrinsic,
};
use sp_storage::Storage;
use sp_wasm_interface::HostFunctions;
use std::{
fmt::{Debug, Display, Formatter},
fs,
path::PathBuf,
sync::Arc,
};
use subxt::{client::RuntimeVersion, ext::futures, Metadata};
/// Para id used for a teyrchain when the user did not provide one explicitly.
const DEFAULT_PARA_ID: u32 = 100;
/// Log target for output of this module.
// `'static` is implied on a `const` reference (clippy: `redundant_static_lifetimes`).
const LOG_TARGET: &str = "pezkuwi_sdk_frame::benchmark::overhead";
/// Benchmark the execution overhead per-block and per-extrinsic.
#[derive(Debug, Parser)]
pub struct OverheadCmd {
	/// Shared CLI parameters (chain spec, base path, logging, ...).
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: SharedParams,

	/// Import-related parameters (database, pruning, trie cache, ...).
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub import_params: ImportParams,

	/// Parameters specific to the overhead benchmark.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub params: OverheadParams,
}
/// Configures the benchmark, the post-processing and weight generation.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct OverheadParams {
	/// Parameters controlling where and how the weight files are written.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub weight: WeightParams,

	/// Parameters of the benchmark itself (repeats, warmup, ...).
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub bench: ExtrinsicBenchmarkParams,

	/// Information about the host machine, embedded into the output files.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub hostinfo: HostInfoParams,

	/// Add a header to the generated weight output file.
	///
	/// Good for adding LICENSE headers.
	#[arg(long, value_name = "PATH")]
	pub header: Option<PathBuf>,

	/// Enable the Trie cache.
	///
	/// This should only be used for performance analysis and not for final results.
	#[arg(long)]
	pub enable_trie_cache: bool,

	/// Optional runtime blob to use instead of the one from the genesis config.
	// Mutually exclusive with `--chain`; required when `--genesis-builder runtime` is given.
	#[arg(
		long,
		value_name = "PATH",
		conflicts_with = "chain",
		required_if_eq("genesis_builder", "runtime")
	)]
	pub runtime: Option<PathBuf>,

	/// The preset that we expect to find in the GenesisBuilder runtime API.
	///
	/// This can be useful when a runtime has a dedicated benchmarking preset instead of using the
	/// default one.
	#[arg(long, default_value = sp_genesis_builder::DEV_RUNTIME_PRESET)]
	pub genesis_builder_preset: String,

	/// How to construct the genesis state.
	///
	/// Can be used together with `--chain` to determine whether the
	/// genesis state should be initialized with the values from the
	/// provided chain spec or a runtime-provided genesis preset.
	#[arg(long, value_enum, alias = "genesis-builder-policy")]
	pub genesis_builder: Option<GenesisBuilderPolicy>,

	/// Teyrchain Id to use for teyrchains. If not specified, the benchmark code will choose
	/// a para-id and patch the state accordingly.
	#[arg(long)]
	pub para_id: Option<u32>,
}
/// How the genesis state for benchmarking should be built.
// The kebab-case variant names are the values accepted on the CLI.
#[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy, Serialize)]
#[clap(rename_all = "kebab-case")]
pub enum GenesisBuilderPolicy {
	/// Let the runtime build the genesis state through its `BuildGenesisConfig` runtime API.
	/// This will use the `development` preset by default.
	Runtime,
	/// Use the runtime from the Spec file to build the genesis state.
	SpecRuntime,
	/// Use the spec file to build the genesis state. This fails when there is no spec.
	#[value(alias = "spec")]
	SpecGenesis,
}
/// Type of a benchmark.
// `Serialize` is needed because the type is embedded into the weight-file template data.
#[derive(Serialize, Clone, PartialEq, Copy)]
pub(crate) enum BenchmarkType {
	/// Measure the per-extrinsic execution overhead.
	Extrinsic,
	/// Measure the per-block execution overhead.
	Block,
}
/// Hostfunctions that are typically used by teyrchains.
pub type TeyrchainHostFunctions = (
	cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions,
	sp_io::SubstrateHostFunctions,
);

/// Typical block number type.
pub type BlockNumber = u32;

/// Typical block header.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;

/// Typical block type using `OpaqueExtrinsic`.
pub type OpaqueBlock = generic::Block<Header, OpaqueExtrinsic>;

/// Client type used throughout the benchmarking code.
type OverheadClient<Block, HF> = TFullClient<Block, FakeRuntimeApi, WasmExecutor<HF>>;
/// Creates inherent data for a given teyrchain ID.
///
/// Constructs the inherent data required for block execution, including the
/// relay-chain state and validation data. Not every chain needs all of these
/// inherents; the runtime picks the ones it requires by their identifier.
fn create_inherent_data<Client: UsageProvider<Block> + HeaderBackend<Block>, Block: BlockT>(
	client: &Arc<Client>,
	chain_type: &ChainType,
) -> InherentData {
	let best_hash = client.usage_info().chain.best_hash;
	let best_header = client.header(best_hash).unwrap().unwrap();

	let mut inherent_data = InherentData::new();

	// The mocked validation-data inherent only makes sense when benchmarking a teyrchain.
	if let Teyrchain(id) = chain_type {
		let validation_data_provider = MockValidationDataInherentDataProvider::<()> {
			para_id: ParaId::from(*id),
			current_para_block_head: Some(best_header.encode().into()),
			relay_offset: 0,
			..Default::default()
		};
		let _ = futures::executor::block_on(
			validation_data_provider.provide_inherent_data(&mut inherent_data),
		);
	}

	// Inherent that relay chains use to perform teyrchain validation; empty here.
	let para_inherent = pezkuwi_primitives::InherentData {
		bitfields: Vec::new(),
		backed_candidates: Vec::new(),
		disputes: Vec::new(),
		parent_header: best_header,
	};

	// Timestamp inherent, common to most substrate chains.
	let timestamp = sp_timestamp::InherentDataProvider::new(std::time::Duration::default().into());
	let _ = futures::executor::block_on(timestamp.provide_inherent_data(&mut inherent_data));

	let _ =
		inherent_data.put_data(pezkuwi_primitives::TEYRCHAINS_INHERENT_IDENTIFIER, &para_inherent);

	inherent_data
}
/// Identifies what kind of chain we are dealing with.
///
/// Chains containing the `TeyrchainSystem` and `TeyrchainInfo` pallet are considered teyrchains.
/// Chains containing the `ParaInherent` pallet are considered relay chains.
fn identify_chain(metadata: &Metadata, para_id: Option<u32>) -> ChainType {
	let teyrchain_info_exists = metadata.pallet_by_name("TeyrchainInfo").is_some();
	let teyrchain_system_exists = metadata.pallet_by_name("TeyrchainSystem").is_some();
	let para_inherent_exists = metadata.pallet_by_name("ParaInherent").is_some();

	// Emit a distinct marker per pallet. The previous conditionals produced the same
	// (empty) string for both branches, rendering the debug output useless.
	let marker = |found: bool| if found { "✅" } else { "❌" };
	log::debug!("{} TeyrchainSystem", marker(teyrchain_system_exists));
	log::debug!("{} TeyrchainInfo", marker(teyrchain_info_exists));
	log::debug!("{} ParaInherent", marker(para_inherent_exists));

	let chain_type = if teyrchain_system_exists && teyrchain_info_exists {
		Teyrchain(para_id.unwrap_or(DEFAULT_PARA_ID))
	} else if para_inherent_exists {
		Relaychain
	} else {
		Unknown
	};
	log::info!(target: LOG_TARGET, "Identified Chain type from metadata: {}", chain_type);
	chain_type
}
/// Chain-spec extension carrying the teyrchain id, if any.
#[derive(Deserialize, Serialize, Clone, ChainSpecExtension)]
pub struct TeyrchainExtension {
	/// The id of the Teyrchain.
	pub para_id: Option<u32>,
}
impl OverheadCmd {
	/// Build the genesis-state handler and determine the effective para id.
	///
	/// Sources are checked in order: chain spec passed in via the API,
	/// `--chain` from the CLI, then `--runtime`. Errors when none is given.
	fn state_handler_from_cli<HF: HostFunctions>(
		&self,
		chain_spec_from_api: Option<Box<dyn ChainSpec>>,
	) -> Result<(GenesisStateHandler, Option<u32>)> {
		let genesis_builder_to_source = || match self.params.genesis_builder {
			Some(GenesisBuilderPolicy::Runtime) | Some(GenesisBuilderPolicy::SpecRuntime) =>
				SpecGenesisSource::Runtime(self.params.genesis_builder_preset.clone()),
			Some(GenesisBuilderPolicy::SpecGenesis) | None => {
				log::warn!(target: LOG_TARGET, "{WARN_SPEC_GENESIS_CTOR}");
				SpecGenesisSource::SpecJson
			},
		};

		// First handle chain-spec passed in via API parameter.
		if let Some(chain_spec) = chain_spec_from_api {
			log::debug!(target: LOG_TARGET, "Initializing state handler with chain-spec from API: {:?}", chain_spec);
			let source = genesis_builder_to_source();
			return Ok((GenesisStateHandler::ChainSpec(chain_spec, source), self.params.para_id));
		};

		// Handle chain-spec passed in via CLI.
		if let Some(chain_spec_path) = &self.shared_params.chain {
			log::debug!(target: LOG_TARGET,
				"Initializing state handler with chain-spec from path: {:?}",
				chain_spec_path
			);
			let (chain_spec, para_id_from_chain_spec) =
				genesis_state::chain_spec_from_path::<HF>(chain_spec_path.to_string().into())?;
			let source = genesis_builder_to_source();
			return Ok((
				GenesisStateHandler::ChainSpec(chain_spec, source),
				// A para id given on the CLI wins over the one from the chain spec.
				self.params.para_id.or(para_id_from_chain_spec),
			));
		};

		// Check for runtimes. In general, we make sure that `--runtime` and `--chain` are
		// incompatible on the CLI level.
		if let Some(runtime_path) = &self.params.runtime {
			log::debug!(target: LOG_TARGET, "Initializing state handler with runtime from path: {:?}", runtime_path);
			let runtime_blob = fs::read(runtime_path)?;
			return Ok((
				GenesisStateHandler::Runtime(
					runtime_blob,
					Some(self.params.genesis_builder_preset.clone()),
				),
				self.params.para_id,
			));
		};

		Err("Neither a runtime nor a chain-spec were specified".to_string().into())
	}

	/// Validate the combination of CLI arguments before doing any heavy work.
	///
	/// Returns the clap error kind and message to report on failure.
	fn check_args(
		&self,
		chain_spec: &Option<Box<dyn ChainSpec>>,
	) -> std::result::Result<(), (ErrorKind, String)> {
		if chain_spec.is_none() &&
			self.params.runtime.is_none() &&
			self.shared_params.chain.is_none()
		{
			return Err((
				ErrorKind::MissingRequiredArgument,
				"Provide either a runtime via `--runtime` or a chain spec via `--chain`"
					.to_string(),
			));
		}
		match self.params.genesis_builder {
			// These policies read the genesis state from a spec, so a spec must be present.
			Some(GenesisBuilderPolicy::SpecGenesis | GenesisBuilderPolicy::SpecRuntime) =>
				if chain_spec.is_none() && self.shared_params.chain.is_none() {
					return Err((
						ErrorKind::MissingRequiredArgument,
						"Provide a chain spec via `--chain`.".to_string(),
					));
				},
			_ => {},
		};
		Ok(())
	}

	/// Run the overhead benchmark with the default extrinsic builder.
	///
	/// This will use [SubstrateRemarkBuilder] to build the extrinsic. It is
	/// designed to match common configurations found in substrate chains.
	pub fn run_with_default_builder_and_spec<Block, ExtraHF>(
		&self,
		chain_spec: Option<Box<dyn ChainSpec>>,
	) -> Result<()>
	where
		Block: BlockT<Extrinsic = OpaqueExtrinsic, Hash = H256>,
		ExtraHF: HostFunctions,
	{
		self.run_with_extrinsic_builder_and_spec::<Block, ExtraHF>(
			Box::new(|metadata, hash, version| {
				// Convert the client-side hash type to the subxt hash type.
				let genesis = subxt::utils::H256::from(hash.to_fixed_bytes());
				Box::new(SubstrateRemarkBuilder::new(metadata, genesis, version)) as Box<_>
			}),
			chain_spec,
		)
	}

	/// Run the benchmark overhead command.
	///
	/// The provided [ExtrinsicBuilder] will be used to build extrinsics for
	/// block-building. It is expected that the provided implementation builds
	/// a `System::remark` extrinsic.
	pub fn run_with_extrinsic_builder_and_spec<Block, ExtraHF>(
		&self,
		ext_builder_provider: Box<
			dyn FnOnce(Metadata, Block::Hash, RuntimeVersion) -> Box<dyn ExtrinsicBuilder>,
		>,
		chain_spec: Option<Box<dyn ChainSpec>>,
	) -> Result<()>
	where
		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
		ExtraHF: HostFunctions,
	{
		// Report invalid argument combinations as proper clap errors and exit.
		if let Err((error_kind, msg)) = self.check_args(&chain_spec) {
			let mut cmd = OverheadCmd::command();
			cmd.error(error_kind, msg).exit();
		};
		let (state_handler, para_id) =
			self.state_handler_from_cli::<(TeyrchainHostFunctions, ExtraHF)>(chain_spec)?;

		let executor = WasmExecutor::<(TeyrchainHostFunctions, ExtraHF)>::builder()
			.with_allow_missing_host_functions(true)
			.build();

		let opaque_metadata =
			fetch_latest_metadata_from_code_blob(&executor, state_handler.get_code_bytes()?)
				.map_err(|_| {
					<&str as Into<sc_cli::Error>>::into("Unable to fetch latest stable metadata")
				})?;
		let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?;

		// At this point we know what kind of chain we are dealing with.
		let chain_type = identify_chain(&metadata, para_id);

		// If we are dealing with a teyrchain, make sure that the para id in genesis will
		// match what we expect.
		let genesis_patcher = match chain_type {
			Teyrchain(para_id) =>
				Some(Box::new(move |value| patch_genesis(value, Some(para_id))) as Box<_>),
			_ => None,
		};

		let client = self.build_client_components::<Block, (TeyrchainHostFunctions, ExtraHF)>(
			state_handler.build_storage::<(TeyrchainHostFunctions, ExtraHF)>(genesis_patcher)?,
			executor,
			&chain_type,
		)?;

		let inherent_data = create_inherent_data(&client, &chain_type);

		// Fetch runtime name/version from the freshly built client and hand them
		// to the caller-supplied extrinsic-builder factory.
		let (ext_builder, runtime_name) = {
			let genesis = client.usage_info().chain.best_hash;
			let version = client.runtime_api().version(genesis).unwrap();
			let runtime_name = version.spec_name;
			let runtime_version = RuntimeVersion {
				spec_version: version.spec_version,
				transaction_version: version.transaction_version,
			};
			(ext_builder_provider(metadata, genesis, runtime_version), runtime_name)
		};

		self.run(
			runtime_name.to_string(),
			client,
			inherent_data,
			Default::default(),
			&*ext_builder,
			chain_type.requires_proof_recording(),
		)
	}

	/// Run the benchmark overhead command.
	pub fn run_with_extrinsic_builder<Block, ExtraHF>(
		&self,
		ext_builder_provider: Box<
			dyn FnOnce(Metadata, Block::Hash, RuntimeVersion) -> Box<dyn ExtrinsicBuilder>,
		>,
	) -> Result<()>
	where
		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
		ExtraHF: HostFunctions,
	{
		self.run_with_extrinsic_builder_and_spec::<Block, ExtraHF>(ext_builder_provider, None)
	}

	/// Assemble a full client from genesis storage, wasm executor and chain type.
	///
	/// Uses a database backend below `--base-path` (temporary directory when not
	/// given) and enables import-proof recording when the chain type requires it.
	fn build_client_components<Block, HF>(
		&self,
		genesis_storage: Storage,
		executor: WasmExecutor<HF>,
		chain_type: &ChainType,
	) -> Result<Arc<OverheadClient<Block, HF>>>
	where
		Block: BlockT,
		HF: HostFunctions,
	{
		let extensions = ExecutionExtensions::new(None, Arc::new(executor.clone()));

		let base_path = match &self.shared_params.base_path {
			None => BasePath::new_temp_dir()?,
			Some(path) => BasePath::from(path.clone()),
		};

		let database_source = self.database_config(
			&base_path.path().to_path_buf(),
			self.database_cache_size()?.unwrap_or(1024),
			self.database()?.unwrap_or(Database::Auto),
		)?;

		let backend = new_db_backend(DatabaseSettings {
			trie_cache_maximum_size: self.trie_cache_maximum_size()?,
			state_pruning: None,
			blocks_pruning: BlocksPruning::KeepAll,
			source: database_source,
			metrics_registry: None,
		})?;

		let genesis_block_builder = GenesisBlockBuilder::new_with_storage(
			genesis_storage,
			true,
			backend.clone(),
			executor.clone(),
		)?;

		let tokio_runtime = sc_cli::build_runtime()?;
		let task_manager = TaskManager::new(tokio_runtime.handle().clone(), None)
			.map_err(|_| "Unable to build task manager")?;

		let client: Arc<OverheadClient<Block, HF>> = Arc::new(new_client(
			backend.clone(),
			executor,
			genesis_block_builder,
			Default::default(),
			Default::default(),
			extensions,
			Box::new(task_manager.spawn_handle()),
			None,
			None,
			ClientConfig {
				offchain_worker_enabled: false,
				offchain_indexing_api: false,
				wasm_runtime_overrides: None,
				no_genesis: false,
				wasm_runtime_substitutes: Default::default(),
				enable_import_proof_recording: chain_type.requires_proof_recording(),
			},
		)?);

		Ok(client)
	}

	/// Measure the per-block and per-extrinsic execution overhead.
	///
	/// Writes the results to console and into two instances of the
	/// `weights.hbs` template, one for each benchmark.
	pub fn run<Block, C>(
		&self,
		chain_name: String,
		client: Arc<C>,
		inherent_data: sp_inherents::InherentData,
		digest_items: Vec<DigestItem>,
		ext_builder: &dyn ExtrinsicBuilder,
		should_record_proof: bool,
	) -> Result<()>
	where
		Block: BlockT<Extrinsic = OpaqueExtrinsic>,
		C: ProvideRuntimeApi<Block>
			+ CallApiAt<Block>
			+ UsageProvider<Block>
			+ sp_blockchain::HeaderBackend<Block>,
		C::Api: ApiExt<Block> + BlockBuilderApi<Block>,
	{
		// The benchmark is only meaningful against a `System::remark` extrinsic.
		if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" {
			return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into());
		}

		let bench = Benchmark::new(
			client,
			self.params.bench.clone(),
			inherent_data,
			digest_items,
			should_record_proof,
		);

		// per-block execution overhead
		{
			let (stats, proof_size) = bench.bench_block()?;
			info!(target: LOG_TARGET, "Per-block execution overhead [ns]:\n{:?}", stats);
			let template = TemplateData::new(
				BenchmarkType::Block,
				&chain_name,
				&self.params,
				&stats,
				proof_size,
			)?;
			template.write(&self.params.weight.weight_path)?;
		}
		// per-extrinsic execution overhead
		{
			let (stats, proof_size) = bench.bench_extrinsic(ext_builder)?;
			info!(target: LOG_TARGET, "Per-extrinsic execution overhead [ns]:\n{:?}", stats);
			let template = TemplateData::new(
				BenchmarkType::Extrinsic,
				&chain_name,
				&self.params,
				&stats,
				proof_size,
			)?;
			template.write(&self.params.weight.weight_path)?;
		}

		Ok(())
	}
}
impl BenchmarkType {
/// Short name of the benchmark type.
pub(crate) fn short_name(&self) -> &'static str {
match self {
Self::Extrinsic => "extrinsic",
Self::Block => "block",
}
}
/// Long name of the benchmark type.
pub(crate) fn long_name(&self) -> &'static str {
match self {
Self::Extrinsic => "ExtrinsicBase",
Self::Block => "BlockExecution",
}
}
}
/// The kind of chain that was identified from the runtime metadata.
#[derive(Clone, PartialEq, Debug)]
enum ChainType {
	/// A teyrchain with the given para id.
	Teyrchain(u32),
	/// A relay chain.
	Relaychain,
	/// Neither identifiable as teyrchain nor as relay chain.
	Unknown,
}
impl Display for ChainType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ChainType::Teyrchain(id) => write!(f, "Teyrchain(paraid = {})", id),
ChainType::Relaychain => write!(f, "Relaychain"),
ChainType::Unknown => write!(f, "Unknown"),
}
}
}
impl ChainType {
	/// Whether blocks of this chain type must be built with proof recording enabled.
	fn requires_proof_recording(&self) -> bool {
		// Only teyrchains need a storage proof; relay and unknown chains do not.
		matches!(self, Teyrchain(_))
	}
}
/// Patch the teyrchain id into the genesis config. This is necessary since the inherents
/// also contain a teyrchain id and they need to match.
fn patch_genesis(mut input_value: Value, para_id: Option<u32>) -> Value {
	// Nothing to patch when no teyrchain was identified.
	let Some(para_id) = para_id else { return input_value };

	// Patching the id into the genesis config keeps it consistent with the
	// inherents we provide, so block building can succeed.
	let patch = json!({
		"teyrchainInfo": {
			"teyrchainId": para_id,
		}
	});
	sc_chain_spec::json_patch::merge(&mut input_value, patch);

	log::debug!(target: LOG_TARGET, "Genesis Config Json");
	log::debug!(target: LOG_TARGET, "{}", input_value);
	input_value
}
// Boilerplate
impl CliConfiguration for OverheadCmd {
	fn shared_params(&self) -> &SharedParams {
		&self.shared_params
	}

	fn import_params(&self) -> Option<&ImportParams> {
		Some(&self.import_params)
	}

	fn base_path(&self) -> Result<Option<BasePath>> {
		// The benchmark always runs against a fresh temporary directory.
		let tmp = BasePath::new_temp_dir()?;
		Ok(Some(tmp))
	}

	fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
		// The trie cache distorts measurements, so it is strictly opt-in.
		if !self.params.enable_trie_cache {
			return Ok(None)
		}
		Ok(self.import_params().and_then(|p| p.trie_cache_maximum_size()))
	}
}
#[cfg(test)]
mod tests {
	use crate::{
		overhead::command::{identify_chain, ChainType, TeyrchainHostFunctions, DEFAULT_PARA_ID},
		OverheadCmd,
	};
	use clap::Parser;
	use codec::Decode;
	use sc_executor::WasmExecutor;

	// A runtime exposing `ParaInherent` must be detected as a relay chain,
	// which does not require proof recording.
	#[test]
	fn test_chain_type_relaychain() {
		let executor: WasmExecutor<TeyrchainHostFunctions> = WasmExecutor::builder().build();
		let code_bytes = zagros_runtime::WASM_BINARY
			.expect("To run this test, build the wasm binary of zagros-runtime")
			.to_vec();
		let opaque_metadata =
			super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap();
		let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap();
		let chain_type = identify_chain(&metadata, None);
		assert_eq!(chain_type, ChainType::Relaychain);
		assert_eq!(chain_type.requires_proof_recording(), false);
	}

	// A runtime exposing `TeyrchainSystem` + `TeyrchainInfo` must be detected as a
	// teyrchain; without an explicit para id the default one is used.
	#[test]
	fn test_chain_type_teyrchain() {
		let executor: WasmExecutor<TeyrchainHostFunctions> = WasmExecutor::builder().build();
		let code_bytes = cumulus_test_runtime::WASM_BINARY
			.expect("To run this test, build the wasm binary of cumulus-test-runtime")
			.to_vec();
		let opaque_metadata =
			super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap();
		let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap();
		let chain_type = identify_chain(&metadata, Some(100));
		assert_eq!(chain_type, ChainType::Teyrchain(100));
		assert!(chain_type.requires_proof_recording());
		assert_eq!(identify_chain(&metadata, None), ChainType::Teyrchain(DEFAULT_PARA_ID));
	}

	// A runtime with none of the marker pallets is classified as `Unknown`.
	#[test]
	fn test_chain_type_custom() {
		let executor: WasmExecutor<TeyrchainHostFunctions> = WasmExecutor::builder().build();
		let code_bytes = substrate_test_runtime::WASM_BINARY
			.expect("To run this test, build the wasm binary of substrate-test-runtime")
			.to_vec();
		let opaque_metadata =
			super::fetch_latest_metadata_from_code_blob(&executor, code_bytes.into()).unwrap();
		let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice()).unwrap();
		let chain_type = identify_chain(&metadata, None);
		assert_eq!(chain_type, ChainType::Unknown);
		assert_eq!(chain_type.requires_proof_recording(), false);
	}

	// Helper: parsing must succeed and the argument combination must validate.
	fn cli_succeed(args: &[&str]) -> Result<(), clap::Error> {
		let cmd = OverheadCmd::try_parse_from(args)?;
		assert!(cmd.check_args(&None).is_ok());
		Ok(())
	}

	// Helper: either clap parsing fails, or `check_args` must reject the combination.
	fn cli_fail(args: &[&str]) {
		let cmd = OverheadCmd::try_parse_from(args);
		if let Ok(cmd) = cmd {
			assert!(cmd.check_args(&None).is_err());
		}
	}

	// Exercises the `--runtime` / `--chain` / `--genesis-builder` compatibility matrix.
	#[test]
	fn test_cli_conflicts() -> Result<(), clap::Error> {
		// Runtime tests
		cli_succeed(&["test", "--runtime", "path/to/runtime", "--genesis-builder", "runtime"])?;
		cli_succeed(&["test", "--runtime", "path/to/runtime"])?;
		cli_succeed(&[
			"test",
			"--runtime",
			"path/to/runtime",
			"--genesis-builder-preset",
			"preset",
		])?;
		cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec"]);
		cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec-genesis"]);
		cli_fail(&["test", "--runtime", "path/to/spec", "--genesis-builder", "spec-runtime"]);

		// Spec tests
		cli_succeed(&["test", "--chain", "path/to/spec"])?;
		cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec"])?;
		cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec-genesis"])?;
		cli_succeed(&["test", "--chain", "path/to/spec", "--genesis-builder", "spec-runtime"])?;
		cli_fail(&["test", "--chain", "path/to/spec", "--genesis-builder", "none"]);
		cli_fail(&["test", "--chain", "path/to/spec", "--genesis-builder", "runtime"]);
		cli_fail(&[
			"test",
			"--chain",
			"path/to/spec",
			"--genesis-builder",
			"runtime",
			"--genesis-builder-preset",
			"preset",
		]);
		Ok(())
	}
}
@@ -0,0 +1,112 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A fake runtime struct that allows us to instantiate a client.
//! Has all the required runtime APIs implemented to satisfy trait bounds,
//! but the methods are never called since we use WASM exclusively.
use sp_core::OpaqueMetadata;
use sp_runtime::{
generic,
traits::{BlakeTwo256, Block as BlockT},
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult, OpaqueExtrinsic,
};
/// Block number
#[allow(dead_code)]
type BlockNumber = u32;

/// Opaque block header type.
#[allow(dead_code)]
type Header = generic::Header<BlockNumber, BlakeTwo256>;

/// Opaque block type.
#[allow(dead_code)]
type Block = generic::Block<Header, OpaqueExtrinsic>;

/// Fake runtime struct; exists only to satisfy trait bounds when instantiating a
/// client. None of its runtime APIs are ever called natively — execution is WASM-only.
#[allow(unused)]
pub struct Runtime;
sp_api::impl_runtime_apis! {
	// Core runtime API; required by the client's trait bounds. All methods are
	// `unimplemented!()` because execution happens exclusively through WASM.
	impl sp_api::Core<Block> for Runtime {
		fn version() -> sp_version::RuntimeVersion {
			unimplemented!()
		}

		fn execute_block(_: <Block as BlockT>::LazyBlock) {
			unimplemented!()
		}

		fn initialize_block(_: &<Block as BlockT>::Header) -> sp_runtime::ExtrinsicInclusionMode {
			unimplemented!()
		}
	}

	// Metadata API; never called natively, present only for the trait bound.
	impl sp_api::Metadata<Block> for Runtime {
		fn metadata() -> OpaqueMetadata {
			unimplemented!()
		}

		fn metadata_at_version(_: u32) -> Option<OpaqueMetadata> {
			unimplemented!()
		}

		fn metadata_versions() -> Vec<u32> {
			unimplemented!()
		}
	}

	// Block-builder API; never called natively, present only for the trait bound.
	impl sp_block_builder::BlockBuilder<Block> for Runtime {
		fn apply_extrinsic(_: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
			unimplemented!()
		}

		fn finalize_block() -> <Block as BlockT>::Header {
			unimplemented!()
		}

		fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
			unimplemented!()
		}

		fn check_inherents(_: <Block as BlockT>::LazyBlock, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult {
			unimplemented!()
		}
	}

	// Transaction-pool API; never called natively, present only for the trait bound.
	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
		fn validate_transaction(
			_: TransactionSource,
			_: <Block as BlockT>::Extrinsic,
			_: <Block as BlockT>::Hash,
		) -> TransactionValidity {
			unimplemented!()
		}
	}

	// Genesis-builder API; never called natively, present only for the trait bound.
	impl sp_genesis_builder::GenesisBuilder<Block> for Runtime {
		fn build_state(_: Vec<u8>) -> sp_genesis_builder::Result {
			unimplemented!()
		}

		fn get_preset(_id: &Option<sp_genesis_builder::PresetId>) -> Option<Vec<u8>> {
			unimplemented!()
		}

		fn preset_names() -> Vec<sp_genesis_builder::PresetId> {
			unimplemented!()
		}
	}
}
@@ -0,0 +1,24 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// The overhead benchmark command and the client plumbing it needs.
pub mod command;
/// Template handling for writing the benchmark results into weight files.
pub mod template;

// Internal fake runtime used only to satisfy client trait bounds.
mod fake_runtime_api;
/// Builders for `System::remark` extrinsics used by the per-extrinsic benchmark.
pub mod remark_builder;

pub use command::{OpaqueBlock, OverheadCmd};
@@ -0,0 +1,127 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::extrinsic::ExtrinsicBuilder;
use codec::{Decode, Encode};
use sc_client_api::UsageProvider;
use sp_api::{ApiExt, Core, Metadata, ProvideRuntimeApi};
use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic};
use std::sync::Arc;
use subxt::{
client::RuntimeVersion as SubxtRuntimeVersion,
config::{substrate::SubstrateExtrinsicParamsBuilder, HashFor},
Config, OfflineClient, SubstrateConfig,
};
/// Remark builder preconfigured for the standard substrate chain configuration.
pub type SubstrateRemarkBuilder = DynamicRemarkBuilder<SubstrateConfig>;

/// Remark builder that can be used to build simple extrinsics for
/// FRAME-based runtimes.
pub struct DynamicRemarkBuilder<C: Config> {
	// Offline subxt client used to construct and sign the extrinsics.
	offline_client: OfflineClient<C>,
}
impl<C: Config> DynamicRemarkBuilder<C> {
	/// Initializes a new remark builder from a client.
	///
	/// This will first fetch metadata and runtime version from the runtime and then
	/// construct an offline client that provides the extrinsics.
	///
	/// # Errors
	/// Returns an error when the metadata or the runtime version cannot be fetched
	/// or decoded.
	pub fn new_from_client<Client, Block>(client: Arc<Client>) -> sc_cli::Result<Self>
	where
		Block: BlockT,
		Client: UsageProvider<Block> + ProvideRuntimeApi<Block>,
		Client::Api: Metadata<Block> + Core<Block>,
	{
		let genesis = client.usage_info().chain.best_hash;
		let api = client.runtime_api();

		let Ok(Some(metadata_api_version)) = api.api_version::<dyn Metadata<Block>>(genesis) else {
			return Err("Unable to fetch metadata runtime API version.".to_string().into());
		};

		log::debug!("Found metadata API version {}.", metadata_api_version);
		let opaque_metadata = if metadata_api_version > 1 {
			let Ok(supported_metadata_versions) = api.metadata_versions(genesis) else {
				return Err("Unable to fetch metadata versions".to_string().into());
			};

			let latest = supported_metadata_versions
				.into_iter()
				.max()
				.ok_or("No stable metadata versions supported".to_string())?;

			api.metadata_at_version(genesis, latest)
				.map_err(|e| format!("Unable to fetch metadata: {:?}", e))?
				.ok_or("Unable to decode metadata".to_string())?
		} else {
			// Fall back to using the non-versioned metadata API.
			api.metadata(genesis)
				.map_err(|e| format!("Unable to fetch metadata: {:?}", e))?
		};

		// Was an `unwrap()`: a runtime-API failure here should surface as an error,
		// consistent with every other fallible call in this constructor.
		let version = api
			.version(genesis)
			.map_err(|e| format!("Unable to fetch runtime version: {:?}", e))?;
		let runtime_version = SubxtRuntimeVersion {
			spec_version: version.spec_version,
			transaction_version: version.transaction_version,
		};
		let metadata = subxt::Metadata::decode(&mut (*opaque_metadata).as_slice())?;
		// Re-encode/decode to translate between the client hash type and the subxt hash type.
		let genesis = HashFor::<C>::decode(&mut &genesis.encode()[..])
			.map_err(|_| "Incompatible hash types?")?;

		Ok(Self { offline_client: OfflineClient::new(genesis, runtime_version, metadata) })
	}
}
impl<C: Config> DynamicRemarkBuilder<C> {
	/// Constructs a new remark builder from pre-fetched chain information.
	pub fn new(
		metadata: subxt::Metadata,
		genesis_hash: HashFor<C>,
		runtime_version: SubxtRuntimeVersion,
	) -> Self {
		let offline_client = OfflineClient::new(genesis_hash, runtime_version, metadata);
		Self { offline_client }
	}
}
impl ExtrinsicBuilder for DynamicRemarkBuilder<SubstrateConfig> {
	fn pallet(&self) -> &str {
		"system"
	}

	fn extrinsic(&self) -> &str {
		"remark"
	}

	/// Builds a signed `System::remark` extrinsic with the given nonce,
	/// signed by the well-known dev account `Alice`.
	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
		let signer = subxt_signer::sr25519::dev::alice();
		let dynamic_tx = subxt::dynamic::tx("System", "remark", vec![Vec::<u8>::new()]);

		// Default transaction parameters assume a nonce of 0, so set it explicitly.
		let params = SubstrateExtrinsicParamsBuilder::new().nonce(nonce.into()).build();

		// Was an `unwrap()`: report a failure to assemble the partial extrinsic via the
		// `Err` this function already returns instead of panicking.
		let transaction = self
			.offline_client
			.tx()
			.create_partial_offline(&dynamic_tx, params)
			.map_err(|_| "Unable to create partial extrinsic")?
			.sign(&signer);

		let mut encoded = transaction.into_encoded();

		OpaqueExtrinsic::try_from_encoded_extrinsic(&mut encoded)
			.map_err(|_| "Unable to construct OpaqueExtrinsic")
	}
}
@@ -0,0 +1,128 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Converts a benchmark result into [`TemplateData`] and writes
//! it into the `weights.hbs` template.
use sc_cli::Result;
use handlebars::Handlebars;
use log::info;
use serde::Serialize;
use std::{env, fs, path::PathBuf};
use crate::{
overhead::command::{BenchmarkType, OverheadParams},
shared::{Stats, UnderscoreHelper},
};
/// Version of this crate, baked in at compile time; rendered into the generated file header.
static VERSION: &str = env!("CARGO_PKG_VERSION");
/// The Handlebars template that the benchmark results are rendered into.
static TEMPLATE: &str = include_str!("./weights.hbs");
/// Data consumed by Handlebar to fill out the `weights.hbs` template.
#[derive(Serialize, Debug, Clone)]
pub(crate) struct TemplateData {
	/// Long name of the benchmark. Can be "BlockExecution" or "ExtrinsicBase".
	long_name: String,
	/// Short name of the benchmark. Can be "block" or "extrinsic".
	short_name: String,
	/// Name of the runtime. Taken from the chain spec.
	runtime_name: String,
	/// Version of the benchmarking CLI used.
	version: String,
	/// Date that the template was filled out.
	date: String,
	/// Hostname of the machine that executed the benchmarks.
	hostname: String,
	/// CPU name of the machine that executed the benchmarks.
	cpuname: String,
	/// Header for the generated file.
	header: String,
	/// Command line arguments that were passed to the CLI.
	args: Vec<String>,
	/// Params of the executed command.
	params: OverheadParams,
	/// Stats about the benchmark result.
	stats: Stats,
	/// The resulting ref time weight.
	ref_time: u64,
	/// The size of the proof weight.
	proof_size: u64,
}
impl TemplateData {
	/// Returns a new [`Self`] from the given params.
	pub(crate) fn new(
		t: BenchmarkType,
		chain_name: &String,
		params: &OverheadParams,
		stats: &Stats,
		proof_size: u64,
	) -> Result<Self> {
		// Derive the final ref-time weight from the raw stats via the configured weight params.
		let ref_time = params.weight.calc_weight(stats)?;
		// Optional header file (e.g. a license banner) prepended to the generated weight file.
		let header = params
			.header
			.as_ref()
			.map(|p| std::fs::read_to_string(p))
			.transpose()?
			.unwrap_or_default();

		Ok(TemplateData {
			short_name: t.short_name().into(),
			long_name: t.long_name().into(),
			runtime_name: chain_name.to_owned(),
			version: VERSION.into(),
			date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(),
			hostname: params.hostinfo.hostname(),
			cpuname: params.hostinfo.cpuname(),
			header,
			// Record the exact invocation so the generated file documents how it was produced.
			args: env::args().collect::<Vec<String>>(),
			params: params.clone(),
			stats: stats.clone(),
			ref_time,
			proof_size,
		})
	}

	/// Fill out the `weights.hbs` HBS template with its own data.
	///
	/// `path` must be a directory (defaults to the current directory when `None`);
	/// the file name is derived from the benchmark's short name — see [`Self::build_path`].
	pub fn write(&self, path: &Option<PathBuf>) -> Result<()> {
		let mut handlebars = Handlebars::new();
		// Format large integers with underscores.
		handlebars.register_helper("underscore", Box::new(UnderscoreHelper));
		// Don't HTML escape any characters.
		handlebars.register_escape_fn(|s| -> String { s.to_string() });

		let out_path = self.build_path(path)?;
		let mut fd = fs::File::create(&out_path)?;
		info!("Writing weights to {:?}", fs::canonicalize(&out_path)?);

		handlebars
			.render_template_to_write(TEMPLATE, &self, &mut fd)
			.map_err(|e| format!("HBS template write: {:?}", e).into())
	}

	/// Build a path for the weight file.
	///
	/// `weight_out` must point to a directory; the output file inside it is named
	/// `<short_name>_weights.rs`.
	fn build_path(&self, weight_out: &Option<PathBuf>) -> Result<PathBuf> {
		let mut path = weight_out.clone().unwrap_or_else(|| PathBuf::from("."));
		if !path.is_dir() {
			return Err("Need directory as --weight-path".into());
		}
		path.push(format!("{}_weights.rs", self.short_name));
		Ok(path)
	}
}
@@ -0,0 +1,76 @@
{{header}}
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
//! DATE: {{date}}
//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}`
//!
//! SHORT-NAME: `{{short_name}}`, LONG-NAME: `{{long_name}}`, RUNTIME: `{{runtime_name}}`
//! WARMUPS: `{{params.bench.warmup}}`, REPEAT: `{{params.bench.repeat}}`
//! WEIGHT-PATH: `{{params.weight.weight_path}}`
//! WEIGHT-METRIC: `{{params.weight.weight_metric}}`, WEIGHT-MUL: `{{params.weight.weight_mul}}`, WEIGHT-ADD: `{{params.weight.weight_add}}`
// Executed Command:
{{#each args as |arg|}}
// {{arg}}
{{/each}}
use sp_core::parameter_types;
use sp_weights::{constants::WEIGHT_REF_TIME_PER_NANOS, Weight};
parameter_types! {
{{#if (eq short_name "block")}}
/// Weight of executing an empty block.
{{else}}
/// Weight of executing a NO-OP extrinsic, for example `System::remark`.
{{/if}}
/// Calculated by multiplying the *{{params.weight.weight_metric}}* with `{{params.weight.weight_mul}}` and adding `{{params.weight.weight_add}}`.
///
/// Stats nanoseconds:
/// Min, Max: {{underscore stats.min}}, {{underscore stats.max}}
/// Average: {{underscore stats.avg}}
/// Median: {{underscore stats.median}}
/// Std-Dev: {{stats.stddev}}
///
/// Percentiles nanoseconds:
/// 99th: {{underscore stats.p99}}
/// 95th: {{underscore stats.p95}}
/// 75th: {{underscore stats.p75}}
pub const {{long_name}}Weight: Weight =
Weight::from_parts(WEIGHT_REF_TIME_PER_NANOS.saturating_mul({{underscore ref_time}}), {{underscore proof_size}});
}
#[cfg(test)]
mod test_weights {
use sp_weights::constants;
/// Checks that the weight exists and is sane.
// NOTE: If this test fails but you are sure that the generated values are fine,
// you can delete it.
#[test]
fn sane() {
let w = super::{{long_name}}Weight::get();
{{#if (eq short_name "block")}}
// At least 100 µs.
assert!(
w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS,
"Weight should be at least 100 µs."
);
// At most 50 ms.
assert!(
w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS,
"Weight should be at most 50 ms."
);
{{else}}
// At least 10 µs.
assert!(
w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS,
"Weight should be at least 10 µs."
);
// At most 1 ms.
assert!(
w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
"Weight should be at most 1 ms."
);
{{/if}}
}
}
@@ -0,0 +1,3 @@
The pallet command is explained in [frame/benchmarking](../../../../../frame/benchmarking/README.md).
License: Apache-2.0
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,89 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::LOG_TARGET;
use sp_core::{LogLevelFilter, RuntimeInterfaceLogLevel};
use sp_runtime_interface::{
pass_by::{PassAs, PassFatPointerAndRead, ReturnAs},
runtime_interface,
};
use std::cell::OnceCell;
thread_local! {
	/// Log level filter that the runtime will use.
	///
	/// Must be initialized by the host before invoking the runtime executor. You may use `init`
	/// for this or set it manually. Accepted values are either plain log levels (e.g. `info`)
	/// or filter directives like `warn,runtime=info`.
	pub static RUNTIME_LOG: OnceCell<env_filter::Filter> = OnceCell::new();
}
/// Init runtime logger with the following priority (high to low):
/// - CLI argument
/// - Environment variable
/// - Default logger settings
///
/// # Panics
///
/// Panics when the resolved filter string cannot be parsed, or when the filter was
/// already initialized (it may only be set once per thread).
pub fn init(arg: Option<String>) {
	// Resolve the filter string by priority: CLI arg > `RUNTIME_LOG` env var > host max level.
	let filter_str = arg
		.or_else(|| std::env::var("RUNTIME_LOG").ok())
		.unwrap_or_else(|| log::max_level().to_string());

	let filter = env_filter::Builder::new()
		.try_parse(&filter_str)
		// Include the offending string and the parse error in the panic message so a bad
		// `--runtime-log`/`RUNTIME_LOG` value is diagnosable.
		.unwrap_or_else(|e| panic!("Invalid runtime log filter '{filter_str}': {e:?}"))
		.build();

	RUNTIME_LOG.with(|cell| {
		cell.set(filter)
			.expect("`RUNTIME_LOG` must only be initialized once by the host; qed");
		log::info!(target: LOG_TARGET, "Initialized runtime log filter to '{}'", filter_str);
	});
}
/// Alternative implementation to `sp_runtime_interface::logging::HostFunctions` for benchmarking.
#[runtime_interface]
pub trait Logging {
	/// Forward a log record from the runtime to the host logger, but only if it passes the
	/// host-side `RUNTIME_LOG` filter.
	#[allow(dead_code)]
	fn log(
		level: PassAs<RuntimeInterfaceLogLevel, u8>,
		target: PassFatPointerAndRead<&str>,
		message: PassFatPointerAndRead<&[u8]>,
	) {
		// The runtime hands us raw bytes; reject anything that is not valid UTF-8.
		let Ok(message) = core::str::from_utf8(message) else {
			log::error!(target: LOG_TARGET, "Runtime tried to log invalid UTF-8 data");
			return;
		};
		let level = log::Level::from(level);
		let metadata = log::MetadataBuilder::new().level(level).target(target).build();
		// Only emit the record when the filter enables this target/level combination.
		if RUNTIME_LOG.with(|filter| filter.get().expect("Must be set by host").enabled(&metadata))
		{
			log::log!(target: target, level, "{}", message);
		}
	}

	/// Returns the maximum log level allowed by the host-side `RUNTIME_LOG` filter.
	#[allow(dead_code)]
	fn max_level() -> ReturnAs<LogLevelFilter, u8> {
		RUNTIME_LOG
			// .filter() gives us the max level of this filter
			.with(|filter| filter.get().expect("Must be set by host").filter())
			.into()
	}
}
@@ -0,0 +1,308 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod command;
mod logging;
mod types;
mod writer;
use crate::shared::HostInfoParams;
use clap::ValueEnum;
use frame_support::Serialize;
use sc_cli::{
WasmExecutionMethod, WasmtimeInstantiationStrategy, DEFAULT_WASMTIME_INSTANTIATION_STRATEGY,
DEFAULT_WASM_EXECUTION_METHOD,
};
use std::{fmt::Debug, path::PathBuf};
/// Logging target used by this module and its submodules.
// `'static` is implied on `const` references and flagged by clippy::redundant_static_lifetimes.
const LOG_TARGET: &str = "frame::benchmark::pallet";
// Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be
// used like crate names with `_`
fn parse_pallet_name(pallet: &str) -> std::result::Result<String, String> {
Ok(pallet.replace("-", "_"))
}
/// List options for available benchmarks.
///
/// Used as the value of the `--list` CLI flag to control how much detail is printed.
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum ListOutput {
	/// List all available pallets and extrinsics.
	All,
	/// List all available pallets only.
	Pallets,
}
/// Benchmark the extrinsic weight of FRAME Pallets.
// NOTE: The `///` doc comments on the fields below double as the CLI `--help` output via the
// clap derive — treat them as user-facing text, not internal documentation.
#[derive(Debug, clap::Parser)]
pub struct PalletCmd {
	/// Select a FRAME Pallets to benchmark, or `*` for all (in which case `extrinsic` must be
	/// `*`).
	#[arg(short, long, alias = "pallet", num_args = 1.., value_delimiter = ',', value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
	pub pallets: Vec<String>,
	/// Select an extrinsic inside the pallet to benchmark, or `*` or 'all' for all.
	#[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))]
	pub extrinsic: Option<String>,
	/// Comma separated list of pallets that should be excluded from the benchmark.
	#[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
	pub exclude_pallets: Vec<String>,
	/// Comma separated list of `pallet::extrinsic` combinations that should not be run.
	///
	/// Example: `frame_system::remark,pallet_balances::transfer_keep_alive`
	#[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
	pub exclude_extrinsics: Vec<String>,
	/// Run benchmarks for all pallets and extrinsics.
	///
	/// This is equivalent to running `--pallet * --extrinsic *`.
	#[arg(long)]
	pub all: bool,
	/// Select how many samples we should take across the variable components.
	#[arg(short, long, default_value_t = 50)]
	pub steps: u32,
	/// Indicates lowest values for each of the component ranges.
	#[arg(long = "low", value_delimiter = ',')]
	pub lowest_range_values: Vec<u32>,
	/// Indicates highest values for each of the component ranges.
	#[arg(long = "high", value_delimiter = ',')]
	pub highest_range_values: Vec<u32>,
	/// Select how many repetitions of this benchmark should run from within the wasm.
	#[arg(short, long, default_value_t = 20)]
	pub repeat: u32,
	/// Select how many repetitions of this benchmark should run from the client.
	///
	/// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory.
	#[arg(long, default_value_t = 1)]
	pub external_repeat: u32,
	/// Print the raw results in JSON format.
	#[arg(long = "json")]
	pub json_output: bool,
	/// Write the raw results in JSON format into the given file.
	#[arg(long, conflicts_with = "json_output")]
	pub json_file: Option<PathBuf>,
	/// Don't print the median-slopes linear regression analysis.
	#[arg(long)]
	pub no_median_slopes: bool,
	/// Don't print the min-squares linear regression analysis.
	#[arg(long)]
	pub no_min_squares: bool,
	/// Output the benchmarks to a Rust file at the given path.
	#[arg(long)]
	pub output: Option<PathBuf>,
	/// Add a header file to your outputted benchmarks.
	#[arg(long)]
	pub header: Option<PathBuf>,
	/// Path to Handlebars template file used for outputting benchmark results. (Optional)
	#[arg(long)]
	pub template: Option<PathBuf>,
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub hostinfo_params: HostInfoParams,
	/// Which analysis function to use when outputting benchmarks:
	/// * min-squares (default)
	/// * median-slopes
	/// * max (max of min squares and median slopes for each value)
	#[arg(long)]
	pub output_analysis: Option<String>,
	/// Which analysis function to use when analyzing measured proof sizes.
	#[arg(long, default_value("median-slopes"))]
	pub output_pov_analysis: Option<String>,
	/// The PoV estimation mode of a benchmark if no `pov_mode` attribute is present.
	#[arg(long, default_value("max-encoded-len"), value_enum)]
	pub default_pov_mode: command::PovEstimationMode,
	/// Ignore the error when PoV modes reference unknown storage items or pallets.
	#[arg(long)]
	pub ignore_unknown_pov_mode: bool,
	/// Set the heap pages while running benchmarks. If not set, the default value from the client
	/// is used.
	#[arg(long)]
	pub heap_pages: Option<u64>,
	/// Disable verification logic when running benchmarks.
	#[arg(long)]
	pub no_verify: bool,
	/// Display and run extra benchmarks that would otherwise not be needed for weight
	/// construction.
	#[arg(long)]
	pub extra: bool,
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: sc_cli::SharedParams,
	/// Method for executing Wasm runtime code.
	#[arg(
		long = "wasm-execution",
		value_name = "METHOD",
		value_enum,
		ignore_case = true,
		default_value_t = DEFAULT_WASM_EXECUTION_METHOD,
	)]
	pub wasm_method: WasmExecutionMethod,
	/// The WASM instantiation method to use.
	///
	/// Only has an effect when `wasm-execution` is set to `compiled`.
	#[arg(
		long = "wasm-instantiation-strategy",
		value_name = "STRATEGY",
		default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY,
		value_enum,
	)]
	pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy,
	/// Optional runtime blob to use instead of the one from the genesis config.
	#[arg(long, conflicts_with = "chain", required_if_eq("genesis_builder", "runtime"))]
	pub runtime: Option<PathBuf>,
	/// Set the runtime log level.
	///
	/// This will overwrite the `RUNTIME_LOG` environment variable. If neither is set, the CLI
	/// default set by `RUST_LOG` setting is used.
	#[arg(long)]
	pub runtime_log: Option<String>,
	/// Do not fail if there are unknown but also unused host functions in the runtime.
	#[arg(long)]
	pub allow_missing_host_functions: bool,
	/// How to construct the genesis state.
	///
	/// Uses `GenesisBuilderPolicy::Spec` by default.
	#[arg(long, value_enum, alias = "genesis-builder-policy")]
	pub genesis_builder: Option<GenesisBuilderPolicy>,
	/// The preset that we expect to find in the GenesisBuilder runtime API.
	///
	/// This can be useful when a runtime has a dedicated benchmarking preset instead of using the
	/// default one.
	#[arg(long, default_value = sp_genesis_builder::DEV_RUNTIME_PRESET)]
	pub genesis_builder_preset: String,
	/// DEPRECATED: This argument has no effect.
	#[arg(long = "execution")]
	pub execution: Option<String>,
	/// Limit the memory the database cache can use.
	#[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)]
	pub database_cache_size: u32,
	/// List and print available benchmarks in a csv-friendly format.
	///
	/// NOTE: `num_args` and `require_equals` are required to allow `--list`
	#[arg(long, value_enum, ignore_case = true, num_args = 0..=1, require_equals = true, default_missing_value("All"))]
	pub list: Option<ListOutput>,
	/// Don't include csv header when listing benchmarks.
	#[arg(long, requires("list"))]
	pub no_csv_header: bool,
	/// If enabled, the storage info is not displayed in the output next to the analysis.
	///
	/// This is independent of the storage info appearing in the *output file*. Use a Handlebar
	/// template for that purpose.
	#[arg(long)]
	pub no_storage_info: bool,
	/// The assumed default maximum size of any `StorageMap`.
	///
	/// When the maximum size of a map is not defined by the runtime developer,
	/// this value is used as a worst case scenario. It will affect the calculated worst case
	/// PoV size for accessing a value in a map, since the PoV will need to include the trie
	/// nodes down to the underlying value.
	#[clap(long = "map-size", default_value = "1000000")]
	pub worst_case_map_values: u32,
	/// Adjust the PoV estimation by adding additional trie layers to it.
	///
	/// This should be set to `log16(n)` where `n` is the number of top-level storage items in the
	/// runtime, eg. `StorageMap`s and `StorageValue`s. A value of 2 to 3 is usually sufficient.
	/// Each layer will result in an additional 495 bytes PoV per distinct top-level access.
	/// Therefore multiple `StorageMap` accesses only suffer from this increase once. The exact
	/// number of storage items depends on the runtime and the deployed pallets.
	#[clap(long, default_value = "2")]
	pub additional_trie_layers: u8,
	/// A path to a `.json` file with existing benchmark results generated with `--json` or
	/// `--json-file`. When specified the benchmarks are not actually executed, and the data for
	/// the analysis is read from this file.
	#[arg(long)]
	pub json_input: Option<PathBuf>,
	/// Allow overwriting a single file with multiple results.
	///
	/// This exists only to restore legacy behaviour. It should never actually be needed.
	#[arg(long)]
	pub unsafe_overwrite_results: bool,
	/// Do not print a summary at the end of the run.
	///
	/// These summaries can be very long when benchmarking multiple pallets at once. For CI
	/// use-cases, this option reduces the noise.
	// NOTE(review): unlike the other fields this one is crate-private — presumably only read
	// inside this crate; confirm before exposing.
	#[arg(long)]
	quiet: bool,
	/// Do not enable proof recording during time benchmarking.
	///
	/// By default, proof recording is enabled during benchmark execution. This can slightly
	/// inflate the resulting time weights. For teyrchains using PoV-reclaim, this is typically the
	/// correct setting. Chains that ignore the proof size dimension of weight (e.g. relay chain,
	/// solo-chains) can disable proof recording to get more accurate results.
	// NOTE(review): also crate-private, matching `quiet` above.
	#[arg(long)]
	disable_proof_recording: bool,
}
/// How the genesis state for benchmarking should be built.
// The variant names are exposed on the CLI in kebab-case (see `rename_all` below), and the
// `///` comments double as `--help` output via clap.
#[derive(clap::ValueEnum, Debug, Eq, PartialEq, Clone, Copy, Serialize)]
#[clap(rename_all = "kebab-case")]
pub enum GenesisBuilderPolicy {
	/// Do not provide any genesis state.
	///
	/// Benchmarks are advised to function with this, since they should setup their own required
	/// state. However, to keep backwards compatibility, this is not the default.
	None,
	/// Let the runtime build the genesis state through its `BuildGenesisConfig` runtime API.
	/// This will use the `development` preset by default.
	Runtime,
	/// Use the runtime from the Spec file to build the genesis state.
	SpecRuntime,
	/// Use the spec file to build the genesis state. This fails when there is no spec.
	#[value(alias = "spec")]
	SpecGenesis,
}
@@ -0,0 +1,69 @@
{{header}}
//! Autogenerated weights for `{{pallet}}`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
//! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: `{{cmd.repeat}}`, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}`
//! WORST CASE MAP SIZE: `{{cmd.worst_case_map_values}}`
//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}`
//! WASM-EXECUTION: `{{cmd.wasm_execution}}`, CHAIN: `{{cmd.chain}}`, DB CACHE: {{cmd.db_cache}}
// Executed Command:
{{#each args as |arg|}}
// {{arg}}
{{/each}}
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
use frame_support::{traits::Get, weights::Weight};
use core::marker::PhantomData;
/// Weight functions for `{{pallet}}`.
pub struct WeightInfo<T>(PhantomData<T>);
{{#if (eq pallet "frame_system_extensions")}}
impl<T: frame_system::Config> frame_system::ExtensionsWeightInfo for WeightInfo<T> {
{{else}}
impl<T: frame_system::Config> {{pallet}}::WeightInfo for WeightInfo<T> {
{{/if}}
{{#each benchmarks as |benchmark|}}
{{#each benchmark.comments as |comment|}}
/// {{comment}}
{{/each}}
{{#each benchmark.component_ranges as |range|}}
/// The range of component `{{range.name}}` is `[{{range.min}}, {{range.max}}]`.
{{/each}}
fn {{benchmark.name~}}
(
{{~#each benchmark.components as |c| ~}}
{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
) -> Weight {
// Proof Size summary in bytes:
// Measured: `{{benchmark.base_recorded_proof_size}}{{#each benchmark.component_recorded_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}`
// Estimated: `{{benchmark.base_calculated_proof_size}}{{#each benchmark.component_calculated_proof_size as |cp|}} + {{cp.name}} * ({{cp.slope}} ±{{underscore cp.error}}){{/each}}`
// Minimum execution time: {{underscore benchmark.min_execution_time}}_000 picoseconds.
Weight::from_parts({{underscore benchmark.base_weight}}, 0)
.saturating_add(Weight::from_parts(0, {{benchmark.base_calculated_proof_size}}))
{{#each benchmark.component_weight as |cw|}}
// Standard Error: {{underscore cw.error}}
.saturating_add(Weight::from_parts({{underscore cw.slope}}, 0).saturating_mul({{cw.name}}.into()))
{{/each}}
{{#if (ne benchmark.base_reads "0")}}
.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}}))
{{/if}}
{{#each benchmark.component_reads as |cr|}}
.saturating_add(T::DbWeight::get().reads(({{cr.slope}}_u64).saturating_mul({{cr.name}}.into())))
{{/each}}
{{#if (ne benchmark.base_writes "0")}}
.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}}))
{{/if}}
{{#each benchmark.component_writes as |cw|}}
.saturating_add(T::DbWeight::get().writes(({{cw.slope}}_u64).saturating_mul({{cw.name}}.into())))
{{/each}}
{{#each benchmark.component_calculated_proof_size as |cp|}}
.saturating_add(Weight::from_parts(0, {{cp.slope}}).saturating_mul({{cp.name}}.into()))
{{/each}}
}
{{/each}}
}
@@ -0,0 +1,63 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Various types used by this crate.
use sc_cli::Result;
use sp_core::traits::{RuntimeCode, WrappedRuntimeCode};
use sp_runtime::traits::Hash;
/// A runtime blob that was either fetched from genesis storage or loaded from a file.
// NOTE: This enum is only needed for the annoying lifetime bounds on `RuntimeCode`. Otherwise we
// could just directly return the blob.
pub enum FetchedCode<'a, B, H> {
	/// Runtime code living in the genesis state backend.
	FromGenesis { state: sp_state_machine::backend::BackendRuntimeCode<'a, B, H> },
	/// Runtime code loaded from a standalone file, plus its heap-pages setting and hash.
	FromFile { wrapped_code: WrappedRuntimeCode<'a>, heap_pages: Option<u64>, hash: Vec<u8> },
}
impl<'a, B, H> FetchedCode<'a, B, H>
where
	H: Hash,
	B: sc_client_api::StateBackend<H>,
{
	/// Returns a [`RuntimeCode`] descriptor for the fetched blob.
	pub fn code(&'a self) -> Result<RuntimeCode<'a>> {
		match self {
			Self::FromGenesis { state } => {
				// Delegate to the state backend and convert its error into ours.
				state.runtime_code().map_err(Into::into)
			},
			Self::FromFile { wrapped_code, heap_pages, hash } => {
				let runtime_code = RuntimeCode {
					code_fetcher: wrapped_code,
					heap_pages: *heap_pages,
					hash: hash.clone(),
				};
				Ok(runtime_code)
			},
		}
	}
}
/// Maps a (pallet, benchmark) pair to the ranges of that benchmark's components.
pub(crate) type ComponentRangeMap =
	std::collections::HashMap<(String, String), Vec<ComponentRange>>;
/// The inclusive range of a component.
#[derive(serde::Serialize, Debug, Clone, Eq, PartialEq)]
pub(crate) struct ComponentRange {
	/// Name of the component.
	pub(crate) name: String,
	/// Minimal valid value of the component (inclusive).
	pub(crate) min: u32,
	/// Maximal valid value of the component (inclusive).
	pub(crate) max: u32,
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,19 @@
# Shared code
Contains code that is shared among multiple sub-commands.
## Arguments
- `--mul` Multiply the result with a factor. Can be used to manually adjust for future chain growth.
- `--add` Add a value to the result. Can be used to manually offset the results.
- `--metric` Set the metric to use for calculating the final weight from the raw data. Defaults to `average`.
- `--weight-path` Set the file or directory to write the weight files to.
- `--db` The database backend to use. This depends on your snapshot.
- `--pruning` Set the pruning mode of the node. Some benchmarks require you to set this to `archive`.
- `--base-path` The location on the disk that should be used for the benchmarks. You can try this on different disks or
  even on a mounted RAM-disk. It is important to use the same location that will later on be used to store the chain
  data, in order to get correct results.
- `--header` Optional file header which will be prepended to the weight output file. Can be used for adding LICENSE
headers.
License: Apache-2.0
@@ -0,0 +1,141 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::overhead::command::TeyrchainExtension;
use sc_chain_spec::{ChainSpec, GenericChainSpec, GenesisConfigBuilderRuntimeCaller};
use sc_cli::Result;
use serde_json::Value;
use sp_storage::{well_known_keys::CODE, Storage};
use sp_wasm_interface::HostFunctions;
use std::{borrow::Cow, path::PathBuf};
/// When the runtime could not build the genesis storage.
const ERROR_CANNOT_BUILD_GENESIS: &str = "The runtime returned \
an error when trying to build the genesis storage. Please ensure that all pallets \
define a genesis config that can be built. This can be tested with: \
https://github.com/paritytech/polkadot-sdk/pull/3412";

/// Warn when using the chain spec to generate the genesis state.
// `'static` is implied on `const` references (clippy::redundant_static_lifetimes).
pub const WARN_SPEC_GENESIS_CTOR: &str = "Using the chain spec instead of the runtime to \
generate the genesis state is deprecated. Please remove the `--chain`/`--dev`/`--local` argument, \
point `--runtime` to your runtime blob and set `--genesis-builder=runtime`. This warning may \
become a hard error any time after December 2024.";
/// Defines how the chain specification shall be used to build the genesis storage.
pub enum SpecGenesisSource {
	/// Use preset provided by the runtime embedded in the chain specification.
	/// The payload is the name of the preset to use.
	Runtime(String),
	/// Use provided chain-specification JSON file.
	SpecJson,
	/// Use default storage.
	None,
}
/// Defines how the genesis storage shall be built.
pub enum GenesisStateHandler {
	/// Build the genesis state from a chain spec, per the given [`SpecGenesisSource`].
	ChainSpec(Box<dyn ChainSpec>, SpecGenesisSource),
	/// Build the genesis state from a raw runtime blob and an optional preset name.
	Runtime(Vec<u8>, Option<String>),
}
impl GenesisStateHandler {
	/// Populate the genesis storage.
	///
	/// If the raw storage is derived from a named genesis preset, `json_patcher` can be used to
	/// inject values into the preset.
	pub fn build_storage<HF: HostFunctions>(
		&self,
		json_patcher: Option<Box<dyn FnOnce(Value) -> Value + 'static>>,
	) -> Result<Storage> {
		match self {
			GenesisStateHandler::ChainSpec(chain_spec, source) => match source {
				SpecGenesisSource::Runtime(preset) => {
					// Extract the runtime blob from the spec's storage and let the runtime
					// itself build the genesis state from the named preset.
					let mut storage = chain_spec.build_storage()?;
					let code_bytes = storage
						.top
						.remove(CODE)
						.ok_or("chain spec genesis does not contain code")?;
					genesis_from_code::<HF>(code_bytes.as_slice(), preset, json_patcher)
				},
				SpecGenesisSource::SpecJson => chain_spec
					.build_storage()
					.map_err(|e| format!("{ERROR_CANNOT_BUILD_GENESIS}\nError: {e}").into()),
				SpecGenesisSource::None => Ok(Storage::default()),
			},
			GenesisStateHandler::Runtime(code_bytes, Some(preset)) =>
				genesis_from_code::<HF>(code_bytes.as_slice(), preset, json_patcher),
			GenesisStateHandler::Runtime(_, None) => Ok(Storage::default()),
		}
	}

	/// Get the runtime code blob.
	pub fn get_code_bytes(&self) -> Result<Cow<'_, [u8]>> {
		match self {
			GenesisStateHandler::ChainSpec(chain_spec, _) => {
				let mut storage = chain_spec.build_storage()?;
				storage
					.top
					.remove(CODE)
					// Pass `Cow::from` directly: no closure needed (clippy::redundant_closure).
					.map(Cow::from)
					.ok_or("chain spec genesis does not contain code".into())
			},
			GenesisStateHandler::Runtime(code_bytes, _) => Ok(code_bytes.into()),
		}
	}
}
/// Loads a chain spec from a JSON file and extracts its optional para-id extension.
pub fn chain_spec_from_path<HF: HostFunctions>(
	chain: PathBuf,
) -> Result<(Box<dyn ChainSpec>, Option<u32>)> {
	// Parse the on-disk JSON chain spec, including the teyrchain-specific extensions.
	let spec = GenericChainSpec::<TeyrchainExtension, HF>::from_json_file(chain)
		.map_err(|e| format!("Unable to load chain spec: {:?}", e))?;

	// A para-id may or may not be present in the spec's extensions.
	let para_id = spec.extensions().para_id;
	Ok((Box::new(spec), para_id))
}
/// Builds genesis storage by calling into the given runtime blob's `GenesisBuilder` API
/// with the named preset, optionally patched by `storage_patcher`.
fn genesis_from_code<EHF: HostFunctions>(
	code: &[u8],
	genesis_builder_preset: &String,
	storage_patcher: Option<Box<dyn FnOnce(Value) -> Value>>,
) -> Result<Storage> {
	let genesis_config_caller = GenesisConfigBuilderRuntimeCaller::<(
		sp_io::SubstrateHostFunctions,
		frame_benchmarking::benchmarking::HostFunctions,
		EHF,
	)>::new(code);

	// Fetch the named preset, then give the caller a chance to patch it.
	let preset_json = genesis_config_caller.get_named_preset(Some(genesis_builder_preset))?;
	let preset_json = match storage_patcher {
		Some(patcher) => patcher(preset_json),
		None => preset_json,
	};

	let mut storage =
		genesis_config_caller.get_storage_for_patch(preset_json).inspect_err(|e| {
			let presets = genesis_config_caller.preset_names().unwrap_or_default();
			log::error!(
				"Please pick one of the available presets with \
			`--genesis-builder-preset=<PRESET>`. Available presets ({}): {:?}. Error: {:?}",
				presets.len(),
				presets,
				e
			);
		})?;

	// Ensure the executed runtime blob itself is part of the genesis state.
	storage.top.insert(CODE.into(), code.into());
	Ok(storage)
}
@@ -0,0 +1,135 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Code that is shared among all benchmarking sub-commands.
pub mod genesis_state;
pub mod record;
pub mod stats;
pub mod weight_params;
pub use record::BenchRecord;
pub use stats::{StatSelect, Stats};
pub use weight_params::WeightParams;
use clap::Args;
use rand::prelude::*;
use sc_sysinfo::gather_sysinfo;
use serde::Serialize;
/// A Handlebars helper to add an underscore after every 3rd character,
/// i.e. a separator for large numbers.
#[derive(Clone, Copy)]
pub struct UnderscoreHelper;

impl handlebars::HelperDef for UnderscoreHelper {
	/// Renders the first helper parameter with `_` thousands separators.
	fn call<'reg: 'rc, 'rc>(
		&self,
		h: &handlebars::Helper,
		_: &handlebars::Handlebars,
		_: &handlebars::Context,
		_rc: &mut handlebars::RenderContext,
		out: &mut dyn handlebars::Output,
	) -> handlebars::HelperResult {
		use handlebars::JsonRender;
		// NOTE(review): panics if a template invokes the helper without a
		// parameter — consider returning a `RenderError` instead.
		let param = h.param(0).unwrap();
		let underscore_param = underscore(param.value().render());
		out.write(&underscore_param)?;
		Ok(())
	}
}
/// Insert an underscore after every third digit (counted from the right) as a
/// thousands separator, e.g. `1234567` becomes `"1_234_567"`.
fn underscore<Number>(i: Number) -> String
where
	Number: std::string::ToString,
{
	let rendered = i.to_string();
	// Build the separated string back-to-front, then reverse once at the end.
	let mut reversed = String::with_capacity(rendered.len() + rendered.len() / 3);
	for (idx, c) in rendered.chars().rev().enumerate() {
		if idx != 0 && idx % 3 == 0 {
			reversed.push('_');
		}
		reversed.push(c);
	}
	reversed.chars().rev().collect()
}
/// Returns an rng and the seed that was used to create it.
///
/// Uses a random seed if none is provided.
pub fn new_rng(seed: Option<u64>) -> (impl rand::Rng, u64) {
	// `unwrap_or_else` keeps the fallback lazy: `thread_rng` is only queried
	// for entropy when no explicit seed was passed in (the plain `unwrap_or`
	// form would draw a random number even for `Some(seed)`).
	let seed = seed.unwrap_or_else(|| rand::thread_rng().gen::<u64>());
	(rand_pcg::Pcg64::seed_from_u64(seed), seed)
}
/// Returns an error if a debug profile is detected.
///
/// The rust compiler only exposes the binary information whether
/// or not we are in a `debug` build.
/// This means that `release` and `production` cannot be told apart.
/// This function additionally checks for OPT-LEVEL = 3.
pub fn check_build_profile() -> Result<(), String> {
	match (cfg!(build_profile = "debug"), cfg!(build_opt_level = "3")) {
		// A debug build is never acceptable for benchmarking.
		(true, _) => Err("Detected a `debug` profile".into()),
		// Non-debug, but not fully optimized either.
		(false, false) => Err("The optimization level is not set to 3".into()),
		(false, true) => Ok(()),
	}
}
/// Parameters to configure how the host info will be determined.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
#[command(rename_all = "kebab-case")]
pub struct HostInfoParams {
	// NOTE: the `///` docs below double as `--help` output via clap.
	/// Manually override the hostname to use.
	#[arg(long)]
	pub hostname_override: Option<String>,

	/// Specify a fallback hostname if no-one could be detected automatically.
	///
	/// Note: This only exists to make the `hostname` function infallible.
	#[arg(long, default_value = "<UNKNOWN>")]
	pub hostname_fallback: String,

	/// Specify a fallback CPU name if no-one could be detected automatically.
	///
	/// Note: This only exists to make the `cpuname` function infallible.
	#[arg(long, default_value = "<UNKNOWN>")]
	pub cpuname_fallback: String,
}
impl HostInfoParams {
	/// Returns the hostname of the machine.
	///
	/// Resolution order: explicit `--hostname-override`, then the OS
	/// hostname, then `--hostname-fallback`.
	///
	/// Can be used to track on which machine a benchmark was run.
	pub fn hostname(&self) -> String {
		// The `_else` variants keep the fallbacks lazy: the OS hostname is
		// only queried when no override is set, and the fallback string is
		// only cloned when it is actually needed.
		self.hostname_override
			.clone()
			.or_else(|| gethostname::gethostname().into_string().ok())
			.unwrap_or_else(|| self.hostname_fallback.clone())
	}

	/// Returns the CPU name of the current machine.
	///
	/// Can be used to track on which machine a benchmark was run.
	pub fn cpuname(&self) -> String {
		// Only clone the fallback when sysinfo could not detect a CPU name.
		gather_sysinfo().cpu.unwrap_or_else(|| self.cpuname_fallback.clone())
	}
}
@@ -0,0 +1,72 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Defines the [`BenchRecord`] and its facilities for computing [`super::Stats`].
use sc_cli::Result;
use sc_service::Configuration;
use log::info;
use serde::Serialize;
use std::{fs, path::PathBuf, time::Duration};
use super::Stats;
/// Raw output of a Storage benchmark.
#[derive(Debug, Default, Clone, Serialize)]
pub struct BenchRecord {
	/// Multi-Map of value sizes and the time that it took to access them.
	// Each entry is `(value size in bytes, access time in nanoseconds)`.
	ns_per_size: Vec<(u64, u64)>,
}
impl BenchRecord {
	/// Appends a new record. Uses safe casts.
	pub fn append(&mut self, size: usize, d: Duration) -> Result<()> {
		let size = u64::try_from(size).map_err(|e| format!("Size overflow u64: {}", e))?;
		let ns = u64::try_from(d.as_nanos())
			.map_err(|e| format!("Nanoseconds overflow u64: {}", e))?;
		self.ns_per_size.push((size, ns));
		Ok(())
	}

	/// Returns the statistics for *time* and *value size*.
	pub fn calculate_stats(self) -> Result<(Stats, Stats)> {
		let mut sizes = Vec::with_capacity(self.ns_per_size.len());
		let mut times = Vec::with_capacity(self.ns_per_size.len());
		for (size, ns) in self.ns_per_size {
			sizes.push(size);
			times.push(ns);
		}
		// Time first, then value size — the swap relative to the tuple order
		// of the member is intentional.
		Ok((Stats::new(&times)?, Stats::new(&sizes)?))
	}

	/// Writes the raw results as pretty JSON to `out_path`.
	///
	/// When `out_path` is a directory (or empty), a lower-cased file name
	/// built from the DB name and `suffix` is appended first.
	pub fn save_json(&self, cfg: &Configuration, out_path: &PathBuf, suffix: &str) -> Result<()> {
		let mut path = out_path.clone();
		if path.as_os_str().is_empty() || path.is_dir() {
			let file_name = format!("{}_{}", cfg.database, suffix).to_lowercase();
			path.push(file_name);
			path.set_extension("json");
		}

		let json = serde_json::to_string_pretty(&self)
			.map_err(|e| format!("Serializing as JSON: {:?}", e))?;
		fs::write(&path, json)?;
		info!("Raw data written to {:?}", fs::canonicalize(&path)?);
		Ok(())
	}
}
@@ -0,0 +1,188 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Handles statistics that were generated from benchmarking results and
//! that can be used to fill out weight templates.
use sc_cli::Result;
use serde::Serialize;
use std::{fmt, result, str::FromStr};
/// Various statistics that help to gauge the quality of the produced weights.
/// Will be written to the weight file and printed to console.
#[derive(Serialize, Default, Clone)]
pub struct Stats {
	/// Sum of all values.
	pub sum: u64,
	/// Minimal observed value.
	pub min: u64,
	/// Maximal observed value.
	pub max: u64,
	/// Average of all values.
	pub avg: u64,
	/// Median of all values.
	pub median: u64,
	/// Standard deviation of all values.
	pub stddev: f64,
	/// 99th percentile. At least 99% of all values are below this threshold.
	pub p99: u64,
	/// 95th percentile. At least 95% of all values are below this threshold.
	pub p95: u64,
	/// 75th percentile. At least 75% of all values are below this threshold.
	pub p75: u64,
}
/// Selects a specific field from a [`Stats`] object.
/// Not all fields are available.
///
/// Parsed from the CLI via its [`FromStr`] impl, which accepts the
/// spellings `max`, `average`, `median`, `p99`, `p95` and `p75`.
#[derive(Debug, Clone, Copy, Serialize, PartialEq)]
pub enum StatSelect {
	/// Select the maximum.
	Maximum,
	/// Select the average.
	Average,
	/// Select the median.
	Median,
	/// Select the 99th percentile.
	P99Percentile,
	/// Select the 95th percentile.
	P95Percentile,
	/// Select the 75th percentile.
	P75Percentile,
}
impl Stats {
	/// Calculates statistics for the given data and returns them.
	///
	/// Errors on empty input, since no statistic can be computed from it.
	pub fn new(xs: &Vec<u64>) -> Result<Self> {
		if xs.is_empty() {
			return Err("Empty input is invalid".into());
		}
		let (avg, stddev) = Self::avg_and_stddev(xs);
		// Sort once and share the sorted data between all percentile
		// calculations, instead of cloning and re-sorting the input for
		// every single percentile.
		let mut sorted = xs.clone();
		sorted.sort_unstable();
		Ok(Self {
			sum: xs.iter().sum(),
			min: *xs.iter().min().expect("Checked for non-empty above"),
			max: *xs.iter().max().expect("Checked for non-empty above"),
			avg: avg as u64,
			median: Self::percentile(&sorted, 0.50),
			stddev: (stddev * 100.0).round() / 100.0, // round to 1/100
			p99: Self::percentile(&sorted, 0.99),
			p95: Self::percentile(&sorted, 0.95),
			p75: Self::percentile(&sorted, 0.75),
		})
	}

	/// Returns the selected stat.
	pub fn select(&self, s: StatSelect) -> u64 {
		match s {
			StatSelect::Maximum => self.max,
			StatSelect::Average => self.avg,
			StatSelect::Median => self.median,
			StatSelect::P99Percentile => self.p99,
			StatSelect::P95Percentile => self.p95,
			StatSelect::P75Percentile => self.p75,
		}
	}

	/// Returns the *average* and the *standard deviation* (population form).
	fn avg_and_stddev(xs: &Vec<u64>) -> (f64, f64) {
		let avg = xs.iter().map(|x| *x as f64).sum::<f64>() / xs.len() as f64;
		let variance = xs.iter().map(|x| (*x as f64 - avg).powi(2)).sum::<f64>() / xs.len() as f64;
		(avg, variance.sqrt())
	}

	/// Returns the specified percentile from already *sorted* data.
	/// This is best effort since it ignores the interpolation case.
	fn percentile(sorted: &[u64], p: f64) -> u64 {
		// `p` is always in `(0, 1]` here, so the index cannot underflow.
		let index = (sorted.len() as f64 * p).ceil() as usize - 1;
		sorted[index.clamp(0, sorted.len() - 1)]
	}
}
impl fmt::Debug for Stats {
	// Multi-line summary used when the stats are printed to the console.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		writeln!(f, "Total: {}", self.sum)?;
		writeln!(f, "Min: {}, Max: {}", self.min, self.max)?;
		writeln!(f, "Average: {}, Median: {}, Stddev: {}", self.avg, self.median, self.stddev)?;
		// Last line intentionally without trailing newline.
		write!(f, "Percentiles 99th, 95th, 75th: {}, {}, {}", self.p99, self.p95, self.p75)
	}
}
impl Default for StatSelect {
/// Returns the `Average` selector.
fn default() -> Self {
Self::Average
}
}
impl FromStr for StatSelect {
	type Err = &'static str;

	/// Parses a case-insensitive metric name: `max`, `average`, `median`,
	/// `p99`, `p95` or `p75`.
	// The parameter was previously named `day` — a copy-paste artifact.
	fn from_str(input: &str) -> result::Result<Self, Self::Err> {
		match input.to_lowercase().as_str() {
			"max" => Ok(Self::Maximum),
			"average" => Ok(Self::Average),
			"median" => Ok(Self::Median),
			"p99" => Ok(Self::P99Percentile),
			"p95" => Ok(Self::P95Percentile),
			"p75" => Ok(Self::P75Percentile),
			_ => Err("String was not a StatSelect"),
		}
	}
}
#[cfg(test)]
mod test_stats {
	use super::Stats;
	use rand::{seq::SliceRandom, thread_rng};

	/// Statistics of `1..=100` are known in closed form and independent of
	/// input order, so the input is shuffled before checking.
	#[test]
	fn stats_correct() {
		let mut data: Vec<u64> = (1..=100).collect();
		data.shuffle(&mut thread_rng());
		let stats = Stats::new(&data).unwrap();

		assert_eq!(stats.sum, 5050);
		assert_eq!(stats.min, 1);
		assert_eq!(stats.max, 100);
		assert_eq!(stats.avg, 50);
		assert_eq!(stats.median, 50); // 50.5 to be exact.
		assert_eq!(stats.stddev, 28.87); // Rounded with 1/100 precision.
		assert_eq!(stats.p99, 99);
		assert_eq!(stats.p95, 95);
		assert_eq!(stats.p75, 75);
	}

	/// Only the empty input is rejected; any non-empty length is fine.
	#[test]
	fn no_panic_short_lengths() {
		// Empty input does error.
		assert!(Stats::new(&vec![]).is_err());

		// Different small input lengths are fine.
		for l in 1..10 {
			let data = (0..=l).collect();
			assert!(Stats::new(&data).is_ok());
		}
	}
}
@@ -0,0 +1,95 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Calculates a weight from the [`super::Stats`] of a benchmark result.
use sc_cli::Result;
use clap::Args;
use serde::Serialize;
use std::path::PathBuf;
use super::{StatSelect, Stats};
/// Configures the weight generation.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct WeightParams {
	// NOTE: the `///` docs below double as `--help` output via clap.
	/// File or directory to write the *weight* files to.
	///
	/// For Substrate this should be `frame/support/src/weights`.
	#[arg(long)]
	pub weight_path: Option<PathBuf>,

	/// Select a specific metric to calculate the final weight output.
	#[arg(long = "metric", default_value = "average")]
	pub weight_metric: StatSelect,

	/// Multiply the resulting weight with the given factor. Must be positive.
	///
	/// Is applied before `weight_add`.
	#[arg(long = "mul", default_value_t = 1.0)]
	pub weight_mul: f64,

	/// Add the given offset to the resulting weight.
	///
	/// Is applied after `weight_mul`.
	#[arg(long = "add", default_value_t = 0)]
	pub weight_add: u64,
}
impl WeightParams {
	/// Calculates the final weight by multiplying the selected metric with
	/// `weight_mul` and adding `weight_add`.
	/// Does not use safe casts and can overflow.
	///
	/// Errors when `weight_mul` is negative, zero, NaN, subnormal or infinite
	/// (anything not accepted by `f64::is_normal`).
	pub fn calc_weight(&self, stat: &Stats) -> Result<u64> {
		if self.weight_mul.is_sign_negative() || !self.weight_mul.is_normal() {
			return Err("invalid floating number for `weight_mul`".into());
		}
		let s = stat.select(self.weight_metric) as f64;
		// `ceil` rounds up so that weights are over- rather than under-estimated.
		let w = s.mul_add(self.weight_mul, self.weight_add as f64).ceil();
		Ok(w as u64) // No safe cast here since there is no `From<f64>` for `u64`.
	}
}
#[cfg(test)]
mod test_weight_params {
	use super::WeightParams;
	use crate::shared::{StatSelect, Stats};

	/// `calc_weight` applies `metric * mul + add`, rounded up.
	#[test]
	fn calc_weight_works() {
		let stats = Stats { avg: 113, ..Default::default() };
		let params = WeightParams {
			weight_metric: StatSelect::Average,
			weight_mul: 0.75,
			weight_add: 3,
			..Default::default()
		};

		let want = (113.0f64 * 0.75 + 3.0).ceil() as u64; // Ceil for overestimation.
		let got = params.calc_weight(&stats).unwrap();
		assert_eq!(want, got);
	}

	/// A negative multiplier must be rejected instead of silently producing
	/// a nonsensical weight.
	#[test]
	fn calc_weight_detects_negative_mul() {
		let stats = Stats::default();
		let params = WeightParams { weight_mul: -0.75, ..Default::default() };
		assert!(params.calc_weight(&stats).is_err());
	}
}
@@ -0,0 +1,111 @@
# The `benchmark storage` command
The cost of storage operations in a Substrate chain depends on the current chain state.
It is therefore important to regularly update these weights as the chain grows.
This sub-command measures the cost of storage operations for a concrete snapshot.
For the Substrate node it looks like this (for debugging you can use `--release`):
```sh
cargo run --profile=production -- benchmark storage --dev --state-version=1
```
Running the command on Substrate itself is not very meaningful, since the genesis state of the `--dev` chain spec is
used.
The output for the PezkuwiChain client with a recent chain snapshot will give you a better impression. A recent snapshot can
be downloaded from [PezkuwiChain Snapshots].
Then run (remove the `--db=paritydb` if you have a RocksDB snapshot):
```sh
cargo run --profile=production -- benchmark storage --dev --state-version=0 --db=paritydb --weight-path runtime/pezkuwi/constants/src/weights
```
This takes a while since it reads and writes all keys from the snapshot:
```pre
# The 'read' benchmark
Preparing keys from block BlockId::Number(9939462)
Reading 1379083 keys
Time summary [ns]:
Total: 19668919930
Min: 6450, Max: 1217259
Average: 14262, Median: 14190, Stddev: 3035.79
Percentiles 99th, 95th, 75th: 18270, 16190, 14819
Value size summary:
Total: 265702275
Min: 1, Max: 1381859
Average: 192, Median: 80, Stddev: 3427.53
Percentiles 99th, 95th, 75th: 3368, 383, 80
# The 'write' benchmark
Preparing keys from block BlockId::Number(9939462)
Writing 1379083 keys
Time summary [ns]:
Total: 98393809781
Min: 12969, Max: 13282577
Average: 71347, Median: 69499, Stddev: 25145.27
Percentiles 99th, 95th, 75th: 135839, 106129, 79239
Value size summary:
Total: 265702275
Min: 1, Max: 1381859
Average: 192, Median: 80, Stddev: 3427.53
Percentiles 99th, 95th, 75th: 3368, 383, 80
Writing weights to "paritydb_weights.rs"
```
You will see that the [paritydb_weights.rs] file was modified and now contains new weights. The exact command for
PezkuwiChain can be seen at the top of the file.
This uses the most recent block from your snapshot which is printed at the top.
The value size summary tells us that the pruned PezkuwiChain state is ~253 MiB in size.
Reading a value on average takes (in this examples) 14.3 µs and writing 71.3 µs.
The interesting part in the generated weight file tells us the weight constants and some statistics about the
measurements:
```rust
/// Time to read one storage item.
/// Calculated by multiplying the *Average* of all values with `1.1` and adding `0`.
///
/// Stats [NS]:
/// Min, Max: 4_611, 1_217_259
/// Average: 14_262
/// Median: 14_190
/// Std-Dev: 3035.79
///
/// Percentiles [NS]:
/// 99th: 18_270
/// 95th: 16_190
/// 75th: 14_819
read: 14_262 * constants::WEIGHT_REF_TIME_PER_NANOS,
/// Time to write one storage item.
/// Calculated by multiplying the *Average* of all values with `1.1` and adding `0`.
///
/// Stats [NS]:
/// Min, Max: 12_969, 13_282_577
/// Average: 71_347
/// Median: 69_499
/// Std-Dev: 25145.27
///
/// Percentiles [NS]:
/// 99th: 135_839
/// 95th: 106_129
/// 75th: 79_239
write: 71_347 * constants::WEIGHT_REF_TIME_PER_NANOS,
```
## Arguments
- `--db` Specify which database backend to use. This greatly influences the results.
- `--state-version` Set the version of the state encoding that this snapshot uses. Should be set to `1` for Substrate
`--dev` and `0` for PezkuwiChain et al. Using the wrong version can corrupt the snapshot.
- [`--mul`](../shared/README.md#arguments)
- [`--add`](../shared/README.md#arguments)
- [`--metric`](../shared/README.md#arguments)
- [`--weight-path`](../shared/README.md#arguments)
- `--json-read-path` Write the raw 'read' results to this file or directory.
- `--json-write-path` Write the raw 'write' results to this file or directory.
- [`--header`](../shared/README.md#arguments)
License: Apache-2.0
<!-- LINKS -->
[PezkuwiChain Snapshots]: https://snapshots.polkadot.io
[paritydb_weights.rs]:
https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/paritydb_weights.rs#L60
@@ -0,0 +1,310 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::{CliConfiguration, DatabaseParams, PruningParams, Result, SharedParams};
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sc_client_db::DbHash;
use sc_service::Configuration;
use sp_api::CallApiAt;
use sp_blockchain::HeaderBackend;
use sp_database::{ColumnId, Database};
use sp_runtime::traits::{Block as BlockT, HashingFor};
use sp_state_machine::Storage;
use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion};
use clap::{Args, Parser, ValueEnum};
use log::info;
use rand::prelude::*;
use serde::Serialize;
use sp_runtime::generic::BlockId;
use std::{fmt::Debug, path::PathBuf, sync::Arc};
use super::template::TemplateData;
use crate::shared::{new_rng, HostInfoParams, WeightParams};
/// The mode in which to run the storage benchmark.
// NOTE: the variant `///` docs double as clap `ValueEnum` help text.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Serialize, ValueEnum)]
pub enum StorageBenchmarkMode {
	/// Run the benchmark for block import.
	#[default]
	ImportBlock,
	/// Run the benchmark for block validation.
	// Requires an active PoV recorder (see `StorageParams::disable_pov_recorder`).
	ValidateBlock,
}
/// Benchmark the storage speed of a chain snapshot.
#[derive(Debug, Parser)]
pub struct StorageCmd {
	// Shared node CLI parameters (chain, base path, logging, ...).
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: SharedParams,

	// Database backend selection and tuning.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub database_params: DatabaseParams,

	// State pruning configuration.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub pruning_params: PruningParams,

	// Benchmark-specific parameters, see [`StorageParams`].
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub params: StorageParams,
}
/// Parameters for modifying the benchmark behaviour and the post processing of the results.
// NOTE: the `///` docs on the fields below double as `--help` output via clap.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct StorageParams {
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub weight_params: WeightParams,

	#[allow(missing_docs)]
	#[clap(flatten)]
	pub hostinfo: HostInfoParams,

	/// Skip the `read` benchmark.
	#[arg(long)]
	pub skip_read: bool,

	/// Skip the `write` benchmark.
	#[arg(long)]
	pub skip_write: bool,

	/// Specify the Handlebars template to use for outputting benchmark results.
	#[arg(long)]
	pub template_path: Option<PathBuf>,

	/// Add a header to the generated weight output file.
	///
	/// Good for adding LICENSE headers.
	#[arg(long, value_name = "PATH")]
	pub header: Option<PathBuf>,

	/// Path to write the raw 'read' results in JSON format to. Can be a file or directory.
	#[arg(long)]
	pub json_read_path: Option<PathBuf>,

	/// Path to write the raw 'write' results in JSON format to. Can be a file or directory.
	#[arg(long)]
	pub json_write_path: Option<PathBuf>,

	/// Rounds of warmups before measuring.
	#[arg(long, default_value_t = 1)]
	pub warmups: u32,

	/// The `StateVersion` to use. Substrate `--dev` should use `V1` and Pezkuwi `V0`.
	/// Selecting the wrong version can corrupt the DB.
	// Clap restricts the value to 0 or 1; see `StorageCmd::state_version`.
	#[arg(long, value_parser = clap::value_parser!(u8).range(0..=1))]
	pub state_version: u8,

	/// Trie cache size in bytes.
	///
	/// Providing `0` will disable the cache.
	#[arg(long, value_name = "Bytes", default_value_t = 67108864)]
	pub trie_cache_size: usize,

	/// Enable the Trie cache.
	///
	/// This should only be used for performance analysis and not for final results.
	#[arg(long)]
	pub enable_trie_cache: bool,

	/// Include child trees in benchmark.
	#[arg(long)]
	pub include_child_trees: bool,

	/// Disable PoV recorder.
	///
	/// The recorder has impact on performance when benchmarking with the TrieCache enabled.
	/// If the chain is recording a proof while building/importing a block, the pov recorder
	/// should be activated.
	///
	/// Hence, when generating weights for a teyrchain this should be activated and when generating
	/// weights for a standalone chain this should be deactivated.
	#[arg(long, default_value = "false")]
	pub disable_pov_recorder: bool,

	/// The batch size for the read/write benchmark.
	///
	/// Since the write size needs to also include the cost of computing the storage root, which is
	/// done once at the end of the block, the batch size is used to simulate multiple writes in a
	/// block.
	#[arg(long, default_value_t = 100_000)]
	pub batch_size: usize,

	/// The mode in which to run the storage benchmark.
	///
	/// PoV recorder must be activated to provide a storage proof for block validation at runtime.
	#[arg(long, value_enum, default_value_t = StorageBenchmarkMode::ImportBlock)]
	pub mode: StorageBenchmarkMode,

	/// Number of rounds to execute block validation during the benchmark.
	///
	/// We need to run the benchmark several times to avoid fluctuations during runtime setup.
	/// This is only used when `mode` is `validate-block`.
	#[arg(long, default_value_t = 20)]
	pub validate_block_rounds: u32,
}
impl StorageParams {
	/// Returns `true` when the benchmark runs in block-import mode.
	pub fn is_import_block_mode(&self) -> bool {
		matches!(self.mode, StorageBenchmarkMode::ImportBlock)
	}

	/// Returns `true` when the benchmark runs in block-validation mode.
	pub fn is_validate_block_mode(&self) -> bool {
		matches!(self.mode, StorageBenchmarkMode::ValidateBlock)
	}
}
impl StorageCmd {
	/// Calls into the Read and Write benchmarking functions.
	/// Processes the output and writes it into files and stdout.
	///
	/// Both benchmarks are optional (`--skip-read` / `--skip-write`); each is
	/// preceded by warmup rounds and can dump its raw data as JSON.
	pub fn run<Block, BA, C>(
		&self,
		cfg: Configuration,
		client: Arc<C>,
		db: (Arc<dyn Database<DbHash>>, ColumnId),
		storage: Arc<dyn Storage<HashingFor<Block>>>,
		shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	) -> Result<()>
	where
		BA: ClientBackend<Block>,
		Block: BlockT<Hash = DbHash>,
		C: UsageProvider<Block>
			+ StorageProvider<Block, BA>
			+ HeaderBackend<Block>
			+ CallApiAt<Block>,
	{
		let mut template = TemplateData::new(&cfg, &self.params)?;
		// Benchmarks run against the best block the client knows of.
		let block_id = BlockId::<Block>::Number(client.usage_info().chain.best_number);
		template.set_block_number(block_id.to_string());

		if !self.params.skip_read {
			self.bench_warmup(&client)?;
			let record = self.bench_read(client.clone(), shared_trie_cache.clone())?;
			if let Some(path) = &self.params.json_read_path {
				record.save_json(&cfg, path, "read")?;
			}
			let stats = record.calculate_stats()?;
			info!("Time summary [ns]:\n{:?}\nValue size summary:\n{:?}", stats.0, stats.1);
			template.set_stats(Some(stats), None)?;
		}

		if !self.params.skip_write {
			self.bench_warmup(&client)?;
			// `bench_write` consumes `client`, `db` and `storage` by value,
			// so the write pass has to come after the read pass.
			let record = self.bench_write(client, db, storage, shared_trie_cache)?;
			if let Some(path) = &self.params.json_write_path {
				record.save_json(&cfg, path, "write")?;
			}
			let stats = record.calculate_stats()?;
			info!("Time summary [ns]:\n{:?}\nValue size summary:\n{:?}", stats.0, stats.1);
			template.set_stats(None, Some(stats))?;
		}

		// Render the weight file from the collected stats.
		template.write(&self.params.weight_params.weight_path, &self.params.template_path)
	}

	/// Returns the specified state version.
	pub(crate) fn state_version(&self) -> StateVersion {
		match self.params.state_version {
			0 => StateVersion::V0,
			1 => StateVersion::V1,
			_ => unreachable!("Clap set to only allow 0 and 1"),
		}
	}

	/// Returns `Some(ChildInfo)` if the key belongs to a default child trie
	/// and `None` for a regular key.
	pub(crate) fn is_child_key(&self, key: Vec<u8>) -> Option<ChildInfo> {
		if let Some((ChildType::ParentKeyId, storage_key)) =
			ChildType::from_prefixed_key(&PrefixedStorageKey::new(key))
		{
			return Some(ChildInfo::new_default(storage_key));
		}
		None
	}

	/// Run some rounds of the (read) benchmark as warmup.
	/// See `frame_benchmarking_cli::storage::read::bench_read` for detailed comments.
	fn bench_warmup<B, BA, C>(&self, client: &Arc<C>) -> Result<()>
	where
		C: UsageProvider<B> + StorageProvider<B, BA>,
		B: BlockT + Debug,
		BA: ClientBackend<B>,
	{
		let hash = client.usage_info().chain.best_hash;
		let mut keys: Vec<_> = client.storage_keys(hash, None, None)?.collect();
		// Shuffle with a random seed, like the real read benchmark does.
		let (mut rng, _) = new_rng(None);
		keys.shuffle(&mut rng);

		for i in 0..self.params.warmups {
			info!("Warmup round {}/{}", i + 1, self.params.warmups);
			let mut child_nodes = Vec::new();
			for key in keys.as_slice() {
				// NOTE(review): the result is discarded here, so an empty
				// value does not abort the warmup — unlike the child-trie
				// loop below, which propagates the error. Confirm this
				// asymmetry is intended.
				let _ = client
					.storage(hash, &key)
					.expect("Checked above to exist")
					.ok_or("Value unexpectedly empty");
				if let Some(info) = self
					.params
					.include_child_trees
					.then(|| self.is_child_key(key.clone().0))
					.flatten()
				{
					// child tree key
					for ck in client.child_storage_keys(hash, info.clone(), None, None)? {
						child_nodes.push((ck.clone(), info.clone()));
					}
				}
			}
			// Also touch every collected child-trie value.
			for (key, info) in child_nodes.as_slice() {
				client
					.child_storage(hash, info, key)
					.expect("Checked above to exist")
					.ok_or("Value unexpectedly empty")?;
			}
		}
		Ok(())
	}
}
// Boilerplate
impl CliConfiguration for StorageCmd {
	fn shared_params(&self) -> &SharedParams {
		&self.shared_params
	}

	fn database_params(&self) -> Option<&DatabaseParams> {
		Some(&self.database_params)
	}

	fn pruning_params(&self) -> Option<&PruningParams> {
		Some(&self.pruning_params)
	}

	fn trie_cache_maximum_size(&self) -> Result<Option<usize>> {
		// The cache is opt-in, and a configured size of `0` disables it too.
		let size = self.params.trie_cache_size;
		Ok((self.params.enable_trie_cache && size > 0).then(|| size))
	}
}
@@ -0,0 +1,57 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod cmd;
pub mod read;
pub mod template;
pub mod write;
pub use cmd::StorageCmd;
/// Empirically, the maximum batch size for block validation should be no more than 10,000.
/// Bigger sizes may cause problems with runtime memory allocation.
// Enforced as an upper bound on `--batch-size` in validate-block mode.
pub(crate) const MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION: usize = 10_000;
/// Builds the wasmtime module for the embedded storage-access test runtime.
pub(crate) fn get_wasm_module() -> Box<dyn sc_executor_common::wasm_runtime::WasmModule> {
	// The test runtime is embedded at build time; decompress it if needed.
	let runtime_blob = sc_executor_common::runtime_blob::RuntimeBlob::uncompress_if_needed(
		frame_storage_access_test_runtime::WASM_BINARY
			.expect("You need to build the WASM binaries to run the benchmark!"),
	)
	.expect("Failed to create runtime blob");

	let semantics = sc_executor_wasmtime::Semantics {
		heap_alloc_strategy: sc_executor_common::wasm_runtime::HeapAllocStrategy::Dynamic {
			maximum_pages: Some(4096),
		},
		instantiation_strategy: sc_executor::WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
		deterministic_stack_limit: None,
		canonicalize_nans: false,
		parallel_compilation: false,
		wasm_multi_value: false,
		wasm_bulk_memory: false,
		wasm_reference_types: false,
		wasm_simd: false,
	};
	let wasmtime_config = sc_executor_wasmtime::Config {
		allow_missing_func_imports: true,
		cache_path: None,
		semantics,
	};

	let module =
		sc_executor_wasmtime::create_runtime::<sp_io::SubstrateHostFunctions>(runtime_blob, wasmtime_config)
			.expect("Unable to create wasm module.");
	Box::new(module)
}
@@ -0,0 +1,263 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use codec::Encode;
use frame_storage_access_test_runtime::StorageAccessParams;
use log::{debug, info};
use rand::prelude::*;
use sc_cli::{Error, Result};
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sp_api::CallApiAt;
use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
use sp_state_machine::{backend::AsTrieBackend, Backend};
use sp_storage::ChildInfo;
use sp_trie::StorageProof;
use std::{fmt::Debug, sync::Arc, time::Instant};
use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
use crate::shared::{new_rng, BenchRecord};
impl StorageCmd {
/// Benchmarks the time it takes to read a single Storage item.
/// Uses the latest state that is available for the given client.
pub(crate) fn bench_read<B, BA, C>(
&self,
client: Arc<C>,
_shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<B>>>,
) -> Result<BenchRecord>
where
C: UsageProvider<B> + StorageProvider<B, BA> + CallApiAt<B>,
B: BlockT + Debug,
BA: ClientBackend<B>,
<<B as BlockT>::Header as HeaderT>::Number: From<u32>,
{
if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder` from the command line.".into());
}
if self.params.is_validate_block_mode() &&
self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
{
return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into());
}
let mut record = BenchRecord::default();
let best_hash = client.usage_info().chain.best_hash;
info!("Preparing keys from block {}", best_hash);
// Load all keys and randomly shuffle them.
let mut keys: Vec<_> = client.storage_keys(best_hash, None, None)?.collect();
let (mut rng, _) = new_rng(None);
keys.shuffle(&mut rng);
if keys.is_empty() {
return Err("Can't process benchmarking with empty storage".into());
}
let mut child_nodes = Vec::new();
// Interesting part here:
// Read all the keys in the database and measure the time it takes to access each.
info!("Reading {} keys", keys.len());
// Read using the same TrieBackend and recorder for up to `batch_size` keys.
// This would allow us to measure the amortized cost of reading a key.
let state = client
.state_at(best_hash)
.map_err(|_err| Error::Input("State not found".into()))?;
// We reassign the backend and recorder for every batch size.
// Using a new recorder for every read vs using the same for the entire batch
// produces significant different results. Since in the real use case we use a
// single recorder per block, simulate the same behavior by creating a new
// recorder every batch size, so that the amortized cost of reading a key is
// measured in conditions closer to the real world.
let (mut backend, mut recorder) = self.create_backend::<B, C>(&state);
let mut read_in_batch = 0;
let mut on_validation_batch = vec![];
let mut on_validation_size = 0;
let last_key = keys.last().expect("Checked above to be non-empty");
for key in keys.as_slice() {
match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
(true, Some(info)) => {
// child tree key
for ck in client.child_storage_keys(best_hash, info.clone(), None, None)? {
child_nodes.push((ck, info.clone()));
}
},
_ => {
// regular key
on_validation_batch.push((key.0.clone(), None));
let start = Instant::now();
let v = backend
.storage(key.0.as_ref())
.expect("Checked above to exist")
.ok_or("Value unexpectedly empty")?;
on_validation_size += v.len();
if self.params.is_import_block_mode() {
record.append(v.len(), start.elapsed())?;
}
},
}
read_in_batch += 1;
let is_batch_full = read_in_batch >= self.params.batch_size || key == last_key;
// Read keys on block validation
if is_batch_full && self.params.is_validate_block_mode() {
let root = backend.root();
let storage_proof = recorder
.clone()
.map(|r| r.drain_storage_proof())
.expect("Storage proof must exist for block validation");
let elapsed = measure_block_validation::<B>(
*root,
storage_proof,
on_validation_batch.clone(),
self.params.validate_block_rounds,
);
record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
on_validation_batch = vec![];
on_validation_size = 0;
}
// Reload recorder
if is_batch_full {
(backend, recorder) = self.create_backend::<B, C>(&state);
read_in_batch = 0;
}
}
if self.params.include_child_trees && !child_nodes.is_empty() {
child_nodes.shuffle(&mut rng);
info!("Reading {} child keys", child_nodes.len());
let (last_child_key, last_child_info) =
child_nodes.last().expect("Checked above to be non-empty");
for (key, info) in child_nodes.as_slice() {
on_validation_batch.push((key.0.clone(), Some(info.clone())));
let start = Instant::now();
let v = backend
.child_storage(info, key.0.as_ref())
.expect("Checked above to exist")
.ok_or("Value unexpectedly empty")?;
on_validation_size += v.len();
if self.params.is_import_block_mode() {
record.append(v.len(), start.elapsed())?;
}
read_in_batch += 1;
let is_batch_full = read_in_batch >= self.params.batch_size ||
(last_child_key == key && last_child_info == info);
// Read child keys on block validation
if is_batch_full && self.params.is_validate_block_mode() {
let root = backend.root();
let storage_proof = recorder
.clone()
.map(|r| r.drain_storage_proof())
.expect("Storage proof must exist for block validation");
let elapsed = measure_block_validation::<B>(
*root,
storage_proof,
on_validation_batch.clone(),
self.params.validate_block_rounds,
);
record.append(on_validation_size / on_validation_batch.len(), elapsed)?;
on_validation_batch = vec![];
on_validation_size = 0;
}
// Reload recorder
if is_batch_full {
(backend, recorder) = self.create_backend::<B, C>(&state);
read_in_batch = 0;
}
}
}
Ok(record)
}
fn create_backend<'a, B, C>(
&self,
state: &'a C::StateBackend,
) -> (
sp_state_machine::TrieBackend<
&'a <C::StateBackend as AsTrieBackend<HashingFor<B>>>::TrieBackendStorage,
HashingFor<B>,
&'a sp_trie::cache::LocalTrieCache<HashingFor<B>>,
>,
Option<sp_trie::recorder::Recorder<HashingFor<B>>>,
)
where
C: CallApiAt<B>,
B: BlockT + Debug,
{
let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
let backend = sp_state_machine::TrieBackendBuilder::wrap(state.as_trie_backend())
.with_optional_recorder(recorder.clone())
.build();
(backend, recorder)
}
}
/// Measures the amortized per-key time of reading `on_validation_batch` inside the
/// runtime's `validate_block` export.
///
/// Each round calls `validate_block` twice — once as a dry run (without storage access)
/// and once for real — and records the difference divided by the batch size. The
/// returned duration is the average over all rounds; `Duration::default()` (zero) is
/// returned when there is nothing to measure.
fn measure_block_validation<B: BlockT + Debug>(
	root: B::Hash,
	storage_proof: StorageProof,
	on_validation_batch: Vec<(Vec<u8>, Option<ChildInfo>)>,
	rounds: u32,
) -> std::time::Duration {
	debug!(
		"POV: len {:?} {:?}",
		storage_proof.len(),
		storage_proof.clone().encoded_compact_size::<HashingFor<B>>(root)
	);
	let batch_size = on_validation_batch.len();
	// Guard against division by zero: with an empty batch or zero rounds the
	// averages below would divide by zero.
	if batch_size == 0 || rounds == 0 {
		return std::time::Duration::default();
	}
	let wasm_module = get_wasm_module();
	let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
	let params = StorageAccessParams::<B>::new_read(root, storage_proof, on_validation_batch);
	let dry_run_encoded = params.as_dry_run().encode();
	let encoded = params.encode();
	let mut durations_in_nanos = Vec::new();
	for i in 1..=rounds {
		info!("validate_block with {} keys, round {}/{}", batch_size, i, rounds);
		// Dry run to get the time it takes without storage access
		let dry_run_start = Instant::now();
		instance
			.call_export("validate_block", &dry_run_encoded)
			.expect("Failed to call validate_block");
		let dry_run_elapsed = dry_run_start.elapsed();
		debug!("validate_block dry-run time {:?}", dry_run_elapsed);
		let start = Instant::now();
		instance
			.call_export("validate_block", &encoded)
			.expect("Failed to call validate_block");
		let elapsed = start.elapsed();
		debug!("validate_block time {:?}", elapsed);
		durations_in_nanos
			.push(elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64);
	}
	// Average of the per-key durations over all rounds.
	std::time::Duration::from_nanos(
		durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
	)
}
@@ -0,0 +1,153 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::Result;
use sc_service::Configuration;
use log::info;
use serde::Serialize;
use std::{env, fs, path::PathBuf};
use super::cmd::StorageParams;
use crate::shared::{Stats, UnderscoreHelper};
static VERSION: &str = env!("CARGO_PKG_VERSION");
static TEMPLATE: &str = include_str!("./weights.hbs");
/// Data consumed by Handlebar to fill out the `weights.hbs` template.
///
/// Fields not set by [`TemplateData::new`] (block number, weights and stats) start at
/// their `Default` values and are filled in later via the setters on this type.
#[derive(Serialize, Default, Debug, Clone)]
pub(crate) struct TemplateData {
	/// Name of the database used.
	db_name: String,
	/// Block number that was used.
	block_number: String,
	/// Name of the runtime. Taken from the chain spec.
	runtime_name: String,
	/// Version of the benchmarking CLI used.
	version: String,
	/// Date that the template was filled out.
	date: String,
	/// Hostname of the machine that executed the benchmarks.
	hostname: String,
	/// CPU name of the machine that executed the benchmarks.
	cpuname: String,
	/// Header for the generated file.
	header: String,
	/// Command line arguments that were passed to the CLI.
	args: Vec<String>,
	/// Storage params of the executed command.
	params: StorageParams,
	/// The weight for one `read`.
	read_weight: u64,
	/// The weight for one `write`.
	write_weight: u64,
	/// Stats about a `read` benchmark. Contains *time* and *value size* stats.
	/// The *value size* stats are currently not used in the template.
	read: Option<(Stats, Stats)>,
	/// Stats about a `write` benchmark. Contains *time* and *value size* stats.
	/// The *value size* stats are currently not used in the template.
	write: Option<(Stats, Stats)>,
}
impl TemplateData {
	/// Returns a new [`Self`] from the given configuration.
	pub fn new(cfg: &Configuration, params: &StorageParams) -> Result<Self> {
		// Optional file header that gets prepended verbatim to the generated file.
		let header = match &params.header {
			Some(path) => std::fs::read_to_string(path)?,
			None => String::new(),
		};
		// Block validation runs on an in-memory DB, otherwise report the client DB.
		let db_name = if params.is_validate_block_mode() {
			String::from("InMemoryDb")
		} else {
			format!("{}", cfg.database)
		};
		Ok(TemplateData {
			db_name,
			runtime_name: cfg.chain_spec.name().into(),
			version: VERSION.into(),
			date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(),
			hostname: params.hostinfo.hostname(),
			cpuname: params.hostinfo.cpuname(),
			header,
			args: env::args().collect::<Vec<String>>(),
			params: params.clone(),
			..Default::default()
		})
	}

	/// Sets the stats and calculates the final weights.
	pub fn set_stats(
		&mut self,
		read: Option<(Stats, Stats)>,
		write: Option<(Stats, Stats)>,
	) -> Result<()> {
		// The weight is derived from the *time* stats only (first tuple element).
		if let Some(stats) = read {
			self.read_weight = self.params.weight_params.calc_weight(&stats.0)?;
			self.read = Some(stats);
		}
		if let Some(stats) = write {
			self.write_weight = self.params.weight_params.calc_weight(&stats.0)?;
			self.write = Some(stats);
		}
		Ok(())
	}

	/// Sets the block id that was used.
	pub fn set_block_number(&mut self, block_number: String) {
		self.block_number = block_number
	}

	/// Fills out the `weights.hbs` or specified HBS template with its own data.
	/// Writes the result to `path` which can be a directory or file.
	pub fn write(&self, path: &Option<PathBuf>, hbs_template: &Option<PathBuf>) -> Result<()> {
		// Use custom template if provided.
		let template = match hbs_template {
			Some(custom) if custom.is_file() => fs::read_to_string(custom)?,
			Some(_) => return Err("Handlebars template is not a valid file!".into()),
			None => TEMPLATE.to_string(),
		};
		let mut handlebars = handlebars::Handlebars::new();
		// Format large integers with underscore.
		handlebars.register_helper("underscore", Box::new(UnderscoreHelper));
		// Don't HTML escape any characters.
		handlebars.register_escape_fn(|s| -> String { s.to_string() });
		let out_path = self.build_path(path);
		let mut fd = fs::File::create(&out_path)?;
		info!("Writing weights to {:?}", fs::canonicalize(&out_path)?);
		handlebars
			.render_template_to_write(&template, &self, &mut fd)
			.map_err(|e| format!("HBS template write: {:?}", e).into())
	}

	/// Builds a path for the weight file.
	fn build_path(&self, weight_out: &Option<PathBuf>) -> PathBuf {
		let mut path = weight_out.clone().unwrap_or_default();
		// A directory (or nothing) was given: derive the file name from the DB name.
		if path.is_dir() || path.as_os_str().is_empty() {
			path.push(format!("{}_weights", self.db_name.to_lowercase()));
			path.set_extension("rs");
		}
		path
	}
}
@@ -0,0 +1,99 @@
{{header}}
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
//! DATE: {{date}}
//! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}`
//!
//! DATABASE: `{{db_name}}`, RUNTIME: `{{runtime_name}}`
//! BLOCK-NUM: `{{block_number}}`
//! SKIP-WRITE: `{{params.skip_write}}`, SKIP-READ: `{{params.skip_read}}`, WARMUPS: `{{params.warmups}}`
//! STATE-VERSION: `V{{params.state_version}}`, STATE-CACHE-SIZE: `{{params.state_cache_size}}`
//! WEIGHT-PATH: `{{params.weight_params.weight_path}}`
//! METRIC: `{{params.weight_params.weight_metric}}`, WEIGHT-MUL: `{{params.weight_params.weight_mul}}`, WEIGHT-ADD: `{{params.weight_params.weight_add}}`
// Executed Command:
{{#each args as |arg|}}
// {{arg}}
{{/each}}
/// Storage DB weights for the `{{runtime_name}}` runtime and `{{db_name}}`.
pub mod constants {
use frame_support::weights::constants;
use sp_core::parameter_types;
use sp_weights::RuntimeDbWeight;
parameter_types! {
{{#if (eq db_name "InMemoryDb")}}
/// `InMemoryDb` weights are measured in the context of the validation functions.
/// To avoid submitting overweight blocks to the relay chain this is the configuration
/// parachains should use.
{{else if (eq db_name "ParityDb")}}
/// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights
/// are available for brave runtime engineers who may want to try this out as default.
{{else}}
/// By default, Substrate uses `RocksDB`, so this will be the weight used throughout
/// the runtime.
{{/if}}
pub const {{db_name}}Weight: RuntimeDbWeight = RuntimeDbWeight {
/// Time to read one storage item.
/// Calculated by multiplying the *{{params.weight_params.weight_metric}}* of all values with `{{params.weight_params.weight_mul}}` and adding `{{params.weight_params.weight_add}}`.
///
/// Stats nanoseconds:
/// Min, Max: {{underscore read.0.min}}, {{underscore read.0.max}}
/// Average: {{underscore read.0.avg}}
/// Median: {{underscore read.0.median}}
/// Std-Dev: {{read.0.stddev}}
///
/// Percentiles nanoseconds:
/// 99th: {{underscore read.0.p99}}
/// 95th: {{underscore read.0.p95}}
/// 75th: {{underscore read.0.p75}}
read: {{underscore read_weight}} * constants::WEIGHT_REF_TIME_PER_NANOS,
/// Time to write one storage item.
/// Calculated by multiplying the *{{params.weight_params.weight_metric}}* of all values with `{{params.weight_params.weight_mul}}` and adding `{{params.weight_params.weight_add}}`.
///
/// Stats nanoseconds:
/// Min, Max: {{underscore write.0.min}}, {{underscore write.0.max}}
/// Average: {{underscore write.0.avg}}
/// Median: {{underscore write.0.median}}
/// Std-Dev: {{write.0.stddev}}
///
/// Percentiles nanoseconds:
/// 99th: {{underscore write.0.p99}}
/// 95th: {{underscore write.0.p95}}
/// 75th: {{underscore write.0.p75}}
write: {{underscore write_weight}} * constants::WEIGHT_REF_TIME_PER_NANOS,
};
}
#[cfg(test)]
mod test_db_weights {
use super::constants::{{db_name}}Weight as W;
use sp_weights::constants;
/// Checks that all weights exist and have sane values.
// NOTE: If this test fails but you are sure that the generated values are fine,
// you can delete it.
#[test]
fn bound() {
// At least 1 µs.
assert!(
W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS,
"Read weight should be at least 1 µs."
);
assert!(
W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS,
"Write weight should be at least 1 µs."
);
// At most 1 ms.
assert!(
W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
"Read weight should be at most 1 ms."
);
assert!(
W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS,
"Write weight should be at most 1 ms."
);
}
}
}
@@ -0,0 +1,436 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use codec::Encode;
use frame_storage_access_test_runtime::StorageAccessParams;
use log::{debug, info, trace, warn};
use rand::prelude::*;
use sc_cli::Result;
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sc_client_db::{DbHash, DbState, DbStateBuilder};
use sp_blockchain::HeaderBackend;
use sp_database::{ColumnId, Transaction};
use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT};
use sp_state_machine::Backend as StateBackend;
use sp_storage::{ChildInfo, StateVersion};
use sp_trie::{recorder::Recorder, PrefixedMemoryDB};
use std::{
fmt::Debug,
sync::Arc,
time::{Duration, Instant},
};
use super::{cmd::StorageCmd, get_wasm_module, MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION};
use crate::shared::{new_rng, BenchRecord};
impl StorageCmd {
	/// Benchmarks the time it takes to write a single Storage item.
	///
	/// Uses the latest state that is available for the given client.
	///
	/// Unlike reading benchmark, where we read every single key, here we write a batch of keys in
	/// one time. So writing a remaining keys with the size much smaller than batch size can
	/// dramatically distort the results. To avoid this, we skip the remaining keys.
	pub(crate) fn bench_write<Block, BA, H, C>(
		&self,
		client: Arc<C>,
		(db, state_col): (Arc<dyn sp_database::Database<DbHash>>, ColumnId),
		storage: Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
		shared_trie_cache: Option<sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	) -> Result<BenchRecord>
	where
		Block: BlockT<Header = H, Hash = DbHash> + Debug,
		H: HeaderT<Hash = DbHash>,
		BA: ClientBackend<Block>,
		C: UsageProvider<Block> + HeaderBackend<Block> + StorageProvider<Block, BA>,
	{
		// Validate-block mode needs a storage proof, which only exists when the PoV
		// recorder is active.
		if self.params.is_validate_block_mode() && self.params.disable_pov_recorder {
			return Err("PoV recorder must be activated to provide a storage proof for block validation at runtime. Remove `--disable-pov-recorder`.".into());
		}
		if self.params.is_validate_block_mode() &&
			self.params.batch_size > MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION
		{
			return Err(format!("Batch size is too large. This may cause problems with runtime memory allocation. Better set `--batch-size {}` or less.", MAX_BATCH_SIZE_FOR_BLOCK_VALIDATION).into());
		}
		// Store the time that it took to write each value.
		let mut record = BenchRecord::default();
		let best_hash = client.usage_info().chain.best_hash;
		let header = client.header(best_hash)?.ok_or("Header not found")?;
		let original_root = *header.state_root();
		let (trie, _) = self.create_trie_backend::<Block, H>(
			original_root,
			&storage,
			shared_trie_cache.as_ref(),
		);
		info!("Preparing keys from block {}", best_hash);
		// Load all KV pairs and randomly shuffle them.
		let mut kvs: Vec<_> = trie.pairs(Default::default())?.collect();
		let (mut rng, _) = new_rng(None);
		kvs.shuffle(&mut rng);
		if kvs.is_empty() {
			return Err("Can't process benchmarking with empty storage".into());
		}
		info!("Writing {} keys in batches of {}", kvs.len(), self.params.batch_size);
		let remainder = kvs.len() % self.params.batch_size;
		// See the doc comment above: a final partial batch would distort the
		// amortized measurement, so it is skipped entirely.
		if self.params.is_validate_block_mode() && remainder != 0 {
			info!("Remaining `{remainder}` keys will be skipped");
		}
		let mut child_nodes = Vec::new();
		let mut batched_keys = Vec::new();
		// Generate all random values first; Make sure there are no collisions with existing
		// db entries, so we can rollback all additions without corrupting existing entries.
		for key_value in kvs {
			let (k, original_v) = key_value?;
			match (self.params.include_child_trees, self.is_child_key(k.to_vec())) {
				(true, Some(info)) => {
					// Child-trie root key: collect the child keys for the separate
					// pass below instead of overwriting the root entry itself.
					let child_keys = client
						.child_storage_keys(best_hash, info.clone(), None, None)?
						.collect::<Vec<_>>();
					child_nodes.push((child_keys, info.clone()));
				},
				_ => {
					// regular key
					let mut new_v = vec![0; original_v.len()];
					loop {
						// Create a random value to overwrite with.
						// NOTE: We use a possibly higher entropy than the original value,
						// could be improved but acts as an over-estimation which is fine for now.
						rng.fill_bytes(&mut new_v[..]);
						if check_new_value::<Block>(
							db.clone(),
							&trie,
							&k.to_vec(),
							&new_v,
							self.state_version(),
							state_col,
							None,
						) {
							break;
						}
					}
					batched_keys.push((k.to_vec(), new_v.to_vec()));
					// Only measure once a full batch has been accumulated.
					if batched_keys.len() < self.params.batch_size {
						continue;
					}
					// Write each value in one commit.
					let (size, duration) = if self.params.is_validate_block_mode() {
						self.measure_per_key_amortised_validate_block_write_cost::<Block, H>(
							original_root,
							&storage,
							shared_trie_cache.as_ref(),
							batched_keys.clone(),
							None,
						)?
					} else {
						self.measure_per_key_amortised_import_block_write_cost::<Block, H>(
							original_root,
							&storage,
							shared_trie_cache.as_ref(),
							db.clone(),
							batched_keys.clone(),
							self.state_version(),
							state_col,
							None,
						)?
					};
					record.append(size, duration)?;
					batched_keys.clear();
				},
			}
		}
		if self.params.include_child_trees && !child_nodes.is_empty() {
			info!("Writing {} child keys", child_nodes.iter().map(|(c, _)| c.len()).sum::<usize>());
			for (mut child_keys, info) in child_nodes {
				// A child trie smaller than one batch cannot produce a full batch, so
				// it is skipped to avoid distorting the amortized measurement.
				if child_keys.len() < self.params.batch_size {
					warn!(
						"{} child keys will be skipped because it's less than batch size",
						child_keys.len()
					);
					continue;
				}
				child_keys.shuffle(&mut rng);
				for key in child_keys {
					if let Some(original_v) = client
						.child_storage(best_hash, &info, &key)
						.expect("Checked above to exist")
					{
						// Same collision-free random-value generation as for regular
						// keys above, but scoped to this child trie.
						let mut new_v = vec![0; original_v.0.len()];
						loop {
							rng.fill_bytes(&mut new_v[..]);
							if check_new_value::<Block>(
								db.clone(),
								&trie,
								&key.0,
								&new_v,
								self.state_version(),
								state_col,
								Some(&info),
							) {
								break;
							}
						}
						batched_keys.push((key.0, new_v.to_vec()));
						if batched_keys.len() < self.params.batch_size {
							continue;
						}
						let (size, duration) = if self.params.is_validate_block_mode() {
							self.measure_per_key_amortised_validate_block_write_cost::<Block, H>(
								original_root,
								&storage,
								shared_trie_cache.as_ref(),
								batched_keys.clone(),
								None,
							)?
						} else {
							self.measure_per_key_amortised_import_block_write_cost::<Block, H>(
								original_root,
								&storage,
								shared_trie_cache.as_ref(),
								db.clone(),
								batched_keys.clone(),
								self.state_version(),
								state_col,
								Some(&info),
							)?
						};
						record.append(size, duration)?;
						batched_keys.clear();
					}
				}
			}
		}
		Ok(record)
	}

	/// Creates a DB-backed trie state at `original_root` plus, unless
	/// `--disable-pov-recorder` was given, a PoV recorder attached to it.
	fn create_trie_backend<Block, H>(
		&self,
		original_root: Block::Hash,
		storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
		shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
	) -> (DbState<HashingFor<Block>>, Option<Recorder<HashingFor<Block>>>)
	where
		Block: BlockT<Header = H, Hash = DbHash> + Debug,
		H: HeaderT<Hash = DbHash>,
	{
		let recorder = (!self.params.disable_pov_recorder).then(|| Default::default());
		let trie = DbStateBuilder::<HashingFor<Block>>::new(storage.clone(), original_root)
			.with_optional_cache(shared_trie_cache.map(|c| c.local_cache_trusted()))
			.with_optional_recorder(recorder.clone())
			.build();
		(trie, recorder)
	}

	/// Measures write benchmark
	/// if `child_info` exist then it means this is a child tree key
	///
	/// Commits the whole batch in one DB transaction, records the amortised per-key
	/// time, then rolls the insertions back so the DB is left untouched.
	fn measure_per_key_amortised_import_block_write_cost<Block, H>(
		&self,
		original_root: Block::Hash,
		storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
		shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
		db: Arc<dyn sp_database::Database<DbHash>>,
		changes: Vec<(Vec<u8>, Vec<u8>)>,
		version: StateVersion,
		col: ColumnId,
		child_info: Option<&ChildInfo>,
	) -> Result<(usize, Duration)>
	where
		Block: BlockT<Header = H, Hash = DbHash> + Debug,
		H: HeaderT<Hash = DbHash>,
	{
		let batch_size = changes.len();
		let average_len = changes.iter().map(|(_, v)| v.len()).sum::<usize>() / batch_size;
		// For every batched write use a different trie instance and recorder, so we
		// don't benefit from past runs.
		let (trie, _recorder) =
			self.create_trie_backend::<Block, H>(original_root, storage, shared_trie_cache);
		let start = Instant::now();
		// Create a TX that will modify the Trie in the DB and
		// calculate the root hash of the Trie after the modification.
		let replace = changes
			.iter()
			.map(|(key, new_v)| (key.as_ref(), Some(new_v.as_ref())))
			.collect::<Vec<_>>();
		let stx = match child_info {
			Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2,
			None => trie.storage_root(replace.iter().cloned(), version).1,
		};
		// Only keep the insertions, since we do not want to benchmark pruning.
		let tx = convert_tx::<Block>(db.clone(), stx.clone(), false, col);
		db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
		// Measurement stops here: the rollback below is cleanup, not benchmarked work.
		let result = (average_len, start.elapsed() / batch_size as u32);
		// Now undo the changes by removing what was added.
		let tx = convert_tx::<Block>(db.clone(), stx.clone(), true, col);
		db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
		Ok(result)
	}

	/// Measures write benchmark on block validation
	/// if `child_info` exist then it means this is a child tree key
	///
	/// Reads the batch once through a recorder to build the storage proof, then replays
	/// the writes inside the runtime's `validate_block` export, subtracting a dry run
	/// per round and averaging the per-key time over all rounds.
	fn measure_per_key_amortised_validate_block_write_cost<Block, H>(
		&self,
		original_root: Block::Hash,
		storage: &Arc<dyn sp_state_machine::Storage<HashingFor<Block>>>,
		shared_trie_cache: Option<&sp_trie::cache::SharedTrieCache<HashingFor<Block>>>,
		changes: Vec<(Vec<u8>, Vec<u8>)>,
		maybe_child_info: Option<&ChildInfo>,
	) -> Result<(usize, Duration)>
	where
		Block: BlockT<Header = H, Hash = DbHash> + Debug,
		H: HeaderT<Hash = DbHash>,
	{
		let batch_size = changes.len();
		let average_len = changes.iter().map(|(_, v)| v.len()).sum::<usize>() / batch_size;
		let (trie, recorder) =
			self.create_trie_backend::<Block, H>(original_root, storage, shared_trie_cache);
		// Touch every key so the recorder captures the trie nodes needed for the proof.
		for (key, _) in changes.iter() {
			let _v = trie
				.storage(key)
				.expect("Checked above to exist")
				.ok_or("Value unexpectedly empty")?;
		}
		let storage_proof = recorder
			.map(|r| r.drain_storage_proof())
			.expect("Storage proof must exist for block validation");
		let root = trie.root();
		debug!(
			"POV: len {:?} {:?}",
			storage_proof.len(),
			storage_proof.clone().encoded_compact_size::<HashingFor<Block>>(*root)
		);
		let params = StorageAccessParams::<Block>::new_write(
			*root,
			storage_proof,
			(changes, maybe_child_info.cloned()),
		);
		let mut durations_in_nanos = Vec::new();
		let wasm_module = get_wasm_module();
		let mut instance = wasm_module.new_instance().expect("Failed to create wasm instance");
		let dry_run_encoded = params.as_dry_run().encode();
		let encoded = params.encode();
		for i in 1..=self.params.validate_block_rounds {
			info!(
				"validate_block with {} keys, round {}/{}",
				batch_size, i, self.params.validate_block_rounds
			);
			// Dry run to get the time it takes without storage access
			let dry_run_start = Instant::now();
			instance
				.call_export("validate_block", &dry_run_encoded)
				.expect("Failed to call validate_block");
			let dry_run_elapsed = dry_run_start.elapsed();
			debug!("validate_block dry-run time {:?}", dry_run_elapsed);
			let start = Instant::now();
			instance
				.call_export("validate_block", &encoded)
				.expect("Failed to call validate_block");
			let elapsed = start.elapsed();
			debug!("validate_block time {:?}", elapsed);
			durations_in_nanos.push(
				elapsed.saturating_sub(dry_run_elapsed).as_nanos() as u64 / batch_size as u64,
			);
		}
		// Average per-key duration over all rounds.
		let result = (
			average_len,
			std::time::Duration::from_nanos(
				durations_in_nanos.iter().sum::<u64>() / durations_in_nanos.len() as u64,
			),
		);
		Ok(result)
	}
}
/// Converts a Trie transaction into a DB transaction.
/// Removals are ignored and will not be included in the final tx.
/// `invert_inserts` replaces all inserts with removals.
fn convert_tx<B: BlockT>(
	db: Arc<dyn sp_database::Database<DbHash>>,
	mut tx: PrefixedMemoryDB<HashingFor<B>>,
	invert_inserts: bool,
	col: ColumnId,
) -> Transaction<DbHash> {
	let mut converted = Transaction::<DbHash>::default();
	for (mut key, (value, ref_count)) in tx.drain() {
		// A non-positive ref-count means removal (< 0) or no modification (0);
		// both are skipped.
		if ref_count <= 0 {
			continue;
		}
		db.sanitize_key(&mut key);
		if invert_inserts {
			converted.remove(col, &key);
		} else {
			converted.set(col, &key, &value);
		}
	}
	converted
}
/// Checks whether writing `new_v` at `key` would collide with any existing DB entry.
///
/// Computes the trie nodes the write would insert and probes the DB for each of them;
/// returns `true` when there is no collision (i.e. the value is safe to use for a
/// benchmark write that must be fully rolled back afterwards).
/// If `child_info` is `Some` then `key` belongs to that child trie.
fn check_new_value<Block: BlockT>(
	db: Arc<dyn sp_database::Database<DbHash>>,
	trie: &DbState<HashingFor<Block>>,
	key: &[u8],
	new_v: &[u8],
	version: StateVersion,
	col: ColumnId,
	child_info: Option<&ChildInfo>,
) -> bool {
	let new_kv = vec![(key, Some(new_v))];
	// Compute only the would-be trie transaction; nothing is committed here.
	let mut stx = match child_info {
		Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).2,
		None => trie.storage_root(new_kv.iter().cloned(), version).1,
	};
	for (mut k, (_, rc)) in stx.drain() {
		// Only insertions (rc > 0) can collide with existing entries.
		if rc > 0 {
			db.sanitize_key(&mut k);
			if db.get(col, &k).is_some() {
				trace!("Benchmark-store key creation: Key collision detected, retry");
				return false;
			}
		}
	}
	true
}