Storage benchmarking (#10897)

* WIP

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* WIP: DB benchmarking

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* WIP

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* WIP

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Simplify code

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Remove old files

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Remove old files

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Minimize changes

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Add license

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Remove dependencies

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Extend template

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Linter

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Linter

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Beauty fixes

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Remove default

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Add feature

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Remove seed

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* CI wakeup

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Fmt

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Review fixes

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Adding doc

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Adding doc

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Improve template

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Do not expose columns

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Fix ColumnId

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Nicer template prints

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Cleanup

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Fix json path

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Simplify `bench_write` logic

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Invert tx before the second commit

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
This commit is contained in:
Oliver Tale-Yazdi
2022-02-25 20:30:57 +01:00
committed by GitHub
parent d551fe6613
commit 4c984500a7
19 changed files with 917 additions and 11 deletions
@@ -0,0 +1,171 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::{CliConfiguration, DatabaseParams, PruningParams, Result, SharedParams};
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sc_client_db::DbHash;
use sc_service::Configuration;
use sp_blockchain::HeaderBackend;
use sp_database::{ColumnId, Database};
use sp_runtime::traits::{Block as BlockT, HashFor};
use sp_state_machine::Storage;
use sp_storage::StateVersion;
use clap::{Args, Parser};
use log::info;
use rand::prelude::*;
use serde::Serialize;
use std::{fmt::Debug, sync::Arc};
use super::{record::StatSelect, template::TemplateData};
/// Benchmark the storage of a Substrate node with a live chain snapshot.
#[derive(Debug, Parser)]
pub struct StorageCmd {
/// Standard CLI parameters, flattened into this command (see `SharedParams`).
#[allow(missing_docs)]
#[clap(flatten)]
pub shared_params: SharedParams,

/// Database parameters, flattened (see `DatabaseParams`).
#[allow(missing_docs)]
#[clap(flatten)]
pub database_params: DatabaseParams,

/// Pruning parameters, flattened (see `PruningParams`).
#[allow(missing_docs)]
#[clap(flatten)]
pub pruning_params: PruningParams,

/// Parameters specific to the storage benchmark (see [`StorageParams`]).
#[allow(missing_docs)]
#[clap(flatten)]
pub params: StorageParams,
}
/// Parameters for modifying the benchmark behaviour and the post processing of the results.
#[derive(Debug, Default, Serialize, Clone, PartialEq, Args)]
pub struct StorageParams {
/// Path to write the *weight* file to. Can be a file or directory.
/// For substrate this should be `frame/support/src/weights`.
#[clap(long, default_value = ".")]
pub weight_path: String,

/// Select a specific metric to calculate the final weight output.
// Parsed via the `FromStr` impl of `StatSelect`; e.g. "average", "median", "p99".
#[clap(long = "metric", default_value = "average")]
pub weight_metric: StatSelect,

/// Multiply the resulting weight with the given factor. Must be positive.
/// Is calculated before `weight_add`.
// Validated in `calc_weight`: negative or non-normal values are rejected.
#[clap(long = "mul", default_value = "1")]
pub weight_mul: f64,

/// Add the given offset to the resulting weight.
/// Is calculated after `weight_mul`.
#[clap(long = "add", default_value = "0")]
pub weight_add: u64,

/// Skip the `read` benchmark.
#[clap(long)]
pub skip_read: bool,

/// Skip the `write` benchmark.
#[clap(long)]
pub skip_write: bool,

/// Rounds of warmups before measuring.
/// Only supported for `read` benchmarks.
#[clap(long, default_value = "1")]
pub warmups: u32,

/// The `StateVersion` to use. Substrate `--dev` should use `V1` and Polkadot `V0`.
/// Selecting the wrong version can corrupt the DB.
// Restricted to 0/1 by clap; mapped in `StorageCmd::state_version`.
#[clap(long, possible_values = ["0", "1"])]
pub state_version: u8,

/// State cache size.
#[clap(long, default_value = "0")]
pub state_cache_size: usize,
}
impl StorageCmd {
/// Calls into the Read and Write benchmarking functions.
/// Processes the output and writes it into files and stdout.
pub async fn run<Block, BA, C>(
&self,
cfg: Configuration,
client: Arc<C>,
db: (Arc<dyn Database<DbHash>>, ColumnId),
storage: Arc<dyn Storage<HashFor<Block>>>,
) -> Result<()>
where
BA: ClientBackend<Block>,
Block: BlockT<Hash = DbHash>,
C: UsageProvider<Block> + StorageProvider<Block, BA> + HeaderBackend<Block>,
{
let mut template = TemplateData::new(&cfg, &self.params);
if !self.params.skip_read {
let record = self.bench_read(client.clone())?;
record.save_json(&cfg, "read")?;
let stats = record.calculate_stats()?;
info!("Time summary [ns]:\n{:?}\nValue size summary:\n{:?}", stats.0, stats.1);
template.set_stats(Some(stats), None)?;
}
if !self.params.skip_write {
let record = self.bench_write(client, db, storage)?;
record.save_json(&cfg, "write")?;
let stats = record.calculate_stats()?;
info!("Time summary [ns]:\n{:?}\nValue size summary:\n{:?}", stats.0, stats.1);
template.set_stats(None, Some(stats))?;
}
template.write(&self.params.weight_path)
}
/// Returns the specified state version.
pub(crate) fn state_version(&self) -> StateVersion {
match self.params.state_version {
0 => StateVersion::V0,
1 => StateVersion::V1,
_ => unreachable!("Clap set to only allow 0 and 1"),
}
}
/// Creates an rng from a random seed.
pub(crate) fn setup_rng() -> impl rand::Rng {
let seed = rand::thread_rng().gen::<u64>();
info!("Using seed {}", seed);
StdRng::seed_from_u64(seed)
}
}
// Boilerplate: forwards the flattened parameter structs so the standard
// CLI configuration machinery can pick them up.
impl CliConfiguration for StorageCmd {
fn shared_params(&self) -> &SharedParams {
&self.shared_params
}

fn database_params(&self) -> Option<&DatabaseParams> {
Some(&self.database_params)
}

fn pruning_params(&self) -> Option<&PruningParams> {
Some(&self.pruning_params)
}

// Uses the value of the `--state-cache-size` CLI flag.
fn state_cache_size(&self) -> Result<usize> {
Ok(self.params.state_cache_size)
}
}
@@ -0,0 +1,24 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// The command definition and entry point of the storage benchmark.
pub mod cmd;
/// The `read` benchmark.
pub mod read;
/// Raw benchmark results and their statistics.
pub mod record;
/// Fills out and writes the `weights.hbs` template.
pub mod template;
/// The `write` benchmark.
pub mod write;

pub use cmd::StorageCmd;
@@ -0,0 +1,76 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::Result;
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sp_core::storage::StorageKey;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
};
use log::info;
use rand::prelude::*;
use std::{fmt::Debug, sync::Arc, time::Instant};
use super::{cmd::StorageCmd, record::BenchRecord};
impl StorageCmd {
	/// Benchmarks the time it takes to read a single Storage item.
	/// Uses the latest state that is available for the given client.
	///
	/// All keys are fetched once, shuffled, optionally read in warmup
	/// rounds and finally read once each with individual timing.
	pub(crate) fn bench_read<B, BA, C>(&self, client: Arc<C>) -> Result<BenchRecord>
	where
		C: UsageProvider<B> + StorageProvider<B, BA>,
		B: BlockT + Debug,
		BA: ClientBackend<B>,
		<<B as BlockT>::Header as HeaderT>::Number: From<u32>,
	{
		let mut record = BenchRecord::default();
		let block = BlockId::Number(client.usage_info().chain.best_number);

		info!("Preparing keys from block {}", block);
		// Load all keys and randomly shuffle them.
		let empty_prefix = StorageKey(Vec::new());
		let mut keys = client.storage_keys(&block, &empty_prefix)?;
		let mut rng = Self::setup_rng();
		keys.shuffle(&mut rng);

		// Run some rounds of the benchmark as warmup.
		// Iterate by reference; cloning the whole key vector per round
		// would copy every key for nothing.
		for i in 0..self.params.warmups {
			info!("Warmup round {}/{}", i + 1, self.params.warmups);
			for key in &keys {
				let _ = client
					.storage(&block, key)
					.expect("Checked above to exist")
					.ok_or("Value unexpectedly empty")?;
			}
		}

		// Interesting part here:
		// Read all the keys in the database and measure the time it takes to access each.
		info!("Reading {} keys", keys.len());
		for key in &keys {
			let start = Instant::now();
			let v = client
				.storage(&block, key)
				.expect("Checked above to exist")
				.ok_or("Value unexpectedly empty")?;
			// Record the value size and the elapsed access time.
			record.append(v.0.len(), start.elapsed())?;
		}
		Ok(record)
	}
}
@@ -0,0 +1,191 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Calculates statistics and fills out the `weight.hbs` template.
use sc_cli::Result;
use sc_service::Configuration;
use log::info;
use serde::Serialize;
use std::{fmt, fs, result, str::FromStr, time::Duration};
/// Raw output of a Storage benchmark.
#[derive(Debug, Default, Clone, Serialize)]
pub(crate) struct BenchRecord {
/// Multi-Map of value sizes and the time that it took to access them.
// Filled via `append`: (value size in bytes, access time in nanoseconds).
ns_per_size: Vec<(u64, u64)>,
}
/// Various statistics that help to gauge the quality of the produced weights.
/// Will be written to the weight file and printed to console.
#[derive(Serialize, Default, Clone)]
pub(crate) struct Stats {
/// Sum of all values.
sum: u64,
/// Minimal observed value.
min: u64,
/// Maximal observed value.
max: u64,
/// Average of all values.
avg: u64,
/// Median of all values.
median: u64,
/// Standard derivation of all values.
// Rounded to two decimal places by `Stats::new`.
stddev: f64,
/// 99th percentile. At least 99% of all values are below this threshold.
p99: u64,
/// 95th percentile. At least 95% of all values are below this threshold.
p95: u64,
/// 75th percentile. At least 75% of all values are below this threshold.
p75: u64,
}
/// Selects a specific field from a [`Stats`] object.
/// Not all fields are available.
// Parsed from the CLI via the `FromStr` impl below ("max", "average", ...).
#[derive(Debug, Clone, Copy, Serialize, PartialEq)]
pub enum StatSelect {
/// Select the maximum.
Maximum,
/// Select the average.
Average,
/// Select the median.
Median,
/// Select the 99th percentile.
P99Percentile,
/// Select the 95th percentile.
P95Percentile,
/// Select the 75th percentile.
P75Percentile,
}
impl BenchRecord {
	/// Adds one (value size, access time) sample to the record.
	///
	/// Both values are converted with checked casts; an overflow
	/// is reported as an error instead of wrapping.
	pub fn append(&mut self, size: usize, d: Duration) -> Result<()> {
		let size = u64::try_from(size).map_err(|e| format!("Size overflow u64: {}", e))?;
		let ns =
			u64::try_from(d.as_nanos()).map_err(|e| format!("Nanoseconds overflow u64: {}", e))?;
		self.ns_per_size.push((size, ns));
		Ok(())
	}

	/// Consumes the record and computes the [`Stats`] for *time* and *value size*.
	pub(crate) fn calculate_stats(self) -> Result<(Stats, Stats)> {
		let (sizes, times): (Vec<_>, Vec<_>) = self.ns_per_size.into_iter().unzip();
		// Returned as (time, size) on purpose.
		Ok((Stats::new(&times)?, Stats::new(&sizes)?))
	}

	/// Saves the raw results in a json file in the current directory.
	/// Prefixes it with the DB name and suffixed with `path_suffix`.
	pub fn save_json(&self, cfg: &Configuration, path_suffix: &str) -> Result<()> {
		let path = format!("{}_{}.json", cfg.database, path_suffix).to_lowercase();
		let json =
			serde_json::to_string_pretty(&self).map_err(|e| format!("Serializing as JSON: {:?}", e))?;
		fs::write(&path, json)?;
		info!("Raw data written to {:?}", fs::canonicalize(&path)?);
		Ok(())
	}
}
impl Stats {
	/// Calculates statistics and returns them.
	///
	/// Returns an error for empty input, since no meaningful statistics
	/// can be computed in that case.
	pub fn new(xs: &[u64]) -> Result<Self> {
		if xs.is_empty() {
			return Err("Empty input is invalid".into())
		}
		let (avg, stddev) = Self::avg_and_stddev(xs);
		// Sort once and reuse the sorted data for all percentiles,
		// instead of cloning and sorting per percentile.
		let mut sorted = xs.to_vec();
		sorted.sort_unstable();
		Ok(Self {
			sum: xs.iter().sum(),
			min: sorted[0],
			max: sorted[sorted.len() - 1],
			avg: avg as u64,
			median: Self::percentile(&sorted, 0.50),
			stddev: (stddev * 100.0).round() / 100.0, // round to 1/100
			p99: Self::percentile(&sorted, 0.99),
			p95: Self::percentile(&sorted, 0.95),
			p75: Self::percentile(&sorted, 0.75),
		})
	}

	/// Returns the selected stat.
	pub(crate) fn select(&self, s: StatSelect) -> u64 {
		match s {
			StatSelect::Maximum => self.max,
			StatSelect::Average => self.avg,
			StatSelect::Median => self.median,
			StatSelect::P99Percentile => self.p99,
			StatSelect::P95Percentile => self.p95,
			StatSelect::P75Percentile => self.p75,
		}
	}

	/// Returns the *average* and the *standard derivation*.
	fn avg_and_stddev(xs: &[u64]) -> (f64, f64) {
		let avg = xs.iter().map(|x| *x as f64).sum::<f64>() / xs.len() as f64;
		let variance = xs.iter().map(|x| (*x as f64 - avg).powi(2)).sum::<f64>() / xs.len() as f64;
		(avg, variance.sqrt())
	}

	/// Returns the specified percentile from already *sorted*, non-empty data.
	/// This is best effort since it ignores the interpolation case.
	///
	/// The index is clamped to the last element: `ceil` could otherwise
	/// produce an index equal to `len` (e.g. single-element input with
	/// `p = 0.5` yields `ceil(0.5) = 1`), which would panic out-of-bounds.
	fn percentile(sorted: &[u64], p: f64) -> u64 {
		debug_assert!(!sorted.is_empty(), "Caller checks for non-empty input");
		let index = ((sorted.len() as f64 * p).ceil() as usize).min(sorted.len() - 1);
		sorted[index]
	}
}
// Human-readable multi-line summary; this is what the `info!("{:?}", ...)`
// calls in `cmd.rs` print to the console.
impl fmt::Debug for Stats {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Total: {}\n", self.sum)?;
write!(f, "Min: {}, Max: {}\n", self.min, self.max)?;
write!(f, "Average: {}, Median: {}, Stddev: {}\n", self.avg, self.median, self.stddev)?;
// Intentionally no trailing newline on the last line.
write!(f, "Percentiles 99th, 95th, 75th: {}, {}, {}", self.p99, self.p95, self.p75)
}
}
impl Default for StatSelect {
/// Returns the `Average` selector.
// Matches the clap `default_value = "average"` of the `--metric` flag.
fn default() -> Self {
Self::Average
}
}
impl FromStr for StatSelect {
	type Err = &'static str;

	/// Parses a metric selector as passed on the CLI, case-insensitively.
	///
	/// NOTE: the parameter was previously named `day` (a copy-paste
	/// leftover); it is the metric name, e.g. "average" or "p99".
	fn from_str(s: &str) -> result::Result<Self, Self::Err> {
		match s.to_lowercase().as_str() {
			"max" => Ok(Self::Maximum),
			"average" => Ok(Self::Average),
			"median" => Ok(Self::Median),
			"p99" => Ok(Self::P99Percentile),
			"p95" => Ok(Self::P95Percentile),
			"p75" => Ok(Self::P75Percentile),
			_ => Err("String was not a StatSelect"),
		}
	}
}
@@ -0,0 +1,126 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::Result;
use sc_service::Configuration;
use log::info;
use serde::Serialize;
use std::{env, fs, path::PathBuf};
use super::{cmd::StorageParams, record::Stats};
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
static TEMPLATE: &str = include_str!("./weights.hbs");
/// Data consumed by Handlebar to fill out the `weights.hbs` template.
#[derive(Serialize, Default, Debug, Clone)]
pub(crate) struct TemplateData {
/// Name of the database used.
db_name: String,
/// Name of the runtime. Taken from the chain spec.
runtime_name: String,
/// Version of the benchmarking CLI used.
version: String,
/// Date that the template was filled out.
date: String,
/// Command line arguments that were passed to the CLI.
args: Vec<String>,
/// Storage params of the executed command.
params: StorageParams,
/// The weight for one `read`.
// In nanoseconds; the template multiplies it by `WEIGHT_PER_NANOS`.
read_weight: u64,
/// The weight for one `write`.
// In nanoseconds; the template multiplies it by `WEIGHT_PER_NANOS`.
write_weight: u64,
/// Stats about a `read` benchmark. Contains *time* and *value size* stats.
/// The *value size* stats are currently not used in the template.
read: Option<(Stats, Stats)>,
/// Stats about a `write` benchmark. Contains *time* and *value size* stats.
/// The *value size* stats are currently not used in the template.
write: Option<(Stats, Stats)>,
}
impl TemplateData {
/// Returns a new [`Self`] from the given configuration.
pub fn new(cfg: &Configuration, params: &StorageParams) -> Self {
TemplateData {
db_name: format!("{}", cfg.database),
runtime_name: cfg.chain_spec.name().into(),
version: VERSION.into(),
date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(),
args: env::args().collect::<Vec<String>>(),
params: params.clone(),
// Weights and stats are filled in later via `set_stats`.
..Default::default()
}
}

/// Sets the stats and calculates the final weights.
/// Only updates the benchmarks whose stats are passed as `Some`.
pub fn set_stats(
&mut self,
read: Option<(Stats, Stats)>,
write: Option<(Stats, Stats)>,
) -> Result<()> {
if let Some(read) = read {
// The weight is derived from the *time* stats (first tuple field).
self.read_weight = calc_weight(&read.0, &self.params)?;
self.read = Some(read);
}
if let Some(write) = write {
self.write_weight = calc_weight(&write.0, &self.params)?;
self.write = Some(write);
}
Ok(())
}

/// Fills out the `weights.hbs` HBS template with its own data.
/// Writes the result to `path` which can be a directory or file.
pub fn write(&self, path: &str) -> Result<()> {
let mut handlebars = handlebars::Handlebars::new();
// Format large integers with underscore.
handlebars.register_helper("underscore", Box::new(crate::writer::UnderscoreHelper));
// Don't HTML escape any characters.
handlebars.register_escape_fn(|s| -> String { s.to_string() });
let out_path = self.build_path(path);
let mut fd = fs::File::create(&out_path)?;
info!("Writing weights to {:?}", fs::canonicalize(&out_path)?);
handlebars
.render_template_to_write(&TEMPLATE, &self, &mut fd)
.map_err(|e| format!("HBS template write: {:?}", e).into())
}

/// Builds a path for the weight file.
/// If `weight_out` is a directory, the file name is derived from the
/// DB name, e.g. `rocksdb_weights.rs`.
fn build_path(&self, weight_out: &str) -> PathBuf {
let mut path = PathBuf::from(weight_out);
if path.is_dir() {
path.push(format!("{}_weights.rs", self.db_name.to_lowercase()));
// NOTE(review): the pushed name already ends in `.rs`, so this
// `set_extension` call is a no-op.
path.set_extension("rs");
}
path
}
}
/// Calculates the final weight by multiplying the selected metric with
/// `mul` and adding `add`.
/// Does not use safe casts and can overflow.
fn calc_weight(stat: &Stats, params: &StorageParams) -> Result<u64> {
	let factor = params.weight_mul;
	// Rejects negative factors as well as NaN, infinity, zero and subnormals.
	if factor.is_sign_negative() || !factor.is_normal() {
		return Err("invalid floating number for `weight_mul`".into())
	}
	let base = stat.select(params.weight_metric) as f64;
	let weight = base.mul_add(factor, params.weight_add as f64).ceil();
	// No safe cast here since there is no `From<f64>` for `u64`.
	Ok(weight as u64)
}
@@ -0,0 +1,107 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
//! DATE: {{date}}
//!
//! DATABASE: `{{db_name}}`, RUNTIME: `{{runtime_name}}`
//! SKIP-WRITE: `{{params.skip_write}}`, SKIP-READ: `{{params.skip_read}}`, WARMUPS: `{{params.warmups}}`
//! STATE-VERSION: `V{{params.state_version}}`, STATE-CACHE-SIZE: `{{params.state_cache_size}}`
//! WEIGHT-PATH: `{{params.weight_path}}`
//! METRIC: `{{params.weight_metric}}`, WEIGHT-MUL: `{{params.weight_mul}}`, WEIGHT-ADD: `{{params.weight_add}}`
// Executed Command:
{{#each args as |arg|}}
// {{arg}}
{{/each}}
/// Storage DB weights for the {{runtime_name}} runtime and {{db_name}}.
pub mod constants {
use frame_support::{parameter_types, weights::{RuntimeDbWeight, constants}};
parameter_types! {
{{#if (eq db_name "ParityDb")}}
/// ParityDB can be enabled with a feature flag, but is still experimental. These weights
/// are available for brave runtime engineers who may want to try this out as default.
{{else}}
/// By default, Substrate uses RocksDB, so this will be the weight used throughout
/// the runtime.
{{/if}}
pub const {{db_name}}Weight: RuntimeDbWeight = RuntimeDbWeight {
/// Time to read one storage item.
/// Calculated by multiplying the *{{params.weight_metric}}* of all values with `{{params.weight_mul}}` and adding `{{params.weight_add}}`.
///
/// Stats [ns]:
/// Min, Max: {{underscore read.0.min}}, {{underscore read.0.max}}
/// Average: {{underscore read.0.avg}}
/// Median: {{underscore read.0.median}}
/// StdDev: {{read.0.stddev}}
///
/// Percentiles [ns]:
/// 99th: {{underscore read.0.p99}}
/// 95th: {{underscore read.0.p95}}
/// 75th: {{underscore read.0.p75}}
read: {{underscore read_weight}} * constants::WEIGHT_PER_NANOS,
/// Time to write one storage item.
/// Calculated by multiplying the *{{params.weight_metric}}* of all values with `{{params.weight_mul}}` and adding `{{params.weight_add}}`.
///
/// Stats [ns]:
/// Min, Max: {{underscore write.0.min}}, {{underscore write.0.max}}
/// Average: {{underscore write.0.avg}}
/// Median: {{underscore write.0.median}}
/// StdDev: {{write.0.stddev}}
///
/// Percentiles [ns]:
/// 99th: {{underscore write.0.p99}}
/// 95th: {{underscore write.0.p95}}
/// 75th: {{underscore write.0.p75}}
write: {{underscore write_weight}} * constants::WEIGHT_PER_NANOS,
};
}
#[cfg(test)]
mod test_db_weights {
use super::constants::{{db_name}}Weight as W;
use frame_support::weights::constants;
/// Checks that all weights exist and have sane values.
// NOTE: If this test fails but you are sure that the generated values are fine,
// you can delete it.
#[test]
fn bound() {
// At least 1 µs.
assert!(
W::get().reads(1) >= constants::WEIGHT_PER_MICROS,
"Read weight should be at least 1 µs."
);
assert!(
W::get().writes(1) >= constants::WEIGHT_PER_MICROS,
"Write weight should be at least 1 µs."
);
// At most 1 ms.
assert!(
W::get().reads(1) <= constants::WEIGHT_PER_MILLIS,
"Read weight should be at most 1 ms."
);
assert!(
W::get().writes(1) <= constants::WEIGHT_PER_MILLIS,
"Write weight should be at most 1 ms."
);
}
}
}
@@ -0,0 +1,131 @@
// This file is part of Substrate.
// Copyright (C) 2022 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use sc_cli::Result;
use sc_client_api::UsageProvider;
use sc_client_db::{DbHash, DbState, DB_HASH_LEN};
use sp_api::StateBackend;
use sp_blockchain::HeaderBackend;
use sp_database::{ColumnId, Transaction};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, HashFor, Header as HeaderT},
};
use sp_trie::PrefixedMemoryDB;
use log::info;
use rand::prelude::*;
use std::{fmt::Debug, sync::Arc, time::Instant};
use super::{cmd::StorageCmd, record::BenchRecord};
impl StorageCmd {
/// Benchmarks the time it takes to write a single Storage item.
/// Uses the latest state that is available for the given client.
///
/// For each trie key, a random value of the same length is committed
/// to the DB and timed; the change is then reverted with a second,
/// unmeasured commit so every iteration starts from the same state.
pub(crate) fn bench_write<Block, H, C>(
&self,
client: Arc<C>,
(db, state_col): (Arc<dyn sp_database::Database<DbHash>>, ColumnId),
storage: Arc<dyn sp_state_machine::Storage<HashFor<Block>>>,
) -> Result<BenchRecord>
where
Block: BlockT<Header = H, Hash = DbHash> + Debug,
H: HeaderT<Hash = DbHash>,
C: UsageProvider<Block> + HeaderBackend<Block>,
{
// Store the time that it took to write each value.
let mut record = BenchRecord::default();
// Whether the DB deduplicates via reference counting (see `convert_tx`).
let supports_rc = db.supports_ref_counting();

let block = BlockId::Number(client.usage_info().chain.best_number);
let header = client.header(block)?.ok_or("Header not found")?;
let original_root = *header.state_root();
let trie = DbState::<Block>::new(storage.clone(), original_root);
info!("Preparing keys from block {}", block);

// Load all KV pairs and randomly shuffle them.
let mut kvs = trie.pairs();
let mut rng = Self::setup_rng();
kvs.shuffle(&mut rng);
info!("Writing {} keys", kvs.len());

// Write each value in one commit.
for (k, original_v) in kvs.iter() {
// Create a random value to overwrite with.
// NOTE: We use a possibly higher entropy than the original value,
// could be improved but acts as an over-estimation which is fine for now.
let mut new_v = vec![0; original_v.len()];
rng.fill_bytes(&mut new_v[..]);

// Interesting part here:
let start = Instant::now();
// Create a TX that will modify the Trie in the DB and
// calculate the root hash of the Trie after the modification.
let replace = vec![(k.as_ref(), Some(new_v.as_ref()))];
let (_, stx) = trie.storage_root(replace.iter().cloned(), self.state_version());
// Only keep the insertions, since we do not want to benchmark pruning.
let tx = convert_tx::<Block>(stx.clone(), true, state_col, supports_rc);
db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
record.append(new_v.len(), start.elapsed())?;

// Now undo the changes by removing what was added.
let tx = convert_tx::<Block>(stx.clone(), false, state_col, supports_rc);
db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?;
}
Ok(record)
}
}
/// Converts a Trie transaction into a DB transaction.
/// Removals are ignored and will not be included in the final tx.
/// If `keep_inserts` is `false`, all inserts are replaced with removals of the
/// same keys, which can be used to undo a previously committed transaction.
/// (The parameter was previously named `invert_inserts`, but its doc stated
/// the opposite of what the code does; behaviour is unchanged, only the name
/// and doc now match the implementation. Callers pass it positionally.)
///
/// The keys of Trie transactions are prefixed, this is treated differently by each DB.
/// ParityDB can use an optimization where only the last `DB_HASH_LEN` byte are needed.
/// The last `DB_HASH_LEN` byte are the hash of the actual stored data, everything
/// before that is the route in the Patricia Trie.
/// RocksDB cannot do this and needs the whole route, hence no key truncating for RocksDB.
///
/// TODO:
/// This copies logic from [`sp_client_db::Backend::try_commit_operation`] and should be
/// refactored to use a canonical `sanitize_key` function from `sp_client_db` which
/// does not yet exist.
fn convert_tx<B: BlockT>(
	mut tx: PrefixedMemoryDB<HashFor<B>>,
	keep_inserts: bool,
	col: ColumnId,
	supports_rc: bool,
) -> Transaction<DbHash> {
	let mut ret = Transaction::<DbHash>::default();

	for (mut k, (v, rc)) in tx.drain() {
		if supports_rc {
			// Ref-counting DB: strip the Trie route, keep only the data hash.
			let _prefix = k.drain(0..k.len() - DB_HASH_LEN);
		}

		if rc > 0 {
			if keep_inserts {
				ret.set(col, k.as_ref(), &v);
			} else {
				// Undo: remove what a previous commit inserted.
				ret.remove(col, &k);
			}
		}
		// rc < 0 means removal - ignored.
		// rc == 0 means no modification.
	}
	ret
}