diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock
index 397f1f038c..26954cddbd 100644
--- a/substrate/Cargo.lock
+++ b/substrate/Cargo.lock
@@ -2181,6 +2181,7 @@ dependencies = [
  "hex",
  "itertools",
  "kvdb",
+ "lazy_static",
  "linked-hash-map",
  "log 0.4.16",
  "memory-db",
@@ -2210,6 +2211,7 @@ dependencies = [
  "sp-storage",
  "sp-trie",
  "tempfile",
+ "thiserror",
  "thousands",
 ]
diff --git a/substrate/bin/node-template/node/src/command.rs b/substrate/bin/node-template/node/src/command.rs
index 809257f790..e3e1000792 100644
--- a/substrate/bin/node-template/node/src/command.rs
+++ b/substrate/bin/node-template/node/src/command.rs
@@ -4,7 +4,7 @@ use crate::{
 	command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder},
 	service,
 };
-use frame_benchmarking_cli::BenchmarkCmd;
+use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
 use node_template_runtime::Block;
 use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
@@ -141,7 +141,8 @@ pub fn run() -> sc_cli::Result<()> {
 					cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder))
 				},
-				BenchmarkCmd::Machine(cmd) => cmd.run(&config),
+				BenchmarkCmd::Machine(cmd) =>
+					cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()),
 			}
 		})
 	},
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml
index 5562efa184..6bb36b9f9a 100644
--- a/substrate/bin/node/cli/Cargo.toml
+++ b/substrate/bin/node/cli/Cargo.toml
@@ -96,6 +96,7 @@ sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli" }
 frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" }
 node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" }
 try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" }
+serde_json = "1.0.79"
 
 [target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies]
 node-executor = { version = "3.0.0-dev", path = "../executor", features = ["wasmtime"] }
diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs
index b98a38d2db..b17a26fa02 100644
--- a/substrate/bin/node/cli/src/command.rs
+++ b/substrate/bin/node/cli/src/command.rs
@@ -130,7 +130,8 @@ pub fn run() -> Result<()> {
 					cmd.run(config, client, inherent_benchmark_data()?, Arc::new(ext_builder))
 				},
-				BenchmarkCmd::Machine(cmd) => cmd.run(&config),
+				BenchmarkCmd::Machine(cmd) =>
+					cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()),
 			}
 		})
 	},
diff --git a/substrate/bin/node/cli/tests/benchmark_machine_works.rs b/substrate/bin/node/cli/tests/benchmark_machine_works.rs
index df407e988f..bf4a2b7b85 100644
--- a/substrate/bin/node/cli/tests/benchmark_machine_works.rs
+++ b/substrate/bin/node/cli/tests/benchmark_machine_works.rs
@@ -24,9 +24,30 @@ use std::process::Command;
 fn benchmark_machine_works() {
 	let status = Command::new(cargo_bin("substrate"))
 		.args(["benchmark", "machine", "--dev"])
-		.args(["--verify-duration", "0.1"])
+		.args(["--verify-duration", "0.1", "--disk-duration", "0.1"])
+		// Make it succeed.
+		.args(["--allow-fail"])
 		.status()
 		.unwrap();
 
 	assert!(status.success());
 }
+
+/// Test that the hardware does not meet the requirements.
+///
+/// This is most likely to succeed, since it only runs in unoptimized (debug/test)
+/// builds, where the benchmark scores are far below the requirements.
+#[test]
+#[cfg(debug_assertions)]
+fn benchmark_machine_fails_with_slow_hardware() {
+	let output = Command::new(cargo_bin("substrate"))
+		.args(["benchmark", "machine", "--dev"])
+		.args(["--verify-duration", "0.1", "--disk-duration", "2", "--tolerance", "0"])
+		.output()
+		.unwrap();
+
+	// Command should have failed.
+	assert!(!output.status.success());
+	// An `UnmetRequirement` error should have been printed.
+	let log = String::from_utf8_lossy(&output.stderr).to_string();
+	assert!(log.contains("UnmetRequirement"));
+}
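Both call sites above pass `SUBSTRATE_REFERENCE_HARDWARE.clone()` explicitly, so a downstream chain can supply its own baseline instead. A minimal sketch of what that could look like; the file name `my_chain_requirements.json` and the helper are hypothetical, only the `serde` deserializability of `Requirements` is taken from this patch:

```rust
use frame_benchmarking_cli::{BenchmarkCmd, Requirements};

/// Hypothetical: chain-specific requirements bundled by a downstream node,
/// using the same JSON schema as the `reference_hardware.json` file
/// introduced further below.
fn custom_requirements() -> Requirements {
	let raw = include_bytes!("my_chain_requirements.json").as_slice();
	serde_json::from_slice(raw).expect("Requirements file is valid JSON; qed")
}

// The match arm from `command.rs` would then become:
// BenchmarkCmd::Machine(cmd) => cmd.run(&config, custom_requirements()),
```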
diff --git a/substrate/client/sysinfo/src/lib.rs b/substrate/client/sysinfo/src/lib.rs
index 911e725dcd..be63fefe9e 100644
--- a/substrate/client/sysinfo/src/lib.rs
+++ b/substrate/client/sysinfo/src/lib.rs
@@ -66,6 +66,11 @@ pub enum ExecutionLimit {
 }
 
 impl ExecutionLimit {
+	/// Creates a new execution limit with the passed seconds as duration limit.
+	pub fn from_secs_f32(secs: f32) -> Self {
+		Self::MaxDuration(Duration::from_secs_f32(secs))
+	}
+
 	/// Returns the duration limit or `MAX` if none is present.
 	pub fn max_duration(&self) -> Duration {
 		match self {
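`from_secs_f32` always produces a pure duration cap; iteration caps only enter through the `Both` variant used by `DEFAULT_DISK_EXECUTION_LIMIT` below. A standalone sketch of how the accessors resolve — `MaxDuration` and `Both` appear in this patch, the `MaxIterations` variant and the fallback behaviour are assumed from the existing enum:

```rust
use std::time::Duration;

// Mirror of `sc_sysinfo::ExecutionLimit`, for illustration only.
enum ExecutionLimit {
	MaxDuration(Duration),
	MaxIterations(usize),
	Both { max_iterations: usize, max_duration: Duration },
}

impl ExecutionLimit {
	fn from_secs_f32(secs: f32) -> Self {
		Self::MaxDuration(Duration::from_secs_f32(secs))
	}

	// Falls back to "no limit" when the variant carries no duration.
	fn max_duration(&self) -> Duration {
		match self {
			Self::MaxDuration(d) | Self::Both { max_duration: d, .. } => *d,
			_ => Duration::MAX,
		}
	}

	// Falls back to "no limit" when the variant carries no iteration cap.
	fn max_iterations(&self) -> usize {
		match self {
			Self::MaxIterations(i) | Self::Both { max_iterations: i, .. } => *i,
			_ => usize::MAX,
		}
	}
}

fn main() {
	// A `--disk-duration 0.1` flag becomes a pure duration cap:
	let limit = ExecutionLimit::from_secs_f32(0.1);
	assert_eq!(limit.max_duration(), Duration::from_secs_f32(0.1));
	assert_eq!(limit.max_iterations(), usize::MAX);
}
```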
diff --git a/substrate/client/sysinfo/src/sysinfo.rs b/substrate/client/sysinfo/src/sysinfo.rs
index 65d7a9e41b..cd6adcf623 100644
--- a/substrate/client/sysinfo/src/sysinfo.rs
+++ b/substrate/client/sysinfo/src/sysinfo.rs
@@ -241,10 +241,16 @@ fn random_data(size: usize) -> Vec<u8> {
 	buffer
 }
 
-pub fn benchmark_disk_sequential_writes(directory: &Path) -> Result<u64, String> {
+/// A default [`ExecutionLimit`] that can be used to call [`benchmark_disk_sequential_writes`]
+/// and [`benchmark_disk_random_writes`].
+pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit =
+	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(300) };
+
+pub fn benchmark_disk_sequential_writes(
+	limit: ExecutionLimit,
+	directory: &Path,
+) -> Result<u64, String> {
 	const SIZE: usize = 64 * 1024 * 1024;
-	const MAX_ITERATIONS: usize = 32;
-	const MAX_DURATION: Duration = Duration::from_millis(300);
 
 	let buffer = random_data(SIZE);
 	let path = directory.join(".disk_bench_seq_wr.tmp");
@@ -273,14 +279,21 @@ pub fn benchmark_disk_sequential_writes(directory: &Path) -> Result<u64, String>
 		Ok(())
 	};
 
-	benchmark("disk sequential write score", SIZE, MAX_ITERATIONS, MAX_DURATION, run)
-		.map(|s| s as u64)
+	benchmark(
+		"disk sequential write score",
+		SIZE,
+		limit.max_iterations(),
+		limit.max_duration(),
+		run,
+	)
+	.map(|s| s as u64)
 }
 
-pub fn benchmark_disk_random_writes(directory: &Path) -> Result<u64, String> {
+pub fn benchmark_disk_random_writes(
+	limit: ExecutionLimit,
+	directory: &Path,
+) -> Result<u64, String> {
 	const SIZE: usize = 64 * 1024 * 1024;
-	const MAX_ITERATIONS: usize = 32;
-	const MAX_DURATION: Duration = Duration::from_millis(300);
 
 	let buffer = random_data(SIZE);
 	let path = directory.join(".disk_bench_rand_wr.tmp");
@@ -333,8 +346,14 @@ pub fn benchmark_disk_random_writes(directory: &Path) -> Result<u64, String> {
 	};
 
 	// We only wrote half of the bytes hence `SIZE / 2`.
-	benchmark("disk random write score", SIZE / 2, MAX_ITERATIONS, MAX_DURATION, run)
-		.map(|s| s as u64)
+	benchmark(
+		"disk random write score",
+		SIZE / 2,
+		limit.max_iterations(),
+		limit.max_duration(),
+		run,
+	)
+	.map(|s| s as u64)
 }
 
 /// Benchmarks the verification speed of sr25519 signatures.
@@ -389,7 +408,8 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench {
 	if let Some(scratch_directory) = scratch_directory {
 		hwbench.disk_sequential_write_score =
-			match benchmark_disk_sequential_writes(scratch_directory) {
+			match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory)
+			{
 				Ok(score) => Some(score),
 				Err(error) => {
 					log::warn!("Failed to run the sequential write disk benchmark: {}", error);
@@ -397,13 +417,14 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench {
 			},
 		};
 
-		hwbench.disk_random_write_score = match benchmark_disk_random_writes(scratch_directory) {
-			Ok(score) => Some(score),
-			Err(error) => {
-				log::warn!("Failed to run the random write disk benchmark: {}", error);
-				None
-			},
-		};
+		hwbench.disk_random_write_score =
+			match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) {
+				Ok(score) => Some(score),
+				Err(error) => {
+					log::warn!("Failed to run the random write disk benchmark: {}", error);
+					None
+				},
+			};
 	}
 
 	hwbench
@@ -437,12 +458,17 @@ mod tests {
 
 	#[test]
 	fn test_benchmark_disk_sequential_writes() {
-		assert!(benchmark_disk_sequential_writes("./".as_ref()).unwrap() > 0);
+		assert!(
+			benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() >
+				0
+		);
 	}
 
 	#[test]
 	fn test_benchmark_disk_random_writes() {
-		assert!(benchmark_disk_random_writes("./".as_ref()).unwrap() > 0);
+		assert!(
+			benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > 0
+		);
 	}
 
 	#[test]
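With the limit hoisted into a parameter, callers other than `gather_hwbench` can trade accuracy for runtime. A sketch, assuming the `Result<u64, String>` return reconstructed above and the crate-root re-export that `machine/mod.rs` relies on below:

```rust
use sc_sysinfo::{benchmark_disk_sequential_writes, ExecutionLimit};
use std::{path::Path, time::Duration};

/// Sketch: a fast-but-rough disk score, capped at 8 iterations or 100ms,
/// whichever is hit first.
fn quick_disk_score(dir: &Path) -> Result<u64, String> {
	let limit =
		ExecutionLimit::Both { max_iterations: 8, max_duration: Duration::from_millis(100) };
	benchmark_disk_sequential_writes(limit, dir)
}
```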
diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml
index 8a3773fb10..208099162c 100644
--- a/substrate/utils/frame/benchmarking-cli/Cargo.toml
+++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml
@@ -55,6 +55,8 @@ thousands = "0.2.0"
 prettytable-rs = "0.8.0"
 tempfile = "3.2.0"
 rand_pcg = "0.3.1"
+lazy_static = "1.4.0"
+thiserror = "1.0.30"
 
 [features]
 default = ["db", "sc-client-db/runtime-benchmarks"]
diff --git a/substrate/utils/frame/benchmarking-cli/build.rs b/substrate/utils/frame/benchmarking-cli/build.rs
new file mode 100644
index 0000000000..4347804156
--- /dev/null
+++ b/substrate/utils/frame/benchmarking-cli/build.rs
@@ -0,0 +1,31 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::env;
+
+/// Exposes build environment variables to the rust code.
+///
+/// - The build profile as `build_profile`
+/// - The optimization level as `build_opt_level`
+pub fn main() {
+	if let Ok(opt_level) = env::var("OPT_LEVEL") {
+		println!("cargo:rustc-cfg=build_opt_level={:?}", opt_level);
+	}
+	if let Ok(profile) = env::var("PROFILE") {
+		println!("cargo:rustc-cfg=build_profile={:?}", profile);
+	}
+}
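Because the build script formats the values with `{:?}`, the emitted cfgs carry quoted values, e.g. `cargo:rustc-cfg=build_profile="release"` and `cargo:rustc-cfg=build_opt_level="3"`, which rustc parses as key-value cfgs. Crate code can then branch on them; the helper below is illustrative only (`check_build_profile` in `shared/mod.rs` further down is the real consumer):

```rust
/// Illustrative only: evaluates to `true` when the crate was compiled with
/// `--release` and the default release opt-level of 3.
fn compiled_for_benchmarking() -> bool {
	cfg!(build_profile = "release") && cfg!(build_opt_level = "3")
}
```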
diff --git a/substrate/utils/frame/benchmarking-cli/src/lib.rs b/substrate/utils/frame/benchmarking-cli/src/lib.rs
index 75e2edc042..d0eee3d293 100644
--- a/substrate/utils/frame/benchmarking-cli/src/lib.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/lib.rs
@@ -25,7 +25,7 @@ mod shared;
 mod storage;
 
 pub use block::BlockCmd;
-pub use machine::MachineCmd;
+pub use machine::{MachineCmd, Requirements, SUBSTRATE_REFERENCE_HARDWARE};
 pub use overhead::{ExtrinsicBuilder, OverheadCmd};
 pub use pallet::PalletCmd;
 pub use storage::StorageCmd;
diff --git a/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs b/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs
new file mode 100644
index 0000000000..5c62660cc7
--- /dev/null
+++ b/substrate/utils/frame/benchmarking-cli/src/machine/hardware.rs
@@ -0,0 +1,191 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Contains types to define hardware requirements.
+
+use lazy_static::lazy_static;
+use serde::{Deserialize, Serialize};
+use std::fmt;
+
+lazy_static! {
+	/// The hardware requirements as measured on reference hardware.
+	///
+	/// These values are provided by Parity; however, it is possible
+	/// to use your own requirements if you are running a custom chain.
+	///
+	/// The reference hardware is described here:
+	/// <https://wiki.polkadot.network/docs/maintain-guides-how-to-validate-polkadot#reference-hardware>
+	pub static ref SUBSTRATE_REFERENCE_HARDWARE: Requirements = {
+		let raw = include_bytes!("reference_hardware.json").as_slice();
+		serde_json::from_slice(raw).expect("Hardcoded data is known good; qed")
+	};
+}
+
+/// Multiple requirements for the hardware.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+pub struct Requirements(pub Vec<Requirement>);
+
+/// A single requirement for the hardware.
+#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
+pub struct Requirement {
+	/// The metric to measure.
+	pub metric: Metric,
+	/// The minimal throughput that needs to be achieved for this requirement.
+	pub minimum: Throughput,
+}
+
+/// A single hardware metric.
+///
+/// The implementation of these is in `sc-sysinfo`.
+#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
+pub enum Metric {
+	/// SR25519 signature verification.
+	Sr25519Verify,
+	/// Blake2-256 hashing algorithm.
+	Blake2256,
+	/// Copying data in RAM.
+	MemCopy,
+	/// Disk sequential write.
+	DiskSeqWrite,
+	/// Disk random write.
+	DiskRndWrite,
+}
+
+/// Throughput as measured in bytes per second.
+#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
+pub enum Throughput {
+	/// KiB/s
+	KiBs(f64),
+	/// MiB/s
+	MiBs(f64),
+	/// GiB/s
+	GiBs(f64),
+}
+
+impl Metric {
+	/// The category of the metric.
+	pub fn category(&self) -> &'static str {
+		match self {
+			Self::Sr25519Verify | Self::Blake2256 => "CPU",
+			Self::MemCopy => "Memory",
+			Self::DiskSeqWrite | Self::DiskRndWrite => "Disk",
+		}
+	}
+
+	/// The name of the metric. It is always prefixed by its [`Self::category`].
+	pub fn name(&self) -> &'static str {
+		match self {
+			Self::Sr25519Verify => "SR25519-Verify",
+			Self::Blake2256 => "BLAKE2-256",
+			Self::MemCopy => "Copy",
+			Self::DiskSeqWrite => "Seq Write",
+			Self::DiskRndWrite => "Rnd Write",
+		}
+	}
+}
+
+const KIBIBYTE: f64 = 1024.0;
+
+impl Throughput {
+	/// The unit of the metric.
+	pub fn unit(&self) -> &'static str {
+		match self {
+			Self::KiBs(_) => "KiB/s",
+			Self::MiBs(_) => "MiB/s",
+			Self::GiBs(_) => "GiB/s",
+		}
+	}
+
+	/// [`Self`] as number of byte/s.
+	pub fn to_bs(&self) -> f64 {
+		self.to_kibs() * KIBIBYTE
+	}
+
+	/// [`Self`] as number of kibibyte/s.
+	pub fn to_kibs(&self) -> f64 {
+		self.to_mibs() * KIBIBYTE
+	}
+
+	/// [`Self`] as number of mebibyte/s.
+	pub fn to_mibs(&self) -> f64 {
+		self.to_gibs() * KIBIBYTE
+	}
+
+	/// [`Self`] as number of gibibyte/s.
+	pub fn to_gibs(&self) -> f64 {
+		match self {
+			Self::KiBs(k) => *k / (KIBIBYTE * KIBIBYTE),
+			Self::MiBs(m) => *m / KIBIBYTE,
+			Self::GiBs(g) => *g,
+		}
+	}
+
+	/// Normalizes [`Self`] to use the largest unit possible.
+	pub fn normalize(&self) -> Self {
+		let bs = self.to_bs();
+
+		if bs >= KIBIBYTE * KIBIBYTE * KIBIBYTE {
+			Self::GiBs(self.to_gibs())
+		} else if bs >= KIBIBYTE * KIBIBYTE {
+			Self::MiBs(self.to_mibs())
+		} else {
+			Self::KiBs(self.to_kibs())
+		}
+	}
+}
+
+impl fmt::Display for Throughput {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		let normalized = self.normalize();
+		match normalized {
+			Self::KiBs(s) | Self::MiBs(s) | Self::GiBs(s) =>
+				write!(f, "{:.2?} {}", s, normalized.unit()),
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use sp_runtime::assert_eq_error_rate;
+
+	/// `SUBSTRATE_REFERENCE_HARDWARE` can be encoded and decoded.
+	#[test]
+	fn json_static_data() {
+		let raw = serde_json::to_string(&*SUBSTRATE_REFERENCE_HARDWARE).unwrap();
+		let decoded: Requirements = serde_json::from_str(&raw).unwrap();
+
+		assert_eq!(decoded, SUBSTRATE_REFERENCE_HARDWARE.clone());
+	}
+
+	/// Test the [`Throughput`].
+	#[test]
+	fn throughput_works() {
+		/// Float precision.
+		const EPS: f64 = 0.1;
+		let gib = Throughput::GiBs(14.324);
+
+		assert_eq_error_rate!(14.324, gib.to_gibs(), EPS);
+		assert_eq_error_rate!(14667.776, gib.to_mibs(), EPS);
+		assert_eq_error_rate!(14667.776 * 1024.0, gib.to_kibs(), EPS);
+		assert_eq!("14.32 GiB/s", gib.to_string());
+		assert_eq!("14.32 GiB/s", gib.normalize().to_string());
+
+		let mib = Throughput::MiBs(1029.0);
+		assert_eq!("1.00 GiB/s", mib.to_string());
+	}
+}
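The conversion helpers chain upward from `to_gibs`, so each step toward a smaller unit is one multiplication by 1024. A standalone check of the arithmetic exercised by `throughput_works` (pure `std`, mirroring the constants above):

```rust
fn main() {
	const KIBIBYTE: f64 = 1024.0;

	// 14.324 GiB/s expressed in the smaller units, as in `throughput_works`:
	let gibs = 14.324_f64;
	let mibs = gibs * KIBIBYTE; // 14667.776 MiB/s
	let kibs = mibs * KIBIBYTE;
	let bs = kibs * KIBIBYTE;

	assert!((mibs - 14667.776).abs() < 1e-6);
	// `normalize` picks GiB/s here, since the byte rate clears 1024^3:
	assert!(bs >= KIBIBYTE * KIBIBYTE * KIBIBYTE);
}
```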
diff --git a/substrate/utils/frame/benchmarking-cli/src/machine/mod.rs b/substrate/utils/frame/benchmarking-cli/src/machine/mod.rs
index ee6bf765d0..9e25e58921 100644
--- a/substrate/utils/frame/benchmarking-cli/src/machine/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/machine/mod.rs
@@ -18,6 +18,8 @@
 //! Contains the [`MachineCmd`] as entry point for the node
 //! and the core benchmarking logic.
 
+pub mod hardware;
+
 use sc_cli::{CliConfiguration, Result, SharedParams};
 use sc_service::Configuration;
 use sc_sysinfo::{
@@ -26,9 +28,12 @@ use sc_sysinfo::{
 };
 
 use clap::Parser;
-use log::info;
+use log::{error, info, warn};
 use prettytable::{cell, row, table};
-use std::{fmt::Debug, fs, time::Duration};
+use std::{boxed::Box, fmt::Debug, fs, path::Path};
+
+use crate::shared::check_build_profile;
+pub use hardware::{Metric, Requirement, Requirements, Throughput, SUBSTRATE_REFERENCE_HARDWARE};
 
 /// Command to benchmark the hardware.
 ///
@@ -44,38 +49,174 @@ pub struct MachineCmd {
 	#[clap(flatten)]
 	pub shared_params: SharedParams,
 
+	/// Do not return an error if any check fails.
+	///
+	/// Should only be used for debugging.
+	#[clap(long)]
+	pub allow_fail: bool,
+
+	/// Set a fault tolerance for passing a requirement.
+	///
+	/// 10% means that the test would pass even when only 90% of the score was achieved.
+	/// Can be used to mitigate outliers of the benchmarks.
+	#[clap(long, default_value = "10.0", value_name = "PERCENT")]
+	pub tolerance: f64,
+
 	/// Time limit for the verification benchmark.
 	#[clap(long, default_value = "2.0", value_name = "SECONDS")]
 	pub verify_duration: f32,
+
+	/// Time limit for each disk benchmark.
+	#[clap(long, default_value = "5.0", value_name = "SECONDS")]
+	pub disk_duration: f32,
+}
+
+/// Helper for the result of a concrete benchmark.
+struct BenchResult {
+	/// Did the hardware pass the benchmark?
+	passed: bool,
+
+	/// The absolute score that was achieved.
+	score: Throughput,
+
+	/// The score relative to the minimal required score.
+	///
+	/// A value below 1.0 means that the requirement was missed,
+	/// a value above 1.0 means that it was exceeded.
+	rel_score: f64,
+}
+
+/// Errors that can be returned by this command.
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+pub enum Error {
+	#[error("One of the benchmarks had a score that was lower than its requirement")]
+	UnmetRequirement,
+
+	#[error("The build profile is unfit for benchmarking: {0}")]
+	BadBuildProfile(String),
+
+	#[error("Benchmark results are off by at least a factor of 100")]
+	BadResults,
+}
 
 impl MachineCmd {
 	/// Execute the benchmark and print the results.
-	pub fn run(&self, cfg: &Configuration) -> Result<()> {
+	pub fn run(&self, cfg: &Configuration, requirements: Requirements) -> Result<()> {
+		self.validate_args()?;
 		// Ensure that the dir exists since the node is not started to take care of it.
 		let dir = cfg.database.path().ok_or("No DB directory provided")?;
 		fs::create_dir_all(dir)?;
 
 		info!("Running machine benchmarks...");
-		let write = benchmark_disk_sequential_writes(dir)?;
-		let read = benchmark_disk_random_writes(dir)?;
-		let verify_limit =
-			ExecutionLimit::MaxDuration(Duration::from_secs_f32(self.verify_duration));
-		let verify = benchmark_sr25519_verify(verify_limit) * 1024.0;
+		let mut results = Vec::new();
+		for requirement in &requirements.0 {
+			let result = self.run_benchmark(requirement, &dir)?;
+			results.push(result);
+		}
+		self.print_summary(requirements, results)
+	}
 
+	/// Benchmarks a specific metric of the hardware and judges the resulting score.
+	fn run_benchmark(&self, requirement: &Requirement, dir: &Path) -> Result<BenchResult> {
+		// Dispatch the concrete function from `sc-sysinfo`.
+		let score = self.measure(&requirement.metric, dir)?;
+		let rel_score = score.to_bs() / requirement.minimum.to_bs();
+
+		// Sanity check whether the result is off by a factor of more than 100.
+		if rel_score >= 100.0 || rel_score <= 0.01 {
+			self.check_failed(Error::BadResults)?;
+		}
+		let passed = rel_score >= (1.0 - (self.tolerance / 100.0));
+		Ok(BenchResult { passed, score, rel_score })
+	}
+
+	/// Measures a metric of the hardware.
+	fn measure(&self, metric: &Metric, dir: &Path) -> Result<Throughput> {
+		let verify_limit = ExecutionLimit::from_secs_f32(self.verify_duration);
+		let disk_limit = ExecutionLimit::from_secs_f32(self.disk_duration);
+
+		let score = match metric {
+			Metric::Blake2256 => Throughput::MiBs(benchmark_cpu() as f64),
+			Metric::Sr25519Verify => Throughput::MiBs(benchmark_sr25519_verify(verify_limit)),
+			Metric::MemCopy => Throughput::MiBs(benchmark_memory() as f64),
+			Metric::DiskSeqWrite =>
+				Throughput::MiBs(benchmark_disk_sequential_writes(disk_limit, dir)? as f64),
+			Metric::DiskRndWrite =>
+				Throughput::MiBs(benchmark_disk_random_writes(disk_limit, dir)? as f64),
+		};
+		Ok(score)
+	}
+
+	/// Prints a human-readable summary.
+	fn print_summary(&self, requirements: Requirements, results: Vec<BenchResult>) -> Result<()> {
 		// Use a table for nicer console output.
-		let table = table!(
-			["Category", "Function", "Score", "Unit"],
-			["CPU", "BLAKE2-256", benchmark_cpu(), "MB/s"],
-			["CPU", "SR25519 Verify", format!("{:.1}", verify), "KB/s"],
-			["Memory", "Copy", benchmark_memory(), "MB/s"],
-			["Disk", "Seq Write", write, "MB/s"],
-			["Disk", "Rnd Write", read, "MB/s"]
-		);
+		let mut table = table!(["Category", "Function", "Score", "Minimum", "Result"]);
+		// Count how many passed and how many failed.
+		let (mut passed, mut failed) = (0, 0);
+		for (requirement, result) in requirements.0.iter().zip(results.iter()) {
+			if result.passed {
+				passed += 1
+			} else {
+				failed += 1
+			}
 
-		info!("\n{}", table);
+			table.add_row(result.to_row(requirement));
+		}
+		// Print the table and a summary.
+		info!(
+			"\n{}\nFrom {} benchmarks in total, {} passed and {} failed ({:.0?}% fault tolerance).",
+			table,
+			passed + failed,
+			passed,
+			failed,
+			self.tolerance
+		);
+		// Print the final result.
+		if failed != 0 {
+			info!("The hardware fails to meet the requirements");
+			self.check_failed(Error::UnmetRequirement)?;
+		} else {
+			info!("The hardware meets the requirements");
+		}
+		// Check that the results were not created by a bad build profile.
+		if let Err(err) = check_build_profile() {
+			self.check_failed(Error::BadBuildProfile(err))?;
+		}
 		Ok(())
 	}
+
+	/// Returns `Ok` if [`Self::allow_fail`] is set, and the passed error otherwise.
+	fn check_failed(&self, e: Error) -> Result<()> {
+		if !self.allow_fail {
+			error!("Failing since --allow-fail is not set");
+			Err(sc_cli::Error::Application(Box::new(e)))
+		} else {
+			warn!("Ignoring error since --allow-fail is set: {:?}", e);
+			Ok(())
+		}
+	}
+
+	/// Validates the CLI arguments.
+	fn validate_args(&self) -> Result<()> {
+		if self.tolerance > 100.0 || self.tolerance < 0.0 {
+			return Err("The --tolerance argument is out of range".into())
+		}
+		Ok(())
+	}
+}
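The pass criterion in `run_benchmark` reduces to a single comparison between the relative score and the `--tolerance` percentage: with the default 10% tolerance, a relative score of 0.9 still passes. A standalone restatement (the free function is illustrative; the real check lives inline above):

```rust
/// The predicate used by `run_benchmark`, extracted for illustration.
fn passes(rel_score: f64, tolerance_percent: f64) -> bool {
	rel_score >= 1.0 - (tolerance_percent / 100.0)
}

fn main() {
	assert!(passes(0.90, 10.0)); // exactly at the default tolerance
	assert!(!passes(0.89, 10.0)); // just below it
	assert!(passes(1.50, 0.0)); // exceeding the requirement always passes
}
```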
+
+impl BenchResult {
+	/// Formats [`Self`] as a row that can be printed in a table.
+	fn to_row(&self, req: &Requirement) -> prettytable::Row {
+		let passed = if self.passed { "✅ Pass" } else { "❌ Fail" };
+		row![
+			req.metric.category(),
+			req.metric.name(),
+			format!("{}", self.score),
+			format!("{}", req.minimum),
+			format!("{} ({: >5.1?} %)", passed, self.rel_score * 100.0)
+		]
+	}
+}
 
 // Boilerplate
diff --git a/substrate/utils/frame/benchmarking-cli/src/machine/reference_hardware.json b/substrate/utils/frame/benchmarking-cli/src/machine/reference_hardware.json
new file mode 100644
index 0000000000..12645df839
--- /dev/null
+++ b/substrate/utils/frame/benchmarking-cli/src/machine/reference_hardware.json
@@ -0,0 +1,32 @@
+[
+  {
+    "metric": "Blake2256",
+    "minimum": {
+      "MiBs": 1029.0
+    }
+  },
+  {
+    "metric": "Sr25519Verify",
+    "minimum": {
+      "KiBs": 666.0
+    }
+  },
+  {
+    "metric": "MemCopy",
+    "minimum": {
+      "GiBs": 14.323
+    }
+  },
+  {
+    "metric": "DiskSeqWrite",
+    "minimum": {
+      "MiBs": 450.0
+    }
+  },
+  {
+    "metric": "DiskRndWrite",
+    "minimum": {
+      "MiBs": 200.0
+    }
+  }
+]
diff --git a/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs b/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs
index 853fbdef8e..f959c285a3 100644
--- a/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/shared/mod.rs
@@ -73,3 +73,19 @@ pub fn new_rng(seed: Option<u64>) -> (impl rand::Rng, u64) {
 	let seed = seed.unwrap_or(rand::thread_rng().gen::<u64>());
 	(rand_pcg::Pcg64::seed_from_u64(seed), seed)
 }
+
+/// Returns an error if a debug profile is detected.
+///
+/// The Rust compiler only exposes the binary information of whether
+/// or not we are in a `debug` build.
+/// This means that `release` and `production` cannot be told apart.
+/// This function therefore additionally checks that `OPT_LEVEL` is set to 3.
+pub fn check_build_profile() -> Result<(), String> {
+	if cfg!(build_profile = "debug") {
+		Err("Detected a `debug` profile".into())
+	} else if !cfg!(build_opt_level = "3") {
+		Err("The optimization level is not set to 3".into())
+	} else {
+		Ok(())
+	}
+}
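Since `Requirement` and `Throughput` derive `Serialize`/`Deserialize`, the JSON above maps directly onto the Rust types via serde's default externally tagged enum representation: `{"MiBs": 1029.0}` becomes `Throughput::MiBs(1029.0)`. A sketch of building a requirement set inline instead of from a bundled file (the values and the low-spec scenario are illustrative):

```rust
use frame_benchmarking_cli::Requirements;

/// Sketch: weaker requirements for a hypothetical low-spec chain, using the
/// same schema as `reference_hardware.json`.
fn relaxed_requirements() -> Requirements {
	serde_json::from_str(
		r#"[
			{ "metric": "Blake2256", "minimum": { "MiBs": 512.0 } },
			{ "metric": "DiskSeqWrite", "minimum": { "MiBs": 100.0 } }
		]"#,
	)
	.expect("Inline JSON is valid; qed")
}
```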