mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 14:37:57 +00:00
f0e589d72e
### What's been done - `subsystem-bench` has been split into two parts: a cli benchmark runner and a library. - The cli runner is quite simple. It just allows us to run `.yaml` based test sequences. Now it should only be used to run benchmarks during development. - The library is used in the cli runner and in regression tests. Some code is changed to make the library independent of the runner. - Added first regression tests for availability read and write that replicate existing test sequences. ### How we run regression tests - Regression tests are simply rust integration tests without the harnesses. - They should only be compiled under the `subsystem-benchmarks` feature to prevent them from running with other tests. - This doesn't work when running tests with `nextest` in CI, so additional filters have been added to the `nextest` runs. - Each benchmark run takes a different time in the beginning, so we "warm up" the tests until their CPU usage differs by only 1%. - After the warm-up, we run the benchmarks a few more times and compare the average with the expectation using a precision. ### What is still wrong? - I haven't managed to set up approval voting tests. The spread of their results is too large and can't be narrowed down in a reasonable amount of time in the warm-up phase. - The tests start an unconfigurable prometheus endpoint inside, which causes errors because they use the same 9999 port. I disable it with a flag, but I think it's better to extract the endpoint launching outside the test, as we already do with `valgrind` and `pyroscope`. But we still use `prometheus` inside the tests. ### Future work * https://github.com/paritytech/polkadot-sdk/issues/3528 * https://github.com/paritytech/polkadot-sdk/issues/3529 * https://github.com/paritytech/polkadot-sdk/issues/3530 * https://github.com/paritytech/polkadot-sdk/issues/3531 --------- Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
114 lines
3.5 KiB
Rust
114 lines
3.5 KiB
Rust
// Copyright (C) Parity Technologies (UK) Ltd.
|
|
// This file is part of Polkadot.
|
|
|
|
// Polkadot is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
|
|
// Polkadot is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU General Public License for more details.
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
//! availability-read regression tests
|
|
//!
|
|
//! TODO: Explain the test case after configuration adjusted to Kusama
|
|
//!
|
|
//! Subsystems involved:
|
|
//! - availability-distribution
|
|
//! - bitfield-distribution
|
|
//! - availability-store
|
|
|
|
use polkadot_subsystem_bench::{
|
|
availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
|
|
configuration::{PeerLatency, TestConfiguration},
|
|
usage::BenchmarkUsage,
|
|
};
|
|
|
|
// Number of measured benchmark runs that are averaged into the final usage report.
const BENCH_COUNT: usize = 3;
// Maximum number of warm-up iterations before `warm_up` gives up with an error.
const WARM_UP_COUNT: usize = 20;
// Relative CPU-usage difference (1%) between two consecutive warm-up runs below
// which the benchmark is considered stable enough to measure.
const WARM_UP_PRECISION: f64 = 0.01;
|
fn main() -> Result<(), String> {
|
|
let mut messages = vec![];
|
|
|
|
// TODO: Adjust the test configurations to Kusama values
|
|
let mut config = TestConfiguration::default();
|
|
config.latency = Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 });
|
|
config.n_validators = 1000;
|
|
config.n_cores = 200;
|
|
config.max_validators_per_core = 5;
|
|
config.min_pov_size = 5120;
|
|
config.max_pov_size = 5120;
|
|
config.peer_bandwidth = 52428800;
|
|
config.bandwidth = 52428800;
|
|
config.connectivity = 75;
|
|
config.num_blocks = 3;
|
|
config.generate_pov_sizes();
|
|
|
|
warm_up(config.clone())?;
|
|
let usage = benchmark(config.clone());
|
|
|
|
messages.extend(usage.check_network_usage(&[
|
|
("Received from peers", 4330.0, 0.05),
|
|
("Sent to peers", 15900.0, 0.05),
|
|
]));
|
|
messages.extend(usage.check_cpu_usage(&[
|
|
("availability-distribution", 0.025, 0.05),
|
|
("bitfield-distribution", 0.085, 0.05),
|
|
("availability-store", 0.180, 0.05),
|
|
]));
|
|
|
|
if messages.is_empty() {
|
|
Ok(())
|
|
} else {
|
|
eprintln!("{}", messages.join("\n"));
|
|
Err("Regressions found".to_string())
|
|
}
|
|
}
|
|
|
|
fn warm_up(config: TestConfiguration) -> Result<(), String> {
|
|
println!("Warming up...");
|
|
let mut prev_run: Option<BenchmarkUsage> = None;
|
|
for _ in 0..WARM_UP_COUNT {
|
|
let curr = run(config.clone());
|
|
if let Some(ref prev) = prev_run {
|
|
let av_distr_diff =
|
|
curr.cpu_usage_diff(prev, "availability-distribution").expect("Must exist");
|
|
let bitf_distr_diff =
|
|
curr.cpu_usage_diff(prev, "bitfield-distribution").expect("Must exist");
|
|
let av_store_diff =
|
|
curr.cpu_usage_diff(prev, "availability-store").expect("Must exist");
|
|
if av_distr_diff < WARM_UP_PRECISION &&
|
|
bitf_distr_diff < WARM_UP_PRECISION &&
|
|
av_store_diff < WARM_UP_PRECISION
|
|
{
|
|
return Ok(())
|
|
}
|
|
}
|
|
prev_run = Some(curr);
|
|
}
|
|
|
|
Err("Can't warm up".to_string())
|
|
}
|
|
|
|
fn benchmark(config: TestConfiguration) -> BenchmarkUsage {
|
|
println!("Benchmarking...");
|
|
let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT).map(|_| run(config.clone())).collect();
|
|
let usage = BenchmarkUsage::average(&usages);
|
|
println!("{}", usage);
|
|
usage
|
|
}
|
|
|
|
/// Executes one full availability-write benchmark pass: builds fresh test
/// state, prepares the test environment, and blocks on the benchmark until it
/// completes, returning the collected usage statistics.
fn run(config: TestConfiguration) -> BenchmarkUsage {
	let mut state = TestState::new(&config);
	// The trailing `false` presumably disables the built-in prometheus
	// endpoint so concurrent test runs don't clash on the same port — see the
	// PR notes; confirm against `prepare_test`'s signature.
	let (mut env, _protocol_config) =
		prepare_test(config.clone(), &mut state, TestDataAvailability::Write, false);
	env.runtime()
		.block_on(benchmark_availability_write("data_availability_write", &mut env, state))
}