Files
pezkuwi-subxt/polkadot/node/network/availability-distribution/tests/availability-distribution-regression-bench.rs
T
Andrei Eres 05381afcb2 subsystem-bench: adjust test config to Kusama (#3583)
Fixes https://github.com/paritytech/polkadot-sdk/issues/3528

```rust
latency:
    mean_latency_ms = 30 // common sense
    std_dev = 2.0 // common sense
n_validators = 300 // max number of validators, from chain config
n_cores = 60 // 300/5
max_validators_per_core = 5 // default
min_pov_size = 5120 // max
max_pov_size = 5120 // max
peer_bandwidth = 44040192 // from the Parity's kusama validators
bandwidth = 44040192 // from the Parity's kusama validators
connectivity = 90 // we need to be connected to 90-95% of peers
```
2024-03-11 16:17:45 +00:00

105 lines
3.3 KiB
Rust

// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! availability-write regression tests
//!
//! Availability write benchmark based on Kusama parameters and scale.
//!
//! Subsystems involved:
//! - availability-distribution
//! - bitfield-distribution
//! - availability-store
use polkadot_subsystem_bench::{
availability::{benchmark_availability_write, prepare_test, TestDataAvailability, TestState},
configuration::TestConfiguration,
usage::BenchmarkUsage,
};
// Number of benchmark runs to average for the final reported usage figures.
const BENCH_COUNT: usize = 3;
// Upper bound on warm-up iterations before giving up (see `warm_up`).
const WARM_UP_COUNT: usize = 30;
// Relative CPU-usage difference between consecutive warm-up runs below which
// the system is considered warmed up and measurements are stable enough.
const WARM_UP_PRECISION: f64 = 0.01;
/// Entry point: warms the benchmark up, runs it, and compares the measured
/// network and CPU usage against the recorded baselines.
///
/// Returns `Err` (with all violations already printed to stderr) when any
/// metric drifts outside its allowed tolerance, so CI fails the job.
fn main() -> Result<(), String> {
    // A single node effort roughly n_cores * needed_approvals / n_validators = 60 * 30 / 300
    let mut config = TestConfiguration::default();
    config.n_cores = 6;
    config.num_blocks = 3;
    config.generate_pov_sizes();

    warm_up(config.clone())?;
    let usage = benchmark(config.clone());

    // Collect every baseline violation; each entry is (label, baseline, tolerance).
    let mut regressions = vec![];
    regressions.extend(usage.check_network_usage(&[
        ("Received from peers", 4330.0, 0.05),
        ("Sent to peers", 15900.0, 0.05),
    ]));
    regressions.extend(usage.check_cpu_usage(&[
        ("availability-distribution", 0.025, 0.05),
        ("bitfield-distribution", 0.085, 0.05),
        ("availability-store", 0.180, 0.05),
    ]));

    if regressions.is_empty() {
        return Ok(())
    }
    eprintln!("{}", regressions.join("\n"));
    Err("Regressions found".to_string())
}
/// Repeatedly runs the benchmark until CPU usage of all observed subsystems
/// stabilizes (consecutive runs differ by less than `WARM_UP_PRECISION`),
/// or fails after `WARM_UP_COUNT` attempts.
fn warm_up(config: TestConfiguration) -> Result<(), String> {
    println!("Warming up...");
    let mut previous: Option<BenchmarkUsage> = None;
    for _ in 0..WARM_UP_COUNT {
        let current = run(config.clone());
        if let Some(ref prev) = previous {
            // Converged only when every tracked subsystem's CPU usage has settled.
            let converged = ["availability-distribution", "bitfield-distribution", "availability-store"]
                .iter()
                .all(|subsystem| {
                    current.cpu_usage_diff(prev, subsystem).expect("Must exist") < WARM_UP_PRECISION
                });
            if converged {
                return Ok(())
            }
        }
        previous = Some(current);
    }
    Err("Can't warm up".to_string())
}
/// Runs the benchmark `BENCH_COUNT` times and returns the averaged usage,
/// printing it for the CI log along the way.
fn benchmark(config: TestConfiguration) -> BenchmarkUsage {
    println!("Benchmarking...");
    let mut samples = Vec::with_capacity(BENCH_COUNT);
    for _ in 0..BENCH_COUNT {
        samples.push(run(config.clone()));
    }
    let averaged = BenchmarkUsage::average(&samples);
    println!("{}", averaged);
    averaged
}
/// Executes one availability-write benchmark pass for the given configuration
/// and returns its measured usage.
fn run(config: TestConfiguration) -> BenchmarkUsage {
    // Fresh per-run state so successive runs do not influence each other.
    let mut test_state = TestState::new(&config);
    let (mut environment, _protocol_config) =
        prepare_test(config.clone(), &mut test_state, TestDataAvailability::Write, false);
    environment.runtime().block_on(benchmark_availability_write(
        "data_availability_write",
        &mut environment,
        test_state,
    ))
}