feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit 286de54384
6841 changed files with 1848356 additions and 0 deletions
@@ -0,0 +1,212 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A tool for running subsystem benchmark tests
//! designed for development and CI regression testing.
use clap::Parser;
use color_eyre::eyre;
use colored::Colorize;
use pezkuwi_subsystem_bench::{approval, availability, configuration, disputes, statement};
use pyroscope::PyroscopeAgent;
use pyroscope_pprofrs::{pprof_backend, PprofConfig};
use serde::{Deserialize, Serialize};
use std::path::Path;
mod valgrind;
const LOG_TARGET: &str = "subsystem-bench::cli";
/// Supported test objectives
#[derive(Debug, Clone, Parser, Serialize, Deserialize)]
#[command(rename_all = "kebab-case")]
pub enum TestObjective {
    /// Benchmark availability recovery strategies.
    DataAvailabilityRead(availability::DataAvailabilityReadOptions),
    /// Benchmark availability and bitfield distribution.
    DataAvailabilityWrite,
    /// Benchmark the approval-voting and approval-distribution subsystems.
    ApprovalVoting(approval::ApprovalsOptions),
    /// Benchmark the statement-distribution subsystem.
    StatementDistribution,
    /// Benchmark the dispute-coordinator subsystem.
    DisputeCoordinator(disputes::DisputesOptions),
}
impl std::fmt::Display for TestObjective {
    /// Renders the objective as its bare variant name (no payload).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::DataAvailabilityRead(_) => "DataAvailabilityRead",
            Self::DataAvailabilityWrite => "DataAvailabilityWrite",
            Self::ApprovalVoting(_) => "ApprovalVoting",
            Self::StatementDistribution => "StatementDistribution",
            Self::DisputeCoordinator(_) => "DisputeCoordinator",
        };
        f.write_str(name)
    }
}
/// The test input parameters
#[derive(Clone, Debug, Serialize, Deserialize)]
struct CliTestConfiguration {
    /// Test objective, i.e. which subsystem benchmark to run.
    pub objective: TestObjective,
    /// Test configuration; flattened so its fields appear at the same
    /// YAML level as `objective` in the input file.
    #[serde(flatten)]
    pub test_config: configuration::TestConfiguration,
}
/// A sequence of benchmark steps, as loaded from the YAML input file.
#[derive(Serialize, Deserialize)]
pub struct TestSequence {
    // Stored under the `TestConfiguration` key in both serialization directions.
    #[serde(rename(serialize = "TestConfiguration", deserialize = "TestConfiguration"))]
    test_configurations: Vec<CliTestConfiguration>,
}
impl TestSequence {
    /// Loads a test sequence from the YAML file at `path`.
    ///
    /// Returns an `Err` if the file cannot be read. Invalid UTF-8 or malformed
    /// YAML is reported as an `InvalidData` I/O error instead of panicking,
    /// honoring the `io::Result` contract of this function. (The previous
    /// version `expect`ed on both, with a truncated message "…YA".)
    fn new_from_file(path: &Path) -> std::io::Result<TestSequence> {
        // `read_to_string` already validates UTF-8, replacing the manual
        // `String::from_utf8(fs::read(..))` round trip.
        let string = std::fs::read_to_string(path)?;
        serde_yaml::from_str(&string).map_err(|e| {
            std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("invalid test sequence YAML: {e}"),
            )
        })
    }
}
/// Command-line options of the subsystem benchmarking tool.
#[derive(Debug, Parser)]
#[allow(missing_docs)]
struct BenchCli {
    #[clap(long, default_value_t = false)]
    /// Enable CPU Profiling with Pyroscope
    pub profile: bool,
    #[clap(long, requires = "profile", default_value_t = String::from("http://localhost:4040"))]
    /// Pyroscope Server URL
    pub pyroscope_url: String,
    #[clap(long, requires = "profile", default_value_t = 113)]
    /// Pyroscope Sample Rate
    pub pyroscope_sample_rate: u32,
    #[clap(long, default_value_t = false)]
    /// Enable Cache Misses Profiling with Valgrind. Linux only, Valgrind must be in the PATH
    pub cache_misses: bool,
    #[arg(required = true)]
    /// Path to the test sequence configuration file
    pub path: String,
}
impl BenchCli {
    /// Runs the configured benchmark sequence.
    ///
    /// Order of operations:
    /// 1. If `--cache-misses` is set and we are not already under Valgrind,
    ///    re-exec the whole process under Valgrind (that call only returns on failure).
    /// 2. If `--profile` is set, start a Pyroscope profiling agent.
    /// 3. Load the test sequence from `self.path` and run each step, printing
    ///    a resource-usage report after each one.
    fn launch(self) -> eyre::Result<()> {
        let is_valgrind_running = valgrind::is_valgrind_running();
        if !is_valgrind_running && self.cache_misses {
            // Replaces the current process image; returns only on spawn failure.
            return valgrind::relaunch_in_valgrind_mode();
        }
        let agent_running = if self.profile {
            let agent = PyroscopeAgent::builder(self.pyroscope_url.as_str(), "subsystem-bench")
                .backend(pprof_backend(PprofConfig::new().sample_rate(self.pyroscope_sample_rate)))
                .build()?;
            Some(agent.start()?)
        } else {
            None
        };
        let test_sequence = TestSequence::new_from_file(Path::new(&self.path))
            .expect("File exists")
            .test_configurations;
        let num_steps = test_sequence.len();
        gum::info!("{}", format!("Sequence contains {num_steps} step(s)").bright_purple());
        for (index, CliTestConfiguration { objective, mut test_config }) in
            test_sequence.into_iter().enumerate()
        {
            let benchmark_name = format!("{} #{} {}", &self.path, index + 1, objective);
            gum::info!(target: LOG_TARGET, "{}", format!("Step {}/{}", index + 1, num_steps).bright_purple(),);
            gum::info!(target: LOG_TARGET, "[{}] {}", format!("objective = {objective:?}").green(), test_config);
            test_config.generate_pov_sizes();
            // Dispatch to the benchmark implementation matching the objective; each
            // branch builds its test state/environment and blocks on the async benchmark.
            let usage = match objective {
                TestObjective::DataAvailabilityRead(opts) => {
                    let state = availability::TestState::new(&test_config);
                    let (mut env, _protocol_config) = availability::prepare_test(
                        &state,
                        availability::TestDataAvailability::Read(opts),
                        true,
                    );
                    env.runtime()
                        .block_on(availability::benchmark_availability_read(&mut env, &state))
                },
                TestObjective::DataAvailabilityWrite => {
                    let state = availability::TestState::new(&test_config);
                    let (mut env, _protocol_config) = availability::prepare_test(
                        &state,
                        availability::TestDataAvailability::Write,
                        true,
                    );
                    env.runtime()
                        .block_on(availability::benchmark_availability_write(&mut env, &state))
                },
                TestObjective::ApprovalVoting(ref options) => {
                    let (mut env, state) =
                        approval::prepare_test(test_config.clone(), options.clone(), true);
                    env.runtime().block_on(approval::bench_approvals(&mut env, state))
                },
                TestObjective::StatementDistribution => {
                    let state = statement::TestState::new(&test_config);
                    let mut env = statement::prepare_test(&state, true);
                    env.runtime()
                        .block_on(statement::benchmark_statement_distribution(&mut env, &state))
                },
                TestObjective::DisputeCoordinator(ref options) => {
                    let state = disputes::TestState::new(&test_config, options);
                    let mut env = disputes::prepare_test(&state, true);
                    env.runtime()
                        .block_on(disputes::benchmark_dispute_coordinator(&mut env, &state))
                },
            };
            println!("\n{}\n{}", benchmark_name.purple(), usage);
        }
        // Cleanly stop and shut down the profiling agent, if one was started.
        if let Some(agent_running) = agent_running {
            let agent_ready = agent_running.stop()?;
            agent_ready.shutdown();
        }
        Ok(())
    }
}
// When memory profiling is enabled, use jemalloc as the global allocator so
// allocations can be traced with its profiling facilities.
#[cfg(feature = "memprofile")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[cfg(feature = "memprofile")]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
// See https://jemalloc.net/jemalloc.3.html for more information on the configuration options.
// jemalloc reads this as a C string, hence the explicit trailing NUL byte.
pub static malloc_conf: &[u8] =
    b"prof:true,prof_active:true,lg_prof_interval:30,lg_prof_sample:21,prof_prefix:/tmp/subsystem-bench\0";
fn main() -> eyre::Result<()> {
color_eyre::install()?;
sp_tracing::try_init_simple();
let cli: BenchCli = BenchCli::parse();
cli.launch()?;
Ok(())
}
@@ -0,0 +1,49 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use color_eyre::eyre;
/// Show if the app is running under Valgrind.
///
/// Valgrind injects its tool library via `LD_PRELOAD`, so the presence of the
/// substring "valgrind" in that variable is used as the detection signal.
pub(crate) fn is_valgrind_running() -> bool {
    std::env::var("LD_PRELOAD")
        .map(|preload| preload.contains("valgrind"))
        .unwrap_or(false)
}
/// Stop execution and relaunch the app under valgrind.
/// Cache configuration used to emulate Intel Ice Lake (size, associativity, line size):
/// L1 instruction: 32,768 B, 8-way, 64 B lines
/// L1 data: 49,152 B, 12-way, 64 B lines
/// Last-level: 2,097,152 B, 16-way, 64 B lines
pub(crate) fn relaunch_in_valgrind_mode() -> eyre::Result<()> {
    use std::os::unix::process::CommandExt;
    // `exec` replaces the current process image on success and only returns on
    // failure, so reaching the `Err` below always means the spawn failed.
    let err = std::process::Command::new("valgrind")
        .arg("--tool=cachegrind")
        .arg("--cache-sim=yes")
        .arg("--log-file=cachegrind_report.txt")
        .arg("--I1=32768,8,64")
        .arg("--D1=49152,12,64")
        .arg("--LL=2097152,16,64")
        .arg("--verbose")
        // Forward the complete original command line (including argv[0]) so
        // valgrind re-runs this exact invocation.
        .args(std::env::args())
        .exec();
    // Fixed: the message previously started with a Cyrillic 'С' (U+0421)
    // instead of an ASCII 'C'.
    Err(eyre::eyre!(
        "Cannot run Valgrind, check that it is installed and available in the PATH\n{}",
        err
    ))
}
@@ -0,0 +1,226 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::configuration::TestAuthorities;
use itertools::Itertools;
use pezkuwi_node_network_protocol::{
grid_topology::{SessionGridTopology, TopologyPeerInfo},
View,
};
use pezkuwi_node_primitives::approval::time::{Clock, SystemClock, Tick};
use pezkuwi_node_subsystem::messages::{
ApprovalDistributionMessage, ApprovalVotingParallelMessage,
};
use pezkuwi_node_subsystem_types::messages::{
network_bridge_event::NewGossipTopology, NetworkBridgeEvent,
};
use pezkuwi_overseer::AllMessages;
use pezkuwi_primitives::{
BlockNumber, CandidateEvent, CandidateReceiptV2, CoreIndex, GroupIndex, Hash, Header,
Id as ParaId, MutateDescriptorV2, Slot, ValidatorIndex,
};
use pezkuwi_primitives_test_helpers::dummy_candidate_receipt_v2_bad_sig;
use rand::{seq::SliceRandom, SeedableRng};
use rand_chacha::ChaCha20Rng;
use sc_network_types::PeerId;
use sp_consensus_babe::{
digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest},
AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, VrfSignature, VrfTranscript,
};
use sp_core::crypto::VrfSecret;
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
use sp_runtime::{Digest, DigestItem};
use std::sync::{atomic::AtomicU64, Arc};
/// A fake system clock used for driving the approval voting and making
/// it process blocks, assignments and approvals from the past.
#[derive(Clone)]
pub struct PastSystemClock {
    /// The real system clock
    real_system_clock: SystemClock,
    /// The difference in ticks between the real system clock and the current clock.
    delta_ticks: Arc<AtomicU64>,
}
impl PastSystemClock {
/// Creates a new fake system clock with `delta_ticks` between the real time and the fake one.
pub fn new(real_system_clock: SystemClock, delta_ticks: Arc<AtomicU64>) -> Self {
PastSystemClock { real_system_clock, delta_ticks }
}
}
impl Clock for PastSystemClock {
    /// Current tick, shifted `delta_ticks` into the past relative to the real clock.
    fn tick_now(&self) -> Tick {
        self.real_system_clock.tick_now() -
        self.delta_ticks.load(std::sync::atomic::Ordering::SeqCst)
    }
    /// Waits until the shifted clock reaches `tick`.
    ///
    /// The delta is added back here so that waiting is symmetric with
    /// `tick_now`: waiting for shifted tick T waits for real tick T + delta.
    fn wait(
        &self,
        tick: Tick,
    ) -> std::pin::Pin<Box<dyn futures::prelude::Future<Output = ()> + Send + 'static>> {
        self.real_system_clock
            .wait(tick + self.delta_ticks.load(std::sync::atomic::Ordering::SeqCst))
    }
}
/// Helper function to generate a babe epoch for this benchmark.
/// It does not change for the duration of the test.
pub fn generate_babe_epoch(current_slot: Slot, authorities: TestAuthorities) -> BabeEpoch {
    // Pair each authority with its index as the weight.
    let mut indexed_authorities = Vec::new();
    for (index, public) in authorities.validator_babe_id.into_iter().enumerate() {
        indexed_authorities.push((public, index as u64));
    }
    BabeEpoch {
        epoch_index: 1,
        start_slot: current_slot.saturating_sub(1u64),
        duration: 200,
        authorities: indexed_authorities,
        randomness: [0xde; 32],
        config: BabeEpochConfiguration { c: (1, 4), allowed_slots: AllowedSlots::PrimarySlots },
    }
}
/// Generates a topology to be used for this benchmark.
pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopology {
    // Build one TopologyPeerInfo per (authority id, peer id) pair.
    let peer_infos = test_authorities
        .validator_authority_id
        .clone()
        .into_iter()
        .zip(test_authorities.peer_ids.clone())
        .enumerate()
        .map(|(index, (discovery_id, peer_id))| TopologyPeerInfo {
            peer_ids: vec![peer_id],
            validator_index: ValidatorIndex(index as u32),
            discovery_id,
        })
        .collect_vec();
    // Identity ordering: validators are deliberately not shuffled for the benchmark.
    let identity_ordering = (0..peer_infos.len()).collect_vec();
    SessionGridTopology::new(identity_ordering, peer_infos)
}
/// Generates new session topology message.
pub fn generate_new_session_topology(
    test_authorities: &TestAuthorities,
    test_node: ValidatorIndex,
    approval_voting_parallel_enabled: bool,
) -> Vec<AllMessages> {
    let event = NetworkBridgeEvent::NewGossipTopology(NewGossipTopology {
        session: 1,
        topology: generate_topology(test_authorities),
        local_index: Some(test_node),
    });
    // Route the update to whichever approval subsystem is active in this configuration.
    let message = if approval_voting_parallel_enabled {
        AllMessages::ApprovalVotingParallel(ApprovalVotingParallelMessage::NetworkBridgeUpdate(
            event,
        ))
    } else {
        AllMessages::ApprovalDistribution(ApprovalDistributionMessage::NetworkBridgeUpdate(event))
    };
    vec![message]
}
/// Generates a peer view change for the passed `block_hash`
pub fn generate_peer_view_change_for(
    block_hash: Hash,
    peer_id: PeerId,
    approval_voting_parallel_enabled: bool,
) -> AllMessages {
    let update = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0));
    // Route the event to whichever approval subsystem is active in this configuration.
    match approval_voting_parallel_enabled {
        true => AllMessages::ApprovalVotingParallel(
            ApprovalVotingParallelMessage::NetworkBridgeUpdate(update),
        ),
        false => AllMessages::ApprovalDistribution(
            ApprovalDistributionMessage::NetworkBridgeUpdate(update),
        ),
    }
}
/// Helper function to create a (garbage) VRF signature for the block header.
/// A throwaway transcript signed with Alice's key; the content is garbage by design.
fn garbage_vrf_signature() -> VrfSignature {
    let transcript = VrfTranscript::new(b"test-garbage", &[]);
    Sr25519Keyring::Alice.pair().vrf_sign(&transcript.into())
}
/// Helper function to create a block header.
pub fn make_header(parent_hash: Hash, slot: Slot, number: u32) -> Header {
    // Attach a secondary-VRF BABE pre-digest carrying the slot; the signature
    // itself is garbage since nothing in the benchmark verifies it.
    let mut digest = Digest::default();
    digest.push(DigestItem::babe_pre_digest(PreDigest::SecondaryVRF(SecondaryVRFPreDigest {
        authority_index: 0,
        slot,
        vrf_signature: garbage_vrf_signature(),
    })));
    Header {
        digest,
        extrinsics_root: Default::default(),
        number,
        state_root: Default::default(),
        parent_hash,
    }
}
/// Helper function to create a candidate receipt.
fn make_candidate(para_id: ParaId, hash: &Hash) -> CandidateReceiptV2 {
    let mut receipt = dummy_candidate_receipt_v2_bad_sig(*hash, Some(Default::default()));
    receipt.descriptor.set_para_id(para_id);
    receipt
}
/// Helper function to create a list of candidates that are included in the block.
///
/// Creates one `CandidateIncluded` event per core, randomly keeps
/// `num_candidates` of them (deterministically, seeded by `block_number`)
/// and returns them sorted by core index.
pub fn make_candidates(
    block_hash: Hash,
    block_number: BlockNumber,
    num_cores: u32,
    num_candidates: u32,
) -> Vec<CandidateEvent> {
    // Deterministic RNG seeded from the block number so the generated set is
    // reproducible across runs.
    let seed = [block_number as u8; 32];
    let mut rand_chacha = ChaCha20Rng::from_seed(seed);
    let mut candidates = (0..num_cores)
        .map(|core| {
            CandidateEvent::CandidateIncluded(
                make_candidate(ParaId::from(core), &block_hash),
                Vec::new().into(),
                CoreIndex(core),
                GroupIndex(core),
            )
        })
        .collect_vec();
    // Randomly select `num_candidates` of the `num_cores` events.
    let (candidates, _) = candidates.partial_shuffle(&mut rand_chacha, num_candidates as usize);
    candidates
        .iter()
        .cloned()
        .sorted_by(|a, b| match (a, b) {
            (
                CandidateEvent::CandidateIncluded(_, _, core_a, _),
                CandidateEvent::CandidateIncluded(_, _, core_b, _),
            ) => core_a.0.cmp(&core_b.0),
            // Only `CandidateIncluded` events are constructed above, so this
            // branch is impossible (was a misleading `todo!`).
            (_, _) => unreachable!("make_candidates only creates CandidateIncluded events"),
        })
        .collect_vec()
}
@@ -0,0 +1,676 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
approval::{
helpers::{generate_babe_epoch, generate_topology},
test_message::{MessagesBundle, TestMessageInfo},
ApprovalTestState, ApprovalsOptions, BlockTestData, GeneratedState,
BUFFER_FOR_GENERATION_MILLIS, LOG_TARGET, SLOT_DURATION_MILLIS,
},
configuration::{TestAuthorities, TestConfiguration},
mock::runtime_api::session_info_for_peers,
NODE_UNDER_TEST,
};
use codec::Encode;
use futures::SinkExt;
use itertools::Itertools;
use pezkuwi_node_core_approval_voting::criteria::{compute_assignments, Config};
use pezkuwi_node_network_protocol::{
grid_topology::{GridNeighbors, RandomRouting, RequiredRouting, SessionGridTopology},
v3 as protocol_v3,
};
use pezkuwi_node_primitives::approval::{
self,
time::tranche_to_tick,
v2::{CoreBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2},
};
use pezkuwi_primitives::{
ApprovalVoteMultipleCandidates, CandidateEvent, CandidateHash, CandidateIndex, CoreIndex, Hash,
SessionInfo, Slot, ValidatorId, ValidatorIndex, ASSIGNMENT_KEY_TYPE_ID,
};
use rand::{seq::SliceRandom, RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
use rand_distr::{Distribution, Normal};
use sc_keystore::LocalKeystore;
use sc_network_types::PeerId;
use sc_service::SpawnTaskHandle;
use sha1::Digest;
use sp_application_crypto::AppCrypto;
use sp_consensus_babe::SlotDuration;
use sp_keystore::Keystore;
use sp_timestamp::Timestamp;
use std::{
cmp::max,
collections::{BTreeMap, HashSet},
fs,
io::Write,
path::{Path, PathBuf},
time::Duration,
};
/// A generator of messages coming from a given Peer/Validator
pub struct PeerMessagesGenerator {
    /// The grid neighbors of the node under test.
    pub topology_node_under_test: GridNeighbors,
    /// The topology of the network for the epoch under test.
    pub topology: SessionGridTopology,
    /// The validator index for this object generates the messages.
    pub validator_index: ValidatorIndex,
    /// An array of pre-generated random samplings, that is used to determine, which nodes would
    /// send a given assignment, to the node under test because of the random samplings.
    /// As an optimization we generate this sampling at the beginning of the test and just pick
    /// one randomly, because always taking the samples would be too expensive for benchmark.
    pub random_samplings: Vec<Vec<ValidatorIndex>>,
    /// Channel for sending the generated messages to the aggregator
    pub tx_messages: futures::channel::mpsc::UnboundedSender<(Hash, Vec<MessagesBundle>)>,
    /// The list of test authorities
    pub test_authorities: TestAuthorities,
    /// The session info used for the test.
    pub session_info: SessionInfo,
    /// The blocks used for testing
    pub blocks: Vec<BlockTestData>,
    /// Approval options params.
    pub options: ApprovalsOptions,
}
impl PeerMessagesGenerator {
    /// Generates messages by spawning a blocking task in the background which begins creating
    /// the assignments/approvals and peer view changes at the beginning of each block.
    pub fn generate_messages(mut self, spawn_task_handle: &SpawnTaskHandle) {
        spawn_task_handle.spawn("generate-messages", "generate-messages", async move {
            for block_info in &self.blocks {
                let assignments = self.generate_assignments(block_info);
                // Deterministic per-validator RNG seed: the validator index in
                // the first four bytes, zeros elsewhere.
                let bytes = self.validator_index.0.to_be_bytes();
                let seed = [
                    bytes[0], bytes[1], bytes[2], bytes[3], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                ];
                let mut rand_chacha = ChaCha20Rng::from_seed(seed);
                let approvals = issue_approvals(
                    assignments,
                    block_info.hash,
                    &self.test_authorities.validator_public,
                    block_info.candidates.clone(),
                    &self.options,
                    &mut rand_chacha,
                    self.test_authorities.keyring.keystore_ref(),
                );
                self.tx_messages
                    .send((block_info.hash, approvals))
                    .await
                    .expect("Should not fail");
            }
        })
    }

    /// Builds the messages fingerprint corresponding to this configuration.
    /// When the fingerprint exists already on disk the messages are not re-generated.
    fn messages_fingerprint(
        configuration: &TestConfiguration,
        options: &ApprovalsOptions,
    ) -> String {
        let mut fingerprint = options.fingerprint();
        let configuration_bytes = bincode::serialize(&configuration).unwrap();
        fingerprint.extend(configuration_bytes);
        // SHA-1 is fine here: the hash is only a cache key, not security-sensitive.
        let mut sha1 = sha1::Sha1::new();
        sha1.update(fingerprint);
        let result = sha1.finalize();
        hex::encode(result)
    }

    /// Generate all messages (assignments & approvals) needed for approving `blocks`.
    /// Returns the path of the file where the generated messages are cached.
    pub fn generate_messages_if_needed(
        configuration: &TestConfiguration,
        test_authorities: &TestAuthorities,
        options: &ApprovalsOptions,
        spawn_task_handle: &SpawnTaskHandle,
    ) -> PathBuf {
        let path_name = format!(
            "{}/{}",
            options.workdir_prefix,
            Self::messages_fingerprint(configuration, options)
        );
        let path = Path::new(&path_name);
        // Message files are keyed by fingerprint, so an existing file can be reused as-is.
        if path.exists() {
            return path.to_path_buf();
        }
        gum::info!("Generate message because file does not exist");
        let delta_to_first_slot_under_test = Timestamp::new(BUFFER_FOR_GENERATION_MILLIS);
        // Shift the initial slot into the past so the node under test processes
        // the generated blocks/assignments as historical data.
        let initial_slot = Slot::from_timestamp(
            (*Timestamp::current() - *delta_to_first_slot_under_test).into(),
            SlotDuration::from_millis(SLOT_DURATION_MILLIS),
        );
        let babe_epoch = generate_babe_epoch(initial_slot, test_authorities.clone());
        let session_info = session_info_for_peers(configuration, test_authorities);
        let blocks = ApprovalTestState::generate_blocks_information(
            configuration,
            &babe_epoch,
            initial_slot,
        );
        gum::info!(target: LOG_TARGET, "Generate messages");
        let topology = generate_topology(test_authorities);
        let random_samplings = random_samplings_to_node(
            ValidatorIndex(NODE_UNDER_TEST),
            test_authorities.validator_public.len(),
            test_authorities.validator_public.len() * 2,
        );
        let topology_node_under_test =
            topology.compute_grid_neighbors_for(ValidatorIndex(NODE_UNDER_TEST)).unwrap();
        let (tx, mut rx) = futures::channel::mpsc::unbounded();
        // Spawn a thread to generate the messages for each validator, so that we speed up the
        // generation.
        // NOTE(review): the loop starts at 1, presumably skipping the node under
        // test — confirm NODE_UNDER_TEST == 0.
        for current_validator_index in 1..test_authorities.validator_public.len() {
            let peer_message_source = PeerMessagesGenerator {
                topology_node_under_test: topology_node_under_test.clone(),
                topology: topology.clone(),
                validator_index: ValidatorIndex(current_validator_index as u32),
                test_authorities: test_authorities.clone(),
                session_info: session_info.clone(),
                blocks: blocks.clone(),
                tx_messages: tx.clone(),
                random_samplings: random_samplings.clone(),
                options: options.clone(),
            };
            peer_message_source.generate_messages(spawn_task_handle);
        }
        // Drop the original sender so that `rx` terminates once all generator tasks finish.
        std::mem::drop(tx);
        let seed = [0x32; 32];
        let mut rand_chacha = ChaCha20Rng::from_seed(seed);
        let mut all_messages: BTreeMap<u64, Vec<MessagesBundle>> = BTreeMap::new();
        // Receive all messages and sort them by Tick they have to be sent.
        loop {
            match rx.try_next() {
                Ok(Some((block_hash, messages))) =>
                    for message in messages {
                        let block_info = blocks
                            .iter()
                            .find(|val| val.hash == block_hash)
                            .expect("Should find blocks");
                        let tick_to_send = tranche_to_tick(
                            SLOT_DURATION_MILLIS,
                            block_info.slot,
                            message.tranche_to_send(),
                        );
                        let to_add = all_messages.entry(tick_to_send).or_default();
                        to_add.push(message);
                    },
                Ok(None) => break,
                Err(_) => {
                    // Channel is empty but senders are still alive; poll again shortly.
                    std::thread::sleep(Duration::from_millis(50));
                },
            }
        }
        let all_messages = all_messages
            .into_iter()
            .flat_map(|(_, mut messages)| {
                // Shuffle the messages inside the same tick, so that we don't prioritize messages
                // for older nodes. We try to simulate the same behaviour as in the real world.
                messages.shuffle(&mut rand_chacha);
                messages
            })
            .collect_vec();
        gum::info!("Generated a number of {:} unique messages", all_messages.len());
        let generated_state = GeneratedState { all_messages: Some(all_messages), initial_slot };
        let mut messages_file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(path)
            .unwrap();
        messages_file
            .write_all(&generated_state.encode())
            .expect("Could not update message file");
        path.to_path_buf()
    }

    /// Generates assignments for the given `current_validator_index`
    /// Returns a list of assignments to be sent sorted by tranche.
    fn generate_assignments(&self, block_info: &BlockTestData) -> Vec<TestMessageInfo> {
        let config = Config::from(&self.session_info);
        let leaving_cores = block_info
            .candidates
            .clone()
            .into_iter()
            .map(|candidate_event| {
                if let CandidateEvent::CandidateIncluded(candidate, _, core_index, group_index) =
                    candidate_event
                {
                    (candidate.hash(), core_index, group_index)
                } else {
                    todo!("Variant is never created in this benchmark")
                }
            })
            .collect_vec();
        let mut assignments_by_tranche = BTreeMap::new();
        // Deterministic per-validator RNG seed (validator index in the first four bytes).
        let bytes = self.validator_index.0.to_be_bytes();
        let seed = [
            bytes[0], bytes[1], bytes[2], bytes[3], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        ];
        let mut rand_chacha = ChaCha20Rng::from_seed(seed);
        let to_be_sent_by = neighbours_that_would_sent_message(
            &self.test_authorities.peer_ids,
            self.validator_index.0,
            &self.topology_node_under_test,
            &self.topology,
        );
        // NOTE(review): this drops cores whose index equals this validator's
        // index — presumably to skip self-assigned cores; confirm.
        let leaving_cores = leaving_cores
            .clone()
            .into_iter()
            .filter(|(_, core_index, _group_index)| core_index.0 != self.validator_index.0)
            .collect_vec();
        let store = LocalKeystore::in_memory();
        let _public = store
            .sr25519_generate_new(
                ASSIGNMENT_KEY_TYPE_ID,
                Some(self.test_authorities.key_seeds[self.validator_index.0 as usize].as_str()),
            )
            .expect("should not fail");
        let assignments = compute_assignments(
            &store,
            block_info.relay_vrf_story.clone(),
            &config,
            leaving_cores.clone(),
            self.options.enable_assignments_v2,
        );
        // Pick one of the pre-generated random samplings to decide which extra
        // peers forward this assignment to the node under test.
        let random_sending_nodes = self
            .random_samplings
            .get(rand_chacha.next_u32() as usize % self.random_samplings.len())
            .unwrap();
        let random_sending_peer_ids = random_sending_nodes
            .iter()
            .map(|validator| (*validator, self.test_authorities.peer_ids[validator.0 as usize]))
            .collect_vec();
        let mut unique_assignments = HashSet::new();
        for (core_index, assignment) in assignments {
            // Expand the certificate into the concrete list of cores it covers.
            let assigned_cores = match &assignment.cert().kind {
                approval::v2::AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield } =>
                    core_bitfield.iter_ones().map(|val| CoreIndex::from(val as u32)).collect_vec(),
                approval::v2::AssignmentCertKindV2::RelayVRFDelay { core_index } => {
                    vec![*core_index]
                },
                approval::v2::AssignmentCertKindV2::RelayVRFModulo { sample: _ } => {
                    vec![core_index]
                },
            };
            let bitfiled: CoreBitfield = assigned_cores.clone().try_into().unwrap();
            // For the cases where tranche0 assignments are in a single certificate we need to make
            // sure we create a single message.
            if unique_assignments.insert(bitfiled) {
                let this_tranche_assignments =
                    assignments_by_tranche.entry(assignment.tranche()).or_insert_with(Vec::new);
                this_tranche_assignments.push((
                    IndirectAssignmentCertV2 {
                        block_hash: block_info.hash,
                        validator: self.validator_index,
                        cert: assignment.cert().clone(),
                    },
                    block_info
                        .candidates
                        .iter()
                        .enumerate()
                        .filter(|(_index, candidate)| {
                            if let CandidateEvent::CandidateIncluded(_, _, core, _) = candidate {
                                assigned_cores.contains(core)
                            } else {
                                panic!("Should not happen");
                            }
                        })
                        .map(|(index, _)| index as u32)
                        .collect_vec()
                        .try_into()
                        .unwrap(),
                    to_be_sent_by
                        .iter()
                        .chain(random_sending_peer_ids.iter())
                        .copied()
                        .collect::<HashSet<(ValidatorIndex, PeerId)>>(),
                    assignment.tranche(),
                ));
            }
        }
        // Emit the assignments sorted by tranche, one network message per certificate.
        assignments_by_tranche
            .into_values()
            .flat_map(|assignments| assignments.into_iter())
            .map(|assignment| {
                let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![(
                    assignment.0,
                    assignment.1,
                )]);
                TestMessageInfo {
                    msg,
                    sent_by: assignment
                        .2
                        .into_iter()
                        .map(|(validator_index, _)| validator_index)
                        .collect_vec(),
                    tranche: assignment.3,
                    block_hash: block_info.hash,
                }
            })
            .collect_vec()
    }
}
/// A list of random samplings that we use to determine which nodes should send a given message to
/// the node under test.
/// We can not sample every time for all the messages because that would be too expensive to
/// perform, so pre-generate a list of samples for a given network size.
/// - result[i] gives us a list of random nodes that would send a given message to the node under
///   test.
fn random_samplings_to_node(
    node_under_test: ValidatorIndex,
    num_validators: usize,
    num_samplings: usize,
) -> Vec<Vec<ValidatorIndex>> {
    // Fixed seed so the samplings (and hence the whole benchmark) are reproducible.
    let seed = [7u8; 32];
    let mut rand_chacha = ChaCha20Rng::from_seed(seed);
    (0..num_samplings)
        .map(|_| {
            (0..num_validators)
                // NOTE(review): this filter uses the global NODE_UNDER_TEST constant,
                // while the comparison below uses the `node_under_test` parameter —
                // these only agree when the caller passes
                // ValidatorIndex(NODE_UNDER_TEST); confirm intent.
                .filter(|sending_validator_index| {
                    *sending_validator_index != NODE_UNDER_TEST as usize
                })
                .flat_map(|sending_validator_index| {
                    let mut validators = (0..num_validators).collect_vec();
                    validators.shuffle(&mut rand_chacha);
                    let mut random_routing = RandomRouting::default();
                    validators
                        .into_iter()
                        .flat_map(|validator_to_send| {
                            // Record `sending_validator_index` as a sender only when
                            // the random routing would have picked the node under test.
                            if random_routing.sample(num_validators, &mut rand_chacha) {
                                random_routing.inc_sent();
                                if validator_to_send == node_under_test.0 as usize {
                                    Some(ValidatorIndex(sending_validator_index as u32))
                                } else {
                                    None
                                }
                            } else {
                                None
                            }
                        })
                        .collect_vec()
                })
                .collect_vec()
        })
        .collect_vec()
}
/// Helper function to randomly determine how many approvals we coalesce together in a single
/// message.
fn coalesce_approvals_len(
    coalesce_mean: f32,
    coalesce_std_dev: f32,
    rand_chacha: &mut ChaCha20Rng,
) -> usize {
    let distribution = Normal::new(coalesce_mean, coalesce_std_dev)
        .expect("normal distribution parameters are good");
    let sampled = distribution.sample(rand_chacha).round() as i32;
    // Always coalesce at least one approval, even when the sample is <= 0.
    max(1, sampled) as usize
}
/// Helper function to create approvals signatures for all assignments passed as arguments.
/// Returns a list of Approvals messages that need to be sent.
fn issue_approvals(
    assignments: Vec<TestMessageInfo>,
    block_hash: Hash,
    validator_ids: &[ValidatorId],
    candidates: Vec<CandidateEvent>,
    options: &ApprovalsOptions,
    rand_chacha: &mut ChaCha20Rng,
    store: &LocalKeystore,
) -> Vec<MessagesBundle> {
    // Candidates queued up to be signed together in a single coalesced approval.
    let mut queued_to_sign: Vec<TestSignInfo> = Vec::new();
    // Randomly drawn number of approvals to coalesce into the next message.
    let mut num_coalesce =
        coalesce_approvals_len(options.coalesce_mean, options.coalesce_std_dev, rand_chacha);
    let result = assignments
        .iter()
        .map(|message| match &message.msg {
            protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => {
                let mut approvals_to_create = Vec::new();
                // Sentinel ValidatorIndex(99999) means "nothing queued yet".
                let current_validator_index = queued_to_sign
                    .first()
                    .map(|msg| msg.validator_index)
                    .unwrap_or(ValidatorIndex(99999));
                // Invariant for this benchmark.
                assert_eq!(assignments.len(), 1);
                let assignment = assignments.first().unwrap();
                let earliest_tranche = queued_to_sign
                    .first()
                    .map(|val| val.assignment.tranche)
                    .unwrap_or(message.tranche);
                // Flush the queue into a signed approval bundle when it is full,
                // when the sending validator changes, or when the tranche spread
                // exceeds the configured coalescing window.
                if queued_to_sign.len() >= num_coalesce ||
                    (!queued_to_sign.is_empty() &&
                        current_validator_index != assignment.0.validator) ||
                    message.tranche - earliest_tranche >= options.coalesce_tranche_diff
                {
                    approvals_to_create.push(TestSignInfo::sign_candidates(
                        &mut queued_to_sign,
                        validator_ids,
                        block_hash,
                        num_coalesce,
                        store,
                    ));
                    num_coalesce = coalesce_approvals_len(
                        options.coalesce_mean,
                        options.coalesce_std_dev,
                        rand_chacha,
                    );
                }
                // If more than one candidate is covered by the assignment, queue
                // all of them for issuing approvals.
                for candidate_index in assignment.1.iter_ones() {
                    let candidate = candidates.get(candidate_index).unwrap();
                    if let CandidateEvent::CandidateIncluded(candidate, _, _, _) = candidate {
                        queued_to_sign.push(TestSignInfo {
                            candidate_hash: candidate.hash(),
                            candidate_index: candidate_index as CandidateIndex,
                            validator_index: assignment.0.validator,
                            assignment: message.clone(),
                        });
                    } else {
                        todo!("Other enum variants are not used in this benchmark");
                    }
                }
                approvals_to_create
            },
            _ => {
                todo!("Other enum variants are not used in this benchmark");
            },
        })
        .collect_vec();
    let mut messages = result.into_iter().flatten().collect_vec();
    // Flush any remaining queued candidates into a final approval bundle.
    if !queued_to_sign.is_empty() {
        messages.push(TestSignInfo::sign_candidates(
            &mut queued_to_sign,
            validator_ids,
            block_hash,
            num_coalesce,
            store,
        ));
    }
    messages
}
/// Helper struct to gather information about more than one candidate and sign it in a single
/// approval message.
struct TestSignInfo {
    /// The candidate hash
    candidate_hash: CandidateHash,
    /// The candidate index
    candidate_index: CandidateIndex,
    /// The validator sending the assignments
    validator_index: ValidatorIndex,
    /// The assignments covering this candidate
    assignment: TestMessageInfo,
}
impl TestSignInfo {
	/// Helper function to create a signature for all candidates in `to_sign` parameter.
	///
	/// Drains `to_sign` and produces a `MessagesBundle` containing the deduplicated
	/// assignments plus one coalesced approval message per chunk of up to `num_coalesce`
	/// candidates, each signed with the key of the validator that owns the queue.
	///
	/// All entries in `to_sign` are expected to belong to the same validator (the first
	/// entry's `validator_index` is used for every signature) — panics if `to_sign` is empty.
	fn sign_candidates(
		to_sign: &mut Vec<TestSignInfo>,
		validator_ids: &[ValidatorId],
		block_hash: Hash,
		num_coalesce: usize,
		store: &LocalKeystore,
	) -> MessagesBundle {
		// All queued entries share the same validator; see the queueing logic in the caller.
		let current_validator_index = to_sign.first().map(|val| val.validator_index).unwrap();
		// Approvals can only be sent once the latest covered assignment has been sent.
		let tranche_approval_can_be_sent =
			to_sign.iter().map(|val| val.assignment.tranche).max().unwrap();
		let validator_id = validator_ids.get(current_validator_index.0 as usize).unwrap().clone();
		// Several queued candidates may stem from the same assignment message; the custom
		// `Hash` impl on `TestMessageInfo` lets the set deduplicate them.
		let unique_assignments: HashSet<TestMessageInfo> =
			to_sign.iter().map(|info| info.assignment.clone()).collect();
		// Sort by candidate index so each coalesced chunk covers a stable, ordered set.
		let mut to_sign = to_sign
			.drain(..)
			.sorted_by(|val1, val2| val1.candidate_index.cmp(&val2.candidate_index))
			.peekable();
		let mut bundle = MessagesBundle {
			assignments: unique_assignments.into_iter().collect_vec(),
			approvals: Vec::new(),
		};
		// Emit one signed approval message per chunk of at most `num_coalesce` candidates.
		while to_sign.peek().is_some() {
			let to_sign = to_sign.by_ref().take(num_coalesce).collect_vec();
			let hashes = to_sign.iter().map(|val| val.candidate_hash).collect_vec();
			let candidate_indices = to_sign.iter().map(|val| val.candidate_index).collect_vec();
			// The approval should be routed to every peer that routed any covered assignment.
			let sent_by = to_sign
				.iter()
				.flat_map(|val| val.assignment.sent_by.iter())
				.copied()
				.collect::<HashSet<ValidatorIndex>>();
			let payload = ApprovalVoteMultipleCandidates(&hashes).signing_payload(1);
			let signature = store
				.sr25519_sign(ValidatorId::ID, &validator_id.clone().into(), &payload[..])
				.unwrap()
				.unwrap()
				.into();
			let indirect = IndirectSignedApprovalVoteV2 {
				block_hash,
				candidate_indices: candidate_indices.try_into().unwrap(),
				validator: current_validator_index,
				signature,
			};
			let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![indirect]);
			bundle.approvals.push(TestMessageInfo {
				msg,
				sent_by: sent_by.into_iter().collect_vec(),
				tranche: tranche_approval_can_be_sent,
				block_hash,
			});
		}
		bundle
	}
}
/// Determine which neighbours would send a given message to the node under test.
///
/// Computes the originator's grid neighbours and picks the X and Y axis neighbours
/// that route towards the node under test; if the originator is itself a direct grid
/// neighbour of the node under test, it also sends the message directly.
fn neighbours_that_would_sent_message(
	peer_ids: &[PeerId],
	current_validator_index: u32,
	topology_node_under_test: &GridNeighbors,
	topology: &SessionGridTopology,
) -> Vec<(ValidatorIndex, PeerId)> {
	let originator = ValidatorIndex(current_validator_index);
	let node_under_test = ValidatorIndex(NODE_UNDER_TEST);
	let originator_neighbours = topology.compute_grid_neighbors_for(originator).unwrap();

	// The Y-axis neighbour of the originator that routes on GridY towards the node under test.
	let via_y = originator_neighbours.validator_indices_y.iter().find(|validator| {
		topology_node_under_test.required_routing_by_index(**validator, false) ==
			RequiredRouting::GridY
	});
	assert!(via_y != Some(&node_under_test));
	// Likewise for the X axis.
	let via_x = originator_neighbours.validator_indices_x.iter().find(|validator| {
		topology_node_under_test.required_routing_by_index(**validator, false) ==
			RequiredRouting::GridX
	});
	assert!(via_x != Some(&node_under_test));

	let mut senders = via_y
		.into_iter()
		.chain(via_x)
		.map(|validator| (*validator, peer_ids[validator.0 as usize]))
		.collect_vec();

	// Direct grid neighbours of the node under test send their own messages as well.
	let originator_is_neighbour = originator_neighbours
		.validator_indices_x
		.contains(&node_under_test) ||
		originator_neighbours.validator_indices_y.contains(&node_under_test);
	if originator_is_neighbour {
		senders.push((originator, peer_ids[0]));
	}
	senders
}
@@ -0,0 +1,64 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::approval::{ApprovalTestState, PastSystemClock, LOG_TARGET, SLOT_DURATION_MILLIS};
use futures::FutureExt;
use pezkuwi_node_primitives::approval::time::{slot_number_to_tick, Clock, TICK_DURATION_MILLIS};
use pezkuwi_node_subsystem::{overseer, SpawnedSubsystem, SubsystemError};
use pezkuwi_node_subsystem_types::messages::ChainSelectionMessage;
/// Mock ChainSelection subsystem used to answer requests made by the approval-voting subsystem
/// during benchmark. All the necessary information to answer the requests is stored in the `state`.
pub struct MockChainSelection {
	/// Shared benchmark state with the per-block bookkeeping this mock updates.
	pub state: ApprovalTestState,
	/// Clock used to compute how long after the block's slot approval happened.
	pub clock: PastSystemClock,
}
#[overseer::subsystem(ChainSelection, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockChainSelection {
	/// Spawn the mock subsystem's main loop, mapping its (infallible) future into the
	/// `Result` shape the overseer expects.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "mock-chain-subsystem", future }
	}
}
#[overseer::contextbounds(ChainSelection, prefix = self::overseer)]
impl MockChainSelection {
	/// Main loop: answer `ChainSelectionMessage::Approved` by flagging the block as approved
	/// in the shared test state and logging how long approval took relative to the block's slot.
	/// All overseer signals are ignored; the loop only ends when `recv` fails (shutdown).
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Should not fail");
			match msg {
				// Signals (e.g. leaf activations) are irrelevant for this mock.
				orchestra::FromOrchestra::Signal(_) => {},
				orchestra::FromOrchestra::Communication { msg } =>
					if let ChainSelectionMessage::Approved(hash) = msg {
						let block_info = self.state.get_info_by_hash(hash);
						let approved_number = block_info.block_number;
						// Record approval so the benchmark driver can observe progress.
						block_info.approved.store(true, std::sync::atomic::Ordering::SeqCst);
						self.state
							.last_approved_block
							.store(approved_number, std::sync::atomic::Ordering::SeqCst);
						// Ticks elapsed since the block's slot started, converted to ms below.
						let approved_in_tick = self.clock.tick_now() -
							slot_number_to_tick(SLOT_DURATION_MILLIS, block_info.slot);
						gum::info!(target: LOG_TARGET, ?hash, "Chain selection approved after {:} ms", approved_in_tick * TICK_DURATION_MILLIS);
					},
			}
		}
	}
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,308 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
approval::{ApprovalsOptions, BlockTestData, CandidateTestData},
configuration::TestAuthorities,
};
use codec::{Decode, Encode};
use itertools::Itertools;
use pezkuwi_node_network_protocol::v3 as protocol_v3;
use pezkuwi_primitives::{CandidateIndex, Hash, ValidatorIndex};
use sc_network_types::PeerId;
use std::collections::{HashMap, HashSet};
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub struct TestMessageInfo {
	/// The actual message
	pub msg: protocol_v3::ApprovalDistributionMessage,
	/// The list of peers that would send this message in a real topology.
	/// It includes both the peers that would send the message because of the topology
	/// or because of randomly choosing so.
	pub sent_by: Vec<ValidatorIndex>,
	/// The tranche at which this message should be sent.
	pub tranche: u32,
	/// The block hash this message refers to.
	pub block_hash: Hash,
}
// Manual `Hash` implementation: only the identifying fields of the wire message
// (block hash, validator and covered candidates) feed the hash — the `sent_by`,
// `tranche` and `block_hash` wrapper fields are deliberately ignored, so messages
// with the same payload collapse when collected into a `HashSet` (e.g. for
// deduplicating assignments). This is coarser than the derived `Eq`, which is
// allowed by the `Hash`/`Eq` contract.
impl std::hash::Hash for TestMessageInfo {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		match &self.msg {
			protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => {
				for (assignment, candidates) in assignments {
					(assignment.block_hash, assignment.validator).hash(state);
					candidates.hash(state);
				}
			},
			protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => {
				for approval in approvals {
					(approval.block_hash, approval.validator).hash(state);
					approval.candidate_indices.hash(state);
				}
			},
		};
	}
}
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
/// A list of messages that depend on each other: approvals cover one of the assignments and
/// vice-versa.
pub struct MessagesBundle {
	/// The assignments in the bundle.
	pub assignments: Vec<TestMessageInfo>,
	/// The approvals covering the assignments above.
	pub approvals: Vec<TestMessageInfo>,
}
impl MessagesBundle {
	/// The tranche when this bundle can be sent correctly, so no assignments or approvals will be
	/// from the future.
	///
	/// Panics if the bundle is empty; a bundle always contains at least one assignment.
	pub fn tranche_to_send(&self) -> u32 {
		self.assignments
			.iter()
			.chain(self.approvals.iter())
			.map(|message| message.tranche)
			.max()
			.expect("Bundles always contain at least one message; qed")
	}
	/// The min tranche in the bundle.
	///
	/// Panics if the bundle is empty; a bundle always contains at least one assignment.
	pub fn min_tranche(&self) -> u32 {
		self.assignments
			.iter()
			.chain(self.approvals.iter())
			.map(|message| message.tranche)
			.min()
			.expect("Bundles always contain at least one message; qed")
	}
	/// Tells if the bundle is needed for sending.
	/// We either send it because we need more assignments and approvals to approve the candidates
	/// or because we configured the test to send messages until a given tranche.
	pub fn should_send(
		&self,
		candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>,
		options: &ApprovalsOptions,
	) -> bool {
		self.needed_for_approval(candidates_test_data) ||
			(!options.stop_when_approved &&
				self.min_tranche() <= options.last_considered_tranche)
	}
	/// Tells if the bundle is needed because we need more messages to approve the candidates.
	pub fn needed_for_approval(
		&self,
		candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>,
	) -> bool {
		self.assignments
			.iter()
			.any(|message| message.needed_for_approval(candidates_test_data))
	}
	/// Mark the assignments in the bundle as sent.
	pub fn record_sent_assignment(
		&self,
		candidates_test_data: &mut HashMap<(Hash, CandidateIndex), CandidateTestData>,
	) {
		self.assignments
			.iter()
			.for_each(|assignment| assignment.record_sent_assignment(candidates_test_data));
	}
}
impl TestMessageInfo {
	/// Tells if the message is an approval.
	fn is_approval(&self) -> bool {
		match self.msg {
			protocol_v3::ApprovalDistributionMessage::Assignments(_) => false,
			protocol_v3::ApprovalDistributionMessage::Approvals(_) => true,
		}
	}
	/// Records an approval.
	/// We use this to check after all messages have been processed that we didn't lose any
	/// message.
	pub fn record_vote(&self, state: &BlockTestData) {
		if self.is_approval() {
			match &self.msg {
				// Unreachable: guarded by `is_approval` above.
				protocol_v3::ApprovalDistributionMessage::Assignments(_) => todo!(),
				protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => {
					for approval in approvals {
						// Flip the per-(validator, candidate) vote flag in the shared state.
						for candidate_index in approval.candidate_indices.iter_ones() {
							state
								.votes
								.get(approval.validator.0 as usize)
								.unwrap()
								.get(candidate_index)
								.unwrap()
								.store(true, std::sync::atomic::Ordering::SeqCst);
						}
					}
				},
			}
		}
	}
	/// Mark the assignments in the message as sent.
	///
	/// Panics if called on an approval message.
	pub fn record_sent_assignment(
		&self,
		candidates_test_data: &mut HashMap<(Hash, CandidateIndex), CandidateTestData>,
	) {
		match &self.msg {
			protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => {
				for (assignment, candidate_indices) in assignments {
					for candidate_index in candidate_indices.iter_ones() {
						let candidate_test_data = candidates_test_data
							.get_mut(&(assignment.block_hash, candidate_index as CandidateIndex))
							.unwrap();
						candidate_test_data.mark_sent_assignment(self.tranche)
					}
				}
			},
			// Only assignment messages are expected here.
			protocol_v3::ApprovalDistributionMessage::Approvals(_approvals) => todo!(),
		}
	}
	/// Returns a list of candidates indices in this message
	pub fn candidate_indices(&self) -> HashSet<usize> {
		let mut unique_candidate_indices = HashSet::new();
		match &self.msg {
			protocol_v3::ApprovalDistributionMessage::Assignments(assignments) => {
				for (_assignment, candidate_indices) in assignments {
					for candidate_index in candidate_indices.iter_ones() {
						unique_candidate_indices.insert(candidate_index);
					}
				}
			},
			protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => {
				for approval in approvals {
					for candidate_index in approval.candidate_indices.iter_ones() {
						unique_candidate_indices.insert(candidate_index);
					}
				}
			},
		}
		unique_candidate_indices
	}
	/// Marks this message as no-shows if the number of configured no-shows is above the registered
	/// no-shows.
	/// Returns true if the message is a no-show.
	///
	/// `assignments` are the assignment messages whose covered candidates are used to look up
	/// the tranche each approval would have been triggered from.
	pub fn no_show_if_required(
		&self,
		assignments: &[TestMessageInfo],
		candidates_test_data: &mut HashMap<(Hash, CandidateIndex), CandidateTestData>,
	) -> bool {
		let mut should_no_show = false;
		if self.is_approval() {
			// Pre-compute the candidate sets covered by each assignment, so we can map an
			// approved candidate back to the assignment (and tranche) that covers it.
			let covered_candidates = assignments
				.iter()
				.map(|assignment| (assignment, assignment.candidate_indices()))
				.collect_vec();
			match &self.msg {
				// Unreachable: guarded by `is_approval` above.
				protocol_v3::ApprovalDistributionMessage::Assignments(_) => todo!(),
				protocol_v3::ApprovalDistributionMessage::Approvals(approvals) => {
					assert_eq!(approvals.len(), 1);
					for approval in approvals {
						// The approval may only no-show if *all* covered candidates still
						// want a no-show at their assignment's tranche.
						should_no_show = should_no_show ||
							approval.candidate_indices.iter_ones().all(|candidate_index| {
								let candidate_test_data = candidates_test_data
									.get_mut(&(
										approval.block_hash,
										candidate_index as CandidateIndex,
									))
									.unwrap();
								let assignment = covered_candidates
									.iter()
									.find(|(_assignment, candidates)| {
										candidates.contains(&candidate_index)
									})
									.unwrap();
								candidate_test_data.should_no_show(assignment.0.tranche)
							});
						// Second pass: actually record the no-show for every covered candidate.
						if should_no_show {
							for candidate_index in approval.candidate_indices.iter_ones() {
								let candidate_test_data = candidates_test_data
									.get_mut(&(
										approval.block_hash,
										candidate_index as CandidateIndex,
									))
									.unwrap();
								let assignment = covered_candidates
									.iter()
									.find(|(_assignment, candidates)| {
										candidates.contains(&candidate_index)
									})
									.unwrap();
								candidate_test_data.record_no_show(assignment.0.tranche)
							}
						}
					}
				},
			}
		}
		should_no_show
	}
	/// Tells if a message is needed for approval
	pub fn needed_for_approval(
		&self,
		candidates_test_data: &HashMap<(Hash, CandidateIndex), CandidateTestData>,
	) -> bool {
		match &self.msg {
			protocol_v3::ApprovalDistributionMessage::Assignments(assignments) =>
				assignments.iter().any(|(assignment, candidate_indices)| {
					candidate_indices.iter_ones().any(|candidate_index| {
						candidates_test_data
							.get(&(assignment.block_hash, candidate_index as CandidateIndex))
							.map(|data| data.should_send_tranche(self.tranche))
							.unwrap_or_default()
					})
				}),
			protocol_v3::ApprovalDistributionMessage::Approvals(approvals) =>
				approvals.iter().any(|approval| {
					approval.candidate_indices.iter_ones().any(|candidate_index| {
						candidates_test_data
							.get(&(approval.block_hash, candidate_index as CandidateIndex))
							.map(|data| data.should_send_tranche(self.tranche))
							.unwrap_or_default()
					})
				}),
		}
	}
	/// Splits a message into multiple messages based on what peers should send this message.
	/// It builds a HashMap of messages that should be sent by each peer.
	pub fn split_by_peer_id(
		self,
		authorities: &TestAuthorities,
	) -> HashMap<(ValidatorIndex, PeerId), Vec<TestMessageInfo>> {
		let mut result: HashMap<(ValidatorIndex, PeerId), Vec<TestMessageInfo>> = HashMap::new();
		for validator_index in &self.sent_by {
			let peer = authorities.peer_ids.get(validator_index.0 as usize).unwrap();
			// Each per-peer copy drops `sent_by`, since the sender is now fixed.
			result.entry((*validator_index, *peer)).or_default().push(TestMessageInfo {
				msg: self.msg.clone(),
				sent_by: Default::default(),
				tranche: self.tranche,
				block_hash: self.block_hash,
			});
		}
		result
	}
}
@@ -0,0 +1,42 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{environment::TestEnvironmentDependencies, mock::TestSyncOracle};
use pezkuwi_node_core_av_store::{AvailabilityStoreSubsystem, Config};
use pezkuwi_node_metrics::metrics::Metrics;
use pezkuwi_node_subsystem_util::database::Database;
use std::sync::Arc;
/// Database column layout for the benchmark availability store.
mod columns {
	// Column holding the availability data itself.
	pub const DATA: u32 = 0;
	// Column holding candidate metadata.
	pub const META: u32 = 1;
	// Total number of columns the database is created with.
	pub const NUM_COLUMNS: u32 = 2;
}
/// Av-store config pointing at the columns above; finalized data is kept for a single
/// block, which is sufficient for benchmarking.
const TEST_CONFIG: Config =
	Config { col_data: columns::DATA, col_meta: columns::META, keep_finalized_for: 1 };
/// Construct a real availability-store subsystem backed by an in-memory database,
/// registering its metrics with the test environment's Prometheus registry.
pub fn new_av_store(dependencies: &TestEnvironmentDependencies) -> AvailabilityStoreSubsystem {
	AvailabilityStoreSubsystem::new(
		test_store(),
		TEST_CONFIG,
		Box::new(TestSyncOracle {}),
		Metrics::try_register(&dependencies.registry).unwrap(),
	)
}
/// Create the in-memory key-value database used by the benchmark availability store.
fn test_store() -> Arc<dyn Database> {
	let mem_db = kvdb_memorydb::create(columns::NUM_COLUMNS);
	Arc::new(pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(
		mem_db,
		&[columns::META],
	))
}
@@ -0,0 +1,508 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
availability::av_store_helpers::new_av_store,
dummy_builder,
environment::{TestEnvironment, TestEnvironmentDependencies},
mock::{
av_store::{MockAvailabilityStore, NetworkAvailabilityState},
chain_api::{ChainApiState, MockChainApi},
network_bridge::{self, MockNetworkBridgeRx, MockNetworkBridgeTx},
runtime_api::{default_node_features, MockRuntimeApi, MockRuntimeApiCoreState},
AlwaysSupportsTeyrchains,
},
network::new_network,
usage::BenchmarkUsage,
};
use colored::Colorize;
use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt};
use codec::Encode;
use pezkuwi_availability_bitfield_distribution::BitfieldDistribution;
use pezkuwi_availability_distribution::{
AvailabilityDistributionSubsystem, IncomingRequestReceivers,
};
use pezkuwi_availability_recovery::{AvailabilityRecoverySubsystem, RecoveryStrategyKind};
use pezkuwi_node_core_av_store::AvailabilityStoreSubsystem;
use pezkuwi_node_metrics::metrics::Metrics;
use pezkuwi_node_network_protocol::{
request_response::{v1, v2, IncomingRequest},
OurView,
};
use pezkuwi_node_subsystem::{
messages::{AllMessages, AvailabilityRecoveryMessage},
Overseer, OverseerConnector, SpawnGlue,
};
use pezkuwi_node_subsystem_types::messages::{AvailabilityStoreMessage, NetworkBridgeEvent};
use pezkuwi_overseer::{metrics::Metrics as OverseerMetrics, Handle as OverseerHandle};
use pezkuwi_primitives::{Block, CoreIndex, GroupIndex, Hash};
use sc_network::request_responses::{IncomingRequest as RawIncomingRequest, ProtocolConfig};
use std::{ops::Sub, sync::Arc, time::Instant};
use strum::Display;
use sc_service::SpawnTaskHandle;
use serde::{Deserialize, Serialize};
pub use test_state::TestState;
mod av_store_helpers;
mod test_state;
/// Logging target for the availability benchmarks.
const LOG_TARGET: &str = "subsystem-bench::availability";
#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq, Serialize, Deserialize, Display)]
#[value(rename_all = "kebab-case")]
#[strum(serialize_all = "kebab-case")]
pub enum Strategy {
	/// Regular random chunk recovery. This is also the fallback for the next strategies.
	Chunks,
	/// Recovery from systematic chunks. Much faster than regular chunk recovery because it avoids
	/// doing the reed-solomon reconstruction.
	Systematic,
	/// Fetch the full availability data from backers first. Saves CPU as we don't need to
	/// re-construct from chunks. Typically this is only faster if nodes have enough bandwidth.
	FullFromBackers,
}
/// CLI options for the data-availability-read benchmark.
#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)]
#[clap(rename_all = "kebab-case")]
#[allow(missing_docs)]
pub struct DataAvailabilityReadOptions {
	// Which availability-recovery strategy to benchmark; systematic recovery by default.
	#[clap(short, long, default_value_t = Strategy::Systematic)]
	pub strategy: Strategy,
}
/// Selects which availability benchmark scenario to run.
pub enum TestDataAvailability {
	/// Benchmark the data read path (availability recovery).
	Read(DataAvailabilityReadOptions),
	/// Benchmark the data write path (availability distribution and bitfields).
	Write,
}
/// Build an overseer where every subsystem is mocked except availability-recovery,
/// the subsystem under test for the read benchmark.
fn build_overseer_for_availability_read(
	spawn_task_handle: SpawnTaskHandle,
	runtime_api: MockRuntimeApi,
	av_store: MockAvailabilityStore,
	(network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx),
	availability_recovery: AvailabilityRecoverySubsystem,
	dependencies: &TestEnvironmentDependencies,
) -> (Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsTeyrchains>, OverseerHandle) {
	let metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap();
	let connector = OverseerConnector::with_event_capacity(64000);
	let builder = dummy_builder!(spawn_task_handle, metrics)
		.replace_runtime_api(|_| runtime_api)
		.replace_availability_store(|_| av_store)
		.replace_network_bridge_tx(|_| network_bridge_tx)
		.replace_network_bridge_rx(|_| network_bridge_rx)
		.replace_availability_recovery(|_| availability_recovery);
	let (overseer, raw_handle) =
		builder.build_with_connector(connector).expect("Should not fail");
	(overseer, OverseerHandle::new(raw_handle))
}
/// Build an overseer running the real availability-store, availability-distribution and
/// bitfield-distribution subsystems (the ones under test for the write benchmark), with
/// everything else mocked out.
#[allow(clippy::too_many_arguments)]
fn build_overseer_for_availability_write(
	spawn_task_handle: SpawnTaskHandle,
	runtime_api: MockRuntimeApi,
	(network_bridge_tx, network_bridge_rx): (MockNetworkBridgeTx, MockNetworkBridgeRx),
	availability_distribution: AvailabilityDistributionSubsystem,
	chain_api: MockChainApi,
	availability_store: AvailabilityStoreSubsystem,
	bitfield_distribution: BitfieldDistribution,
	dependencies: &TestEnvironmentDependencies,
) -> (Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsTeyrchains>, OverseerHandle) {
	let metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap();
	let connector = OverseerConnector::with_event_capacity(64000);
	let builder = dummy_builder!(spawn_task_handle, metrics)
		.replace_runtime_api(|_| runtime_api)
		.replace_availability_store(|_| availability_store)
		.replace_network_bridge_tx(|_| network_bridge_tx)
		.replace_network_bridge_rx(|_| network_bridge_rx)
		.replace_chain_api(|_| chain_api)
		.replace_bitfield_distribution(|_| bitfield_distribution)
		// This is needed to test own chunk recovery for `n_cores`.
		.replace_availability_distribution(|_| availability_distribution);
	let (overseer, raw_handle) =
		builder.build_with_connector(connector).expect("Should not fail");
	(overseer, OverseerHandle::new(raw_handle))
}
/// Assemble the full test environment (emulated network, mocks and overseer) for an
/// availability benchmark run.
///
/// Returns the ready `TestEnvironment` together with the request-response protocol
/// configs whose receivers were handed to the subsystems under test.
pub fn prepare_test(
	state: &TestState,
	mode: TestDataAvailability,
	with_prometheus_endpoint: bool,
) -> (TestEnvironment, Vec<ProtocolConfig>) {
	let dependencies = TestEnvironmentDependencies::default();
	// Shared view of the pre-generated candidates/chunks served by the emulated network.
	let availability_state = NetworkAvailabilityState {
		candidate_hashes: state.candidate_hashes.clone(),
		candidate_hash_to_core_index: state.candidate_hash_to_core_index.clone(),
		available_data: state.available_data.clone(),
		chunks: state.chunks.clone(),
		chunk_indices: state.chunk_indices.clone(),
		req_protocol_names: state.req_protocol_names.clone(),
	};
	let mut req_cfgs = Vec::new();
	let (collation_req_receiver, collation_req_cfg) = IncomingRequest::get_config_receiver::<
		Block,
		sc_network::NetworkWorker<Block, Hash>,
	>(&state.req_protocol_names);
	req_cfgs.push(collation_req_cfg);
	let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver::<
		Block,
		sc_network::NetworkWorker<Block, Hash>,
	>(&state.req_protocol_names);
	req_cfgs.push(pov_req_cfg);
	let (chunk_req_v1_receiver, chunk_req_v1_cfg) =
		IncomingRequest::<v1::ChunkFetchingRequest>::get_config_receiver::<
			Block,
			sc_network::NetworkWorker<Block, Hash>,
		>(&state.req_protocol_names);
	// We won't use v1 chunk fetching requests, but we need to keep the inbound queue alive.
	// Otherwise, av-distribution subsystem will terminate.
	std::mem::forget(chunk_req_v1_cfg);
	let (chunk_req_v2_receiver, chunk_req_v2_cfg) =
		IncomingRequest::<v2::ChunkFetchingRequest>::get_config_receiver::<
			Block,
			sc_network::NetworkWorker<Block, Hash>,
		>(&state.req_protocol_names);
	// Emulated network serving availability data on behalf of the other validators.
	let (network, network_interface, network_receiver) = new_network(
		&state.config,
		&dependencies,
		&state.test_authorities,
		vec![Arc::new(availability_state.clone())],
	);
	let network_bridge_tx = network_bridge::MockNetworkBridgeTx::new(
		network.clone(),
		network_interface.subsystem_sender(),
		state.test_authorities.clone(),
	);
	let network_bridge_rx =
		network_bridge::MockNetworkBridgeRx::new(network_receiver, Some(chunk_req_v2_cfg));
	let runtime_api = MockRuntimeApi::new(
		state.config.clone(),
		state.test_authorities.clone(),
		state.candidate_receipts.clone(),
		Default::default(),
		Default::default(),
		0,
		MockRuntimeApiCoreState::Occupied,
	);
	// Build the overseer for the selected scenario (recovery read vs. distribution write).
	let (overseer, overseer_handle) = match &mode {
		TestDataAvailability::Read(options) => {
			// Select the availability-recovery strategy under benchmark.
			let subsystem = match options.strategy {
				Strategy::FullFromBackers =>
					AvailabilityRecoverySubsystem::with_recovery_strategy_kind(
						collation_req_receiver,
						&state.req_protocol_names,
						Metrics::try_register(&dependencies.registry).unwrap(),
						RecoveryStrategyKind::BackersFirstAlways,
					),
				Strategy::Chunks => AvailabilityRecoverySubsystem::with_recovery_strategy_kind(
					collation_req_receiver,
					&state.req_protocol_names,
					Metrics::try_register(&dependencies.registry).unwrap(),
					RecoveryStrategyKind::ChunksAlways,
				),
				Strategy::Systematic => AvailabilityRecoverySubsystem::with_recovery_strategy_kind(
					collation_req_receiver,
					&state.req_protocol_names,
					Metrics::try_register(&dependencies.registry).unwrap(),
					RecoveryStrategyKind::SystematicChunks,
				),
			};
			// Use a mocked av-store.
			let av_store = MockAvailabilityStore::new(
				state.chunks.clone(),
				state.chunk_indices.clone(),
				state.candidate_hashes.clone(),
				state.candidate_hash_to_core_index.clone(),
			);
			build_overseer_for_availability_read(
				dependencies.task_manager.spawn_handle(),
				runtime_api,
				av_store,
				(network_bridge_tx, network_bridge_rx),
				subsystem,
				&dependencies,
			)
		},
		TestDataAvailability::Write => {
			let availability_distribution = AvailabilityDistributionSubsystem::new(
				state.test_authorities.keyring.keystore(),
				IncomingRequestReceivers {
					pov_req_receiver,
					chunk_req_v1_receiver,
					chunk_req_v2_receiver,
				},
				state.req_protocol_names.clone(),
				Metrics::try_register(&dependencies.registry).unwrap(),
			);
			let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() };
			let chain_api = MockChainApi::new(chain_api_state);
			let bitfield_distribution =
				BitfieldDistribution::new(Metrics::try_register(&dependencies.registry).unwrap());
			build_overseer_for_availability_write(
				dependencies.task_manager.spawn_handle(),
				runtime_api,
				(network_bridge_tx, network_bridge_rx),
				availability_distribution,
				chain_api,
				new_av_store(&dependencies),
				bitfield_distribution,
				&dependencies,
			)
		},
	};
	(
		TestEnvironment::new(
			dependencies,
			state.config.clone(),
			network,
			overseer,
			overseer_handle,
			state.test_authorities.clone(),
			with_prometheus_endpoint,
		),
		req_cfgs,
	)
}
/// Run the data-availability-read benchmark: for every pre-generated block, issue
/// `n_cores` recovery requests to the availability-recovery subsystem, wait for all of
/// them to complete, and report per-block timing and throughput.
pub async fn benchmark_availability_read(
	env: &mut TestEnvironment,
	state: &TestState,
) -> BenchmarkUsage {
	let config = env.config().clone();
	env.metrics().set_n_validators(config.n_validators);
	env.metrics().set_n_cores(config.n_cores);
	// In-flight recovery result receivers for the current block.
	let mut batch = FuturesUnordered::new();
	// Total recovered bytes across the whole run, used for throughput reporting.
	let mut availability_bytes = 0u128;
	let mut candidates = state.candidates.clone();
	let test_start = Instant::now();
	for block_info in state.block_infos.iter() {
		let block_num = block_info.number as usize;
		gum::info!(target: LOG_TARGET, "Current block {}/{}", block_num, env.config().num_blocks);
		env.metrics().set_current_block(block_num);
		let block_start_ts = Instant::now();
		env.import_block(block_info.clone()).await;
		// Kick off one recovery per core for this block.
		for candidate_num in 0..config.n_cores as u64 {
			let candidate =
				candidates.next().expect("We always send up to n_cores*num_blocks; qed");
			let (tx, rx) = oneshot::channel();
			batch.push(rx);
			let message = AllMessages::AvailabilityRecovery(
				AvailabilityRecoveryMessage::RecoverAvailableData(
					candidate.clone(),
					1,
					// Assumes backing group is computed as candidate_num modulo one fifth of
					// the cores (min 5) — mirrors the group assignment used at generation time.
					Some(GroupIndex(
						candidate_num as u32 % (std::cmp::max(5, config.n_cores) / 5) as u32,
					)),
					Some(*state.candidate_hash_to_core_index.get(&candidate.hash()).unwrap()),
					tx,
				),
			);
			env.send_message(message).await;
		}
		gum::info!(target: LOG_TARGET, "{}", format!("{} recoveries pending", batch.len()).bright_black());
		// Drain all pending recoveries before moving to the next block.
		while let Some(completed) = batch.next().await {
			let available_data = completed.unwrap().unwrap();
			env.metrics().on_pov_size(available_data.encoded_size());
			availability_bytes += available_data.encoded_size() as u128;
		}
		let block_time = Instant::now().sub(block_start_ts).as_millis() as u64;
		env.metrics().set_block_time(block_time);
		gum::info!(target: LOG_TARGET, "All work for block completed in {}", format!("{block_time:?}ms").cyan());
	}
	let duration: u128 = test_start.elapsed().as_millis();
	// Convert to KiB for the throughput figures below.
	let availability_bytes = availability_bytes / 1024;
	gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{duration:?}ms").cyan());
	gum::info!(target: LOG_TARGET,
		"Throughput: {}",
		format!("{} KiB/block", availability_bytes / env.config().num_blocks as u128).bright_red()
	);
	gum::info!(target: LOG_TARGET,
		"Avg block time: {}",
		format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red()
	);
	env.stop().await;
	env.collect_resource_usage(&["availability-recovery"], false)
}
pub async fn benchmark_availability_write(
env: &mut TestEnvironment,
state: &TestState,
) -> BenchmarkUsage {
let config = env.config().clone();
env.metrics().set_n_validators(config.n_validators);
env.metrics().set_n_cores(config.n_cores);
gum::info!(target: LOG_TARGET, "Seeding availability store with candidates ...");
for (core_index, backed_candidate) in state.backed_candidates.clone().into_iter().enumerate() {
let candidate_index = *state.candidate_hashes.get(&backed_candidate.hash()).unwrap();
let available_data = state.available_data[candidate_index].clone();
let (tx, rx) = oneshot::channel();
env.send_message(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData {
candidate_hash: backed_candidate.hash(),
n_validators: config.n_validators as u32,
available_data,
expected_erasure_root: backed_candidate.descriptor().erasure_root(),
tx,
core_index: CoreIndex(core_index as u32),
node_features: default_node_features(),
},
))
.await;
rx.await
.unwrap()
.expect("Test candidates are stored nicely in availability store");
}
gum::info!(target: LOG_TARGET, "Done");
let test_start = Instant::now();
for block_info in state.block_infos.iter() {
let block_num = block_info.number as usize;
gum::info!(target: LOG_TARGET, "Current block #{}", block_num);
env.metrics().set_current_block(block_num);
let block_start_ts = Instant::now();
let relay_block_hash = block_info.hash;
env.import_block(block_info.clone()).await;
// Inform bitfield distribution about our view of current test block
let message = pezkuwi_node_subsystem_types::messages::BitfieldDistributionMessage::NetworkBridgeUpdate(
NetworkBridgeEvent::OurViewChange(OurView::new(vec![relay_block_hash], 0))
);
env.send_message(AllMessages::BitfieldDistribution(message)).await;
let chunk_fetch_start_ts = Instant::now();
// Request chunks of our own backed candidate from all other validators.
let payloads = state.chunk_fetching_requests.get(block_num - 1).expect("pregenerated");
let receivers = (1..config.n_validators).filter_map(|index| {
let (pending_response, pending_response_receiver) = oneshot::channel();
let peer_id = *env.authorities().peer_ids.get(index).expect("all validators have ids");
let payload = payloads.get(index).expect("pregenerated").clone();
let request = RawIncomingRequest { peer: peer_id, payload, pending_response };
let peer = env
.authorities()
.validator_authority_id
.get(index)
.expect("all validators have keys");
if env.network().is_peer_connected(peer) &&
env.network().send_request_from_peer(peer, request).is_ok()
{
Some(pending_response_receiver)
} else {
None
}
});
gum::info!(target: LOG_TARGET, "Waiting for all emulated peers to receive their chunk from us ...");
let responses = futures::future::try_join_all(receivers)
.await
.expect("Chunk is always served successfully");
// TODO: check if chunk is the one the peer expects to receive.
assert!(responses.iter().all(|v| v.result.is_ok()));
let chunk_fetch_duration = Instant::now().sub(chunk_fetch_start_ts).as_millis();
gum::info!(target: LOG_TARGET, "All chunks received in {}ms", chunk_fetch_duration);
let network = env.network().clone();
let authorities = env.authorities().clone();
// Spawn a task that will generate `n_validator` - 1 signed bitfields and
// send them from the emulated peers to the subsystem.
// TODO: Implement topology.
let messages = state.signed_bitfields.get(&relay_block_hash).expect("pregenerated").clone();
for index in 1..config.n_validators {
let from_peer = &authorities.validator_authority_id[index];
let message = messages.get(index).expect("pregenerated").clone();
// Send the action from peer only if it is connected to our node.
if network.is_peer_connected(from_peer) {
let _ = network.send_message_from_peer(from_peer, message);
}
}
gum::info!(
"Waiting for {} bitfields to be received and processed",
config.connected_count()
);
// Wait for all bitfields to be processed.
env.wait_until_metric(
"pezkuwi_teyrchain_received_availability_bitfields_total",
None,
|value| value == (config.connected_count() * block_num) as f64,
)
.await;
gum::info!(target: LOG_TARGET, "All bitfields processed");
let block_time = Instant::now().sub(block_start_ts).as_millis() as u64;
env.metrics().set_block_time(block_time);
gum::info!(target: LOG_TARGET, "All work for block completed in {}", format!("{block_time:?}ms").cyan());
}
let duration: u128 = test_start.elapsed().as_millis();
gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{duration:?}ms").cyan());
gum::info!(target: LOG_TARGET,
"Avg block time: {}",
format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red()
);
env.stop().await;
env.collect_resource_usage(
&["availability-distribution", "bitfield-distribution", "availability-store"],
false,
)
}
@@ -0,0 +1,306 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
configuration::{TestAuthorities, TestConfiguration},
environment::GENESIS_HASH,
mock::runtime_api::default_node_features,
};
use bitvec::bitvec;
use codec::Encode;
use colored::Colorize;
use itertools::Itertools;
use pezkuwi_node_network_protocol::{
request_response::{v2::ChunkFetchingRequest, ReqProtocolNames},
ValidationProtocols, VersionedValidationProtocol,
};
use pezkuwi_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV};
use pezkuwi_node_subsystem_test_helpers::{
derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info,
};
use pezkuwi_node_subsystem_util::availability_chunks::availability_chunk_indices;
use pezkuwi_overseer::BlockInfo;
use pezkuwi_primitives::{
AvailabilityBitfield, BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt,
ChunkIndex, CoreIndex, Hash, HeadData, Header, PersistedValidationData, Signed, SigningContext,
ValidatorIndex,
};
use pezkuwi_primitives_test_helpers::{dummy_candidate_receipt_v2, dummy_hash};
use sp_core::H256;
use std::{collections::HashMap, iter::Cycle, sync::Arc};
const LOG_TARGET: &str = "subsystem-bench::availability::test_state";
// All the state pregenerated for the availability benchmarks, shared by every
// test run built from the same `TestConfiguration`.
#[derive(Clone)]
pub struct TestState {
	// Full test configuration
	pub config: TestConfiguration,
	// A cycle iterator on all PoV sizes used in the test.
	pub pov_sizes: Cycle<std::vec::IntoIter<usize>>,
	// Generated candidate receipts to be used in the test
	pub candidates: Cycle<std::vec::IntoIter<CandidateReceipt>>,
	// Map from pov size to candidate index
	pub pov_size_to_candidate: HashMap<usize, usize>,
	// Map from generated candidate hashes to candidate index in `available_data` and `chunks`.
	pub candidate_hashes: HashMap<CandidateHash, usize>,
	// Map from candidate hash to occupied core index.
	pub candidate_hash_to_core_index: HashMap<CandidateHash, CoreIndex>,
	// Per candidate index receipts.
	pub candidate_receipt_templates: Vec<CandidateReceipt>,
	// Per candidate index `AvailableData`
	pub available_data: Vec<AvailableData>,
	// Per candidate index erasure chunks.
	pub chunks: Vec<Vec<ErasureChunk>>,
	// Per-core ValidatorIndex -> ChunkIndex mapping
	pub chunk_indices: Vec<Vec<ChunkIndex>>,
	// Per relay chain block - candidate backed by our backing group
	pub backed_candidates: Vec<CandidateReceipt>,
	// Request protocol names
	pub req_protocol_names: ReqProtocolNames,
	// Relay chain block infos
	pub block_infos: Vec<BlockInfo>,
	// Encoded chunk fetching requests for the backed candidates, one inner `Vec` per block.
	pub chunk_fetching_requests: Vec<Vec<Vec<u8>>>,
	// Pregenerated signed availability bitfields, keyed by relay block hash.
	pub signed_bitfields: HashMap<H256, Vec<VersionedValidationProtocol>>,
	// Relay chain block headers, keyed by block hash.
	pub block_headers: HashMap<H256, Header>,
	// Authority keys for the network emulation.
	pub test_authorities: TestAuthorities,
	// Map from relay block hash to the candidate receipts generated for that block.
	pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>,
}
impl TestState {
	/// Pregenerates the full benchmark state from `config`: template candidates
	/// and erasure chunks (one set per unique PoV size), per-block candidate
	/// receipts and headers, encoded chunk-fetching requests and signed
	/// availability bitfields.
	pub fn new(config: &TestConfiguration) -> Self {
		use pezkuwi_primitives::MutateDescriptorV2;
		let mut test_state = Self {
			available_data: Default::default(),
			candidate_receipt_templates: Default::default(),
			chunks: Default::default(),
			pov_size_to_candidate: Default::default(),
			pov_sizes: Vec::from(config.pov_sizes()).into_iter().cycle(),
			candidate_hashes: HashMap::new(),
			candidates: Vec::new().into_iter().cycle(),
			backed_candidates: Vec::new(),
			config: config.clone(),
			block_infos: Default::default(),
			chunk_fetching_requests: Default::default(),
			signed_bitfields: Default::default(),
			candidate_receipts: Default::default(),
			block_headers: Default::default(),
			test_authorities: config.generate_authorities(),
			req_protocol_names: ReqProtocolNames::new(GENESIS_HASH, None),
			chunk_indices: Default::default(),
			candidate_hash_to_core_index: Default::default(),
		};
		// The same validation data is reused for all candidates.
		let persisted_validation_data = PersistedValidationData {
			parent_head: HeadData(vec![7, 8, 9]),
			relay_parent_number: Default::default(),
			max_pov_size: 1024,
			relay_parent_storage_root: Default::default(),
		};
		// Per-core mapping of validator index to chunk index, as dictated by the
		// default node features.
		test_state.chunk_indices = (0..config.n_cores)
			.map(|core_index| {
				availability_chunk_indices(
					&default_node_features(),
					config.n_validators,
					CoreIndex(core_index as u32),
				)
				.unwrap()
			})
			.collect();
		// For each unique pov we create a candidate receipt.
		for (index, pov_size) in config.pov_sizes().iter().cloned().unique().enumerate() {
			gum::info!(target: LOG_TARGET, index, pov_size, "{}", "Generating template candidate".bright_blue());
			let mut candidate_receipt = dummy_candidate_receipt_v2(dummy_hash());
			let pov = PoV { block_data: BlockData(vec![index as u8; pov_size]) };
			let new_available_data = AvailableData {
				validation_data: persisted_validation_data.clone(),
				pov: Arc::new(pov),
			};
			let (new_chunks, erasure_root) = derive_erasure_chunks_with_proofs_and_root(
				config.n_validators,
				&new_available_data,
				|_, _| {},
			);
			candidate_receipt.descriptor.set_erasure_root(erasure_root);
			test_state.chunks.push(new_chunks);
			test_state.available_data.push(new_available_data);
			test_state.pov_size_to_candidate.insert(pov_size, index);
			test_state.candidate_receipt_templates.push(CandidateReceipt {
				descriptor: candidate_receipt.descriptor,
				commitments_hash: candidate_receipt.commitments_hash,
			});
		}
		// One relay chain block per test iteration; the block hash is derived
		// from the block number.
		test_state.block_infos = (1..=config.num_blocks)
			.map(|block_num| {
				let relay_block_hash = Hash::repeat_byte(block_num as u8);
				new_block_import_info(relay_block_hash, block_num as BlockNumber)
			})
			.collect();
		test_state.block_headers = test_state
			.block_infos
			.iter()
			.map(|info| {
				(
					info.hash,
					Header {
						digest: Default::default(),
						number: info.number,
						parent_hash: info.parent_hash,
						extrinsics_root: Default::default(),
						state_root: Default::default(),
					},
				)
			})
			.collect::<HashMap<_, _>>();
		// Generate all candidates
		let candidates_count = config.n_cores * config.num_blocks;
		gum::info!(target: LOG_TARGET,"{}", format!("Pre-generating {candidates_count} candidates.").bright_blue());
		test_state.candidates = (0..candidates_count)
			.map(|index| {
				let pov_size = test_state.pov_sizes.next().expect("This is a cycle; qed");
				let candidate_index = *test_state
					.pov_size_to_candidate
					.get(&pov_size)
					.expect("pov_size always exists; qed");
				let mut candidate_receipt =
					test_state.candidate_receipt_templates[candidate_index].clone();
				// Make it unique.
				candidate_receipt
					.descriptor
					.set_relay_parent(Hash::from_low_u64_be(index as u64));
				// Store the new candidate in the state
				test_state.candidate_hashes.insert(candidate_receipt.hash(), candidate_index);
				// Candidates are assigned to cores round-robin.
				let core_index = (index % config.n_cores) as u32;
				test_state
					.candidate_hash_to_core_index
					.insert(candidate_receipt.hash(), core_index.into());
				gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_receipt.hash(), "new candidate");
				candidate_receipt
			})
			.collect::<Vec<_>>()
			.into_iter()
			.cycle();
		// Prepare per block candidates.
		// Genesis block is always finalized, so we start at 1.
		for info in test_state.block_infos.iter() {
			for _ in 0..config.n_cores {
				let receipt = test_state.candidates.next().expect("Cycle iterator");
				test_state.candidate_receipts.entry(info.hash).or_default().push(receipt);
			}
			// First candidate is our backed candidate.
			test_state.backed_candidates.push(
				test_state
					.candidate_receipts
					.get(&info.hash)
					.expect("just inserted above")
					.first()
					.expect("just inserted above")
					.clone(),
			);
		}
		// Pre-encode one `ChunkFetchingRequest` per validator for every backed
		// candidate.
		test_state.chunk_fetching_requests = test_state
			.backed_candidates
			.iter()
			.map(|candidate| {
				(0..config.n_validators)
					.map(|index| {
						ChunkFetchingRequest {
							candidate_hash: candidate.hash(),
							index: ValidatorIndex(index as u32),
						}
						.encode()
					})
					.collect::<Vec<_>>()
			})
			.collect::<Vec<_>>();
		// Pre-sign one availability bitfield per validator for every block.
		test_state.signed_bitfields = test_state
			.block_infos
			.iter()
			.map(|block_info| {
				let signing_context =
					SigningContext { session_index: 0, parent_hash: block_info.hash };
				let messages = (0..config.n_validators)
					.map(|index| {
						let validator_public = test_state
							.test_authorities
							.validator_public
							.get(index)
							.expect("All validator keys are known");
						// Node has all the chunks in the world.
						let payload: AvailabilityBitfield =
							AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]);
						let signed_bitfield = Signed::<AvailabilityBitfield>::sign(
							&test_state.test_authorities.keyring.keystore(),
							payload,
							&signing_context,
							ValidatorIndex(index as u32),
							validator_public,
						)
						.ok()
						.flatten()
						.expect("should be signed");
						peer_bitfield_message_v3(block_info.hash, signed_bitfield)
					})
					.collect::<Vec<_>>();
				(block_info.hash, messages)
			})
			.collect();
		gum::info!(target: LOG_TARGET, "{}","Created test environment.".bright_blue());
		test_state
	}
}
/// Wraps a signed availability bitfield into a v3 validation-protocol network
/// message for the given relay parent.
fn peer_bitfield_message_v3(
	relay_hash: H256,
	signed_bitfield: Signed<AvailabilityBitfield>,
) -> VersionedValidationProtocol {
	let message = pezkuwi_node_network_protocol::v3::ValidationProtocol::BitfieldDistribution(
		pezkuwi_node_network_protocol::v3::BitfieldDistributionMessage::Bitfield(
			relay_hash,
			signed_bitfield.into(),
		),
	);
	ValidationProtocols::V3(message)
}
@@ -0,0 +1,317 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Test configuration definition and helpers.
use crate::keyring::Keyring;
use itertools::Itertools;
use pezkuwi_node_network_protocol::authority_discovery::AuthorityDiscovery;
use pezkuwi_primitives::{AssignmentId, AuthorityDiscoveryId, ValidatorId, ValidatorPair};
use rand::thread_rng;
use rand_distr::{Distribution, Normal, Uniform};
use sc_network::Multiaddr;
use sc_network_types::PeerId;
use serde::{Deserialize, Serialize};
use sp_consensus_babe::AuthorityId;
use sp_core::Pair;
use std::collections::{HashMap, HashSet};
/// Peer networking latency configuration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct PeerLatency {
	/// The mean latency (milliseconds) of the peers.
	pub mean_latency_ms: usize,
	/// The standard deviation of the latency, in the same unit as the mean
	/// (milliseconds).
	pub std_dev: f64,
}
// Based on Kusama `max_validators`
fn default_n_validators() -> usize {
	300
}
// Based on Kusama cores
fn default_n_cores() -> usize {
	60
}
// Default PoV size in KiB.
fn default_pov_size() -> usize {
	5 * 1024
}
// Default bandwidth in bytes, based on stats from Kusama validators
fn default_bandwidth() -> usize {
	42 * 1024 * 1024
}
// Default peer latency
fn default_peer_latency() -> Option<PeerLatency> {
	Some(PeerLatency { mean_latency_ms: 30, std_dev: 2.0 })
}
// Default connectivity percentage
fn default_connectivity() -> usize {
	90
}
// Default backing group size
fn default_backing_group_size() -> usize {
	5
}
// Default needed approvals
fn default_needed_approvals() -> usize {
	30
}
// Default width of the zeroth approval delay tranche.
fn default_zeroth_delay_tranche_width() -> usize {
	0
}
// Default number of `relay_vrf_modulo` samples.
fn default_relay_vrf_modulo_samples() -> usize {
	6
}
// Default number of approval delay tranches.
fn default_n_delay_tranches() -> usize {
	89
}
// Default number of no-show slots.
fn default_no_show_slots() -> usize {
	3
}
// Default minimum number of backing votes per candidate.
fn default_minimum_backing_votes() -> u32 {
	2
}
// Default async backing `max_candidate_depth`.
fn default_max_candidate_depth() -> u32 {
	3
}
// Default async backing `allowed_ancestry_len`.
fn default_allowed_ancestry_len() -> u32 {
	2
}
/// The test input parameters
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TestConfiguration {
	/// Number of validators
	#[serde(default = "default_n_validators")]
	pub n_validators: usize,
	/// Number of cores
	#[serde(default = "default_n_cores")]
	pub n_cores: usize,
	/// The number of needed votes to approve a candidate.
	#[serde(default = "default_needed_approvals")]
	pub needed_approvals: usize,
	/// Approval voting: width of the zeroth delay tranche.
	#[serde(default = "default_zeroth_delay_tranche_width")]
	pub zeroth_delay_tranche_width: usize,
	/// Approval voting: number of `relay_vrf_modulo` assignment samples.
	#[serde(default = "default_relay_vrf_modulo_samples")]
	pub relay_vrf_modulo_samples: usize,
	/// Approval voting: total number of delay tranches.
	#[serde(default = "default_n_delay_tranches")]
	pub n_delay_tranches: usize,
	/// Approval voting: number of no-show slots.
	#[serde(default = "default_no_show_slots")]
	pub no_show_slots: usize,
	/// Maximum backing group size
	#[serde(default = "default_backing_group_size")]
	pub max_validators_per_core: usize,
	/// The min PoV size
	#[serde(default = "default_pov_size")]
	pub min_pov_size: usize,
	/// The max PoV size.
	#[serde(default = "default_pov_size")]
	pub max_pov_size: usize,
	/// Randomly sampled pov_sizes
	#[serde(skip)]
	pub pov_sizes: Vec<usize>,
	/// The amount of bandwidth remote validators have.
	#[serde(default = "default_bandwidth")]
	pub peer_bandwidth: usize,
	/// The amount of bandwidth our node has.
	#[serde(default = "default_bandwidth")]
	pub bandwidth: usize,
	/// Optional peer emulation latency (round trip time) wrt node under test
	#[serde(default = "default_peer_latency")]
	pub latency: Option<PeerLatency>,
	/// Connectivity ratio, the percentage of peers we are connected to, but as part of the
	/// topology.
	#[serde(default = "default_connectivity")]
	pub connectivity: usize,
	/// Number of blocks to run the test for
	pub num_blocks: usize,
	/// Number of minimum backing votes
	#[serde(default = "default_minimum_backing_votes")]
	pub minimum_backing_votes: u32,
	/// Async Backing max_candidate_depth
	#[serde(default = "default_max_candidate_depth")]
	pub max_candidate_depth: u32,
	/// Async Backing allowed_ancestry_len
	#[serde(default = "default_allowed_ancestry_len")]
	pub allowed_ancestry_len: u32,
}
impl Default for TestConfiguration {
	// Mirrors the serde `default = "..."` attributes on the struct fields, so a
	// config built in code matches one deserialized from an empty document.
	fn default() -> Self {
		Self {
			n_validators: default_n_validators(),
			n_cores: default_n_cores(),
			needed_approvals: default_needed_approvals(),
			zeroth_delay_tranche_width: default_zeroth_delay_tranche_width(),
			relay_vrf_modulo_samples: default_relay_vrf_modulo_samples(),
			n_delay_tranches: default_n_delay_tranches(),
			no_show_slots: default_no_show_slots(),
			max_validators_per_core: default_backing_group_size(),
			min_pov_size: default_pov_size(),
			max_pov_size: default_pov_size(),
			pov_sizes: Default::default(),
			peer_bandwidth: default_bandwidth(),
			bandwidth: default_bandwidth(),
			latency: default_peer_latency(),
			connectivity: default_connectivity(),
			num_blocks: Default::default(),
			minimum_backing_votes: default_minimum_backing_votes(),
			max_candidate_depth: default_max_candidate_depth(),
			allowed_ancestry_len: default_allowed_ancestry_len(),
		}
	}
}
impl TestConfiguration {
	/// Samples `n_cores` random PoV sizes in `min_pov_size..=max_pov_size`
	/// (KiB inputs, stored as bytes) into `self.pov_sizes`.
	pub fn generate_pov_sizes(&mut self) {
		self.pov_sizes = generate_pov_sizes(self.n_cores, self.min_pov_size, self.max_pov_size);
	}
	/// The pregenerated PoV sizes, in bytes.
	pub fn pov_sizes(&self) -> &[usize] {
		&self.pov_sizes
	}
	/// Return the number of peers connected to our node.
	pub fn connected_count(&self) -> usize {
		// `connectivity` percent of the other `n_validators - 1` peers.
		((self.n_validators - 1) as f64 / (100.0 / self.connectivity as f64)) as usize
	}
	/// Generates the authority keys we need for the network emulation.
	pub fn generate_authorities(&self) -> TestAuthorities {
		let keyring = Keyring::default();
		// One deterministic seed per validator: "//Node0", "//Node1", ...
		let key_seeds = (0..self.n_validators)
			.map(|peer_index| format!("//Node{peer_index}"))
			.collect_vec();
		let keys = key_seeds
			.iter()
			.map(|seed| keyring.sr25519_new(seed.as_str()))
			.collect::<Vec<_>>();
		// Generate keys and peers ids in each of the format needed by the tests.
		let validator_public: Vec<ValidatorId> =
			keys.iter().map(|key| (*key).into()).collect::<Vec<_>>();
		let validator_authority_id: Vec<AuthorityDiscoveryId> =
			keys.iter().map(|key| (*key).into()).collect::<Vec<_>>();
		let validator_babe_id: Vec<AuthorityId> =
			keys.iter().map(|key| (*key).into()).collect::<Vec<_>>();
		let validator_assignment_id: Vec<AssignmentId> =
			keys.iter().map(|key| (*key).into()).collect::<Vec<_>>();
		// Peer ids are random; the i-th peer id belongs to the i-th validator.
		let peer_ids: Vec<PeerId> = keys.iter().map(|_| PeerId::random()).collect::<Vec<_>>();
		let peer_id_to_authority = peer_ids
			.iter()
			.zip(validator_authority_id.iter())
			.map(|(peer_id, authority_id)| (*peer_id, authority_id.clone()))
			.collect();
		let validator_pairs = key_seeds
			.iter()
			.map(|seed| ValidatorPair::from_string_with_seed(seed, None).unwrap().0)
			.collect();
		TestAuthorities {
			keyring,
			validator_public,
			validator_authority_id,
			peer_ids,
			validator_babe_id,
			validator_assignment_id,
			key_seeds,
			peer_id_to_authority,
			validator_pairs,
		}
	}
}
/// Draws one value uniformly from the inclusive range `min_value..=max_value`.
fn random_uniform_sample<T: Into<usize> + From<usize>>(min_value: T, max_value: T) -> T {
	let range = Uniform::from(min_value.into()..=max_value.into());
	T::from(range.sample(&mut thread_rng()))
}
/// Samples a PoV size uniformly from `min_pov_size..=max_pov_size` (bytes).
fn random_pov_size(min_pov_size: usize, max_pov_size: usize) -> usize {
	random_uniform_sample(min_pov_size, max_pov_size)
}
/// Generates `count` random PoV sizes, converting the KiB bounds to bytes.
fn generate_pov_sizes(count: usize, min_kib: usize, max_kib: usize) -> Vec<usize> {
	std::iter::repeat_with(|| random_pov_size(min_kib * 1024, max_kib * 1024))
		.take(count)
		.collect()
}
/// Helper struct for authority related state.
#[derive(Clone)]
pub struct TestAuthorities {
	/// Keyring holding the generated validator keys.
	pub keyring: Keyring,
	/// Per-validator session validator key.
	pub validator_public: Vec<ValidatorId>,
	/// Per-validator authority discovery key.
	pub validator_authority_id: Vec<AuthorityDiscoveryId>,
	/// Per-validator BABE authority key.
	pub validator_babe_id: Vec<AuthorityId>,
	/// Per-validator approval assignment key.
	pub validator_assignment_id: Vec<AssignmentId>,
	/// The seed strings ("//Node{index}") all keys were derived from.
	pub key_seeds: Vec<String>,
	/// Randomly generated network peer id of each validator.
	pub peer_ids: Vec<PeerId>,
	/// Reverse lookup from peer id to authority discovery id.
	pub peer_id_to_authority: HashMap<PeerId, AuthorityDiscoveryId>,
	/// Per-validator signing key pair, derived from the same seeds.
	pub validator_pairs: Vec<ValidatorPair>,
}
impl std::fmt::Debug for TestAuthorities {
	// Key material is large and uninteresting in logs, so only the type name is
	// printed.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		f.write_str("TestAuthorities")
	}
}
/// Sample latency (in milliseconds) from a normal distribution with parameters
/// specified in `maybe_peer_latency`. Returns 0 when no latency is configured.
pub fn random_latency(maybe_peer_latency: Option<&PeerLatency>) -> usize {
	match maybe_peer_latency {
		Some(latency_config) =>
			Normal::new(latency_config.mean_latency_ms as f64, latency_config.std_dev)
				.expect("normal distribution parameters are good")
				.sample(&mut thread_rng()) as usize,
		None => 0,
	}
}
#[async_trait::async_trait]
impl AuthorityDiscovery for TestAuthorities {
	/// Get the addresses for the given [`AuthorityDiscoveryId`] from the local address cache.
	async fn get_addresses_by_authority_id(
		&mut self,
		_authority: AuthorityDiscoveryId,
	) -> Option<HashSet<Multiaddr>> {
		// Address resolution is not emulated.
		None
	}
	/// Get the [`AuthorityDiscoveryId`] for the given [`PeerId`] from the local address cache.
	async fn get_authority_ids_by_peer_id(
		&mut self,
		peer_id: PeerId,
	) -> Option<HashSet<AuthorityDiscoveryId>> {
		let authority_id = self.peer_id_to_authority.get(&peer_id)?.clone();
		Some(HashSet::from([authority_id]))
	}
}
@@ -0,0 +1,219 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Display implementations and helper methods for parsing prometheus metrics
//! to a format that can be displayed in the CLI.
//!
//! Currently histogram buckets are skipped.
use crate::configuration::TestConfiguration;
use colored::Colorize;
use prometheus::{
proto::{MetricFamily, MetricType},
Registry,
};
use std::fmt::Display;
const LOG_TARGET: &str = "subsystem-bench::display";
/// A flat collection of [`TestMetric`] gathered from a prometheus registry.
#[derive(Default, Debug)]
pub struct MetricCollection(Vec<TestMetric>);

impl From<Vec<TestMetric>> for MetricCollection {
	fn from(metrics: Vec<TestMetric>) -> Self {
		MetricCollection(metrics)
	}
}
impl MetricCollection {
	/// All metrics in the collection.
	pub fn all(&self) -> &Vec<TestMetric> {
		&self.0
	}

	/// Sums up all metrics with the given name in the collection.
	pub fn sum_by(&self, name: &str) -> f64 {
		self.all()
			.iter()
			.filter(|metric| metric.name == name)
			.map(|metric| metric.value)
			.sum()
	}

	/// Tells if the summed-up value of `metric_name` is lower than `value`.
	pub fn metric_lower_than(&self, metric_name: &str, value: f64) -> bool {
		self.sum_by(metric_name) < value
	}

	/// Returns the subset of metrics that carry label `label_name` with value
	/// `label_value`.
	pub fn subset_with_label_value(&self, label_name: &str, label_value: &str) -> MetricCollection {
		// Reuse `TestMetric::label_value` instead of re-implementing the label
		// lookup; this also avoids allocating a comparison `String` per metric.
		self.0
			.iter()
			.filter(|metric| metric.label_value(label_name) == Some(label_value))
			.cloned()
			.collect::<Vec<_>>()
			.into()
	}
}
impl Display for MetricCollection {
	// A leading blank line followed by one metric per line.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		writeln!(f)?;
		self.0.iter().try_for_each(|metric| writeln!(f, "{metric}"))
	}
}
/// A single parsed metric sample: name, parallel label name/value vectors and
/// the sampled value.
#[derive(Debug, Clone)]
pub struct TestMetric {
	name: String,
	label_names: Vec<String>,
	label_values: Vec<String>,
	value: f64,
}

impl TestMetric {
	/// The metric name.
	pub fn name(&self) -> &str {
		self.name.as_str()
	}

	/// The sampled value.
	pub fn value(&self) -> f64 {
		self.value
	}

	/// Looks up the value recorded for `label_name`, if present.
	pub fn label_value(&self, label_name: &str) -> Option<&str> {
		self.label_names
			.iter()
			.zip(self.label_values.iter())
			.find(|(name, _)| name.as_str() == label_name)
			.map(|(_, value)| value.as_str())
	}
}
impl Display for TestMetric {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"({} = {}) [{:?}, {:?}]",
self.name.cyan(),
format!("{}", self.value).white(),
self.label_names,
self.label_values
)
}
}
// Returns `false` if metric should be skipped.
fn check_metric_family(mf: &MetricFamily) -> bool {
	// A family with no samples carries no information.
	if mf.get_metric().is_empty() {
		gum::error!(target: LOG_TARGET, "MetricFamily has no metrics: {:?}", mf);
		return false;
	}
	// Metrics are looked up by name downstream, so an unnamed family is unusable.
	if mf.get_name().is_empty() {
		gum::error!(target: LOG_TARGET, "MetricFamily has no name: {:?}", mf);
		return false;
	}
	true
}
pub fn parse_metrics(registry: &Registry) -> MetricCollection {
let metric_families = registry.gather();
let mut test_metrics = Vec::new();
for mf in metric_families {
if !check_metric_family(&mf) {
continue;
}
let name: String = mf.get_name().into();
let metric_type = mf.get_field_type();
for m in mf.get_metric() {
let (label_names, label_values): (Vec<String>, Vec<String>) = m
.get_label()
.iter()
.map(|pair| (String::from(pair.get_name()), String::from(pair.get_value())))
.unzip();
match metric_type {
MetricType::COUNTER => {
test_metrics.push(TestMetric {
name: name.clone(),
label_names,
label_values,
value: m.get_counter().get_value(),
});
},
MetricType::GAUGE => {
test_metrics.push(TestMetric {
name: name.clone(),
label_names,
label_values,
value: m.get_gauge().get_value(),
});
},
MetricType::HISTOGRAM => {
let h = m.get_histogram();
let h_name = name.clone() + "_sum";
test_metrics.push(TestMetric {
name: h_name,
label_names: label_names.clone(),
label_values: label_values.clone(),
value: h.get_sample_sum(),
});
let h_name = name.clone() + "_count";
test_metrics.push(TestMetric {
name: h_name,
label_names,
label_values,
value: h.get_sample_count() as f64,
});
},
MetricType::SUMMARY => {
unimplemented!();
},
MetricType::UNTYPED => {
unimplemented!();
},
}
}
}
test_metrics.into()
}
impl Display for TestConfiguration {
	// One-line, colorized summary of the key test parameters for CLI output.
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		let validators = format!("n_validators = {}", self.n_validators).blue();
		let cores = format!("n_cores = {}", self.n_cores).blue();
		let pov = format!("pov_size = {} - {}", self.min_pov_size, self.max_pov_size).bright_black();
		let connectivity = format!("connectivity = {}", self.connectivity).bright_black();
		let latency = format!("latency = {:?}", self.latency).bright_black();
		write!(f, "{}, {}, {}, {}, {}", validators, cores, pov, connectivity, latency)
	}
}
@@ -0,0 +1,256 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Subsystem benchmark for the dispute coordinator and dispute distribution subsystems.
//!
//! Scenarios:
//! 1. Dispute participation.
//! - Dispute distribution receives a DisputeRequest message from Validator 1 with votes:
//! - valid (Validator 1).
//! - invalid (Validator 3) <- malicious.
//! - Dispute distribution sends DisputeCoordinatorMessage::ImportStatements.
//! - Dispute coordinator imports the votes and participate in the dispute.
//! - Dispute coordinator sends DisputeDistributionMessage::SendDispute.
//! - Dispute distribution sends DisputeRequest to all validators.
//! 2. TODO: Dispute confirmed: we need 1/3+1 votes per candidate.
//! 3. TODO: Dispute concluded: we need 2/3+1 votes per candidate. Here we can test db population
//! 4. TODO: Spamming: a combination of scenario 3 + multiple of scenario 1
use crate::{
dummy_builder,
environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH},
mock::{
approval_voting_parallel::MockApprovalVotingParallel,
availability_recovery::MockAvailabilityRecovery,
candidate_validation::MockCandidateValidation,
chain_api::{ChainApiState, MockChainApi},
network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx},
runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState},
AlwaysSupportsTeyrchains,
},
network::{new_network, NetworkEmulatorHandle, NetworkInterface, NetworkInterfaceReceiver},
usage::BenchmarkUsage,
};
use codec::Encode;
use colored::Colorize;
use pezkuwi_dispute_distribution::DisputeDistributionSubsystem;
use pezkuwi_node_core_dispute_coordinator::{
Config as DisputeCoordinatorConfig, DisputeCoordinatorSubsystem,
};
use pezkuwi_node_metrics::metrics::Metrics;
use pezkuwi_node_network_protocol::request_response::{IncomingRequest, ReqProtocolNames};
use pezkuwi_overseer::{
Handle as OverseerHandle, Overseer, OverseerConnector, OverseerMetrics, SpawnGlue,
};
use pezkuwi_primitives::{AuthorityDiscoveryId, Block, Hash, ValidatorId};
use sc_keystore::LocalKeystore;
use sc_network::request_responses::IncomingRequest as RawIncomingRequest;
use sc_service::SpawnTaskHandle;
use serde::{Deserialize, Serialize};
use sp_keystore::Keystore;
use sp_runtime::RuntimeAppPublic;
use std::{sync::Arc, time::Instant};
pub use test_state::TestState;
mod test_state;
const LOG_TARGET: &str = "subsystem-bench::disputes";
/// Parameters specific to the disputes benchmark
#[derive(Debug, Clone, Serialize, Deserialize, clap::Parser)]
#[clap(rename_all = "kebab-case")]
#[allow(missing_docs)]
pub struct DisputesOptions {
	#[clap(short, long, default_value_t = 10)]
	/// The number of disputes to participate in.
	pub n_disputes: u32,
}
/// Creates an in-memory keystore holding the node's validator and
/// authority-discovery keys, both derived from the `//Node0` seed.
pub fn make_keystore() -> Arc<LocalKeystore> {
	let keystore = Arc::new(LocalKeystore::in_memory());
	for key_type in [ValidatorId::ID, AuthorityDiscoveryId::ID] {
		Keystore::sr25519_generate_new(&*keystore, key_type, Some("//Node0"))
			.expect("Insert key into keystore");
	}
	keystore
}
/// Builds an overseer for the disputes benchmark: the real dispute-coordinator
/// and dispute-distribution subsystems under test, with the subsystems they
/// talk to replaced by mocks driven from the pregenerated `state`.
fn build_overseer(
	state: &TestState,
	network: NetworkEmulatorHandle,
	network_interface: NetworkInterface,
	network_receiver: NetworkInterfaceReceiver,
	dependencies: &TestEnvironmentDependencies,
) -> (Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsTeyrchains>, OverseerHandle) {
	let overseer_connector = OverseerConnector::with_event_capacity(64000);
	let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap();
	let spawn_task_handle = dependencies.task_manager.spawn_handle();
	// Single-column in-memory database backing the dispute coordinator.
	let db = kvdb_memorydb::create(1);
	let db = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[0]);
	let store = Arc::new(db);
	let config = DisputeCoordinatorConfig { col_dispute_data: 0 };
	let keystore = make_keystore();
	// Channel through which emulated peers deliver incoming dispute requests.
	let (dispute_req_receiver, dispute_req_cfg) = IncomingRequest::get_config_receiver::<
		Block,
		sc_network::NetworkWorker<Block, Hash>,
	>(&ReqProtocolNames::new(GENESIS_HASH, None));
	let mock_runtime_api = MockRuntimeApi::new(
		state.config.clone(),
		state.test_authorities.clone(),
		state.candidate_receipts.clone(),
		state.candidate_events.clone(),
		Default::default(),
		0,
		MockRuntimeApiCoreState::Scheduled,
	);
	let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() };
	let mock_chain_api = MockChainApi::new(chain_api_state);
	let mock_availability_recovery = MockAvailabilityRecovery::new();
	let mock_approval_voting = MockApprovalVotingParallel::new();
	let mock_candidate_validation = MockCandidateValidation::new();
	let network_bridge_tx = MockNetworkBridgeTx::new(
		network,
		network_interface.subsystem_sender(),
		state.test_authorities.clone(),
	);
	let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(dispute_req_cfg));
	// The two real subsystems under test.
	let dispute_distribution = DisputeDistributionSubsystem::new(
		keystore.clone(),
		dispute_req_receiver,
		state.test_authorities.clone(),
		Metrics::try_register(&dependencies.registry).unwrap(),
	);
	let dispute_coordinator = DisputeCoordinatorSubsystem::new(
		store,
		config,
		keystore,
		Metrics::try_register(&dependencies.registry).unwrap(),
	);
	let dummy = dummy_builder!(spawn_task_handle, overseer_metrics)
		.replace_runtime_api(|_| mock_runtime_api)
		.replace_chain_api(|_| mock_chain_api)
		.replace_availability_recovery(|_| mock_availability_recovery)
		.replace_approval_voting_parallel(|_| mock_approval_voting)
		.replace_candidate_validation(|_| mock_candidate_validation)
		.replace_network_bridge_tx(|_| network_bridge_tx)
		.replace_network_bridge_rx(|_| network_bridge_rx)
		.replace_dispute_distribution(|_| dispute_distribution)
		.replace_dispute_coordinator(|_| dispute_coordinator);
	let (overseer, raw_handle) = dummy.build_with_connector(overseer_connector).unwrap();
	let overseer_handle = OverseerHandle::new(raw_handle);
	(overseer, overseer_handle)
}
/// Prepares a fully wired `TestEnvironment` for the dispute benchmark.
///
/// Creates the default dependencies, spins up the emulated network — with a
/// clone of `state` registered as the network-level message handler (see its
/// `HandleNetworkMessage` impl) — and builds the overseer on top of it.
pub fn prepare_test(state: &TestState, with_prometheus_endpoint: bool) -> TestEnvironment {
	let dependencies = TestEnvironmentDependencies::default();
	let (network, network_interface, network_receiver) = new_network(
		&state.config,
		&dependencies,
		&state.test_authorities,
		// The test state itself answers peer-level requests.
		vec![Arc::new(state.clone())],
	);
	let (overseer, overseer_handle) =
		build_overseer(state, network.clone(), network_interface, network_receiver, &dependencies);
	TestEnvironment::new(
		dependencies,
		state.config.clone(),
		network,
		overseer,
		overseer_handle,
		state.test_authorities.clone(),
		with_prometheus_endpoint,
	)
}
/// Drives the dispute-coordinator benchmark.
///
/// For every pregenerated block: imports the block, delivers the pregenerated
/// dispute request for each of its candidates from emulated peer #1, then
/// polls the shared `requests_tracker` until the node has gossiped its own
/// dispute requests to the expected number of connected validators.
///
/// Returns the collected network/CPU usage of the subsystems under test.
pub async fn benchmark_dispute_coordinator(
	env: &mut TestEnvironment,
	state: &TestState,
) -> BenchmarkUsage {
	let config = env.config().clone();
	let test_start = Instant::now();
	for block_info in state.block_infos.iter() {
		let block_num = block_info.number as usize;
		gum::info!(target: LOG_TARGET, "Current block {}/{} {:?}", block_num, config.num_blocks, block_info.hash);
		env.metrics().set_current_block(block_num);
		env.import_block(block_info.clone()).await;
		let candidate_receipts =
			state.candidate_receipts.get(&block_info.hash).expect("pregenerated");
		for candidate_receipt in candidate_receipts.iter() {
			// Deliver the pregenerated dispute request from validator #1.
			let peer_id = *env.authorities().peer_ids.get(1).expect("all validators have ids");
			let payload =
				state.dispute_requests.get(&candidate_receipt.hash()).expect("pregenerated");
			let (pending_response, pending_response_receiver) =
				futures::channel::oneshot::channel();
			let request =
				RawIncomingRequest { peer: peer_id, payload: payload.encode(), pending_response };
			let peer = env
				.authorities()
				.validator_authority_id
				.get(1)
				.expect("all validators have keys");
			assert!(env.network().is_peer_connected(peer), "Peer {peer:?} is not connected");
			env.network().send_request_from_peer(peer, request).unwrap();
			let res = pending_response_receiver.await.expect("dispute request sent");
			gum::debug!(target: LOG_TARGET, "Dispute request sent to node from peer {res:?}");
		}
		let candidate_hashes =
			candidate_receipts.iter().map(|receipt| receipt.hash()).collect::<Vec<_>>();
		// The node is expected to send each dispute to every connected validator
		// except itself.
		let requests_expected = candidate_hashes.len() *
			(state.config.n_validators * state.config.connectivity / 100 - 1);
		loop {
			// Take the tracker lock once per poll (not once per candidate) and
			// avoid materializing a temporary default set for absent entries.
			let requests_sent = {
				let tracker = state.requests_tracker.lock().unwrap();
				candidate_hashes
					.iter()
					.map(|candidate_hash| {
						tracker.get(candidate_hash).map_or(0, |authorities| authorities.len())
					})
					.sum::<usize>()
			};
			gum::info!(target: LOG_TARGET, "Waiting for dispute requests to be sent: {requests_sent}/{requests_expected}");
			if requests_sent == requests_expected {
				break;
			}
			tokio::time::sleep(std::time::Duration::from_millis(100)).await;
		}
	}
	let duration: u128 = test_start.elapsed().as_millis();
	gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{duration:?}ms").cyan());
	gum::info!(target: LOG_TARGET,
		"Avg block time: {}",
		format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red()
	);
	env.stop().await;
	env.collect_resource_usage(&["dispute-coordinator", "dispute-distribution"], false)
}
@@ -0,0 +1,222 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
configuration::{TestAuthorities, TestConfiguration},
disputes::DisputesOptions,
network::{HandleNetworkMessage, NetworkMessage},
};
use codec::Encode;
use pezkuwi_node_network_protocol::request_response::{
v1::{DisputeRequest, DisputeResponse},
ProtocolName, Requests,
};
use pezkuwi_node_primitives::{
InvalidDisputeVote, SignedDisputeStatement, UncheckedDisputeMessage, ValidDisputeVote,
};
use pezkuwi_node_subsystem_test_helpers::mock::new_block_import_info;
use pezkuwi_overseer::BlockInfo;
use pezkuwi_primitives::{
AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
CandidateReceiptV2, CoreIndex, GroupIndex, Hash, HeadData, Header, InvalidDisputeStatementKind,
SessionIndex, ValidDisputeStatementKind, ValidatorId, ValidatorIndex,
};
use pezkuwi_primitives_test_helpers::{dummy_candidate_receipt_v2_bad_sig, dummy_hash};
use sp_keystore::KeystorePtr;
use std::{
collections::{HashMap, HashSet},
sync::{Arc, Mutex},
};
/// Pre-generated state shared by the dispute benchmark driver and its mocks.
#[derive(Clone)]
pub struct TestState {
	/// Full test config
	pub config: TestConfiguration,
	/// Authority keys for the network emulation.
	pub test_authorities: TestAuthorities,
	/// Relay chain block infos
	pub block_infos: Vec<BlockInfo>,
	/// Generated candidate receipts, keyed by relay block hash.
	pub candidate_receipts: HashMap<Hash, Vec<CandidateReceiptV2>>,
	/// Generated candidate events, keyed by relay block hash.
	pub candidate_events: HashMap<Hash, Vec<CandidateEvent>>,
	/// Generated dispute requests, keyed by candidate hash.
	pub dispute_requests: HashMap<CandidateHash, DisputeRequest>,
	/// Relay chain block headers, keyed by block hash.
	pub block_headers: HashMap<Hash, Header>,
	/// Map from candidate hash to authorities that have received a dispute request.
	pub requests_tracker: Arc<Mutex<HashMap<CandidateHash, HashSet<AuthorityDiscoveryId>>>>,
}
impl TestState {
	/// Pre-generates the whole benchmark fixture from the test `config` and the
	/// dispute-specific `options`.
	///
	/// For each of `config.num_blocks` blocks this creates `options.n_disputes`
	/// candidates, a `CandidateBacked` event per candidate and a signed
	/// `DisputeRequest` per candidate (explicit "valid" vote from validator 1,
	/// explicit "invalid" vote from validator 3, both for session 1).
	pub fn new(config: &TestConfiguration, options: &DisputesOptions) -> Self {
		let config = config.clone();
		let test_authorities = config.generate_authorities();
		// Blocks are numbered starting from 1; hashes derive from the number.
		let block_infos: Vec<BlockInfo> =
			(1..=config.num_blocks).map(generate_block_info).collect();
		// `n_disputes` dummy candidates per block, keyed by block hash.
		let candidate_receipts: HashMap<Hash, Vec<CandidateReceiptV2>> = block_infos
			.iter()
			.map(|block_info| {
				(
					block_info.hash,
					(0..options.n_disputes)
						.map(|_| make_candidate_receipt(block_info.hash))
						.collect(),
				)
			})
			.collect();
		// A `CandidateBacked` event for every generated candidate.
		let candidate_events = candidate_receipts
			.iter()
			.map(|(&hash, receipts)| {
				(
					hash,
					receipts
						.iter()
						.map(|receipt| make_candidate_backed_event(receipt.clone()))
						.collect::<Vec<_>>(),
				)
			})
			.collect();
		// One pre-signed dispute request per candidate, keyed by candidate hash.
		let dispute_requests = candidate_receipts
			.iter()
			.flat_map(|(_, receipts)| {
				receipts.iter().map(|receipt| {
					let valid = issue_explicit_statement(
						test_authorities.keyring.local_keystore(),
						test_authorities.validator_public[1].clone(),
						receipt.hash(),
						1,
						true,
					);
					let invalid = issue_explicit_statement(
						test_authorities.keyring.local_keystore(),
						test_authorities.validator_public[3].clone(),
						receipt.hash(),
						1,
						false,
					);
					(
						receipt.hash(),
						DisputeRequest(UncheckedDisputeMessage {
							candidate_receipt: receipt.clone(),
							session_index: 1,
							valid_vote: ValidDisputeVote {
								validator_index: ValidatorIndex(1),
								signature: valid.validator_signature().clone(),
								kind: ValidDisputeStatementKind::Explicit,
							},
							invalid_vote: InvalidDisputeVote {
								validator_index: ValidatorIndex(3),
								signature: invalid.validator_signature().clone(),
								kind: InvalidDisputeStatementKind::Explicit,
							},
						}),
					)
				})
			})
			.collect();
		let block_headers = block_infos.iter().map(generate_block_header).collect();
		// Filled in at runtime by the network message handler.
		let requests_tracker = Arc::new(Mutex::new(HashMap::new()));
		Self {
			config,
			test_authorities,
			block_infos,
			candidate_receipts,
			candidate_events,
			dispute_requests,
			block_headers,
			requests_tracker,
		}
	}
}
/// Builds a dummy candidate receipt anchored at `relay_parent`.
///
/// The receipt carries an intentionally invalid signature; the benchmark never
/// verifies it. The commitments hash is set to that of default commitments.
fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceiptV2 {
	let mut receipt = dummy_candidate_receipt_v2_bad_sig(relay_parent, dummy_hash());
	receipt.commitments_hash = CandidateCommitments::default().hash();
	receipt
}
/// Wraps `receipt` into a `CandidateBacked` event with default head data,
/// core index and group index.
fn make_candidate_backed_event(receipt: CandidateReceiptV2) -> CandidateEvent {
	let head_data = HeadData::default();
	let core_index = CoreIndex::default();
	let group_index = GroupIndex::default();
	CandidateEvent::CandidateBacked(receipt, head_data, core_index, group_index)
}
/// Creates import info for a synthetic relay-chain block numbered `block_num`.
///
/// NOTE(review): the block number is truncated to a `u8` for the hash, so a
/// configuration with more than 255 blocks would produce colliding block
/// hashes — confirm `num_blocks` stays below that bound.
fn generate_block_info(block_num: usize) -> BlockInfo {
	new_block_import_info(Hash::repeat_byte(block_num as u8), block_num as BlockNumber)
}
/// Derives a `(hash, header)` pair from block import info; all header fields
/// not present in `info` are defaulted.
fn generate_block_header(info: &BlockInfo) -> (Hash, Header) {
	let header = Header {
		digest: Default::default(),
		number: info.number,
		parent_hash: info.parent_hash,
		extrinsics_root: Default::default(),
		state_root: Default::default(),
	};
	(info.hash, header)
}
/// Signs an explicit dispute statement (valid or invalid, per `valid`) for
/// `candidate_hash` in `session` with the key belonging to `public`.
///
/// # Panics
/// Panics if the keystore operation fails or the signing key is missing —
/// both indicate a broken test fixture.
fn issue_explicit_statement(
	keystore: KeystorePtr,
	public: ValidatorId,
	candidate_hash: CandidateHash,
	session: SessionIndex,
	valid: bool,
) -> SignedDisputeStatement {
	SignedDisputeStatement::sign_explicit(&keystore, valid, candidate_hash, session, public)
		.expect("in-memory keystore operations never fail; qed")
		.expect("signing key was generated during authority setup; qed")
}
#[async_trait::async_trait]
impl HandleNetworkMessage for TestState {
	/// Receives dispute requests the node sends to emulated peers, records the
	/// receiving authority in `requests_tracker` and confirms the dispute.
	/// Any other message or request type is a test bug and panics.
	async fn handle(
		&self,
		message: NetworkMessage,
		_node_sender: &mut futures::channel::mpsc::UnboundedSender<NetworkMessage>,
	) -> Option<NetworkMessage> {
		let NetworkMessage::RequestFromNode(authority_id, requests) = message else {
			todo!("Wrong message type: {:?}", message);
		};
		let Requests::DisputeSendingV1(req) = *requests else {
			todo!("Wrong requests type in message: {:?}", requests);
		};
		// Record which authority got a request for this candidate; scope the
		// lock so it is released before responding.
		{
			let mut tracker = self.requests_tracker.lock().unwrap();
			tracker
				.entry(req.payload.0.candidate_receipt.hash())
				.or_default()
				.insert(authority_id);
		}
		// Confirm the dispute back to the node; the protocol name is unused here.
		let _ = req
			.pending_response
			.send(Ok(((DisputeResponse::Confirmed).encode(), ProtocolName::from(""))));
		None
	}
}
@@ -0,0 +1,433 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Test environment implementation
use crate::{
configuration::{TestAuthorities, TestConfiguration},
mock::AlwaysSupportsTeyrchains,
network::NetworkEmulatorHandle,
usage::{BenchmarkUsage, ResourceUsage},
};
use core::time::Duration;
use futures::{Future, FutureExt};
use pezkuwi_node_subsystem::{messages::AllMessages, Overseer, SpawnGlue, TimeoutExt};
use pezkuwi_node_subsystem_types::Hash;
use pezkuwi_node_subsystem_util::metrics::prometheus::{
self, Gauge, Histogram, PrometheusError, Registry, U64,
};
use pezkuwi_overseer::{BlockInfo, Handle as OverseerHandle};
use sc_service::{SpawnTaskHandle, TaskManager};
use std::net::{Ipv4Addr, SocketAddr};
use tokio::runtime::Handle;
const LOG_TARGET: &str = "subsystem-bench::environment";
/// Test environment/configuration metrics
#[derive(Clone)]
pub struct TestEnvironmentMetrics {
	/// Total number of validators configured for the test.
	n_validators: Gauge<U64>,
	/// Number of cores we fetch availability for each block.
	n_cores: Gauge<U64>,
	/// Distribution of (compressed) PoV sizes observed.
	pov_size: Histogram,
	/// The block currently being processed.
	current_block: Gauge<U64>,
	/// Time (ms) the target subsystem(s) took to process a block.
	block_time: Gauge<U64>,
}
impl TestEnvironmentMetrics {
	/// Registers all benchmark metrics in `registry`.
	pub fn new(registry: &Registry) -> Result<Self, PrometheusError> {
		// Exponential histogram buckets: 16 KiB doubling, 9 buckets.
		let pov_size_buckets = prometheus::exponential_buckets(16384.0, 2.0, 9)
			.expect("arguments are always valid; qed");

		let n_validators = prometheus::register(
			Gauge::new(
				"subsystem_benchmark_n_validators",
				"Total number of validators in the test",
			)?,
			registry,
		)?;
		let n_cores = prometheus::register(
			Gauge::new(
				"subsystem_benchmark_n_cores",
				"Number of cores we fetch availability for each block",
			)?,
			registry,
		)?;
		let current_block = prometheus::register(
			Gauge::new("subsystem_benchmark_current_block", "The current test block")?,
			registry,
		)?;
		let block_time = prometheus::register(
			Gauge::new("subsystem_benchmark_block_time", "The time it takes for the target subsystems(s) to complete all the requests in a block")?,
			registry,
		)?;
		let pov_size = prometheus::register(
			Histogram::with_opts(
				prometheus::HistogramOpts::new(
					"subsystem_benchmark_pov_size",
					"The compressed size of the proof of validity of a candidate",
				)
				.buckets(pov_size_buckets),
			)?,
			registry,
		)?;

		Ok(Self { n_validators, n_cores, pov_size, current_block, block_time })
	}

	/// Records the total number of validators in the test.
	pub fn set_n_validators(&self, n_validators: usize) {
		self.n_validators.set(n_validators as u64);
	}

	/// Records the number of cores availability is fetched for per block.
	pub fn set_n_cores(&self, n_cores: usize) {
		self.n_cores.set(n_cores as u64);
	}

	/// Records the block currently being processed.
	pub fn set_current_block(&self, current_block: usize) {
		self.current_block.set(current_block as u64);
	}

	/// Records how long processing a block took, in milliseconds.
	pub fn set_block_time(&self, block_time_ms: u64) {
		self.block_time.set(block_time_ms);
	}

	/// Observes one (compressed) PoV size sample.
	pub fn on_pov_size(&self, pov_size: usize) {
		self.pov_size.observe(pov_size as f64);
	}
}
/// Builds the multi-threaded tokio runtime used by the benchmarks:
/// 4 worker threads, 3 MiB stacks, all drivers enabled.
fn new_runtime() -> tokio::runtime::Runtime {
	let mut builder = tokio::runtime::Builder::new_multi_thread();
	builder
		.thread_name("subsystem-bench")
		.enable_all()
		.thread_stack_size(3 * 1024 * 1024)
		.worker_threads(4);
	builder.build().unwrap()
}
/// Wrapper for dependencies
pub struct TestEnvironmentDependencies {
	/// Prometheus registry all test metrics are registered in.
	pub registry: Registry,
	/// Task manager the overseer and helper tasks are spawned on.
	pub task_manager: TaskManager,
	/// The tokio runtime driving all tasks.
	pub runtime: tokio::runtime::Runtime,
}
impl Default for TestEnvironmentDependencies {
	/// Spins up a fresh runtime, metrics registry and task manager.
	fn default() -> Self {
		let registry = Registry::new();
		let runtime = new_runtime();
		let task_manager =
			TaskManager::new(runtime.handle().clone(), Some(&registry)).unwrap();
		Self { registry, task_manager, runtime }
	}
}
/// A dummy genesis hash
pub const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
/// We use this to bail out of sending messages to the subsystem if it is overloaded such that
/// the time of flight breaches 5s.
/// This should eventually be a test parameter.
pub const MAX_TIME_OF_FLIGHT: Duration = Duration::from_millis(5000);
/// The test environment is the high level wrapper of all things required to test
/// a certain subsystem.
///
/// ## Mockups
/// The overseer is passed in during construction and it can host an arbitrary number of
/// real subsystems instances and the corresponding mocked instances such that the real
/// subsystems can get their messages answered.
///
/// As the subsystem's performance depends on network connectivity, the test environment
/// emulates validator nodes on the network, see `NetworkEmulator`. The network emulation
/// is configurable in terms of peer bandwidth, latency and connection error rate using
/// uniform distribution sampling.
///
///
/// ## Usage
/// `TestEnvironment` is used in tests to send `Overseer` messages or signals to the subsystem
/// under test.
///
/// ## Collecting test metrics
///
/// ### Prometheus
/// A prometheus endpoint is exposed while the test is running. A local Prometheus instance
/// can scrape it every 1s and a Grafana dashboard is the preferred way of visualizing
/// the performance characteristics of the subsystem.
///
/// ### CLI
/// A subset of the Prometheus metrics are printed at the end of the test.
pub struct TestEnvironment {
	/// Test dependencies (runtime, registry, task manager).
	dependencies: TestEnvironmentDependencies,
	/// A runtime handle
	runtime_handle: tokio::runtime::Handle,
	/// A handle to the lovely overseer
	overseer_handle: OverseerHandle,
	/// The test configuration.
	config: TestConfiguration,
	/// A handle to the network emulator.
	network: NetworkEmulatorHandle,
	/// Configuration/env metrics
	metrics: TestEnvironmentMetrics,
	/// Test authorities generated from the configuration.
	authorities: TestAuthorities,
}
impl TestEnvironment {
	/// Create a new test environment
	///
	/// Registers the environment metrics in the shared registry, spawns the
	/// overseer, and — if `with_prometheus_endpoint` — serves the registry on
	/// `127.0.0.1:9999`.
	pub fn new(
		dependencies: TestEnvironmentDependencies,
		config: TestConfiguration,
		network: NetworkEmulatorHandle,
		overseer: Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsTeyrchains>,
		overseer_handle: OverseerHandle,
		authorities: TestAuthorities,
		with_prometheus_endpoint: bool,
	) -> Self {
		let metrics = TestEnvironmentMetrics::new(&dependencies.registry)
			.expect("Metrics need to be registered");
		let spawn_handle = dependencies.task_manager.spawn_handle();
		// NOTE(review): the overseer is spawned via `spawn_blocking`,
		// presumably to give it a dedicated thread — confirm.
		spawn_handle.spawn_blocking("overseer", "overseer", overseer.run().boxed());
		if with_prometheus_endpoint {
			let registry_clone = dependencies.registry.clone();
			dependencies.task_manager.spawn_handle().spawn_blocking(
				"prometheus",
				"test-environment",
				async move {
					prometheus_endpoint::init_prometheus(
						SocketAddr::new(std::net::IpAddr::V4(Ipv4Addr::LOCALHOST), 9999),
						registry_clone,
					)
					.await
					.unwrap();
				},
			);
		}
		TestEnvironment {
			runtime_handle: dependencies.runtime.handle().clone(),
			dependencies,
			overseer_handle,
			config,
			network,
			metrics,
			authorities,
		}
	}
	/// Returns the test configuration.
	pub fn config(&self) -> &TestConfiguration {
		&self.config
	}
	/// Returns a reference to the inner network emulator handle.
	pub fn network(&self) -> &NetworkEmulatorHandle {
		&self.network
	}
	/// Returns a reference to the overseer handle.
	pub fn overseer_handle(&self) -> &OverseerHandle {
		&self.overseer_handle
	}
	/// Returns the Prometheus registry.
	pub fn registry(&self) -> &Registry {
		&self.dependencies.registry
	}
	/// Spawn a named task in the `test-environment` task group.
	#[allow(unused)]
	pub fn spawn(&self, name: &'static str, task: impl Future<Output = ()> + Send + 'static) {
		self.dependencies
			.task_manager
			.spawn_handle()
			.spawn(name, "test-environment", task);
	}
	/// Spawn a blocking named task in the `test-environment` task group.
	pub fn spawn_blocking(
		&self,
		name: &'static str,
		task: impl Future<Output = ()> + Send + 'static,
	) {
		self.dependencies.task_manager.spawn_handle().spawn_blocking(
			name,
			"test-environment",
			task,
		);
	}
	/// Returns a reference to the test environment metrics instance
	pub fn metrics(&self) -> &TestEnvironmentMetrics {
		&self.metrics
	}
	/// Returns a handle to the tokio runtime.
	pub fn runtime(&self) -> Handle {
		self.runtime_handle.clone()
	}
	/// Returns a reference to the authority keys used in the test.
	pub fn authorities(&self) -> &TestAuthorities {
		&self.authorities
	}
	/// Send a message to the subsystem under test environment.
	///
	/// # Panics
	/// Panics if the overseer does not accept the message within
	/// [`MAX_TIME_OF_FLIGHT`] — the subsystem is considered overloaded.
	pub async fn send_message(&mut self, msg: AllMessages) {
		self.overseer_handle
			.send_msg(msg, LOG_TARGET)
			.timeout(MAX_TIME_OF_FLIGHT)
			.await
			.unwrap_or_else(|| {
				panic!("{}ms maximum time of flight breached", MAX_TIME_OF_FLIGHT.as_millis())
			});
	}
	/// Send an `ActiveLeavesUpdate` signal to all subsystems under test.
	///
	/// # Panics
	/// Panics if the import is not accepted within [`MAX_TIME_OF_FLIGHT`].
	pub async fn import_block(&mut self, block: BlockInfo) {
		self.overseer_handle
			.block_imported(block)
			.timeout(MAX_TIME_OF_FLIGHT)
			.await
			.unwrap_or_else(|| {
				panic!("{}ms maximum time of flight breached", MAX_TIME_OF_FLIGHT.as_millis())
			});
	}
	/// Stop overseer and subsystems.
	pub async fn stop(&mut self) {
		self.overseer_handle.stop().await;
	}
	/// Tells if entries in the bucket metric `metric_name` are lower than `value`.
	///
	/// Associated function: parses the given `registry` rather than `&self`.
	pub fn metric_lower_than(registry: &Registry, metric_name: &str, value: f64) -> bool {
		let test_metrics = super::display::parse_metrics(registry);
		test_metrics.metric_lower_than(metric_name, value)
	}
	/// Polls (every 50ms) until `condition` holds for the summed value of
	/// `metric_name`, optionally restricted to samples carrying the given
	/// `label` name/value pair.
	pub async fn wait_until_metric(
		&self,
		metric_name: &str,
		label: Option<(&str, &str)>,
		condition: impl Fn(f64) -> bool,
	) {
		loop {
			let test_metrics = if let Some((label_name, label_value)) = label {
				super::display::parse_metrics(self.registry())
					.subset_with_label_value(label_name, label_value)
			} else {
				super::display::parse_metrics(self.registry())
			};
			let current_value = test_metrics.sum_by(metric_name);
			gum::debug!(target: LOG_TARGET, metric_name, current_value, "Waiting for metric");
			if condition(current_value) {
				break;
			}
			// Check value every 50ms.
			tokio::time::sleep(std::time::Duration::from_millis(50)).await;
		}
	}
	/// Aggregates network and CPU usage for the given subsystems into a
	/// [`BenchmarkUsage`] report.
	pub fn collect_resource_usage(
		&self,
		subsystems_under_test: &[&str],
		break_down_cpu_usage_per_task: bool,
	) -> BenchmarkUsage {
		BenchmarkUsage {
			network_usage: self.network_usage(),
			cpu_usage: self.cpu_usage(subsystems_under_test, break_down_cpu_usage_per_task),
		}
	}
	/// Totals (in KiB) sent/received by the peer at index 0 (the node under
	/// test), overall and averaged per block.
	fn network_usage(&self) -> Vec<ResourceUsage> {
		let stats = self.network().peer_stats(0);
		let total_node_received = (stats.received() / 1024) as f64;
		let total_node_sent = (stats.sent() / 1024) as f64;
		let num_blocks = self.config().num_blocks as f64;
		vec![
			ResourceUsage {
				resource_name: "Received from peers".to_string(),
				total: total_node_received,
				per_block: total_node_received / num_blocks,
			},
			ResourceUsage {
				resource_name: "Sent to peers".to_string(),
				total: total_node_sent,
				per_block: total_node_sent / num_blocks,
			},
		]
	}
	/// Sums `substrate_tasks_polling_duration_sum` per subsystem task group
	/// (optionally broken down per task) plus the `test-environment` group.
	fn cpu_usage(
		&self,
		subsystems_under_test: &[&str],
		break_down_per_task: bool,
	) -> Vec<ResourceUsage> {
		let test_metrics = super::display::parse_metrics(self.registry());
		let mut usage = vec![];
		let num_blocks = self.config().num_blocks as f64;
		for subsystem in subsystems_under_test.iter() {
			let subsystem_cpu_metrics =
				test_metrics.subset_with_label_value("task_group", subsystem);
			let total_cpu = subsystem_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum");
			usage.push(ResourceUsage {
				resource_name: subsystem.to_string(),
				total: total_cpu,
				per_block: total_cpu / num_blocks,
			});
			if break_down_per_task {
				for metric in subsystem_cpu_metrics.all() {
					if metric.name() != "substrate_tasks_polling_duration_sum" {
						continue;
					}
					if let Some(task_name) = metric.label_value("task_name") {
						usage.push(ResourceUsage {
							resource_name: format!("{subsystem}/{task_name}"),
							total: metric.value(),
							per_block: metric.value() / num_blocks,
						});
					}
				}
			}
		}
		// Always account for the test environment's own CPU usage as well.
		let test_env_cpu_metrics =
			test_metrics.subset_with_label_value("task_group", "test-environment");
		let total_cpu = test_env_cpu_metrics.sum_by("substrate_tasks_polling_duration_sum");
		usage.push(ResourceUsage {
			resource_name: "test-environment".to_string(),
			total: total_cpu,
			per_block: total_cpu / num_blocks,
		});
		usage
	}
}
@@ -0,0 +1,54 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use pezkuwi_primitives::ValidatorId;
use sc_keystore::LocalKeystore;
use sp_application_crypto::AppCrypto;
use sp_core::sr25519::Public;
use sp_keystore::Keystore;
use std::sync::Arc;
/// Set of test accounts generated and kept safe by a keystore.
#[derive(Clone)]
pub struct Keyring {
	// In-memory keystore holding every generated key; cloning the keyring
	// shares the same store.
	keystore: Arc<LocalKeystore>,
}
impl Default for Keyring {
	/// Backs the keyring with a fresh in-memory keystore.
	fn default() -> Self {
		let keystore = Arc::new(LocalKeystore::in_memory());
		Self { keystore }
	}
}
impl Keyring {
	/// Generates a new sr25519 key from `seed` and stores it in the keystore.
	pub fn sr25519_new(&self, seed: &str) -> Public {
		self.keystore
			.sr25519_generate_new(ValidatorId::ID, Some(seed))
			.expect("Insert key into keystore")
	}

	/// Returns the keystore as a dynamic `Keystore` trait object.
	pub fn keystore(&self) -> Arc<dyn Keystore> {
		Arc::clone(&self.keystore)
	}

	/// Returns the concrete local keystore.
	pub fn local_keystore(&self) -> Arc<LocalKeystore> {
		Arc::clone(&self.keystore)
	}

	/// Borrows the underlying keystore.
	pub fn keystore_ref(&self) -> &LocalKeystore {
		&self.keystore
	}
}
@@ -0,0 +1,31 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
/// The validator index that represents the node that is under test.
pub const NODE_UNDER_TEST: u32 = 0;
pub mod approval;
pub mod availability;
pub mod configuration;
pub(crate) mod display;
pub mod disputes;
pub(crate) mod environment;
pub(crate) mod keyring;
pub(crate) mod mock;
pub(crate) mod network;
pub mod statement;
pub mod usage;
pub mod utils;
@@ -0,0 +1,64 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic mock approval voting parallel suitable to be used in benchmarks.
use futures::FutureExt;
use pezkuwi_node_subsystem::{
messages::ApprovalVotingParallelMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
const LOG_TARGET: &str = "subsystem-bench::approval-voting-parallel-mock";
/// A no-op approval-voting-parallel subsystem used in benchmarks; it only
/// answers signature queries (see the `run` loop below).
pub struct MockApprovalVotingParallel {}
impl MockApprovalVotingParallel {
	/// Creates a new mock instance.
	pub fn new() -> Self {
		Self {}
	}
}
// `new()` takes no arguments, so provide `Default` too (clippy:
// `new_without_default`).
impl Default for MockApprovalVotingParallel {
	fn default() -> Self {
		Self::new()
	}
}
#[overseer::subsystem(ApprovalVotingParallel, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockApprovalVotingParallel {
	/// Wraps the message loop into a `SpawnedSubsystem`; the loop's output is
	/// discarded and mapped to `Ok(())`.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)]
impl MockApprovalVotingParallel {
	/// Message loop: exits on `Conclude`, answers signature queries with an
	/// empty (default) set, and panics on any other message.
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				// Shut down on `Conclude`; all other signals are ignored.
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					// The only query the benchmark expects; answered with an
					// empty default set of approval signatures.
					ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(hash, tx) => {
						gum::debug!(target: LOG_TARGET, "GetApprovalSignaturesForCandidate for candidate {:?}", hash);
						tx.send(Default::default()).unwrap();
					},
					_ => todo!("Subsystem received unexpected message, {:?}", msg),
				},
			}
		}
	}
}
@@ -0,0 +1,270 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic av store subsystem mockup suitable to be used in benchmarks.
use crate::network::{HandleNetworkMessage, NetworkMessage};
use codec::Encode;
use futures::{channel::oneshot, FutureExt};
use pezkuwi_node_network_protocol::request_response::{
v1::AvailableDataFetchingResponse, v2::ChunkFetchingResponse, Protocol, ReqProtocolNames,
Requests,
};
use pezkuwi_node_primitives::{AvailableData, ErasureChunk};
use pezkuwi_node_subsystem::{
messages::AvailabilityStoreMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::{CandidateHash, ChunkIndex, CoreIndex, ValidatorIndex};
use std::collections::HashMap;
/// Pre-generated availability data served by `MockAvailabilityStore`.
pub struct AvailabilityStoreState {
	// Maps candidate hash -> index into `chunks`.
	candidate_hashes: HashMap<CandidateHash, usize>,
	// Erasure chunks per candidate, indexed by chunk index.
	chunks: Vec<Vec<ErasureChunk>>,
	// Per-core mapping from validator index to the chunk index that validator holds.
	chunk_indices: Vec<Vec<ChunkIndex>>,
	// Maps candidate hash -> core the candidate occupies.
	candidate_hash_to_core_index: HashMap<CandidateHash, CoreIndex>,
}
const LOG_TARGET: &str = "subsystem-bench::av-store-mock";
/// Mockup helper. Contains Chunks and full availability data of all teyrchain blocks
/// used in a test.
#[derive(Clone)]
pub struct NetworkAvailabilityState {
	/// Request/response protocol names used when answering fetch requests.
	pub req_protocol_names: ReqProtocolNames,
	/// Maps candidate hash -> index into `available_data`/`chunks`.
	pub candidate_hashes: HashMap<CandidateHash, usize>,
	/// Full available data, one entry per candidate.
	pub available_data: Vec<AvailableData>,
	/// Erasure chunks per candidate, indexed by chunk index.
	pub chunks: Vec<Vec<ErasureChunk>>,
	/// Per-core mapping from validator index to the chunk index that validator holds.
	pub chunk_indices: Vec<Vec<ChunkIndex>>,
	/// Maps candidate hash -> core the candidate occupies.
	pub candidate_hash_to_core_index: HashMap<CandidateHash, CoreIndex>,
}
// Implement access to the state.
#[async_trait::async_trait]
impl HandleNetworkMessage for NetworkAvailabilityState {
	/// Serves chunk and full-data fetch requests from the pre-generated state.
	/// Unhandled request kinds and non-request messages are passed through
	/// unchanged for the next handler in the chain.
	async fn handle(
		&self,
		message: NetworkMessage,
		_node_sender: &mut futures::channel::mpsc::UnboundedSender<NetworkMessage>,
	) -> Option<NetworkMessage> {
		match message {
			NetworkMessage::RequestFromNode(peer, request) => match *request {
				Requests::ChunkFetching(outgoing_request) => {
					gum::debug!(target: LOG_TARGET, request = ?outgoing_request, "Received `RequestFromNode`");
					let validator_index: usize = outgoing_request.payload.index.0 as usize;
					let candidate_hash = outgoing_request.payload.candidate_hash;
					let candidate_index = self
						.candidate_hashes
						.get(&candidate_hash)
						.expect("candidate was generated previously; qed");
					// Demoted from `warn!` to `debug!` for consistency with the
					// identical trace in the `AvailableDataFetchingV1` arm below.
					gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index");
					let candidate_chunks = self.chunks.get(*candidate_index).unwrap();
					// Translate the requesting validator's index into the index
					// of the chunk it holds on the candidate's core.
					let chunk_indices = self
						.chunk_indices
						.get(
							self.candidate_hash_to_core_index.get(&candidate_hash).unwrap().0
								as usize,
						)
						.unwrap();
					let chunk = candidate_chunks
						.get(chunk_indices.get(validator_index).unwrap().0 as usize)
						.unwrap();
					let response = Ok((
						ChunkFetchingResponse::from(Some(chunk.clone())).encode(),
						self.req_protocol_names.get_name(Protocol::ChunkFetchingV2),
					));
					if let Err(err) = outgoing_request.pending_response.send(response) {
						gum::error!(target: LOG_TARGET, ?err, "Failed to send `ChunkFetchingResponse`");
					}
					None
				},
				Requests::AvailableDataFetchingV1(outgoing_request) => {
					let candidate_hash = outgoing_request.payload.candidate_hash;
					let candidate_index = self
						.candidate_hashes
						.get(&candidate_hash)
						.expect("candidate was generated previously; qed");
					gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index");
					let available_data = self.available_data.get(*candidate_index).unwrap().clone();
					let response = Ok((
						AvailableDataFetchingResponse::from(Some(available_data)).encode(),
						self.req_protocol_names.get_name(Protocol::AvailableDataFetchingV1),
					));
					outgoing_request
						.pending_response
						.send(response)
						.expect("Response is always sent successfully");
					None
				},
				// Other request kinds are not ours to answer.
				_ => Some(NetworkMessage::RequestFromNode(peer, request)),
			},
			message => Some(message),
		}
	}
}
/// A mock of the availability store subsystem. Answers chunk queries for the
/// pre-generated candidates from the state it was constructed with.
pub struct MockAvailabilityStore {
	// Pre-generated chunks plus candidate/core bookkeeping.
	state: AvailabilityStoreState,
}
impl MockAvailabilityStore {
	/// Creates the mock from pre-generated chunks and candidate bookkeeping.
	pub fn new(
		chunks: Vec<Vec<ErasureChunk>>,
		chunk_indices: Vec<Vec<ChunkIndex>>,
		candidate_hashes: HashMap<CandidateHash, usize>,
		candidate_hash_to_core_index: HashMap<CandidateHash, CoreIndex>,
	) -> MockAvailabilityStore {
		Self {
			state: AvailabilityStoreState {
				chunks,
				candidate_hashes,
				chunk_indices,
				candidate_hash_to_core_index,
			},
		}
	}
	/// Answers a query-all-chunks request: for every validator index selected
	/// by `send_chunk`, looks up the chunk that validator holds for
	/// `candidate_hash` and sends the collected pairs back over `tx`.
	async fn respond_to_query_all_request(
		&self,
		candidate_hash: CandidateHash,
		send_chunk: impl Fn(ValidatorIndex) -> bool,
		tx: oneshot::Sender<Vec<(ValidatorIndex, ErasureChunk)>>,
	) {
		let candidate_index = self
			.state
			.candidate_hashes
			.get(&candidate_hash)
			.expect("candidate was generated previously; qed")
			;
		gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index");
		// Every candidate has one chunk per validator.
		let n_validators = self.state.chunks[0].len();
		let candidate_chunks = self.state.chunks.get(*candidate_index).unwrap();
		let core_index = self.state.candidate_hash_to_core_index.get(&candidate_hash).unwrap();
		// We'll likely only send our chunk, so use capacity 1.
		let mut v = Vec::with_capacity(1);
		for validator_index in 0..n_validators {
			if !send_chunk(ValidatorIndex(validator_index as u32)) {
				continue;
			}
			// The per-core mapping translates a validator index into the index
			// of the chunk that validator holds.
			let chunk_index = self
				.state
				.chunk_indices
				.get(core_index.0 as usize)
				.unwrap()
				.get(validator_index)
				.unwrap();
			// Clone once into the result; the previous intermediate clone was
			// redundant (the chunk was cloned and then cloned again).
			let chunk = candidate_chunks.get(chunk_index.0 as usize).unwrap();
			v.push((ValidatorIndex(validator_index as u32), chunk.clone()));
		}
		let _ = tx.send(v);
	}
}
#[overseer::subsystem(AvailabilityStore, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockAvailabilityStore {
	/// Spawn the mock as the availability-store subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
impl MockAvailabilityStore {
	/// Main subsystem loop: answers availability-store queries from the
	/// pre-generated state; exits when the `Conclude` signal arrives.
	async fn run<Context>(self, mut ctx: Context) {
		gum::debug!(target: LOG_TARGET, "Subsystem running");
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx) => {
						gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAvailableData");
						// We never have the full available data.
						let _ = tx.send(None);
					},
					AvailabilityStoreMessage::QueryAllChunks(candidate_hash, tx) => {
						// We always have our own chunk (validator index 0).
						gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryAllChunks");
						self.respond_to_query_all_request(
							candidate_hash,
							|index| index == 0.into(),
							tx,
						)
						.await;
					},
					AvailabilityStoreMessage::QueryChunkSize(candidate_hash, tx) => {
						gum::debug!(target: LOG_TARGET, candidate_hash = ?candidate_hash, "Responding to QueryChunkSize");
						let candidate_index = self
							.state
							.candidate_hashes
							.get(&candidate_hash)
							.expect("candidate was generated previously; qed");
						gum::debug!(target: LOG_TARGET, ?candidate_hash, candidate_index, "Candidate mapped to index");
						// The first chunk's encoded size is used as the answer;
						// assumes all chunks of a candidate have the same size —
						// TODO(review): confirm against the chunk generator.
						let chunk_size = self
							.state
							.chunks
							.get(*candidate_index)
							.unwrap()
							.first()
							.unwrap()
							.encoded_size();
						let _ = tx.send(Some(chunk_size));
					},
					AvailabilityStoreMessage::StoreChunk {
						candidate_hash,
						chunk,
						tx,
						validator_index,
					} => {
						gum::debug!(
							target: LOG_TARGET,
							chunk_index = ?chunk.index,
							validator_index = ?validator_index,
							candidate_hash = ?candidate_hash,
							"Responding to StoreChunk"
						);
						// Stores are acknowledged but not persisted anywhere.
						let _ = tx.send(Ok(()));
					},
					_ => {
						unimplemented!("Unexpected av-store message")
					},
				},
			}
		}
	}
}
@@ -0,0 +1,76 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic mock availability recovery suitable to be used in benchmarks.
use std::sync::Arc;
use futures::FutureExt;
use pezkuwi_node_primitives::{AvailableData, BlockData, PoV};
use pezkuwi_node_subsystem::{
messages::AvailabilityRecoveryMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::{Hash, HeadData, PersistedValidationData};
const LOG_TARGET: &str = "subsystem-bench::availability-recovery-mock";
/// A stateless mock of the availability recovery subsystem.
pub struct MockAvailabilityRecovery {}

impl MockAvailabilityRecovery {
	/// Create a new mock; it holds no state.
	pub fn new() -> Self {
		Self {}
	}
}

/// `Default` delegates to `new` (addresses clippy's `new_without_default`).
impl Default for MockAvailabilityRecovery {
	fn default() -> Self {
		Self::new()
	}
}
#[overseer::subsystem(AvailabilityRecovery, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockAvailabilityRecovery {
	/// Spawn the mock as the availability-recovery subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
impl MockAvailabilityRecovery {
	/// Main loop: answer every `RecoverAvailableData` request with an empty
	/// PoV and minimal persisted validation data; exit on `Conclude`.
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					AvailabilityRecoveryMessage::RecoverAvailableData(receipt, _, _, _, tx) => {
						gum::debug!(target: LOG_TARGET, "RecoverAvailableData for candidate {:?}", receipt.hash());
						// Recovery always "succeeds" with dummy data.
						let available_data = AvailableData {
							pov: Arc::new(PoV { block_data: BlockData(Vec::new()) }),
							validation_data: PersistedValidationData {
								parent_head: HeadData(Vec::new()),
								relay_parent_number: 0,
								relay_parent_storage_root: Hash::default(),
								max_pov_size: 2,
							},
						};
						tx.send(Ok(available_data)).unwrap();
					},
				},
			}
		}
	}
}
@@ -0,0 +1,171 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic candidate backing subsystem mockup suitable to be used in benchmarks.
use crate::{configuration::TestConfiguration, NODE_UNDER_TEST};
use futures::FutureExt;
use pezkuwi_node_primitives::{SignedFullStatementWithPVD, Statement, StatementWithPVD};
use pezkuwi_node_subsystem::{
messages::CandidateBackingMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::{
CandidateHash, Hash, PersistedValidationData, SigningContext, ValidatorIndex, ValidatorPair,
};
use sp_core::Pair;
use std::collections::HashMap;
const LOG_TARGET: &str = "subsystem-bench::candidate-backing-mock";
/// Internal state of the candidate backing mock.
struct MockCandidateBackingState {
	// Validator key pair used to sign our own `Valid` statements.
	pair: ValidatorPair,
	// Persisted validation data attached to the statements we produce.
	pvd: PersistedValidationData,
	// Validator indices belonging to the node's own backing group.
	own_backing_group: Vec<ValidatorIndex>,
}
/// A mock of the candidate backing subsystem: tracks incoming statements per
/// candidate and emits `Share`/`Backed` statement-distribution messages.
pub struct MockCandidateBacking {
	// Test configuration; provides `minimum_backing_votes`.
	config: TestConfiguration,
	state: MockCandidateBackingState,
}
impl MockCandidateBacking {
	/// Create a new mock from the test configuration plus this node's
	/// validator key, persisted validation data and backing-group membership.
	pub fn new(
		config: TestConfiguration,
		pair: ValidatorPair,
		pvd: PersistedValidationData,
		own_backing_group: Vec<ValidatorIndex>,
	) -> Self {
		Self { config, state: MockCandidateBackingState { pair, pvd, own_backing_group } }
	}

	/// Process an incoming statement and return the statement-distribution
	/// messages to send in response:
	/// - a `Share` of our own signed `Valid` statement when the candidate is
	///   one vote short of the backing threshold and the statement author is
	///   in our backing group,
	/// - a `Backed` notification once the threshold is reached.
	fn handle_statement(
		&self,
		relay_parent: Hash,
		statement: SignedFullStatementWithPVD,
		statements_tracker: &mut HashMap<CandidateHash, u32>,
	) -> Vec<pezkuwi_node_subsystem::messages::StatementDistributionMessage> {
		let mut messages = vec![];
		let validator_id = statement.validator_index();
		let is_own_backing_group = self.state.own_backing_group.contains(&validator_id);
		match statement.payload() {
			StatementWithPVD::Seconded(receipt, _pvd) => {
				let candidate_hash = receipt.hash();
				// Single map lookup: bump (or initialize) the counter and read
				// the new value. The previous code did a second `get` right
				// after the `entry` call.
				let statements_received_count = *statements_tracker
					.entry(candidate_hash)
					.and_modify(|v| {
						*v += 1;
					})
					.or_insert(1);
				if statements_received_count == (self.config.minimum_backing_votes - 1) &&
					is_own_backing_group
				{
					// One vote short: contribute our own `Valid` statement.
					let statement = Statement::Valid(candidate_hash);
					let context = SigningContext { parent_hash: relay_parent, session_index: 0 };
					let payload = statement.to_compact().signing_payload(&context);
					let message =
						pezkuwi_node_subsystem::messages::StatementDistributionMessage::Share(
							relay_parent,
							SignedFullStatementWithPVD::new(
								statement.supply_pvd(self.state.pvd.clone()),
								ValidatorIndex(NODE_UNDER_TEST),
								self.state.pair.sign(&payload[..]),
								&context,
								&self.state.pair.public(),
							)
							.unwrap(),
						);
					messages.push(message);
				}
				if statements_received_count == self.config.minimum_backing_votes {
					let message =
						pezkuwi_node_subsystem::messages::StatementDistributionMessage::Backed(
							candidate_hash,
						);
					messages.push(message);
				}
			},
			StatementWithPVD::Valid(candidate_hash) => {
				// Same single-lookup counting as above.
				let statements_received_count = *statements_tracker
					.entry(*candidate_hash)
					.and_modify(|v| {
						*v += 1;
					})
					.or_insert(1);
				if statements_received_count == self.config.minimum_backing_votes {
					let message =
						pezkuwi_node_subsystem::messages::StatementDistributionMessage::Backed(
							*candidate_hash,
						);
					messages.push(message);
				}
			},
		}
		messages
	}
}
#[overseer::subsystem(CandidateBacking, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockCandidateBacking {
	/// Spawn the mock as the candidate-backing subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
impl MockCandidateBacking {
	/// Main loop: count incoming statements per candidate and forward the
	/// resulting `Share`/`Backed` messages; exit on `Conclude`.
	async fn run<Context>(self, mut ctx: Context) {
		// Number of statements received so far, per candidate.
		let mut statements_tracker: HashMap<CandidateHash, u32> = Default::default();
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => {
					gum::trace!(target: LOG_TARGET, msg=?msg, "recv message");
					match msg {
						CandidateBackingMessage::Statement(relay_parent, statement) => {
							let messages = self.handle_statement(
								relay_parent,
								statement,
								&mut statements_tracker,
							);
							for message in messages {
								ctx.send_message(message).await;
							}
						},
						_ => {
							unimplemented!("Unexpected candidate-backing message")
						},
					}
				},
			}
		}
	}
}
@@ -0,0 +1,81 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic mock candidate validation subsystem suitable for using in benchmarks, it
//! is responding with candidate valid for every request.
use futures::FutureExt;
use pezkuwi_node_primitives::ValidationResult;
use pezkuwi_node_subsystem::{
messages::CandidateValidationMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::{CandidateCommitments, Hash, HeadData, PersistedValidationData};
const LOG_TARGET: &str = "subsystem-bench::candidate-validation-mock";
/// A stateless mock of the candidate validation subsystem; its `run` loop
/// reports every candidate as valid.
pub struct MockCandidateValidation {}

impl MockCandidateValidation {
	/// Create a new mock; it holds no state.
	pub fn new() -> Self {
		Self {}
	}
}

/// `Default` delegates to `new` (addresses clippy's `new_without_default`).
impl Default for MockCandidateValidation {
	fn default() -> Self {
		Self::new()
	}
}
#[overseer::subsystem(CandidateValidation, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockCandidateValidation {
	/// Spawn the mock as the candidate-validation subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(CandidateValidation, prefix = self::overseer)]
impl MockCandidateValidation {
	/// Main loop: answer every `ValidateFromExhaustive` request with
	/// `ValidationResult::Valid`; exit on the `Conclude` signal.
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					CandidateValidationMessage::ValidateFromExhaustive {
						response_sender,
						validation_data,
						..
					} => {
						gum::debug!(target: LOG_TARGET, "ValidateFromExhaustive, PVD hash {:?}", validation_data.hash());
						// Always report the candidate as valid, with dummy
						// commitments and minimal persisted validation data.
						response_sender
							.send(Ok(ValidationResult::Valid(
								CandidateCommitments::default(),
								PersistedValidationData {
									parent_head: HeadData(Vec::new()),
									relay_parent_number: 0,
									relay_parent_storage_root: Hash::default(),
									max_pov_size: 2,
								},
							)))
							.unwrap()
					},
					// Fixed: the panic message previously said "chain-api",
					// a copy/paste leftover from another mock.
					_ => unimplemented!("Unexpected candidate-validation message"),
				},
			}
		}
	}
}
@@ -0,0 +1,132 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic runtime api subsystem mockup suitable to be used in benchmarks.
use futures::FutureExt;
use itertools::Itertools;
use pezkuwi_node_subsystem::{
messages::ChainApiMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::Header;
use sp_core::H256;
use std::collections::HashMap;
const LOG_TARGET: &str = "subsystem-bench::chain-api-mock";
/// State used to respond to `BlockHeader` requests.
pub struct ChainApiState {
	/// All known block headers, keyed by block hash.
	pub block_headers: HashMap<H256, Header>,
}
/// A mock chain API subsystem backed by a static set of block headers.
pub struct MockChainApi {
	state: ChainApiState,
}
impl ChainApiState {
	/// Linearly scan the stored headers for the first one (in map iteration
	/// order) whose block number matches `requested_number`.
	fn get_header_by_number(&self, requested_number: u32) -> Option<&Header> {
		for header in self.block_headers.values() {
			if header.number == requested_number {
				return Some(header)
			}
		}
		None
	}
}
impl MockChainApi {
pub fn new(state: ChainApiState) -> MockChainApi {
Self { state }
}
}
#[overseer::subsystem(ChainApi, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockChainApi {
	/// Spawn the mock as the chain-api subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(ChainApi, prefix = self::overseer)]
impl MockChainApi {
	/// Main loop: serve chain-api queries from the static header map; exit on
	/// the `Conclude` signal.
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => {
					gum::debug!(target: LOG_TARGET, msg=?msg, "recv message");
					match msg {
						ChainApiMessage::BlockHeader(hash, response_channel) => {
							let _ = response_channel.send(Ok(Some(
								self.state
									.block_headers
									.get(&hash)
									.cloned()
									.expect("Relay chain block hashes are known"),
							)));
						},
						ChainApiMessage::FinalizedBlockNumber(val) => {
							// The finalized block number is always reported as 0.
							val.send(Ok(0)).unwrap();
						},
						ChainApiMessage::FinalizedBlockHash(requested_number, sender) => {
							let hash = self
								.state
								.get_header_by_number(requested_number)
								.expect("Unknown block number")
								.hash();
							sender.send(Ok(Some(hash))).unwrap();
						},
						ChainApiMessage::BlockNumber(requested_hash, sender) => {
							sender
								.send(Ok(Some(
									self.state
										.block_headers
										.get(&requested_hash)
										.expect("Unknown block hash")
										.number,
								)))
								.unwrap();
						},
						ChainApiMessage::Ancestors { hash, k: _, response_channel } => {
							// Ancestors are all known headers with a smaller block
							// number, returned in ascending number order; `k` is
							// deliberately ignored by this mock.
							let block_number = self
								.state
								.block_headers
								.get(&hash)
								.expect("Unknown block hash")
								.number;
							let ancestors = self
								.state
								.block_headers
								.iter()
								.filter(|(_, header)| header.number < block_number)
								.sorted_by(|a, b| a.1.number.cmp(&b.1.number))
								.map(|(hash, _)| *hash)
								.collect_vec();
							response_channel.send(Ok(ancestors)).unwrap();
						},
						_ => {
							unimplemented!("Unexpected chain-api message")
						},
					}
				},
			}
		}
	}
}
@@ -0,0 +1,101 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Dummy subsystem mocks.
use futures::FutureExt;
use paste::paste;
use pezkuwi_node_subsystem::{overseer, SpawnedSubsystem, SubsystemError};
use std::time::Duration;
use tokio::time::sleep;
const LOG_TARGET: &str = "subsystem-bench::mockery";
/// Generate a black-hole mock for the given subsystem: it consumes and logs
/// every incoming message, periodically reports throughput at trace level,
/// and exits on the `Conclude` signal.
macro_rules! mock {
	// Takes a single subsystem name identifier.
	($subsystem_name:ident) => {
		paste! {
			pub struct [<Mock $subsystem_name >] {}
			#[overseer::subsystem($subsystem_name, error=SubsystemError, prefix=self::overseer)]
			impl<Context> [<Mock $subsystem_name >] {
				fn start(self, ctx: Context) -> SpawnedSubsystem {
					let future = self.run(ctx).map(|_| Ok(())).boxed();
					// The name will appear in substrate CPU task metrics as `task_group`.
					SpawnedSubsystem { name: "test-environment", future }
				}
			}
			#[overseer::contextbounds($subsystem_name, prefix = self::overseer)]
			impl [<Mock $subsystem_name >] {
				async fn run<Context>(self, mut ctx: Context) {
					// Messages processed since the last report tick.
					let mut count_total_msg = 0;
					loop {
						futures::select!{
							msg = ctx.recv().fuse() => {
								match msg.unwrap() {
									orchestra::FromOrchestra::Signal(signal) => {
										match signal {
											pezkuwi_node_subsystem_types::OverseerSignal::Conclude => {return},
											_ => {}
										}
									},
									orchestra::FromOrchestra::Communication { msg } => {
										gum::debug!(target: LOG_TARGET, msg = ?msg, "mocked subsystem received message");
									}
								}
								count_total_msg +=1;
							}
							// Report tick: every 6 seconds, log and reset the counter.
							_ = sleep(Duration::from_secs(6)).fuse() => {
								if count_total_msg > 0 {
									gum::trace!(target: LOG_TARGET, "Subsystem {} processed {} messages since last time", stringify!($subsystem_name), count_total_msg);
								}
								count_total_msg = 0;
							}
						}
					}
				}
			}
		}
	};
}
// Generate a dummy (black-hole) implementation for every subsystem, so that
// benchmarks can build an overseer and replace only the subsystems under test.
mock!(AvailabilityStore);
mock!(StatementDistribution);
mock!(BitfieldSigning);
mock!(BitfieldDistribution);
mock!(Provisioner);
mock!(NetworkBridgeRx);
mock!(CollationGeneration);
mock!(CollatorProtocol);
mock!(GossipSupport);
mock!(DisputeDistribution);
mock!(DisputeCoordinator);
mock!(ProspectiveTeyrchains);
mock!(PvfChecker);
mock!(CandidateBacking);
mock!(AvailabilityDistribution);
mock!(CandidateValidation);
mock!(AvailabilityRecovery);
mock!(NetworkBridgeTx);
mock!(ChainApi);
mock!(ChainSelection);
mock!(ApprovalVoting);
mock!(ApprovalVotingParallel);
mock!(ApprovalDistribution);
mock!(RuntimeApi);
@@ -0,0 +1,93 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use pezkuwi_node_subsystem::HeadSupportsTeyrchains;
use pezkuwi_node_subsystem_types::Hash;
use sp_consensus::SyncOracle;
pub mod approval_voting_parallel;
pub mod av_store;
pub mod availability_recovery;
pub mod candidate_backing;
pub mod candidate_validation;
pub mod chain_api;
pub mod dummy;
pub mod network_bridge;
pub mod prospective_teyrchains;
pub mod runtime_api;
/// Answers the overseer's head-support queries: every head is reported to
/// support teyrchains.
pub struct AlwaysSupportsTeyrchains {}
#[async_trait::async_trait]
impl HeadSupportsTeyrchains for AlwaysSupportsTeyrchains {
	async fn head_supports_teyrchains(&self, _head: &Hash) -> bool {
		true
	}
}
// An overseer builder where every subsystem slot is filled with a dummy.
#[macro_export]
macro_rules! dummy_builder {
	($spawn_task_handle: ident, $metrics: ident) => {{
		use $crate::mock::dummy::*;
		// Initialize a mock overseer.
		// NOTE(review): every subsystem below is a dummy; benchmarks are
		// expected to swap in the real subsystems under test afterwards — the
		// old comment singled out approval voting/distribution, confirm
		// against callers.
		Overseer::builder()
			.approval_voting(MockApprovalVoting {})
			.approval_voting_parallel(MockApprovalVotingParallel {})
			.approval_distribution(MockApprovalDistribution {})
			.availability_recovery(MockAvailabilityRecovery {})
			.candidate_validation(MockCandidateValidation {})
			.chain_api(MockChainApi {})
			.chain_selection(MockChainSelection {})
			.dispute_coordinator(MockDisputeCoordinator {})
			.runtime_api(MockRuntimeApi {})
			.network_bridge_tx(MockNetworkBridgeTx {})
			.availability_distribution(MockAvailabilityDistribution {})
			.availability_store(MockAvailabilityStore {})
			.pvf_checker(MockPvfChecker {})
			.candidate_backing(MockCandidateBacking {})
			.statement_distribution(MockStatementDistribution {})
			.bitfield_signing(MockBitfieldSigning {})
			.bitfield_distribution(MockBitfieldDistribution {})
			.provisioner(MockProvisioner {})
			.network_bridge_rx(MockNetworkBridgeRx {})
			.collation_generation(MockCollationGeneration {})
			.collator_protocol(MockCollatorProtocol {})
			.gossip_support(MockGossipSupport {})
			.dispute_distribution(MockDisputeDistribution {})
			.prospective_teyrchains(MockProspectiveTeyrchains {})
			.activation_external_listeners(Default::default())
			.active_leaves(Default::default())
			.metrics($metrics)
			.supports_teyrchains(AlwaysSupportsTeyrchains {})
			.spawner(SpawnGlue($spawn_task_handle))
	}};
}
/// A sync oracle that always reports the node as fully synced.
#[derive(Clone)]
pub struct TestSyncOracle {}
impl SyncOracle for TestSyncOracle {
	fn is_major_syncing(&self) -> bool {
		// Benchmarks always run on a "synced" node.
		false
	}
	fn is_offline(&self) -> bool {
		unimplemented!("not used by subsystem benchmarks")
	}
}
@@ -0,0 +1,241 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Mocked `network-bridge` subsystems that uses a `NetworkInterface` to access
//! the emulated network.
use crate::{
configuration::TestAuthorities,
network::{NetworkEmulatorHandle, NetworkInterfaceReceiver, NetworkMessage, RequestExt},
};
use futures::{channel::mpsc::UnboundedSender, FutureExt, StreamExt};
use pezkuwi_node_network_protocol::ValidationProtocols;
use pezkuwi_node_subsystem::{
messages::{ApprovalVotingParallelMessage, NetworkBridgeTxMessage},
overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::{
messages::{BitfieldDistributionMessage, NetworkBridgeEvent, StatementDistributionMessage},
OverseerSignal,
};
use sc_network::{request_responses::ProtocolConfig, RequestFailure};
const LOG_TARGET: &str = "subsystem-bench::network-bridge";
/// Request/response protocol names the mock network bridge rx accepts.
/// NOTE(review): the all-`f` hex component looks like a placeholder genesis
/// hash used by the test environment — confirm against the protocol-name
/// construction.
const ALLOWED_PROTOCOLS: &[&str] = &[
	"/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_chunk/2",
	"/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/req_attested_candidate/2",
	"/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/send_dispute/1",
];
/// A mock of the network bridge tx subsystem.
pub struct MockNetworkBridgeTx {
	/// A network emulator handle.
	network: NetworkEmulatorHandle,
	/// A channel to the network interface.
	to_network_interface: UnboundedSender<NetworkMessage>,
	/// Test authorities (provides the peer-id -> authority mapping).
	test_authorities: TestAuthorities,
}
/// A mock of the network bridge rx subsystem. (The original doc said "tx" —
/// copy/paste error.)
pub struct MockNetworkBridgeRx {
	/// A network interface receiver.
	network_receiver: NetworkInterfaceReceiver,
	/// Inbound request protocol config; requests from peers are forwarded to
	/// its inbound queue when present.
	chunk_request_sender: Option<ProtocolConfig>,
}
impl MockNetworkBridgeTx {
pub fn new(
network: NetworkEmulatorHandle,
to_network_interface: UnboundedSender<NetworkMessage>,
test_authorities: TestAuthorities,
) -> MockNetworkBridgeTx {
Self { network, to_network_interface, test_authorities }
}
}
impl MockNetworkBridgeRx {
pub fn new(
network_receiver: NetworkInterfaceReceiver,
chunk_request_sender: Option<ProtocolConfig>,
) -> MockNetworkBridgeRx {
Self { network_receiver, chunk_request_sender }
}
}
#[overseer::subsystem(NetworkBridgeTx, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockNetworkBridgeTx {
	/// Spawn the mock as the network-bridge-tx subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "network-bridge-tx", future }
	}
}
#[overseer::subsystem(NetworkBridgeRx, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockNetworkBridgeRx {
	/// Spawn the mock as the network-bridge-rx subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "network-bridge-rx", future }
	}
}
#[overseer::contextbounds(NetworkBridgeTx, prefix = self::overseer)]
impl MockNetworkBridgeTx {
	/// Main subsystem loop: forward outgoing requests and validation messages
	/// to the emulated network; exit on the `Conclude` signal.
	async fn run<Context>(self, mut ctx: Context) {
		// Main subsystem loop.
		loop {
			let subsystem_message = ctx.recv().await.expect("Overseer never fails us");
			match subsystem_message {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					NetworkBridgeTxMessage::SendRequests(requests, _if_disconnected) => {
						for request in requests {
							gum::debug!(target: LOG_TARGET, request = ?request, "Processing request");
							// Resolve the destination: either the request carries an
							// authority id directly, or we map its peer id through
							// the test authority table.
							let peer_id = match request.authority_id() {
								Some(v) => v.clone(),
								None => self
									.test_authorities
									.peer_id_to_authority
									.get(request.peer_id().expect("Should exist"))
									.expect("Should exist")
									.clone(),
							};
							if !self.network.is_peer_connected(&peer_id) {
								// Attempting to send a request to a disconnected peer.
								request
									.into_response_sender()
									.send(Err(RequestFailure::NotConnected))
									.expect("send never fails");
								continue;
							}
							let peer_message =
								NetworkMessage::RequestFromNode(peer_id.clone(), Box::new(request));
							let _ = self.to_network_interface.unbounded_send(peer_message);
						}
					},
					NetworkBridgeTxMessage::ReportPeer(_) => {
						// ignore rep changes
					},
					NetworkBridgeTxMessage::SendValidationMessage(peers, message) => {
						// Fan the same message out to every listed peer.
						for peer in peers {
							self.to_network_interface
								.unbounded_send(NetworkMessage::MessageFromNode(
									self.test_authorities
										.peer_id_to_authority
										.get(&peer)
										.unwrap()
										.clone(),
									message.clone(),
								))
								.expect("Should not fail");
						}
					},
					NetworkBridgeTxMessage::SendValidationMessages(messages) => {
						// Batched variant of `SendValidationMessage`.
						for (peers, message) in messages {
							for peer in peers {
								self.to_network_interface
									.unbounded_send(NetworkMessage::MessageFromNode(
										self.test_authorities
											.peer_id_to_authority
											.get(&peer)
											.unwrap()
											.clone(),
										message.clone(),
									))
									.expect("Should not fail");
							}
						}
					},
					message => unimplemented!("Unexpected network bridge message {:?}", message),
				},
			}
		}
	}
}
#[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)]
impl MockNetworkBridgeRx {
	/// Main subsystem loop: dispatch messages arriving from the emulated
	/// network to the corresponding subsystems, forward inbound peer requests
	/// to the configured request protocol queue, and exit on `Conclude`.
	async fn run<Context>(mut self, mut ctx: Context) {
		// Main subsystem loop.
		let mut from_network_interface = self.network_receiver.0;
		loop {
			futures::select! {
				maybe_peer_message = from_network_interface.next() => {
					if let Some(message) = maybe_peer_message {
						match message {
							NetworkMessage::MessageFromPeer(peer_id, message) => match message {
								ValidationProtocols::V3(
									pezkuwi_node_network_protocol::v3::ValidationProtocol::BitfieldDistribution(
										bitfield,
									),
								) => {
									ctx.send_message(
										BitfieldDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, pezkuwi_node_network_protocol::ValidationProtocols::V3(bitfield)))
									).await;
								},
								ValidationProtocols::V3(
									pezkuwi_node_network_protocol::v3::ValidationProtocol::ApprovalDistribution(msg)
								) => {
									// Approval traffic goes to the parallel approval voting subsystem.
									ctx.send_message(
										ApprovalVotingParallelMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, pezkuwi_node_network_protocol::ValidationProtocols::V3(msg)))
									).await;
								}
								ValidationProtocols::V3(
									pezkuwi_node_network_protocol::v3::ValidationProtocol::StatementDistribution(msg)
								) => {
									ctx.send_message(
										StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(peer_id, pezkuwi_node_network_protocol::ValidationProtocols::V3(msg)))
									).await;
								},
							},
							NetworkMessage::RequestFromPeer(request) => {
								if let Some(protocol) = self.chunk_request_sender.as_mut() {
									// Only allow-listed protocols may receive requests.
									assert!(ALLOWED_PROTOCOLS.contains(&&*protocol.name), "Unexpected protocol {:?}", protocol.name);
									if let Some(inbound_queue) = protocol.inbound_queue.as_ref() {
										inbound_queue
											.send(request)
											.await
											.expect("Forwarding requests to subsystem never fails");
									}
								}
							},
							_ => {
								panic!("NetworkMessage::RequestFromNode is not expected to be received from a peer")
							}
						}
					}
				},
				subsystem_message = ctx.recv().fuse() => {
					match subsystem_message.expect("Overseer never fails us") {
						orchestra::FromOrchestra::Signal(signal) => if signal == OverseerSignal::Conclude { return },
						_ => {
							unimplemented!("Unexpected network bridge rx message")
						},
					}
				}
			}
		}
	}
}
@@ -0,0 +1,74 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic prospective teyrchains subsystem mockup suitable to be used in benchmarks.
use futures::FutureExt;
use pezkuwi_node_subsystem::{
messages::ProspectiveTeyrchainsMessage, overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::Hash;
/// A stateless mock of the prospective-teyrchains subsystem; its `run` loop
/// answers membership queries with canned data.
pub struct MockProspectiveTeyrchains {}
impl MockProspectiveTeyrchains {
	/// Create a new mock; it holds no state.
	pub fn new() -> Self {
		Self {}
	}
}
#[overseer::subsystem(ProspectiveTeyrchains, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockProspectiveTeyrchains {
	/// Spawn the mock as the prospective-teyrchains subsystem.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(ProspectiveTeyrchains, prefix = self::overseer)]
impl MockProspectiveTeyrchains {
	/// Main loop: answer minimum-relay-parent and hypothetical-membership
	/// queries with canned responses; exit on the `Conclude` signal.
	async fn run<Context>(self, mut ctx: Context) {
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => match msg {
					ProspectiveTeyrchainsMessage::GetMinimumRelayParents(_relay_parent, tx) => {
						// No relevant relay parents.
						tx.send(vec![]).unwrap();
					},
					ProspectiveTeyrchainsMessage::GetHypotheticalMembership(req, tx) => {
						// Every requested candidate is a member under the zero hash.
						tx.send(
							req.candidates
								.iter()
								.cloned()
								.map(|candidate| (candidate, vec![Hash::repeat_byte(0)]))
								.collect(),
						)
						.unwrap();
					},
					// Fixed: the panic message previously said "chain-api",
					// a copy/paste leftover from another mock.
					_ => {
						unimplemented!("Unexpected prospective-teyrchains message")
					},
				},
			}
		}
	}
}
@@ -0,0 +1,370 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A generic runtime api subsystem mockup suitable to be used in benchmarks.
use crate::configuration::{TestAuthorities, TestConfiguration};
use bitvec::prelude::BitVec;
use futures::FutureExt;
use itertools::Itertools;
use pezkuwi_node_subsystem::{
messages::{RuntimeApiMessage, RuntimeApiRequest},
overseer, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_types::OverseerSignal;
use pezkuwi_primitives::{
node_features, ApprovalVotingParams, AsyncBackingParams, CandidateEvent,
CandidateReceiptV2 as CandidateReceipt, CoreIndex, CoreState, GroupIndex, GroupRotationInfo,
Id as ParaId, IndexedVec, NodeFeatures, OccupiedCore, ScheduledCore, SessionIndex, SessionInfo,
ValidationCode, ValidatorIndex,
};
use sp_consensus_babe::Epoch as BabeEpoch;
use sp_core::H256;
use std::collections::{BTreeMap, HashMap, VecDeque};
/// Logging target for the runtime-api mock.
const LOG_TARGET: &str = "subsystem-bench::runtime-api-mock";
/// Minimal state to answer requests.
#[derive(Clone)]
pub struct RuntimeApiState {
	// All authorities taking part in the test.
	authorities: TestAuthorities,
	// Node features reported by the mocked runtime.
	node_features: NodeFeatures,
	// Candidate receipts, keyed by relay-chain block hash.
	candidate_hashes: HashMap<H256, Vec<CandidateReceipt>>,
	// Candidate events, keyed by relay-chain block hash.
	candidate_events: HashMap<H256, Vec<CandidateEvent>>,
	// Babe epoch returned for `CurrentBabeEpoch`; `None` panics if requested.
	babe_epoch: Option<BabeEpoch>,
	// Session index returned for `SessionIndexForChild` (the session never changes).
	session_index: SessionIndex,
	// The claim queue: one para claimed per core.
	claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
}
/// The availability-core state the mock reports for every core.
#[derive(Clone)]
pub enum MockRuntimeApiCoreState {
	/// Every core is reported as occupied by a candidate.
	Occupied,
	/// Every core is reported as scheduled with a para assigned.
	Scheduled,
	/// Unused by current benchmarks; reaching it hits a `todo!()`.
	#[allow(dead_code)]
	Free,
}
/// A mocked `runtime-api` subsystem.
#[derive(Clone)]
pub struct MockRuntimeApi {
	// Pre-computed state used to answer runtime API requests.
	state: RuntimeApiState,
	// Full test configuration.
	config: TestConfiguration,
	// Core state reported for every availability core.
	core_state: MockRuntimeApiCoreState,
}
impl MockRuntimeApi {
pub fn new(
config: TestConfiguration,
authorities: TestAuthorities,
candidate_hashes: HashMap<H256, Vec<CandidateReceipt>>,
candidate_events: HashMap<H256, Vec<CandidateEvent>>,
babe_epoch: Option<BabeEpoch>,
session_index: SessionIndex,
core_state: MockRuntimeApiCoreState,
) -> MockRuntimeApi {
// Enable chunk mapping feature to make systematic av-recovery possible.
let node_features = default_node_features();
let validator_group_count =
session_info_for_peers(&config, &authorities).validator_groups.len();
// Each para gets one core assigned and there is only one candidate per
// teyrchain per relay chain block (no elastic scaling).
let claim_queue = candidate_hashes
.iter()
.next()
.expect("Candidates are generated at test start")
.1
.iter()
.enumerate()
.map(|(index, candidate_receipt)| {
// Ensure test breaks if badly configured.
assert!(index < validator_group_count);
(CoreIndex(index as u32), vec![candidate_receipt.descriptor.para_id()].into())
})
.collect();
Self {
state: RuntimeApiState {
authorities,
candidate_hashes,
candidate_events,
babe_epoch,
session_index,
node_features,
claim_queue,
},
config,
core_state,
}
}
fn session_info(&self) -> SessionInfo {
session_info_for_peers(&self.config, &self.state.authorities)
}
}
/// Generates a test session info with all passed authorities as consensus validators.
pub fn session_info_for_peers(
	configuration: &TestConfiguration,
	authorities: &TestAuthorities,
) -> SessionInfo {
	// Assign indices 0..n_validators and split them into backing groups of at
	// most `max_validators_per_core` members each.
	let mut groups: Vec<Vec<ValidatorIndex>> = Vec::new();
	for validator in 0..configuration.n_validators {
		if validator % configuration.max_validators_per_core == 0 {
			groups.push(Vec::new());
		}
		groups
			.last_mut()
			.expect("a group was just pushed; qed")
			.push(ValidatorIndex(validator as _));
	}
	// Every authority with a discovery key is active.
	let active_validator_indices = (0..authorities.validator_authority_id.len())
		.map(|index| ValidatorIndex(index as u32))
		.collect::<Vec<_>>();
	SessionInfo {
		validators: authorities.validator_public.iter().cloned().collect(),
		discovery_keys: authorities.validator_authority_id.to_vec(),
		assignment_keys: authorities.validator_assignment_id.to_vec(),
		validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(groups),
		n_cores: configuration.n_cores as u32,
		needed_approvals: configuration.needed_approvals as u32,
		zeroth_delay_tranche_width: configuration.zeroth_delay_tranche_width as u32,
		relay_vrf_modulo_samples: configuration.relay_vrf_modulo_samples as u32,
		n_delay_tranches: configuration.n_delay_tranches as u32,
		no_show_slots: configuration.no_show_slots as u32,
		active_validator_indices,
		dispute_period: 6,
		random_seed: [0u8; 32],
	}
}
#[overseer::subsystem(RuntimeApi, error=SubsystemError, prefix=self::overseer)]
impl<Context> MockRuntimeApi {
	/// Spawns the mock's message loop as a subsystem future; any outcome of
	/// `run` is mapped to `Ok(())`.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = self.run(ctx).map(|_| Ok(())).boxed();
		SpawnedSubsystem { name: "test-environment", future }
	}
}
#[overseer::contextbounds(RuntimeApi, prefix = self::overseer)]
impl MockRuntimeApi {
	/// Message loop: answers the runtime-api requests exercised by the
	/// benchmarks from the pre-generated `RuntimeApiState` and exits on the
	/// `Conclude` signal. Any request variant not handled below panics via
	/// `unimplemented!` so a misconfigured benchmark fails loudly.
	async fn run<Context>(self, mut ctx: Context) {
		let validator_group_count = self.session_info().validator_groups.len();
		loop {
			let msg = ctx.recv().await.expect("Overseer never fails us");
			match msg {
				orchestra::FromOrchestra::Signal(signal) =>
					if signal == OverseerSignal::Conclude {
						return;
					},
				orchestra::FromOrchestra::Communication { msg } => {
					gum::debug!(target: LOG_TARGET, msg=?msg, "recv message");
					match msg {
						// Events were pre-generated per block; unknown blocks
						// yield an empty list.
						RuntimeApiMessage::Request(
							request,
							RuntimeApiRequest::CandidateEvents(sender),
						) => {
							let candidate_events = self.state.candidate_events.get(&request);
							let _ = sender.send(Ok(candidate_events.cloned().unwrap_or_default()));
						},
						// The same session info is returned regardless of the
						// requested session index.
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::SessionInfo(_session_index, sender),
						) => {
							let _ = sender.send(Ok(Some(self.session_info())));
						},
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::NodeFeatures(_session_index, sender),
						) => {
							let _ = sender.send(Ok(self.state.node_features.clone()));
						},
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::SessionExecutorParams(_session_index, sender),
						) => {
							let _ = sender.send(Ok(Some(Default::default())));
						},
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::Validators(sender),
						) => {
							let _ =
								sender.send(Ok(self.state.authorities.validator_public.clone()));
						},
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::SessionIndexForChild(sender),
						) => {
							// Session is always the same.
							let _ = sender.send(Ok(self.state.session_index));
						},
						// Core states are synthesized from the pre-generated
						// candidates according to the configured `core_state`.
						RuntimeApiMessage::Request(
							block_hash,
							RuntimeApiRequest::AvailabilityCores(sender),
						) => {
							let candidate_hashes = self
								.state
								.candidate_hashes
								.get(&block_hash)
								.expect("Relay chain block hashes are generated at test start");
							// All cores are always occupied.
							let cores = candidate_hashes
								.iter()
								.enumerate()
								.map(|(index, candidate_receipt)| {
									// Ensure test breaks if badly configured.
									assert!(index < validator_group_count);
									use MockRuntimeApiCoreState::*;
									match self.core_state {
										Occupied => CoreState::Occupied(OccupiedCore {
											next_up_on_available: None,
											occupied_since: 0,
											time_out_at: 0,
											next_up_on_time_out: None,
											availability: BitVec::default(),
											group_responsible: GroupIndex(index as u32),
											candidate_hash: candidate_receipt.hash(),
											candidate_descriptor: candidate_receipt
												.descriptor
												.clone(),
										}),
										Scheduled => CoreState::Scheduled(ScheduledCore {
											para_id: (index + 1).into(),
											collator: None,
										}),
										Free => todo!(),
									}
								})
								.collect::<Vec<_>>();
							let _ = sender.send(Ok(cores));
						},
						RuntimeApiMessage::Request(
							_request,
							RuntimeApiRequest::CurrentBabeEpoch(sender),
						) => {
							let _ = sender.send(Ok(self
								.state
								.babe_epoch
								.clone()
								.expect("Babe epoch unpopulated")));
						},
						// Async-backing limits come straight from the test config.
						RuntimeApiMessage::Request(
							_block_hash,
							RuntimeApiRequest::AsyncBackingParams(sender),
						) => {
							let _ = sender.send(Ok(AsyncBackingParams {
								max_candidate_depth: self.config.max_candidate_depth,
								allowed_ancestry_len: self.config.allowed_ancestry_len,
							}));
						},
						// Report an API version new enough for disabled-validator queries.
						RuntimeApiMessage::Request(_parent, RuntimeApiRequest::Version(tx)) => {
							tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT))
								.unwrap();
						},
						// No validators are ever disabled in benchmarks.
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::DisabledValidators(tx),
						) => {
							tx.send(Ok(vec![])).unwrap();
						},
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::MinimumBackingVotes(_session_index, tx),
						) => {
							tx.send(Ok(self.config.minimum_backing_votes)).unwrap();
						},
						// Fixed rotation info; groups come from the session info.
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::ValidatorGroups(tx),
						) => {
							let groups = self.session_info().validator_groups.to_vec();
							let group_rotation_info = GroupRotationInfo {
								session_start_block: 1,
								group_rotation_frequency: 12,
								now: 1,
							};
							tx.send(Ok((groups, group_rotation_info))).unwrap();
						},
						// Any hash resolves to an empty validation code blob.
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::ValidationCodeByHash(hash, tx),
						) => {
							gum::debug!(target: LOG_TARGET, "ValidationCodeByHash: {:?}", hash);
							let validation_code = ValidationCode(Vec::new());
							if let Err(err) = tx.send(Ok(Some(validation_code))) {
								gum::error!(target: LOG_TARGET, ?err, "validation code wasn't received");
							}
						},
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::ApprovalVotingParams(_, tx),
						) =>
							if let Err(err) = tx.send(Ok(ApprovalVotingParams::default())) {
								gum::error!(target: LOG_TARGET, ?err, "Voting params weren't received");
							},
						RuntimeApiMessage::Request(_parent, RuntimeApiRequest::ClaimQueue(tx)) => {
							tx.send(Ok(self.state.claim_queue.clone())).unwrap();
						},
						// No on-chain votes or unapplied slashes in benchmarks.
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::FetchOnChainVotes(tx),
						) => {
							tx.send(Ok(None)).unwrap();
						},
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::UnappliedSlashes(tx),
						) => {
							tx.send(Ok(vec![])).unwrap();
						},
						RuntimeApiMessage::Request(
							_parent,
							RuntimeApiRequest::UnappliedSlashesV2(tx),
						) => {
							tx.send(Ok(vec![])).unwrap();
						},
						// Long term TODO: implement more as needed.
						message => {
							unimplemented!("Unexpected runtime-api message: {:?}", message)
						},
					}
				},
			}
		}
	}
}
/// Constructs the node-feature bitfield the mock runtime reports: availability
/// chunk mapping (needed for systematic av-recovery), the elastic-scaling MVP
/// and v2 candidate receipts are all enabled.
pub fn default_node_features() -> NodeFeatures {
	let mut features = NodeFeatures::new();
	features.resize(node_features::FeatureIndex::FirstUnassigned as usize, false);
	for feature in [
		node_features::FeatureIndex::AvailabilityChunkMapping,
		node_features::FeatureIndex::ElasticScalingMVP,
		node_features::FeatureIndex::CandidateReceiptV2,
	] {
		features.set(feature as u8 as usize, true);
	}
	features
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,431 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
configuration::TestAuthorities,
dummy_builder,
environment::{TestEnvironment, TestEnvironmentDependencies, GENESIS_HASH},
mock::{
candidate_backing::MockCandidateBacking,
chain_api::{ChainApiState, MockChainApi},
network_bridge::{MockNetworkBridgeRx, MockNetworkBridgeTx},
prospective_teyrchains::MockProspectiveTeyrchains,
runtime_api::{MockRuntimeApi, MockRuntimeApiCoreState},
AlwaysSupportsTeyrchains,
},
network::{new_network, NetworkEmulatorHandle, NetworkInterface, NetworkInterfaceReceiver},
usage::BenchmarkUsage,
NODE_UNDER_TEST,
};
use bitvec::vec::BitVec;
use colored::Colorize;
use itertools::Itertools;
use pezkuwi_node_metrics::metrics::Metrics;
use pezkuwi_node_network_protocol::{
grid_topology::{SessionGridTopology, TopologyPeerInfo},
request_response::{IncomingRequest, ReqProtocolNames},
v3::{self, BackedCandidateManifest, StatementFilter},
view, ValidationProtocols, View,
};
use pezkuwi_node_subsystem::messages::{
network_bridge_event::NewGossipTopology, AllMessages, NetworkBridgeEvent,
StatementDistributionMessage,
};
use pezkuwi_overseer::{
Handle as OverseerHandle, Overseer, OverseerConnector, OverseerMetrics, SpawnGlue,
};
use pezkuwi_primitives::{
AuthorityDiscoveryId, Block, GroupIndex, Hash, Id, ValidatorId, ValidatorIndex,
};
use pezkuwi_statement_distribution::StatementDistributionSubsystem;
use sc_keystore::LocalKeystore;
use sc_network_types::PeerId;
use sc_service::SpawnTaskHandle;
use sp_keystore::{Keystore, KeystorePtr};
use sp_runtime::RuntimeAppPublic;
use std::{
sync::{atomic::Ordering, Arc},
time::{Duration, Instant},
};
pub use test_state::TestState;
mod test_state;
/// Logging target for the statement-distribution benchmark.
const LOG_TARGET: &str = "subsystem-bench::statement";
/// Creates an in-memory keystore holding the `//Node0` validator and
/// authority-discovery keys used by the node under test.
pub fn make_keystore() -> KeystorePtr {
	let keystore: KeystorePtr = Arc::new(LocalKeystore::in_memory());
	for key_type in [ValidatorId::ID, AuthorityDiscoveryId::ID] {
		Keystore::sr25519_generate_new(&*keystore, key_type, Some("//Node0"))
			.expect("Insert key into keystore");
	}
	keystore
}
/// Wires up the benchmark overseer: a real statement-distribution subsystem
/// surrounded by mocked runtime-api, chain-api, prospective-teyrchains,
/// candidate-backing and network-bridge subsystems.
fn build_overseer(
	state: &TestState,
	network: NetworkEmulatorHandle,
	network_interface: NetworkInterface,
	network_receiver: NetworkInterfaceReceiver,
	dependencies: &TestEnvironmentDependencies,
) -> (Overseer<SpawnGlue<SpawnTaskHandle>, AlwaysSupportsTeyrchains>, OverseerHandle) {
	let overseer_connector = OverseerConnector::with_event_capacity(64000);
	let overseer_metrics = OverseerMetrics::try_register(&dependencies.registry).unwrap();
	let spawn_task_handle = dependencies.task_manager.spawn_handle();
	// Cores are reported as scheduled; session/candidate data comes from the
	// pre-generated test state.
	let mock_runtime_api = MockRuntimeApi::new(
		state.config.clone(),
		state.test_authorities.clone(),
		state.candidate_receipts.clone(),
		Default::default(),
		Default::default(),
		0,
		MockRuntimeApiCoreState::Scheduled,
	);
	let chain_api_state = ChainApiState { block_headers: state.block_headers.clone() };
	let mock_chain_api = MockChainApi::new(chain_api_state);
	let mock_prospective_teyrchains = MockProspectiveTeyrchains::new();
	// The backing mock signs with the node-under-test's validator key.
	let mock_candidate_backing = MockCandidateBacking::new(
		state.config.clone(),
		state
			.test_authorities
			.validator_pairs
			.get(NODE_UNDER_TEST as usize)
			.unwrap()
			.clone(),
		state.pvd.clone(),
		state.own_backing_group.clone(),
	);
	// Request/response channel for attested-candidate requests.
	let (candidate_req_receiver, candidate_req_cfg) =
		IncomingRequest::get_config_receiver::<Block, sc_network::NetworkWorker<Block, Hash>>(
			&ReqProtocolNames::new(GENESIS_HASH, None),
		);
	let keystore = make_keystore();
	// The subsystem under test.
	let subsystem = StatementDistributionSubsystem::new(
		keystore.clone(),
		candidate_req_receiver,
		Metrics::try_register(&dependencies.registry).unwrap(),
	);
	let network_bridge_tx = MockNetworkBridgeTx::new(
		network,
		network_interface.subsystem_sender(),
		state.test_authorities.clone(),
	);
	let network_bridge_rx = MockNetworkBridgeRx::new(network_receiver, Some(candidate_req_cfg));
	let dummy = dummy_builder!(spawn_task_handle, overseer_metrics)
		.replace_runtime_api(|_| mock_runtime_api)
		.replace_chain_api(|_| mock_chain_api)
		.replace_prospective_teyrchains(|_| mock_prospective_teyrchains)
		.replace_candidate_backing(|_| mock_candidate_backing)
		.replace_statement_distribution(|_| subsystem)
		.replace_network_bridge_tx(|_| network_bridge_tx)
		.replace_network_bridge_rx(|_| network_bridge_rx);
	let (overseer, raw_handle) = dummy.build_with_connector(overseer_connector).unwrap();
	let overseer_handle = OverseerHandle::new(raw_handle);
	(overseer, overseer_handle)
}
/// Assembles the emulated network and the overseer into a ready-to-run
/// test environment for the statement-distribution benchmark.
pub fn prepare_test(state: &TestState, with_prometheus_endpoint: bool) -> TestEnvironment {
	let deps = TestEnvironmentDependencies::default();
	// The test state itself acts as the remote side of the emulated network.
	let (net, iface, iface_receiver) = new_network(
		&state.config,
		&deps,
		&state.test_authorities,
		vec![Arc::new(state.clone())],
	);
	let (overseer, handle) = build_overseer(state, net.clone(), iface, iface_receiver, &deps);
	TestEnvironment::new(
		deps,
		state.config.clone(),
		net,
		overseer,
		handle,
		state.test_authorities.clone(),
		with_prometheus_endpoint,
	)
}
/// Builds a `PeerViewChange` update for `peer_id` pointing its view at
/// `block_hash`, wrapped as a message for the statement-distribution subsystem.
pub fn generate_peer_view_change(block_hash: Hash, peer_id: PeerId) -> AllMessages {
	let view_update = NetworkBridgeEvent::PeerViewChange(peer_id, View::new([block_hash], 0));
	AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(
		view_update,
	))
}
/// Wraps a new-gossip-topology event (session 0) for the
/// statement-distribution subsystem, with `test_node` as the local validator.
pub fn generate_new_session_topology(
	topology: &SessionGridTopology,
	test_node: ValidatorIndex,
) -> Vec<AllMessages> {
	let update = NetworkBridgeEvent::NewGossipTopology(NewGossipTopology {
		session: 0,
		topology: topology.clone(),
		local_index: Some(test_node),
	});
	let message = StatementDistributionMessage::NetworkBridgeUpdate(update);
	vec![AllMessages::StatementDistribution(message)]
}
/// Generates a topology to be used for this benchmark.
///
/// Each authority-discovery id is paired with its peer id and validator index;
/// the "shuffling" is the identity permutation, i.e. validators keep their
/// canonical order.
pub fn generate_topology(test_authorities: &TestAuthorities) -> SessionGridTopology {
	let mut canonical = Vec::new();
	for (index, (discovery_id, peer_id)) in test_authorities
		.validator_authority_id
		.clone()
		.into_iter()
		.zip(test_authorities.peer_ids.clone())
		.enumerate()
	{
		canonical.push(TopologyPeerInfo {
			peer_ids: vec![peer_id],
			validator_index: ValidatorIndex(index as u32),
			discovery_id,
		});
	}
	let shuffled = (0..canonical.len()).collect::<Vec<_>>();
	SessionGridTopology::new(shuffled, canonical)
}
/// Drives the statement-distribution benchmark over the pre-generated blocks.
///
/// For every block it: imports the block, announces peer view changes, feeds
/// the node one `Seconded` statement from its own backing group, floods it
/// with `BackedCandidateManifest`s from one- and two-hop grid neighbors, and
/// then polls until the node has completed a manifest exchange for every
/// advertised candidate. Returns the collected resource usage of the
/// statement-distribution subsystem.
pub async fn benchmark_statement_distribution(
	env: &mut TestEnvironment,
	state: &TestState,
) -> BenchmarkUsage {
	state.reset_trackers();
	// Only validators the emulated network actually connected participate.
	let connected_validators = state
		.test_authorities
		.validator_authority_id
		.iter()
		.enumerate()
		.filter_map(|(i, id)| if env.network().is_peer_connected(id) { Some(i) } else { None })
		.collect_vec();
	// Any connected member of our own backing group acts as the seconding peer.
	let seconding_validator_in_own_backing_group = state
		.own_backing_group
		.iter()
		.find(|v| connected_validators.contains(&(v.0 as usize)))
		.unwrap()
		.to_owned();
	let config = env.config().clone();
	let groups = state.session_info.validator_groups.clone();
	let own_backing_group_index = groups
		.iter()
		.position(|group| group.iter().any(|v| v.0 == NODE_UNDER_TEST))
		.unwrap();
	env.metrics().set_n_validators(config.n_validators);
	env.metrics().set_n_cores(config.n_cores);
	// Bootstrap the subsystem with peer connections and the session topology.
	let topology = generate_topology(&state.test_authorities);
	let peer_connected_messages = env.network().generate_peer_connected(|e| {
		AllMessages::StatementDistribution(StatementDistributionMessage::NetworkBridgeUpdate(e))
	});
	let new_session_topology_messages =
		generate_new_session_topology(&topology, ValidatorIndex(NODE_UNDER_TEST));
	for message in peer_connected_messages.into_iter().chain(new_session_topology_messages) {
		env.send_message(message).await;
	}
	let test_start = Instant::now();
	let mut candidates_advertised = 0;
	for block_info in state.block_infos.iter() {
		let block_num = block_info.number as usize;
		gum::info!(target: LOG_TARGET, "Current block {}/{} {:?}", block_num, config.num_blocks, block_info.hash);
		env.metrics().set_current_block(block_num);
		env.import_block(block_info.clone()).await;
		// Every emulated peer moves its view to the new block.
		for peer_view_change in env
			.network()
			.generate_statement_distribution_peer_view_change(view![block_info.hash])
		{
			env.send_message(peer_view_change).await;
		}
		let seconding_peer_id = *state
			.test_authorities
			.peer_ids
			.get(seconding_validator_in_own_backing_group.0 as usize)
			.unwrap();
		let candidate = state.candidate_receipts.get(&block_info.hash).unwrap().first().unwrap();
		let candidate_hash = candidate.hash();
		let statement = state
			.statements
			.get(&candidate_hash)
			.unwrap()
			.get(seconding_validator_in_own_backing_group.0 as usize)
			.unwrap()
			.clone();
		// Hand the node a `Seconded` statement from its own backing group.
		let message = AllMessages::StatementDistribution(
			StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(
				seconding_peer_id,
				ValidationProtocols::V3(v3::StatementDistributionMessage::Statement(
					block_info.hash,
					statement,
				)),
			)),
		);
		env.send_message(message).await;
		let max_messages_per_candidate = state.config.max_candidate_depth + 1;
		// One was just sent for the own backing group
		let mut messages_tracker = (0..groups.len())
			.map(|i| if i == own_backing_group_index { max_messages_per_candidate } else { 0 })
			.collect_vec();
		// Manifests arrive from direct grid neighbors and from peers reachable
		// in two hops via the grid (row-then-column and column-then-row).
		let neighbors =
			topology.compute_grid_neighbors_for(ValidatorIndex(NODE_UNDER_TEST)).unwrap();
		let connected_neighbors_x = neighbors
			.validator_indices_x
			.iter()
			.filter(|&v| connected_validators.contains(&(v.0 as usize)))
			.cloned()
			.collect_vec();
		let connected_neighbors_y = neighbors
			.validator_indices_y
			.iter()
			.filter(|&v| connected_validators.contains(&(v.0 as usize)))
			.cloned()
			.collect_vec();
		// Direct neighbors, each paired with the backing group it belongs to.
		let one_hop_peers_and_groups = connected_neighbors_x
			.iter()
			.chain(connected_neighbors_y.iter())
			.map(|validator_index| {
				let peer_id =
					*state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap();
				let group_index =
					groups.iter().position(|group| group.contains(validator_index)).unwrap();
				(peer_id, group_index)
			})
			.collect_vec();
		// Row neighbors relay manifests for the groups of their column neighbors.
		let two_hop_x_peers_and_groups = connected_neighbors_x
			.iter()
			.flat_map(|validator_index| {
				let peer_id =
					*state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap();
				topology
					.compute_grid_neighbors_for(*validator_index)
					.unwrap()
					.validator_indices_y
					.iter()
					.map(|validator_neighbor| {
						let group_index = groups
							.iter()
							.position(|group| group.contains(validator_neighbor))
							.unwrap();
						(peer_id, group_index)
					})
					.collect_vec()
			})
			.collect_vec();
		// And column neighbors do the same for their row neighbors.
		let two_hop_y_peers_and_groups = connected_neighbors_y
			.iter()
			.flat_map(|validator_index| {
				let peer_id =
					*state.test_authorities.peer_ids.get(validator_index.0 as usize).unwrap();
				topology
					.compute_grid_neighbors_for(*validator_index)
					.unwrap()
					.validator_indices_x
					.iter()
					.map(|validator_neighbor| {
						let group_index = groups
							.iter()
							.position(|group| group.contains(validator_neighbor))
							.unwrap();
						(peer_id, group_index)
					})
					.collect_vec()
			})
			.collect_vec();
		for (seconding_peer_id, group_index) in one_hop_peers_and_groups
			.into_iter()
			.chain(two_hop_x_peers_and_groups)
			.chain(two_hop_y_peers_and_groups)
		{
			// Cap the number of manifests sent per backing group.
			let messages_sent_count = messages_tracker.get_mut(group_index).unwrap();
			if *messages_sent_count == max_messages_per_candidate {
				continue;
			}
			*messages_sent_count += 1;
			let candidate_hash = state
				.candidate_receipts
				.get(&block_info.hash)
				.unwrap()
				.get(group_index)
				.unwrap()
				.hash();
			// Advertise the group's candidate as fully seconded, not yet validated.
			let manifest = BackedCandidateManifest {
				relay_parent: block_info.hash,
				candidate_hash,
				group_index: GroupIndex(group_index as u32),
				para_id: Id::new(group_index as u32 + 1),
				parent_head_data_hash: state.pvd.parent_head.hash(),
				statement_knowledge: StatementFilter {
					seconded_in_group: BitVec::from_iter(
						groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| true),
					),
					validated_in_group: BitVec::from_iter(
						groups.get(GroupIndex(group_index as u32)).unwrap().iter().map(|_| false),
					),
				},
			};
			let message = AllMessages::StatementDistribution(
				StatementDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage(
					seconding_peer_id,
					ValidationProtocols::V3(
						v3::StatementDistributionMessage::BackedCandidateManifest(manifest),
					),
				)),
			);
			env.send_message(message).await;
		}
		candidates_advertised += messages_tracker.iter().filter(|&&v| v > 0).collect_vec().len();
		// Poll until the node completed the manifest exchange for every
		// candidate advertised for this block.
		loop {
			let manifests_count = state
				.manifests_tracker
				.values()
				.filter(|v| v.load(Ordering::SeqCst))
				.collect::<Vec<_>>()
				.len();
			gum::debug!(target: LOG_TARGET, "{}/{} manifest exchanges", manifests_count, candidates_advertised);
			if manifests_count == candidates_advertised {
				break;
			}
			tokio::time::sleep(Duration::from_millis(50)).await;
		}
	}
	let duration: u128 = test_start.elapsed().as_millis();
	gum::info!(target: LOG_TARGET, "All blocks processed in {}", format!("{duration:?}ms").cyan());
	gum::info!(target: LOG_TARGET,
		"Avg block time: {}",
		format!("{} ms", test_start.elapsed().as_millis() / env.config().num_blocks as u128).red()
	);
	env.stop().await;
	env.collect_resource_usage(&["statement-distribution"], false)
}
@@ -0,0 +1,446 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use crate::{
configuration::{TestAuthorities, TestConfiguration},
mock::runtime_api::session_info_for_peers,
network::{HandleNetworkMessage, NetworkMessage},
NODE_UNDER_TEST,
};
use bitvec::vec::BitVec;
use codec::{Decode, Encode};
use futures::channel::oneshot;
use itertools::Itertools;
use pezkuwi_node_network_protocol::{
request_response::{
v2::{AttestedCandidateRequest, AttestedCandidateResponse},
Requests,
},
v3::{
BackedCandidateAcknowledgement, StatementDistributionMessage, StatementFilter,
ValidationProtocol,
},
ValidationProtocols,
};
use pezkuwi_node_primitives::{AvailableData, BlockData, PoV};
use pezkuwi_node_subsystem_test_helpers::{
derive_erasure_chunks_with_proofs_and_root, mock::new_block_import_info,
};
use pezkuwi_overseer::BlockInfo;
use pezkuwi_primitives::MutateDescriptorV2;
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt,
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CompactStatement, CoreIndex, Hash,
Header, Id, PersistedValidationData, SessionInfo, SignedStatement, SigningContext,
UncheckedSigned, ValidatorIndex, ValidatorPair,
};
use pezkuwi_primitives_test_helpers::{
dummy_committed_candidate_receipt_v2, dummy_hash, dummy_head_data, dummy_pvd,
};
use sc_network::{config::IncomingRequest, ProtocolName};
use sp_core::{Pair, H256};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
/// All benchmark candidates and statements use this single fixed session.
const SESSION_INDEX: u32 = 0;
/// Pre-generated state shared by the statement-distribution benchmark and the
/// network emulation.
#[derive(Clone)]
pub struct TestState {
	// Full test configuration.
	pub config: TestConfiguration,
	// Authority keys for the network emulation.
	pub test_authorities: TestAuthorities,
	// Relay chain block infos, one per benchmarked block.
	pub block_infos: Vec<BlockInfo>,
	// Generated candidate receipts, keyed by relay-chain block hash.
	pub candidate_receipts: HashMap<H256, Vec<CandidateReceipt>>,
	// Generated committed candidate receipts, keyed by relay-chain block hash.
	// (Field name keeps the historical "commited" spelling.)
	pub commited_candidate_receipts: HashMap<H256, Vec<CommittedCandidateReceipt>>,
	// PersistedValidationData, we use one for all candidates.
	pub pvd: PersistedValidationData,
	// Relay chain block headers, keyed by block hash.
	pub block_headers: HashMap<H256, Header>,
	// Session info; the session is fixed at `SESSION_INDEX` for the whole run.
	pub session_info: SessionInfo,
	// Pre-signed `Seconded` statements, keyed by candidate hash.
	pub statements: HashMap<CandidateHash, Vec<UncheckedSigned<CompactStatement>>>,
	// Validator indices of the backing group containing the node under test.
	pub own_backing_group: Vec<ValidatorIndex>,
	// Per-candidate, per-validator flags tracking received statements.
	pub statements_tracker: HashMap<CandidateHash, Vec<Arc<AtomicBool>>>,
	// Per-candidate flags tracking whether the manifest exchange happened.
	pub manifests_tracker: HashMap<CandidateHash, Arc<AtomicBool>>,
}
impl TestState {
	/// Pre-generates everything the benchmark needs: block infos and headers,
	/// candidate (and committed-candidate) receipts per block, one `Seconded`
	/// statement per backing-group member per candidate, plus the atomic
	/// trackers used to observe progress during the run.
	pub fn new(config: &TestConfiguration) -> Self {
		let test_authorities = config.generate_authorities();
		let session_info = session_info_for_peers(config, &test_authorities);
		// The backing group that contains the node under test.
		let own_backing_group = session_info
			.validator_groups
			.iter()
			.find(|g| g.contains(&ValidatorIndex(NODE_UNDER_TEST)))
			.unwrap()
			.clone();
		let mut state = Self {
			config: config.clone(),
			test_authorities,
			block_infos: (1..=config.num_blocks).map(generate_block_info).collect(),
			candidate_receipts: Default::default(),
			commited_candidate_receipts: Default::default(),
			pvd: dummy_pvd(dummy_head_data(), 0),
			block_headers: Default::default(),
			statements_tracker: Default::default(),
			manifests_tracker: Default::default(),
			session_info,
			own_backing_group,
			statements: Default::default(),
		};
		state.block_headers = state.block_infos.iter().map(generate_block_header).collect();
		// For each unique pov we create a candidate receipt.
		let pov_sizes = Vec::from(config.pov_sizes()); // For n_cores
		let pov_size_to_candidate = generate_pov_size_to_candidate(&pov_sizes);
		let receipt_templates =
			generate_receipt_templates(&pov_size_to_candidate, config.n_validators, &state.pvd);
		// One candidate per core per block, derived from the size-matched
		// template and specialized with per-block descriptor fields.
		for block_info in state.block_infos.iter() {
			for core_idx in 0..config.n_cores {
				let pov_size = pov_sizes.get(core_idx).expect("This is a cycle; qed");
				let candidate_index =
					*pov_size_to_candidate.get(pov_size).expect("pov_size always exists; qed");
				let mut receipt = receipt_templates[candidate_index].clone();
				receipt.descriptor.set_para_id(Id::new(core_idx as u32 + 1));
				receipt.descriptor.set_relay_parent(block_info.hash);
				receipt.descriptor.set_core_index(CoreIndex(core_idx as u32));
				receipt.descriptor.set_session_index(SESSION_INDEX);
				state.candidate_receipts.entry(block_info.hash).or_default().push(
					CandidateReceipt {
						descriptor: receipt.descriptor.clone(),
						commitments_hash: receipt.commitments.hash(),
					},
				);
				// One statement-received flag per validator for this candidate.
				state.statements_tracker.entry(receipt.hash()).or_default().extend(
					(0..config.n_validators)
						.map(|_| Arc::new(AtomicBool::new(false)))
						.collect_vec(),
				);
				state.manifests_tracker.insert(receipt.hash(), Arc::new(AtomicBool::new(false)));
				state
					.commited_candidate_receipts
					.entry(block_info.hash)
					.or_default()
					.push(receipt);
			}
		}
		// Pre-sign a `Seconded` statement from every member of each backing
		// group for the group's candidate in every block.
		let groups = state.session_info.validator_groups.clone();
		for block_info in state.block_infos.iter() {
			for (index, group) in groups.iter().enumerate() {
				let candidate =
					state.candidate_receipts.get(&block_info.hash).unwrap().get(index).unwrap();
				let statements = group
					.iter()
					.map(|&v| {
						sign_statement(
							CompactStatement::Seconded(candidate.hash()),
							block_info.hash,
							v,
							state.test_authorities.validator_pairs.get(v.0 as usize).unwrap(),
						)
					})
					.collect_vec();
				state.statements.insert(candidate.hash(), statements);
			}
		}
		state
	}
	/// Resets the per-run trackers: the first two statement slots of each
	/// candidate are marked as already received, all manifest flags cleared.
	pub fn reset_trackers(&self) {
		self.statements_tracker.values().for_each(|v| {
			v.iter()
				.enumerate()
				.for_each(|(index, v)| v.as_ref().store(index <= 1, Ordering::SeqCst))
		});
		self.manifests_tracker
			.values()
			.for_each(|v| v.as_ref().store(false, Ordering::SeqCst));
	}
}
/// Signs `statement` for `validator_index` with `pair` in the benchmark's
/// fixed session, returning the unchecked (wire-format) signed statement.
fn sign_statement(
	statement: CompactStatement,
	relay_parent: H256,
	validator_index: ValidatorIndex,
	pair: &ValidatorPair,
) -> UncheckedSigned<CompactStatement> {
	let context = SigningContext { parent_hash: relay_parent, session_index: SESSION_INDEX };
	let signature = pair.sign(&statement.signing_payload(&context)[..]);
	let signed =
		SignedStatement::new(statement, validator_index, signature, &context, &pair.public())
			.unwrap();
	signed.as_unchecked().to_owned()
}
/// Produces a synthetic block-import notification for `block_num`, whose hash
/// is the block number byte repeated.
fn generate_block_info(block_num: usize) -> BlockInfo {
	let hash = Hash::repeat_byte(block_num as u8);
	new_block_import_info(hash, block_num as BlockNumber)
}
/// Builds a `(hash, header)` pair for `info`; only the block number and the
/// parent hash carry real data, roots and digest are defaulted.
fn generate_block_header(info: &BlockInfo) -> (H256, Header) {
	let header = Header {
		digest: Default::default(),
		number: info.number,
		parent_hash: info.parent_hash,
		extrinsics_root: Default::default(),
		state_root: Default::default(),
	};
	(info.hash, header)
}
/// Maps each distinct PoV size to a candidate-template index, in first-seen
/// order; duplicate sizes reuse the index of their first occurrence.
fn generate_pov_size_to_candidate(pov_sizes: &[usize]) -> HashMap<usize, usize> {
	let mut mapping = HashMap::new();
	for &pov_size in pov_sizes {
		let next_index = mapping.len();
		mapping.entry(pov_size).or_insert(next_index);
	}
	mapping
}
/// Builds one committed-candidate-receipt template per distinct PoV size.
///
/// The returned vector is ordered so that position `i` holds the template for
/// the PoV size mapped to index `i` in `pov_size_to_candidate`, because
/// callers look templates up positionally by that index. The previous version
/// collected in `HashMap` iteration order, which does not guarantee that
/// correspondence — a template's erasure root could end up attached to a
/// candidate of a different PoV size.
fn generate_receipt_templates(
	pov_size_to_candidate: &HashMap<usize, usize>,
	n_validators: usize,
	pvd: &PersistedValidationData,
) -> Vec<CommittedCandidateReceipt> {
	let mut templates = pov_size_to_candidate
		.iter()
		.map(|(&pov_size, &index)| {
			let mut receipt = dummy_committed_candidate_receipt_v2(dummy_hash());
			// Derive the erasure root over the full available data so the
			// receipt is consistent with a PoV of exactly `pov_size` bytes.
			let (_, erasure_root) = derive_erasure_chunks_with_proofs_and_root(
				n_validators,
				&AvailableData {
					validation_data: pvd.clone(),
					pov: Arc::new(PoV { block_data: BlockData(vec![index as u8; pov_size]) }),
				},
				|_, _| {},
			);
			receipt.descriptor.set_persisted_validation_data_hash(pvd.hash());
			receipt.descriptor.set_erasure_root(erasure_root);
			(index, receipt)
		})
		.collect::<Vec<_>>();
	// Order templates by candidate index so positional lookup matches the
	// `pov_size_to_candidate` mapping.
	templates.sort_by_key(|(index, _)| *index);
	templates.into_iter().map(|(_, receipt)| receipt).collect()
}
// Emulates the remote-peer side of the statement-distribution protocol so the
// node under test can be benchmarked without a real network: requests from
// the node are answered from precomputed state, and statements/manifests sent
// by the node are acknowledged the way a well-behaved peer would.
#[async_trait::async_trait]
impl HandleNetworkMessage for TestState {
    async fn handle(
        &self,
        message: NetworkMessage,
        node_sender: &mut futures::channel::mpsc::UnboundedSender<NetworkMessage>,
    ) -> Option<NetworkMessage> {
        match message {
            // The node requests an attested candidate from a peer: answer with
            // the receipt, persisted validation data and statements that were
            // prepared for that candidate hash. Only `AttestedCandidateV2`
            // requests are emulated; anything else is swallowed.
            NetworkMessage::RequestFromNode(_authority_id, requests) => {
                let Requests::AttestedCandidateV2(req) = *requests else { return None };
                let payload = req.payload;
                // Panics if the node asks about a candidate the test state
                // doesn't know — that would be a test-setup bug.
                let candidate_receipt = self
                    .commited_candidate_receipts
                    .values()
                    .flatten()
                    .find(|v| v.hash() == payload.candidate_hash)
                    .unwrap()
                    .clone();
                let persisted_validation_data = self.pvd.clone();
                let statements = self.statements.get(&payload.candidate_hash).unwrap().clone();
                let res = AttestedCandidateResponse {
                    candidate_receipt,
                    persisted_validation_data,
                    statements,
                };
                // The node may have dropped the response channel; ignore that.
                let _ = req.pending_response.send(Ok((res.encode(), ProtocolName::from(""))));
                None
            },
            // The node gossips a statement to a peer: the first time a given
            // peer hears about a candidate it replies with its own `Valid`
            // statement (if that peer is in the candidate's backing group).
            NetworkMessage::MessageFromNode(
                authority_id,
                ValidationProtocols::V3(ValidationProtocol::StatementDistribution(
                    StatementDistributionMessage::Statement(relay_parent, statement),
                )),
            ) => {
                // Which emulated validator received this message.
                let index = self
                    .test_authorities
                    .validator_authority_id
                    .iter()
                    .position(|v| v == &authority_id)
                    .unwrap();
                let candidate_hash = *statement.unchecked_payload().candidate_hash();
                // Per-(candidate, peer) flag: reply only once.
                let statements_sent_count = self
                    .statements_tracker
                    .get(&candidate_hash)
                    .unwrap()
                    .get(index)
                    .unwrap()
                    .as_ref();
                if statements_sent_count.load(Ordering::SeqCst) {
                    return None;
                } else {
                    statements_sent_count.store(true, Ordering::SeqCst);
                }
                // Only validators that have a prepared statement for this
                // candidate (i.e. are in its group) respond.
                let group_statements = self.statements.get(&candidate_hash).unwrap();
                if !group_statements.iter().any(|s| s.unchecked_validator_index().0 == index as u32)
                {
                    return None;
                }
                // Sign a fresh `Valid` statement as this peer and echo it back
                // to the node under test.
                let statement = CompactStatement::Valid(candidate_hash);
                let context =
                    SigningContext { parent_hash: relay_parent, session_index: SESSION_INDEX };
                let payload = statement.signing_payload(&context);
                let pair = self.test_authorities.validator_pairs.get(index).unwrap();
                let signature = pair.sign(&payload[..]);
                let statement = SignedStatement::new(
                    statement,
                    ValidatorIndex(index as u32),
                    signature,
                    &context,
                    &pair.public(),
                )
                .unwrap()
                .as_unchecked()
                .to_owned();
                node_sender
                    .start_send(NetworkMessage::MessageFromPeer(
                        *self.test_authorities.peer_ids.get(index).unwrap(),
                        ValidationProtocols::V3(ValidationProtocol::StatementDistribution(
                            StatementDistributionMessage::Statement(relay_parent, statement),
                        )),
                    ))
                    .unwrap();
                None
            },
            // The node announces a backed candidate via a manifest: the peer
            // acknowledges it, and — when the node's own backing group backs
            // the candidate — first fetches the attested candidate to learn
            // which statements it should claim knowledge of.
            NetworkMessage::MessageFromNode(
                authority_id,
                ValidationProtocols::V3(ValidationProtocol::StatementDistribution(
                    StatementDistributionMessage::BackedCandidateManifest(manifest),
                )),
            ) => {
                let index = self
                    .test_authorities
                    .validator_authority_id
                    .iter()
                    .position(|v| v == &authority_id)
                    .unwrap();
                let backing_group =
                    self.session_info.validator_groups.get(manifest.group_index).unwrap();
                let group_size = backing_group.len();
                let is_own_backing_group = backing_group.contains(&ValidatorIndex(NODE_UNDER_TEST));
                // If the candidate is not from the node's own group, claim all
                // `Seconded` statements up-front; otherwise start blank and
                // fill knowledge in from the fetched response below.
                let mut seconded_in_group =
                    BitVec::from_iter((0..group_size).map(|_| !is_own_backing_group));
                let mut validated_in_group = BitVec::from_iter((0..group_size).map(|_| false));
                if is_own_backing_group {
                    // Request the attested candidate from the node and wait
                    // for the answer before acknowledging the manifest.
                    let (pending_response, response_receiver) = oneshot::channel();
                    let peer_id = self.test_authorities.peer_ids.get(index).unwrap().to_owned();
                    node_sender
                        .start_send(NetworkMessage::RequestFromPeer(IncomingRequest {
                            peer: peer_id,
                            payload: AttestedCandidateRequest {
                                candidate_hash: manifest.candidate_hash,
                                mask: StatementFilter::blank(self.own_backing_group.len()),
                            }
                            .encode(),
                            pending_response,
                        }))
                        .unwrap();
                    let response = response_receiver.await.unwrap();
                    let response =
                        AttestedCandidateResponse::decode(&mut response.result.unwrap().as_ref())
                            .unwrap();
                    // Translate each received statement into the group-local
                    // bit positions of the knowledge filter.
                    for statement in response.statements {
                        let validator_index = statement.unchecked_validator_index();
                        let position_in_group =
                            backing_group.iter().position(|v| *v == validator_index).unwrap();
                        match statement.unchecked_payload() {
                            CompactStatement::Seconded(_) =>
                                seconded_in_group.set(position_in_group, true),
                            CompactStatement::Valid(_) =>
                                validated_in_group.set(position_in_group, true),
                        }
                    }
                }
                // Acknowledge the manifest with the statement knowledge
                // gathered above.
                let ack = BackedCandidateAcknowledgement {
                    candidate_hash: manifest.candidate_hash,
                    statement_knowledge: StatementFilter { seconded_in_group, validated_in_group },
                };
                node_sender
                    .start_send(NetworkMessage::MessageFromPeer(
                        *self.test_authorities.peer_ids.get(index).unwrap(),
                        ValidationProtocols::V3(ValidationProtocol::StatementDistribution(
                            StatementDistributionMessage::BackedCandidateKnown(ack),
                        )),
                    ))
                    .unwrap();
                // Mark the manifest as fully processed for this candidate so
                // the benchmark can detect completion.
                self.manifests_tracker
                    .get(&manifest.candidate_hash)
                    .unwrap()
                    .as_ref()
                    .store(true, Ordering::SeqCst);
                None
            },
            // The node confirms it already knows a backed candidate: just
            // record the acknowledgement for completion tracking.
            NetworkMessage::MessageFromNode(
                _authority_id,
                ValidationProtocols::V3(ValidationProtocol::StatementDistribution(
                    StatementDistributionMessage::BackedCandidateKnown(ack),
                )),
            ) => {
                self.manifests_tracker
                    .get(&ack.candidate_hash)
                    .unwrap()
                    .as_ref()
                    .store(true, Ordering::SeqCst);
                None
            },
            // Anything else is not ours to handle — pass it back to the chain
            // of network-message handlers.
            _ => Some(message),
        }
    }
}
@@ -0,0 +1,183 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Test usage implementation
use colored::Colorize;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Aggregated resource usage collected during a benchmark run, split into
/// network and CPU resource classes.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BenchmarkUsage {
    // Network usage entries; values are in KiB (see the `Display` impl header).
    pub network_usage: Vec<ResourceUsage>,
    // CPU usage entries; values are in seconds (see the `Display` impl header).
    pub cpu_usage: Vec<ResourceUsage>,
}
impl std::fmt::Display for BenchmarkUsage {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"\n{}\n{}\n\n{}\n{}\n",
format!("{:<64}{:>12}{:>12}", "Network usage, KiB", "total", "per block").blue(),
self.network_usage
.iter()
.map(|v| v.to_string())
.sorted()
.collect::<Vec<String>>()
.join("\n"),
format!("{:<64}{:>12}{:>12}", "CPU usage, seconds", "total", "per block").blue(),
self.cpu_usage
.iter()
.map(|v| v.to_string())
.sorted()
.collect::<Vec<String>>()
.join("\n")
)
}
}
impl BenchmarkUsage {
pub fn average(usages: &[Self]) -> Self {
let all_network_usages: Vec<&ResourceUsage> =
usages.iter().flat_map(|v| &v.network_usage).collect();
let all_cpu_usage: Vec<&ResourceUsage> = usages.iter().flat_map(|v| &v.cpu_usage).collect();
Self {
network_usage: ResourceUsage::average_by_resource_name(&all_network_usages),
cpu_usage: ResourceUsage::average_by_resource_name(&all_cpu_usage),
}
}
pub fn check_network_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
check_usage(&self.network_usage, checks)
}
pub fn check_cpu_usage(&self, checks: &[ResourceUsageCheck]) -> Vec<String> {
check_usage(&self.cpu_usage, checks)
}
pub fn cpu_usage_diff(&self, other: &Self, resource_name: &str) -> Option<f64> {
let self_res = self.cpu_usage.iter().find(|v| v.resource_name == resource_name);
let other_res = other.cpu_usage.iter().find(|v| v.resource_name == resource_name);
match (self_res, other_res) {
(Some(self_res), Some(other_res)) => Some(self_res.diff(other_res)),
_ => None,
}
}
// Prepares a json string for a graph representation
// See: https://github.com/benchmark-action/github-action-benchmark?tab=readme-ov-file#examples
pub fn to_chart_json(&self) -> color_eyre::eyre::Result<String> {
let chart = self
.network_usage
.iter()
.map(|v| ChartItem {
name: v.resource_name.clone(),
unit: "KiB".to_string(),
value: v.per_block,
})
.chain(self.cpu_usage.iter().map(|v| ChartItem {
name: v.resource_name.clone(),
unit: "seconds".to_string(),
value: v.per_block,
}))
.collect::<Vec<_>>();
Ok(serde_json::to_string(&chart)?)
}
}
/// Applies every check to `usage` and collects the violation messages.
fn check_usage(usage: &[ResourceUsage], checks: &[ResourceUsageCheck]) -> Vec<String> {
    let mut violations = Vec::new();
    for check in checks {
        if let Some(violation) = check_resource_usage(usage, check) {
            violations.push(violation);
        }
    }
    violations
}
/// Checks that the named resource's per-block value is within `precision`
/// relative deviation of `base`.
///
/// Returns `None` when the check passes, otherwise a human-readable
/// explanation (including when the resource is absent from `usage`).
fn check_resource_usage(
    usage: &[ResourceUsage],
    (resource_name, base, precision): &ResourceUsageCheck,
) -> Option<String> {
    let Some(found) = usage.iter().find(|v| v.resource_name == *resource_name) else {
        return Some(format!("The resource `{resource_name}` is not found"));
    };
    let diff = (base - found.per_block).abs() / base;
    // Kept as `diff < precision` (rather than `diff >= precision`) so a NaN
    // diff (e.g. `base == 0`) is reported as a failure, as before.
    if diff < *precision {
        return None;
    }
    Some(format!(
        "The resource `{}` is expected to be equal to {} with a precision {}, but the current value is {} ({})",
        resource_name, base, precision, found.per_block, diff
    ))
}
/// Usage of a single named resource, both in total and normalized per block.
/// Units depend on the resource class (KiB for network, seconds for CPU —
/// see `BenchmarkUsage`).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ResourceUsage {
    // Identifier of the measured resource (used as the grouping key).
    pub resource_name: String,
    // Total consumption over the whole run.
    pub total: f64,
    // Consumption normalized per block.
    pub per_block: f64,
}
impl std::fmt::Display for ResourceUsage {
    /// One table row: colored name padded to 64 columns, then the total and
    /// per-block values right-aligned with 4 decimal places.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let name = self.resource_name.cyan();
        write!(f, "{:<64}{:>12.4}{:>12.4}", name, self.total, self.per_block)
    }
}
impl ResourceUsage {
    /// Averages the given usage entries grouped by resource name, printing the
    /// relative standard deviation of the per-block values for each group.
    ///
    /// Output order follows the map's iteration order and is unspecified.
    fn average_by_resource_name(usages: &[&Self]) -> Vec<Self> {
        let mut by_name: HashMap<String, Vec<&Self>> = Default::default();
        for usage in usages {
            by_name.entry(usage.resource_name.clone()).or_default().push(usage);
        }
        let mut average = vec![];
        for (resource_name, values) in by_name {
            let total = values.iter().map(|v| v.total).sum::<f64>() / values.len() as f64;
            let per_block = values.iter().map(|v| v.per_block).sum::<f64>() / values.len() as f64;
            let per_block_sd =
                standard_deviation(&values.iter().map(|v| v.per_block).collect::<Vec<f64>>());
            // Diagnostic only; NOTE(review): prints NaN/inf when `per_block`
            // is 0 — acceptable for a log line, but worth confirming.
            // Fixed typo: "standart_deviation" -> "standard_deviation".
            println!(
                "[{}] standard_deviation {:.2}%",
                resource_name,
                per_block_sd / per_block * 100.0
            );
            average.push(Self { resource_name, total, per_block });
        }
        average
    }
    /// Relative per-block difference between `self` and `other`, normalized
    /// by `self.per_block` (NaN/inf when `self.per_block` is 0).
    fn diff(&self, other: &Self) -> f64 {
        (self.per_block - other.per_block).abs() / self.per_block
    }
}
/// A usage assertion: (resource name, expected per-block value, allowed
/// relative deviation) — see `check_resource_usage`.
type ResourceUsageCheck<'a> = (&'a str, f64, f64);
/// One data point in the JSON emitted by `BenchmarkUsage::to_chart_json`,
/// matching the github-action-benchmark "customSmallerIsBetter" item shape.
#[derive(Debug, Serialize)]
pub struct ChartItem {
    // Resource name shown on the chart.
    pub name: String,
    // Measurement unit label ("KiB" or "seconds").
    pub unit: String,
    // Per-block value plotted on the chart.
    pub value: f64,
}
/// Sample (Bessel-corrected) standard deviation of `values`.
///
/// Returns 0.0 for fewer than two samples, where the sample deviation is
/// undefined — the previous implementation divided by `n - 1 == 0` (or
/// `n == 0`) and yielded NaN.
fn standard_deviation(values: &[f64]) -> f64 {
    if values.len() < 2 {
        return 0.0
    }
    let n = values.len() as f64;
    let mean = values.iter().sum::<f64>() / n;
    // Divide by n - 1 (sample variance), not n (population variance).
    let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n - 1.0);
    variance.sqrt()
}
@@ -0,0 +1,41 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Test utils
use std::{fs::File, io::Write};
/// Saves `value` to a file at `path`, resolved relative to the cargo
/// workspace root; missing parent directories are created.
///
/// # Errors
///
/// Returns an error when `cargo locate-project` cannot be spawned or fails,
/// when its output is not valid UTF-8, or on any filesystem error. The
/// previous implementation `unwrap()`ed these despite returning `Result`.
pub fn save_to_file(path: &str, value: String) -> color_eyre::eyre::Result<()> {
    // `cargo locate-project --workspace` prints the path of the workspace
    // `Cargo.toml`; its parent directory is the workspace root. `env!("CARGO")`
    // is the cargo binary that compiled this tool, resolved at compile time.
    let output = std::process::Command::new(env!("CARGO"))
        .arg("locate-project")
        .arg("--workspace")
        .arg("--message-format=plain")
        .output()?;
    if !output.status.success() {
        return Err(color_eyre::eyre::eyre!("`cargo locate-project` failed: {}", output.status));
    }
    let manifest_path = std::str::from_utf8(&output.stdout)?.trim();
    let workspace_dir = std::path::Path::new(manifest_path)
        .parent()
        .ok_or_else(|| color_eyre::eyre::eyre!("workspace manifest path has no parent"))?;
    let path = workspace_dir.join(path);
    if let Some(dir) = path.parent() {
        std::fs::create_dir_all(dir)?;
    }
    let mut file = File::create(path)?;
    file.write_all(value.as_bytes())?;
    Ok(())
}