Mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git, synced 2026-04-28 01:38:04 +00:00
Remove request multiplexer (#3624)
* WIP: Get rid of request multiplexer.
* WIP
* Receiver for handling of incoming requests.
* Get rid of useless `Fault` abstraction. The things the type system lets us do are not worth abstracting into their own type. Instead, error handling is going to be merely a pattern.
* Make most things compile again.
* Port availability distribution away from request multiplexer.
* Formatting.
* Port dispute distribution over.
* Fix up statement distribution.
* Handle requests directly in collator protocol. Only allow fatal errors at top level.
* Use direct request channel for availability recovery.
* Finally get rid of request multiplexer. Fixes #2842 and paves the way for more back-pressure possibilities.
* Fix overseer and statement distribution tests.
* Fix collator protocol and network bridge tests.
* Fix tests in availability recovery.
* Fix availability distribution tests.
* Add missing dependency.
* Typos.
* Review remarks.
* More remarks.
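In place of the multiplexer, each request protocol now hands its subsystem a dedicated receiver: the test-harness diff below calls `IncomingRequest::get_config_receiver()` to obtain the receiver together with the network-side `RequestResponseConfig`, and passes the receivers into the subsystem constructor. The following is a minimal, self-contained sketch of that pattern built on plain `futures` channels; `ChunkRequest`, `get_config_receiver`, and `AvailabilitySubsystem` are illustrative stand-ins, not the real Polkadot types, and the bounded channel stands in for the config's inbound queue.

// Sketch (not Polkadot code): each request protocol gets its own bounded channel.
// The network side keeps the sender (analogous to the inbound_queue of a
// RequestResponseConfig); the subsystem owns the receiver and polls it directly.
use futures::{channel::mpsc, executor, SinkExt, StreamExt};

// Hypothetical request type standing in for e.g. ChunkFetchingRequest.
struct ChunkRequest {
    chunk_index: u32,
    respond: futures::channel::oneshot::Sender<Vec<u8>>,
}

// Analogue of `IncomingRequest::get_config_receiver()`: the receiver goes to the
// subsystem, the sender is what the network layer would register for the protocol.
fn get_config_receiver() -> (mpsc::Receiver<ChunkRequest>, mpsc::Sender<ChunkRequest>) {
    // A small bound so that unserved requests push back on the sender side.
    let (tx, rx) = mpsc::channel(8);
    (rx, tx)
}

// A subsystem constructed with its request receiver instead of pulling everything
// out of one multiplexed stream.
struct AvailabilitySubsystem {
    chunk_req_receiver: mpsc::Receiver<ChunkRequest>,
}

impl AvailabilitySubsystem {
    async fn run(mut self) {
        while let Some(req) = self.chunk_req_receiver.next().await {
            // Serve the request; a real subsystem would look the chunk up in its store.
            let _ = req.respond.send(vec![req.chunk_index as u8; 4]);
        }
    }
}

fn main() {
    let (chunk_req_receiver, mut chunk_req_sender) = get_config_receiver();
    let subsystem = AvailabilitySubsystem { chunk_req_receiver };

    executor::block_on(async {
        // "Network" side: inject one request, then drop the sender so `run` terminates.
        let (respond, response) = futures::channel::oneshot::channel();
        chunk_req_sender.send(ChunkRequest { chunk_index: 0, respond }).await.unwrap();
        drop(chunk_req_sender);

        futures::join!(subsystem.run(), async {
            assert_eq!(response.await.unwrap(), vec![0u8; 4]);
        });
    });
}

Because each subsystem polls its own bounded queue, a slow subsystem only slows down its own protocol, which is the back-pressure possibility the commit message refers to.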
@@ -18,6 +18,7 @@ use std::collections::HashSet;
 
 use futures::{executor, future, Future};
 
+use polkadot_node_network_protocol::request_response::IncomingRequest;
 use polkadot_primitives::v1::CoreState;
 use sp_keystore::SyncCryptoStorePtr;
 
@@ -41,17 +42,21 @@ fn test_harness<T: Future<Output = ()>>(
     let pool = sp_core::testing::TaskExecutor::new();
     let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone());
 
-    let subsystem = AvailabilityDistributionSubsystem::new(keystore, Default::default());
-    {
-        let subsystem = subsystem.run(context);
+    let (pov_req_receiver, pov_req_cfg) = IncomingRequest::get_config_receiver();
+    let (chunk_req_receiver, chunk_req_cfg) = IncomingRequest::get_config_receiver();
+    let subsystem = AvailabilityDistributionSubsystem::new(
+        keystore,
+        IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver },
+        Default::default(),
+    );
+    let subsystem = subsystem.run(context);
 
-        let test_fut = test_fx(TestHarness { virtual_overseer, pool });
+    let test_fut = test_fx(TestHarness { virtual_overseer, pov_req_cfg, chunk_req_cfg, pool });
 
-        futures::pin_mut!(test_fut);
-        futures::pin_mut!(subsystem);
+    futures::pin_mut!(test_fut);
+    futures::pin_mut!(subsystem);
 
-        executor::block_on(future::join(test_fut, subsystem)).1.unwrap();
-    }
+    executor::block_on(future::join(test_fut, subsystem)).1.unwrap();
 }
 
 /// Simple basic check, whether the subsystem works as expected.
@@ -30,7 +30,7 @@ use futures::{
 use futures_timer::Delay;
 
 use sc_network as network;
-use sc_network::{config as netconfig, IfDisconnected};
+use sc_network::{config as netconfig, config::RequestResponseConfig, IfDisconnected};
 use sp_core::{testing::TaskExecutor, traits::SpawnNamed};
 use sp_keystore::SyncCryptoStorePtr;
 
@@ -59,6 +59,8 @@ use crate::LOG_TARGET;
 type VirtualOverseer = test_helpers::TestSubsystemContextHandle<AvailabilityDistributionMessage>;
 pub struct TestHarness {
     pub virtual_overseer: VirtualOverseer,
+    pub pov_req_cfg: RequestResponseConfig,
+    pub chunk_req_cfg: RequestResponseConfig,
     pub pool: TaskExecutor,
 }
 
@@ -152,9 +154,7 @@ impl TestState {
     /// Run, but fail after some timeout.
     pub async fn run(self, harness: TestHarness) {
         // Make sure test won't run forever.
-        let f = self
-            .run_inner(harness.pool, harness.virtual_overseer)
-            .timeout(Duration::from_secs(10));
+        let f = self.run_inner(harness).timeout(Duration::from_secs(10));
         assert!(f.await.is_some(), "Test ran into timeout");
     }
 
@@ -166,7 +166,7 @@ impl TestState {
     ///
     /// We try to be as agnostic about details as possible, how the subsystem achieves those goals
     /// should not be a matter to this test suite.
-    async fn run_inner(mut self, executor: TaskExecutor, virtual_overseer: VirtualOverseer) {
+    async fn run_inner(mut self, mut harness: TestHarness) {
         // We skip genesis here (in reality ActiveLeavesUpdate can also skip a block:
         let updates = {
             let mut advanced = self.relay_chain.iter();
@@ -191,12 +191,12 @@ impl TestState {
         // Test will fail if this does not happen until timeout.
         let mut remaining_stores = self.valid_chunks.len();
 
-        let TestSubsystemContextHandle { tx, mut rx } = virtual_overseer;
+        let TestSubsystemContextHandle { tx, mut rx } = harness.virtual_overseer;
 
         // Spawning necessary as incoming queue can only hold a single item, we don't want to dead
        // lock ;-)
         let update_tx = tx.clone();
-        executor.spawn(
+        harness.pool.spawn(
             "Sending active leaves updates",
             async move {
                 for update in updates {
@@ -219,16 +219,15 @@ impl TestState {
                 )) => {
                     for req in reqs {
                         // Forward requests:
-                        let in_req = to_incoming_req(&executor, req);
-
-                        executor.spawn(
-                            "Request forwarding",
-                            overseer_send(
-                                tx.clone(),
-                                AvailabilityDistributionMessage::ChunkFetchingRequest(in_req),
-                            )
-                            .boxed(),
-                        );
+                        let in_req = to_incoming_req(&harness.pool, req);
+                        harness
+                            .chunk_req_cfg
+                            .inbound_queue
+                            .as_mut()
+                            .unwrap()
+                            .send(in_req.into_raw())
+                            .await
+                            .unwrap();
                     }
                 },
                 AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryChunk(
@@ -295,18 +294,6 @@ async fn overseer_signal(
     tx.send(FromOverseer::Signal(msg)).await.expect("Test subsystem no longer live");
 }
 
-async fn overseer_send(
-    mut tx: SingleItemSink<FromOverseer<AvailabilityDistributionMessage>>,
-    msg: impl Into<AvailabilityDistributionMessage>,
-) {
-    let msg = msg.into();
-    tracing::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
-    tx.send(FromOverseer::Communication { msg })
-        .await
-        .expect("Test subsystem no longer live");
-    tracing::trace!(target: LOG_TARGET, "sent message");
-}
-
 async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver<AllMessages>) -> AllMessages {
     tracing::trace!(target: LOG_TARGET, "waiting for message ...");
     rx.next().await.expect("Test subsystem no longer live")
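On the test side, the hunks above replace the old `overseer_send(.. ChunkFetchingRequest(in_req))` forwarding with pushing the raw request straight into the protocol's `inbound_queue`, while `TestState::run` keeps its ten-second timeout guard. Below is a rough, self-contained sketch of that shape; the channel setup, the doubling "subsystem", and `run_inner`'s body are invented stand-ins, with only the queue-push idea and the timeout-guard pattern taken from the diff.

// Sketch only: the test drives a toy subsystem by pushing requests into the same
// bounded queue the network layer would use, and aborts if the test hangs.
use std::time::Duration;

use futures::{channel::mpsc, executor, future::{self, Either}, pin_mut, SinkExt, StreamExt};
use futures_timer::Delay;

async fn run_inner(mut inbound_queue: mpsc::Sender<u32>, mut rx: mpsc::Receiver<u32>) {
    // Forward a "raw" request to the subsystem via its dedicated queue ...
    inbound_queue.send(42).await.unwrap();
    // ... and observe the subsystem's answer.
    assert_eq!(rx.next().await, Some(84));
}

fn main() {
    let (inbound_tx, mut inbound_rx) = mpsc::channel(1);
    let (mut out_tx, out_rx) = mpsc::channel(1);

    // Toy subsystem: answers every request on its dedicated queue by doubling it.
    let subsystem = async move {
        while let Some(req) = inbound_rx.next().await {
            out_tx.send(req * 2).await.unwrap();
        }
    };

    // Make sure the test cannot run forever, mirroring the timeout in TestState::run.
    let test = async {
        let timeout = Delay::new(Duration::from_secs(10));
        let test_fut = run_inner(inbound_tx, out_rx);
        pin_mut!(timeout);
        pin_mut!(test_fut);
        match future::select(test_fut, timeout).await {
            Either::Left(_) => {}
            Either::Right(_) => panic!("Test ran into timeout"),
        }
    };

    executor::block_on(async {
        futures::join!(subsystem, test);
    });
}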