Rework the event system of sc-network (#1370)

This commit introduces a new concept called `NotificationService` which
allows Polkadot protocols to communicate with the underlying
notification protocol implementation directly, without routing events
through `NetworkWorker`. This implies that each protocol has its own
service which it uses to communicate with remote peers and that each
`NotificationService` is unique with respect to the underlying
notification protocol, meaning `NotificationService` for the transaction
protocol can only be used to send and receive transaction-related
notifications.

The `NotificationService` concept introduces two additional benefits:
  * allow protocols to start using custom handshakes
  * allow protocols to accept/reject inbound peers

Previously the validation of inbound connections was solely the
responsibility of `ProtocolController`. This caused issues with light
peers and `SyncingEngine` as `ProtocolController` would accept more
peers than `SyncingEngine` could accept, which caused peers to have
differing views of their own states. `SyncingEngine` would reject excess
peers but these rejections were not properly communicated to those peers
causing them to assume that they were accepted.

With `NotificationService`, the local handshake is not sent to the remote
peer if that peer is rejected, which allows the rejected peer to detect
that it was rejected.

This commit also deprecates the use of `NetworkEventStream` for all
notification-related events and going forward only DHT events are
provided through `NetworkEventStream`. If protocols wish to follow each
other's events, they must introduce additional abstractions, as is done
for GRANDPA and transactions protocols by following the syncing protocol
through `SyncEventStream`.

Fixes https://github.com/paritytech/polkadot-sdk/issues/512
Fixes https://github.com/paritytech/polkadot-sdk/issues/514
Fixes https://github.com/paritytech/polkadot-sdk/issues/515
Fixes https://github.com/paritytech/polkadot-sdk/issues/554
Fixes https://github.com/paritytech/polkadot-sdk/issues/556

---
These changes are transferred from
https://github.com/paritytech/substrate/pull/14197 but there are no
functional changes compared to that PR

---------

Co-authored-by: Dmitry Markin <dmitry@markin.tech>
Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
This commit is contained in:
Aaro Altonen
2023-11-28 20:18:52 +02:00
committed by GitHub
parent ec3a61ed86
commit e71c484d5b
102 changed files with 5694 additions and 2603 deletions
+60 -37
View File
@@ -58,7 +58,7 @@ use sc_network::{
request_responses::ProtocolConfig as RequestResponseConfig,
types::ProtocolName,
Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest,
NetworkWorker,
NetworkWorker, NotificationService,
};
use sc_network_common::role::Roles;
use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -239,6 +239,7 @@ pub struct Peer<D, BlockImport> {
imported_blocks_stream: Pin<Box<dyn Stream<Item = BlockImportNotification<Block>> + Send>>,
finality_notification_stream: Pin<Box<dyn Stream<Item = FinalityNotification<Block>> + Send>>,
listen_addr: Multiaddr,
notification_services: HashMap<ProtocolName, Box<dyn NotificationService>>,
}
impl<D, B> Peer<D, B>
@@ -263,8 +264,8 @@ where
}
/// Returns the number of peers we're connected to.
pub fn num_peers(&self) -> usize {
self.network.num_connected_peers()
pub async fn num_peers(&self) -> usize {
self.sync_service.status().await.unwrap().num_connected_peers as usize
}
/// Returns the number of downloaded blocks.
@@ -502,10 +503,19 @@ where
self.network.service()
}
/// Get `SyncingService`.
pub fn sync_service(&self) -> &Arc<SyncingService<Block>> {
&self.sync_service
}
/// Take notification handle for enabled protocol.
pub fn take_notification_service(
&mut self,
protocol: &ProtocolName,
) -> Option<Box<dyn NotificationService>> {
self.notification_services.remove(protocol)
}
/// Get a reference to the network worker.
pub fn network(&self) -> &NetworkWorker<Block, <Block as BlockT>::Hash> {
&self.network
@@ -778,6 +788,23 @@ pub trait TestNetFactory: Default + Sized + Send {
network_config.transport = TransportConfig::MemoryOnly;
network_config.listen_addresses = vec![listen_addr.clone()];
network_config.allow_non_globals_in_dht = true;
let (notif_configs, notif_handles): (Vec<_>, Vec<_>) = config
.notifications_protocols
.into_iter()
.map(|p| {
let (config, handle) = NonDefaultSetConfig::new(
p.clone(),
Vec::new(),
1024 * 1024,
None,
Default::default(),
);
(config, (p, handle))
})
.unzip();
if let Some(connect_to) = config.connect_to_peers {
let addrs = connect_to
.iter()
@@ -849,11 +876,16 @@ pub trait TestNetFactory: Default + Sized + Send {
protocol_config
};
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
self.spawn_task(peer_store.run().boxed());
let block_announce_validator = config
.block_announce_validator
.unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator));
let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
let (engine, sync_service, block_announce_config) =
sc_network_sync::engine::SyncingEngine::new(
Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
@@ -869,7 +901,7 @@ pub trait TestNetFactory: Default + Sized + Send {
block_relay_params.downloader,
state_request_protocol_config.name.clone(),
Some(warp_protocol_config.name.clone()),
rx,
peer_store_handle.clone(),
)
.unwrap();
let sync_service_import_queue = Box::new(sync_service.clone());
@@ -887,22 +919,10 @@ pub trait TestNetFactory: Default + Sized + Send {
full_net_config.add_request_response_protocol(config);
}
for protocol in config.notifications_protocols {
full_net_config.add_notification_protocol(NonDefaultSetConfig {
notifications_protocol: protocol,
fallback_names: Vec::new(),
max_notification_size: 1024 * 1024,
handshake: None,
set_config: Default::default(),
});
for config in notif_configs {
full_net_config.add_notification_protocol(config);
}
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
self.spawn_task(peer_store.run().boxed());
let genesis_hash =
client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
let network = NetworkWorker::new(sc_network::config::Params {
@@ -917,7 +937,6 @@ pub trait TestNetFactory: Default + Sized + Send {
fork_id,
metrics_registry: None,
block_announce_config,
tx,
})
.unwrap();
@@ -953,6 +972,7 @@ pub trait TestNetFactory: Default + Sized + Send {
backend: Some(backend),
imported_blocks_stream,
finality_notification_stream,
notification_services: HashMap::from_iter(notif_handles.into_iter()),
block_import,
verifier,
network,
@@ -967,20 +987,6 @@ pub trait TestNetFactory: Default + Sized + Send {
tokio::spawn(f);
}
/// Polls the testnet until all peers are connected to each other.
///
/// Must be executed in a task context.
fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> {
self.poll(cx);
let num_peers = self.peers().len();
if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) {
return Poll::Ready(())
}
Poll::Pending
}
async fn is_in_sync(&mut self) -> bool {
let mut highest = None;
let peers = self.peers_mut();
@@ -1058,10 +1064,27 @@ pub trait TestNetFactory: Default + Sized + Send {
}
/// Run the network until all peers are connected to each other.
///
/// Calls `poll_until_connected` repeatedly with the runtime passed as parameter.
async fn run_until_connected(&mut self) {
futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await;
let num_peers = self.peers().len();
let sync_services =
self.peers().iter().map(|info| info.sync_service.clone()).collect::<Vec<_>>();
'outer: loop {
for sync_service in &sync_services {
if sync_service.status().await.unwrap().num_connected_peers as usize !=
num_peers - 1
{
futures::future::poll_fn::<(), _>(|cx| {
self.poll(cx);
Poll::Ready(())
})
.await;
continue 'outer
}
}
break
}
}
/// Polls the testnet. Processes all the pending actions.