Rework the event system of sc-network (#1370)

This commit introduces a new concept called `NotificationService` which
allows Polkadot protocols to communicate with the underlying
notification protocol implementation directly, without routing events
through `NetworkWorker`. This implies that each protocol has its own
service which it uses to communicate with remote peers and that each
`NotificationService` is unique with respect to the underlying
notification protocol, meaning `NotificationService` for the transaction
protocol can only be used to send and receive transaction-related
notifications.

The `NotificationService` concept introduces two additional benefits:
  * allow protocols to start using custom handshakes
  * allow protocols to accept/reject inbound peers

Previously the validation of inbound connections was solely the
responsibility of `ProtocolController`. This caused issues with light
peers and `SyncingEngine` as `ProtocolController` would accept more
peers than `SyncingEngine` could accept which caused peers to have
differing views of their own states. `SyncingEngine` would reject excess
peers but these rejections were not properly communicated to those peers
causing them to assume that they were accepted.

With `NotificationService`, the local handshake is not sent to the remote
peer if that peer is rejected, which allows it to detect that it was rejected.

This commit also deprecates the use of `NetworkEventStream` for all
notification-related events; going forward, only DHT events are
provided through `NetworkEventStream`. If protocols wish to follow each
other's events, they must introduce additional abstractions, as is done
for the GRANDPA and transactions protocols by following the syncing protocol
through `SyncEventStream`.

Fixes https://github.com/paritytech/polkadot-sdk/issues/512
Fixes https://github.com/paritytech/polkadot-sdk/issues/514
Fixes https://github.com/paritytech/polkadot-sdk/issues/515
Fixes https://github.com/paritytech/polkadot-sdk/issues/554
Fixes https://github.com/paritytech/polkadot-sdk/issues/556

---
These changes are transferred from
https://github.com/paritytech/substrate/pull/14197, but there are no
functional changes compared to that PR.

---------

Co-authored-by: Dmitry Markin <dmitry@markin.tech>
Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
This commit is contained in:
Aaro Altonen
2023-11-28 20:18:52 +02:00
committed by GitHub
parent ec3a61ed86
commit e71c484d5b
102 changed files with 5694 additions and 2603 deletions
+60 -37
View File
@@ -58,7 +58,7 @@ use sc_network::{
request_responses::ProtocolConfig as RequestResponseConfig,
types::ProtocolName,
Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest,
NetworkWorker,
NetworkWorker, NotificationService,
};
use sc_network_common::role::Roles;
use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -239,6 +239,7 @@ pub struct Peer<D, BlockImport> {
imported_blocks_stream: Pin<Box<dyn Stream<Item = BlockImportNotification<Block>> + Send>>,
finality_notification_stream: Pin<Box<dyn Stream<Item = FinalityNotification<Block>> + Send>>,
listen_addr: Multiaddr,
notification_services: HashMap<ProtocolName, Box<dyn NotificationService>>,
}
impl<D, B> Peer<D, B>
@@ -263,8 +264,8 @@ where
}
/// Returns the number of peers we're connected to.
pub fn num_peers(&self) -> usize {
self.network.num_connected_peers()
pub async fn num_peers(&self) -> usize {
self.sync_service.status().await.unwrap().num_connected_peers as usize
}
/// Returns the number of downloaded blocks.
@@ -502,10 +503,19 @@ where
self.network.service()
}
/// Get `SyncingService`.
pub fn sync_service(&self) -> &Arc<SyncingService<Block>> {
&self.sync_service
}
/// Take notification handle for enabled protocol.
pub fn take_notification_service(
&mut self,
protocol: &ProtocolName,
) -> Option<Box<dyn NotificationService>> {
self.notification_services.remove(protocol)
}
/// Get a reference to the network worker.
pub fn network(&self) -> &NetworkWorker<Block, <Block as BlockT>::Hash> {
&self.network
@@ -778,6 +788,23 @@ pub trait TestNetFactory: Default + Sized + Send {
network_config.transport = TransportConfig::MemoryOnly;
network_config.listen_addresses = vec![listen_addr.clone()];
network_config.allow_non_globals_in_dht = true;
let (notif_configs, notif_handles): (Vec<_>, Vec<_>) = config
.notifications_protocols
.into_iter()
.map(|p| {
let (config, handle) = NonDefaultSetConfig::new(
p.clone(),
Vec::new(),
1024 * 1024,
None,
Default::default(),
);
(config, (p, handle))
})
.unzip();
if let Some(connect_to) = config.connect_to_peers {
let addrs = connect_to
.iter()
@@ -849,11 +876,16 @@ pub trait TestNetFactory: Default + Sized + Send {
protocol_config
};
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
self.spawn_task(peer_store.run().boxed());
let block_announce_validator = config
.block_announce_validator
.unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator));
let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
let (engine, sync_service, block_announce_config) =
sc_network_sync::engine::SyncingEngine::new(
Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }),
@@ -869,7 +901,7 @@ pub trait TestNetFactory: Default + Sized + Send {
block_relay_params.downloader,
state_request_protocol_config.name.clone(),
Some(warp_protocol_config.name.clone()),
rx,
peer_store_handle.clone(),
)
.unwrap();
let sync_service_import_queue = Box::new(sync_service.clone());
@@ -887,22 +919,10 @@ pub trait TestNetFactory: Default + Sized + Send {
full_net_config.add_request_response_protocol(config);
}
for protocol in config.notifications_protocols {
full_net_config.add_notification_protocol(NonDefaultSetConfig {
notifications_protocol: protocol,
fallback_names: Vec::new(),
max_notification_size: 1024 * 1024,
handshake: None,
set_config: Default::default(),
});
for config in notif_configs {
full_net_config.add_notification_protocol(config);
}
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
self.spawn_task(peer_store.run().boxed());
let genesis_hash =
client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
let network = NetworkWorker::new(sc_network::config::Params {
@@ -917,7 +937,6 @@ pub trait TestNetFactory: Default + Sized + Send {
fork_id,
metrics_registry: None,
block_announce_config,
tx,
})
.unwrap();
@@ -953,6 +972,7 @@ pub trait TestNetFactory: Default + Sized + Send {
backend: Some(backend),
imported_blocks_stream,
finality_notification_stream,
notification_services: HashMap::from_iter(notif_handles.into_iter()),
block_import,
verifier,
network,
@@ -967,20 +987,6 @@ pub trait TestNetFactory: Default + Sized + Send {
tokio::spawn(f);
}
/// Polls the testnet until all peers are connected to each other.
///
/// Must be executed in a task context.
fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> {
self.poll(cx);
let num_peers = self.peers().len();
if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) {
return Poll::Ready(())
}
Poll::Pending
}
async fn is_in_sync(&mut self) -> bool {
let mut highest = None;
let peers = self.peers_mut();
@@ -1058,10 +1064,27 @@ pub trait TestNetFactory: Default + Sized + Send {
}
/// Run the network until all peers are connected to each other.
///
/// Calls `poll_until_connected` repeatedly with the runtime passed as parameter.
async fn run_until_connected(&mut self) {
futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await;
let num_peers = self.peers().len();
let sync_services =
self.peers().iter().map(|info| info.sync_service.clone()).collect::<Vec<_>>();
'outer: loop {
for sync_service in &sync_services {
if sync_service.status().await.unwrap().num_connected_peers as usize !=
num_peers - 1
{
futures::future::poll_fn::<(), _>(|cx| {
self.poll(cx);
Poll::Ready(())
})
.await;
continue 'outer
}
}
break
}
}
/// Polls the testnet. Processes all the pending actions.
+167 -174
View File
@@ -24,8 +24,9 @@ use sc_network::{
config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig},
event::Event,
peer_store::PeerStore,
NetworkEventStream, NetworkNotification, NetworkPeers, NetworkService, NetworkStateInfo,
NetworkWorker,
service::traits::{NotificationEvent, ValidationResult},
NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker,
NotificationService,
};
use sc_network_common::role::Roles;
use sc_network_light::light_client_requests::handler::LightClientRequestHandler;
@@ -116,7 +117,7 @@ impl TestNetworkBuilder {
self
}
pub fn build(mut self) -> TestNetwork {
pub fn build(mut self) -> (TestNetwork, Option<Box<dyn NotificationService>>) {
let client = self.client.as_mut().map_or(
Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0),
|v| v.clone(),
@@ -183,7 +184,12 @@ impl TestNetworkBuilder {
protocol_config
};
let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000);
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
tokio::spawn(peer_store.run().boxed());
let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new(
Roles::from(&config::Role::Full),
client.clone(),
@@ -198,24 +204,27 @@ impl TestNetworkBuilder {
block_relay_params.downloader,
state_request_protocol_config.name.clone(),
None,
rx,
peer_store_handle.clone(),
)
.unwrap();
let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone()));
if !self.notification_protocols.is_empty() {
let handle = if !self.notification_protocols.is_empty() {
for config in self.notification_protocols {
full_net_config.add_notification_protocol(config);
}
None
} else {
full_net_config.add_notification_protocol(config::NonDefaultSetConfig {
notifications_protocol: PROTOCOL_NAME.into(),
fallback_names: Vec::new(),
max_notification_size: 1024 * 1024,
handshake: None,
set_config: self.set_config.unwrap_or_default(),
});
}
let (config, handle) = config::NonDefaultSetConfig::new(
PROTOCOL_NAME.into(),
Vec::new(),
1024 * 1024,
None,
self.set_config.unwrap_or_default(),
);
full_net_config.add_notification_protocol(config);
Some(handle)
};
for config in [
block_relay_params.request_response_config,
@@ -225,12 +234,6 @@ impl TestNetworkBuilder {
full_net_config.add_request_response_protocol(config);
}
let peer_store = PeerStore::new(
network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(),
);
let peer_store_handle = peer_store.handle();
tokio::spawn(peer_store.run().boxed());
let genesis_hash =
client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed");
let worker = NetworkWorker::<
@@ -248,7 +251,6 @@ impl TestNetworkBuilder {
protocol_id,
fork_id,
metrics_registry: None,
tx,
})
.unwrap();
@@ -268,7 +270,7 @@ impl TestNetworkBuilder {
});
tokio::spawn(engine.run());
TestNetwork::new(worker)
(TestNetwork::new(worker), handle)
}
}
@@ -276,18 +278,18 @@ impl TestNetworkBuilder {
/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered.
fn build_nodes_one_proto() -> (
Arc<TestNetworkService>,
impl Stream<Item = Event>,
Option<Box<dyn NotificationService>>,
Arc<TestNetworkService>,
impl Stream<Item = Event>,
Option<Box<dyn NotificationService>>,
) {
let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
let (node1, events_stream1) = TestNetworkBuilder::new()
let (network1, handle1) = TestNetworkBuilder::new()
.with_listen_addresses(vec![listen_addr.clone()])
.build()
.start_network();
.build();
let (node1, _) = network1.start_network();
let (node2, events_stream2) = TestNetworkBuilder::new()
let (network2, handle2) = TestNetworkBuilder::new()
.with_set_config(config::SetConfig {
reserved_nodes: vec![MultiaddrWithPeerId {
multiaddr: listen_addr,
@@ -295,10 +297,11 @@ fn build_nodes_one_proto() -> (
}],
..Default::default()
})
.build()
.start_network();
.build();
(node1, events_stream1, node2, events_stream2)
let (node2, _) = network2.start_network();
(node1, handle1, node2, handle2)
}
#[tokio::test]
@@ -306,22 +309,15 @@ async fn notifications_state_consistent() {
// Runs two nodes and ensures that events are propagated out of the API in a consistent
// correct order, which means no notification received on a closed substream.
let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
let (node1, handle1, node2, handle2) = build_nodes_one_proto();
let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap());
// Write some initial notifications that shouldn't get through.
for _ in 0..(rand::random::<u8>() % 5) {
node1.write_notification(
node2.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
}
for _ in 0..(rand::random::<u8>() % 5) {
node2.write_notification(
node1.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
}
// True if we have an active substream from node1 to node2.
@@ -343,18 +339,10 @@ async fn notifications_state_consistent() {
// Start by sending a notification from node1 to node2 and vice-versa. Part of the
// test consists in ensuring that notifications get ignored if the stream isn't open.
if rand::random::<u8>() % 5 >= 3 {
node1.write_notification(
node2.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
}
if rand::random::<u8>() % 5 >= 3 {
node2.write_notification(
node1.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
}
// Also randomly disconnect the two nodes from time to time.
@@ -367,8 +355,8 @@ async fn notifications_state_consistent() {
// Grab next event from either `events_stream1` or `events_stream2`.
let next_event = {
let next1 = events_stream1.next();
let next2 = events_stream2.next();
let next1 = handle1.next_event();
let next2 = handle2.next_event();
// We also await on a small timer, otherwise it is possible for the test to wait
// forever while nothing at all happens on the network.
let continue_test = futures_timer::Delay::new(Duration::from_millis(20));
@@ -383,58 +371,55 @@ async fn notifications_state_consistent() {
};
match next_event {
future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) =>
if protocol == PROTOCOL_NAME.into() {
something_happened = true;
assert!(!node1_to_node2_open);
node1_to_node2_open = true;
assert_eq!(remote, node2.local_peer_id());
},
future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) =>
if protocol == PROTOCOL_NAME.into() {
something_happened = true;
assert!(!node2_to_node1_open);
node2_to_node1_open = true;
assert_eq!(remote, node1.local_peer_id());
},
future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) =>
if protocol == PROTOCOL_NAME.into() {
assert!(node1_to_node2_open);
node1_to_node2_open = false;
assert_eq!(remote, node2.local_peer_id());
},
future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) =>
if protocol == PROTOCOL_NAME.into() {
assert!(node2_to_node1_open);
node2_to_node1_open = false;
assert_eq!(remote, node1.local_peer_id());
},
future::Either::Left(Event::NotificationsReceived { remote, .. }) => {
future::Either::Left(NotificationEvent::ValidateInboundSubstream {
result_tx, ..
}) => {
result_tx.send(ValidationResult::Accept).unwrap();
},
future::Either::Right(NotificationEvent::ValidateInboundSubstream {
result_tx,
..
}) => {
result_tx.send(ValidationResult::Accept).unwrap();
},
future::Either::Left(NotificationEvent::NotificationStreamOpened { peer, .. }) => {
something_happened = true;
assert!(!node1_to_node2_open);
node1_to_node2_open = true;
assert_eq!(peer, node2.local_peer_id());
},
future::Either::Right(NotificationEvent::NotificationStreamOpened { peer, .. }) => {
something_happened = true;
assert!(!node2_to_node1_open);
node2_to_node1_open = true;
assert_eq!(peer, node1.local_peer_id());
},
future::Either::Left(NotificationEvent::NotificationStreamClosed { peer, .. }) => {
assert!(node1_to_node2_open);
assert_eq!(remote, node2.local_peer_id());
if rand::random::<u8>() % 5 >= 4 {
node1.write_notification(
node2.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
}
node1_to_node2_open = false;
assert_eq!(peer, node2.local_peer_id());
},
future::Either::Right(Event::NotificationsReceived { remote, .. }) => {
future::Either::Right(NotificationEvent::NotificationStreamClosed { peer, .. }) => {
assert!(node2_to_node1_open);
assert_eq!(remote, node1.local_peer_id());
node2_to_node1_open = false;
assert_eq!(peer, node1.local_peer_id());
},
future::Either::Left(NotificationEvent::NotificationReceived { peer, .. }) => {
assert!(node1_to_node2_open);
assert_eq!(peer, node2.local_peer_id());
if rand::random::<u8>() % 5 >= 4 {
node2.write_notification(
node1.local_peer_id(),
PROTOCOL_NAME.into(),
b"hello world".to_vec(),
);
let _ = handle1
.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec());
}
},
future::Either::Right(NotificationEvent::NotificationReceived { peer, .. }) => {
assert!(node2_to_node1_open);
assert_eq!(peer, node1.local_peer_id());
if rand::random::<u8>() % 5 >= 4 {
let _ = handle2
.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec());
}
},
// Add new events here.
future::Either::Left(Event::Dht(_)) => {},
future::Either::Right(Event::Dht(_)) => {},
};
}
}
@@ -444,20 +429,29 @@ async fn lots_of_incoming_peers_works() {
sp_tracing::try_init_simple();
let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
let (main_node, _) = TestNetworkBuilder::new()
let (main_node, handle1) = TestNetworkBuilder::new()
.with_listen_addresses(vec![listen_addr.clone()])
.with_set_config(config::SetConfig { in_peers: u32::MAX, ..Default::default() })
.build()
.start_network();
.build();
let mut handle1 = handle1.unwrap();
let (main_node, _) = main_node.start_network();
let main_node_peer_id = main_node.local_peer_id();
tokio::spawn(async move {
while let Some(event) = handle1.next_event().await {
if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event {
result_tx.send(ValidationResult::Accept).unwrap();
}
}
});
// We spawn background tasks and push them in this `Vec`. They will all be waited upon before
// this test ends.
let mut background_tasks_to_wait = Vec::new();
for _ in 0..32 {
let (_dialing_node, event_stream) = TestNetworkBuilder::new()
let (dialing_node, handle) = TestNetworkBuilder::new()
.with_set_config(config::SetConfig {
reserved_nodes: vec![MultiaddrWithPeerId {
multiaddr: listen_addr.clone(),
@@ -465,8 +459,9 @@ async fn lots_of_incoming_peers_works() {
}],
..Default::default()
})
.build()
.start_network();
.build();
let mut handle = handle.unwrap();
let (_, _) = dialing_node.start_network();
background_tasks_to_wait.push(tokio::spawn(async move {
// Create a dummy timer that will "never" fire, and that will be overwritten when we
@@ -474,34 +469,23 @@ async fn lots_of_incoming_peers_works() {
// make the code below way more complicated.
let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse();
let mut event_stream = event_stream.fuse();
let mut sync_protocol_name = None;
loop {
futures::select! {
_ = timer => {
// Test succeeds when timer fires.
return;
}
ev = event_stream.next() => {
match ev.unwrap() {
Event::NotificationStreamOpened { protocol, remote, .. } => {
if let None = sync_protocol_name {
sync_protocol_name = Some(protocol.clone());
}
assert_eq!(remote, main_node_peer_id);
// Test succeeds after 5 seconds. This timer is here in order to
// detect a potential problem after opening.
timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse();
}
Event::NotificationStreamClosed { protocol, .. } => {
if Some(protocol) != sync_protocol_name {
// Test failed.
panic!();
}
}
_ => {}
ev = handle.next_event().fuse() => match ev.unwrap() {
NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
result_tx.send(ValidationResult::Accept).unwrap();
}
NotificationEvent::NotificationStreamOpened { peer, .. } => {
assert_eq!(peer, main_node_peer_id);
// Test succeeds after 5 seconds. This timer is here in order to
// detect a potential problem after opening.
timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse();
}
_ => {}
}
}
}
@@ -518,33 +502,27 @@ async fn notifications_back_pressure() {
const TOTAL_NOTIFS: usize = 10_000;
let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto();
let (_node1, handle1, node2, handle2) = build_nodes_one_proto();
let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap());
let node2_id = node2.local_peer_id();
let receiver = tokio::spawn(async move {
let mut received_notifications = 0;
let mut sync_protocol_name = None;
while received_notifications < TOTAL_NOTIFS {
match events_stream2.next().await.unwrap() {
Event::NotificationStreamOpened { protocol, .. } => {
if let None = sync_protocol_name {
sync_protocol_name = Some(protocol);
}
match handle2.next_event().await.unwrap() {
NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
result_tx.send(ValidationResult::Accept).unwrap();
},
Event::NotificationStreamClosed { protocol, .. } => {
if Some(&protocol) != sync_protocol_name.as_ref() {
panic!()
}
NotificationEvent::NotificationReceived { notification, .. } => {
assert_eq!(
notification,
format!("hello #{}", received_notifications).into_bytes()
);
received_notifications += 1;
},
Event::NotificationsReceived { messages, .. } =>
for message in messages {
assert_eq!(message.0, PROTOCOL_NAME.into());
assert_eq!(message.1, format!("hello #{}", received_notifications));
received_notifications += 1;
},
_ => {},
};
}
if rand::random::<u8>() < 2 {
tokio::time::sleep(Duration::from_millis(rand::random::<u64>() % 750)).await;
@@ -554,20 +532,20 @@ async fn notifications_back_pressure() {
// Wait for the `NotificationStreamOpened`.
loop {
match events_stream1.next().await.unwrap() {
Event::NotificationStreamOpened { .. } => break,
match handle1.next_event().await.unwrap() {
NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
result_tx.send(ValidationResult::Accept).unwrap();
},
NotificationEvent::NotificationStreamOpened { .. } => break,
_ => {},
};
}
// Sending!
for num in 0..TOTAL_NOTIFS {
let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap();
notif
.ready()
handle1
.send_async_notification(&node2_id, format!("hello #{}", num).into_bytes())
.await
.unwrap()
.send(format!("hello #{}", num).into_bytes())
.unwrap();
}
@@ -576,28 +554,31 @@ async fn notifications_back_pressure() {
#[tokio::test]
async fn fallback_name_working() {
sp_tracing::try_init_simple();
// Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether
// they can connect.
const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME";
let listen_addr = config::build_multiaddr![Memory(rand::random::<u64>())];
let (node1, mut events_stream1) = TestNetworkBuilder::new()
.with_notification_protocol(config::NonDefaultSetConfig {
notifications_protocol: NEW_PROTOCOL_NAME.into(),
fallback_names: vec![PROTOCOL_NAME.into()],
max_notification_size: 1024 * 1024,
handshake: None,
set_config: Default::default(),
})
let (config, mut handle1) = config::NonDefaultSetConfig::new(
NEW_PROTOCOL_NAME.into(),
vec![PROTOCOL_NAME.into()],
1024 * 1024,
None,
Default::default(),
);
let (network1, _) = TestNetworkBuilder::new()
.with_notification_protocol(config)
.with_config(config::NetworkConfiguration {
listen_addresses: vec![listen_addr.clone()],
transport: TransportConfig::MemoryOnly,
..config::NetworkConfiguration::new_local()
})
.build()
.start_network();
.build();
let (_, mut events_stream2) = TestNetworkBuilder::new()
let (node1, _) = network1.start_network();
let (network2, handle2) = TestNetworkBuilder::new()
.with_set_config(config::SetConfig {
reserved_nodes: vec![MultiaddrWithPeerId {
multiaddr: listen_addr,
@@ -605,34 +586,38 @@ async fn fallback_name_working() {
}],
..Default::default()
})
.build()
.start_network();
.build();
let mut handle2 = handle2.unwrap();
let _ = network2.start_network();
let receiver = tokio::spawn(async move {
// Wait for the `NotificationStreamOpened`.
loop {
match events_stream2.next().await.unwrap() {
Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => {
assert_eq!(protocol, PROTOCOL_NAME.into());
match handle2.next_event().await.unwrap() {
NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
result_tx.send(ValidationResult::Accept).unwrap();
},
NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => {
assert_eq!(negotiated_fallback, None);
break
},
_ => {},
};
}
}
});
// Wait for the `NotificationStreamOpened`.
loop {
match events_stream1.next().await.unwrap() {
Event::NotificationStreamOpened { protocol, negotiated_fallback, .. }
if protocol == NEW_PROTOCOL_NAME.into() =>
{
match handle1.next_event().await.unwrap() {
NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
result_tx.send(ValidationResult::Accept).unwrap();
},
NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => {
assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into()));
break
},
_ => {},
};
}
}
receiver.await.unwrap();
@@ -655,6 +640,7 @@ async fn ensure_listen_addresses_consistent_with_transport_memory() {
)
})
.build()
.0
.start_network();
}
@@ -674,6 +660,7 @@ async fn ensure_listen_addresses_consistent_with_transport_not_memory() {
)
})
.build()
.0
.start_network();
}
@@ -699,6 +686,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_memory() {
)
})
.build()
.0
.start_network();
}
@@ -723,6 +711,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() {
)
})
.build()
.0
.start_network();
}
@@ -751,6 +740,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_memory() {
)
})
.build()
.0
.start_network();
}
@@ -778,6 +768,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() {
)
})
.build()
.0
.start_network();
}
@@ -800,6 +791,7 @@ async fn ensure_public_addresses_consistent_with_transport_memory() {
)
})
.build()
.0
.start_network();
}
@@ -821,5 +813,6 @@ async fn ensure_public_addresses_consistent_with_transport_not_memory() {
)
})
.build()
.0
.start_network();
}
+40 -46
View File
@@ -44,16 +44,16 @@ async fn sync_peers_works() {
sp_tracing::try_init_simple();
let mut net = TestNet::new(3);
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
for peer in 0..3 {
if net.peer(peer).num_peers() != 2 {
return Poll::Pending
}
}
Poll::Ready(())
})
.await;
while net.peer(0).num_peers().await != 2 &&
net.peer(1).num_peers().await != 2 &&
net.peer(2).num_peers().await != 2
{
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
Poll::Ready(())
})
.await;
}
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -412,15 +412,13 @@ async fn can_sync_small_non_best_forks() {
assert!(net.peer(1).client().header(small_hash).unwrap().is_none());
// poll until the two nodes connect, otherwise announcing the block will not work
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
Poll::Pending
} else {
while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
Poll::Ready(())
}
})
.await;
})
.await;
}
// synchronization: 0 synced to longer chain and 1 didn't sync to small chain.
@@ -465,6 +463,7 @@ async fn can_sync_forks_ahead_of_the_best_chain() {
net.peer(1).push_blocks(1, false);
net.run_until_connected().await;
// Peer 0 is on 2-block fork which is announced with is_best=false
let fork_hash = net
.peer(0)
@@ -516,15 +515,13 @@ async fn can_sync_explicit_forks() {
assert!(net.peer(1).client().header(small_hash).unwrap().is_none());
// poll until the two nodes connect, otherwise announcing the block will not work
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
Poll::Pending
} else {
while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
Poll::Ready(())
}
})
.await;
})
.await;
}
// synchronization: 0 synced to longer chain and 1 didn't sync to small chain.
@@ -613,15 +610,14 @@ async fn full_sync_requires_block_body() {
net.peer(0).push_headers(1);
// Wait for nodes to connect
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 {
Poll::Pending
} else {
while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 {
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
Poll::Ready(())
}
})
.await;
})
.await;
}
net.run_until_idle().await;
assert_eq!(net.peer(1).client.info().best_number, 0);
}
@@ -917,18 +913,16 @@ async fn block_announce_data_is_propagated() {
});
// Wait until peer 1 is connected to both nodes.
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
if net.peer(1).num_peers() == 2 &&
net.peer(0).num_peers() == 1 &&
net.peer(2).num_peers() == 1
{
while net.peer(1).num_peers().await != 2 ||
net.peer(0).num_peers().await != 1 ||
net.peer(2).num_peers().await != 1
{
futures::future::poll_fn::<(), _>(|cx| {
net.poll(cx);
Poll::Ready(())
} else {
Poll::Pending
}
})
.await;
})
.await;
}
let block_hash = net
.peer(0)
@@ -1010,7 +1004,7 @@ async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() {
tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
net.peer(0).push_blocks(1, false);
net.run_until_sync().await;
assert_eq!(1, net.peer(0).num_peers());
assert_eq!(1, net.peer(0).num_peers().await);
}
let hashof10 = hashes[9];