Use async/await instead of manual polling of NetworkWorker (#13219)

* Convert `NetworkWorker::poll()` into async `next_action()`

* Use `NetworkWorker::next_action` instead of `poll` in `sc-network-test`

* Revert "Use `NetworkWorker::next_action` instead of `poll` in `sc-network-test`"

This reverts commit 4b5d851ec864f78f9d083a18a618fbe117c896d2.

* Fix `sc-network-test` to poll `NetworkWorker::next_action`

* Fix `sc_network::service` tests to poll `NetworkWorker::next_action`

* Fix docs

* kick CI

* Factor out `next_worker_message()` & `next_swarm_event()`

* Error handling: replace `futures::pending!()` with `expect()`

* Simplify stream polling in `select!`

* Replace `NetworkWorker::next_action()` with `run()`

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* minor: comment

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Print debug log when network future is shut down

* Evaluate `NetworkWorker::run()` future once before the loop

* Fix client code to match new `NetworkService` interfaces

* Make clippy happy

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Revert "Apply suggestions from code review"

This reverts commit 9fa646d0ed613e5f8623d3d37d1d59ec0a535850.

* Make `NetworkWorker::run()` consume `self`

* Terminate system RPC future if RPC rx stream has terminated.

* Rewrite with let-else

* Fix comments

* Get `best_seen_block` and call `on_block_finalized` via `ChainSync` instead of `NetworkService`

* rustfmt

* make clippy happy

* Tests: schedule wake if `next_action()` returned true

* minor: comment

* minor: fix `NetworkWorker` rustdoc

* minor: amend the rustdoc

* Fix bug that caused `on_demand_beefy_justification_sync` test to hang

* rustfmt

* Apply review suggestions

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
This commit is contained in:
Dmitry Markin
2023-02-20 15:08:02 +03:00
committed by GitHub
parent fdd5203add
commit 8d033b6dfb
12 changed files with 861 additions and 747 deletions
@@ -184,6 +184,10 @@ impl NetworkStateInfo for TestNetwork {
fn external_addresses(&self) -> Vec<Multiaddr> {
self.external_addresses.clone()
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
self.external_addresses.clone()
}
}
struct TestSigner<'a> {
@@ -180,13 +180,13 @@ pub trait NetworkPeers {
/// purposes.
fn deny_unreserved_peers(&self);
/// Adds a `PeerId` and its `Multiaddr` as reserved.
/// Adds a `PeerId` and its `Multiaddr` as reserved for a sync protocol (default peer set).
///
/// Returns an `Err` if the given string is not a valid multiaddress
/// or contains an invalid peer ID (which includes the local peer ID).
fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>;
/// Removes a `PeerId` from the list of reserved peers.
/// Removes a `PeerId` from the list of reserved peers for a sync protocol (default peer set).
fn remove_reserved_peer(&self, peer_id: PeerId);
/// Sets the reserved set of a protocol to the given set of peers.
@@ -359,6 +359,9 @@ pub trait NetworkStateInfo {
/// Returns the local external addresses.
fn external_addresses(&self) -> Vec<Multiaddr>;
/// Returns the listening addresses (without trailing `/p2p/` with our `PeerId`).
fn listen_addresses(&self) -> Vec<Multiaddr>;
/// Returns the local Peer ID.
fn local_peer_id(&self) -> PeerId;
}
@@ -372,6 +375,10 @@ where
T::external_addresses(self)
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
T::listen_addresses(self)
}
fn local_peer_id(&self) -> PeerId {
T::local_peer_id(self)
}
File diff suppressed because it is too large Load Diff
@@ -75,12 +75,8 @@ async fn normal_network_poll_no_peers() {
.with_chain_sync((chain_sync, chain_sync_service))
.build();
// poll the network once
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -110,11 +106,8 @@ async fn request_justification() {
// send "request justification" message and poll the network
network.service().request_justification(&hash, number);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -141,11 +134,8 @@ async fn clear_justification_requests() {
// send "request justification" message and poll the network
network.service().clear_justification_requests();
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -180,11 +170,8 @@ async fn set_sync_fork_request() {
// send "set sync fork request" message and poll the network
network.service().set_sync_fork_request(copy_peers, hash, number);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -225,11 +212,8 @@ async fn on_block_finalized() {
// send "set sync fork request" message and poll the network
network.network().on_block_finalized(hash, header);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
// report from mock import queue that importing a justification was not successful
@@ -80,10 +80,7 @@ impl TestNetwork {
let service = worker.service().clone();
let event_stream = service.event_stream("test");
tokio::spawn(async move {
futures::pin_mut!(worker);
let _ = worker.await;
});
tokio::spawn(worker.run());
(service, event_stream)
}
+6
View File
@@ -1358,6 +1358,12 @@ where
);
}
},
ToServiceCommand::BlockFinalized(hash, number) => {
self.on_block_finalized(&hash, number);
},
ToServiceCommand::Status { pending_response } => {
let _ = pending_response.send(self.status());
},
}
}
@@ -16,9 +16,10 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use futures::channel::oneshot;
use libp2p::PeerId;
use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link};
use sc_network_common::service::NetworkSyncForkRequest;
use sc_network_common::{service::NetworkSyncForkRequest, sync::SyncStatus};
use sc_utils::mpsc::TracingUnboundedSender;
use sp_runtime::traits::{Block as BlockT, NumberFor};
@@ -34,6 +35,10 @@ pub enum ToServiceCommand<B: BlockT> {
Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
),
JustificationImported(PeerId, B::Hash, NumberFor<B>, bool),
BlockFinalized(B::Hash, NumberFor<B>),
Status {
pending_response: oneshot::Sender<SyncStatus<B>>,
},
}
/// Handle for communicating with `ChainSync` asynchronously
@@ -47,6 +52,21 @@ impl<B: BlockT> ChainSyncInterfaceHandle<B> {
pub fn new(tx: TracingUnboundedSender<ToServiceCommand<B>>) -> Self {
Self { tx }
}
/// Notify ChainSync about finalized block
pub fn on_block_finalized(&self, hash: B::Hash, number: NumberFor<B>) {
let _ = self.tx.unbounded_send(ToServiceCommand::BlockFinalized(hash, number));
}
/// Get sync status
///
/// Returns an error if `ChainSync` has terminated.
pub async fn status(&self) -> Result<SyncStatus<B>, ()> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.unbounded_send(ToServiceCommand::Status { pending_response: tx });
rx.await.map_err(|_| ())
}
}
impl<B: BlockT + 'static> NetworkSyncForkRequest<B::Hash, NumberFor<B>>
+13 -4
View File
@@ -31,7 +31,7 @@ use std::{
time::Duration,
};
use futures::{channel::oneshot, future::BoxFuture, prelude::*};
use futures::{channel::oneshot, future::BoxFuture, pin_mut, prelude::*};
use libp2p::{build_multiaddr, PeerId};
use log::trace;
use parking_lot::Mutex;
@@ -83,7 +83,7 @@ use sp_runtime::{
};
use substrate_test_runtime_client::AccountKeyring;
pub use substrate_test_runtime_client::{
runtime::{Block, Extrinsic, Hash, Transfer},
runtime::{Block, Extrinsic, Hash, Header, Transfer},
TestClient, TestClientBuilder, TestClientBuilderExt,
};
use tokio::time::timeout;
@@ -1078,8 +1078,17 @@ where
self.mut_peers(|peers| {
for (i, peer) in peers.iter_mut().enumerate() {
trace!(target: "sync", "-- Polling {}: {}", i, peer.id());
if let Poll::Ready(()) = peer.network.poll_unpin(cx) {
panic!("NetworkWorker has terminated unexpectedly.")
loop {
// The code below is not quite correct, because we are polling a different
// instance of the future every time. But as long as
// `NetworkWorker::next_action()` contains just streams polling not interleaved
// with other `.await`s, dropping the future and recreating it works the same as
// polling a single instance.
let net_poll_future = peer.network.next_action();
pin_mut!(net_poll_future);
if let Poll::Pending = net_poll_future.poll(cx) {
break
}
}
trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id());
+4
View File
@@ -419,6 +419,10 @@ mod tests {
fn local_peer_id(&self) -> PeerId {
PeerId::random()
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
Vec::new()
}
}
fn offchain_api() -> (Api, AsyncApi) {
+4
View File
@@ -270,6 +270,10 @@ mod tests {
fn local_peer_id(&self) -> PeerId {
PeerId::random()
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
Vec::new()
}
}
impl NetworkPeers for TestNetwork {
+20 -10
View File
@@ -17,7 +17,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
build_network_future,
build_network_future, build_system_rpc_future,
client::{Client, ClientConfig},
config::{Configuration, KeystoreConfig, PrometheusConfig},
error::Error,
@@ -963,19 +963,29 @@ where
Some("networking"),
chain_sync_network_provider.run(network.clone()),
);
spawn_handle.spawn("import-queue", None, import_queue.run(Box::new(chain_sync_service)));
spawn_handle.spawn(
"import-queue",
None,
import_queue.run(Box::new(chain_sync_service.clone())),
);
let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc", 10_000);
let future = build_network_future(
config.role.clone(),
network_mut,
client,
system_rpc_rx,
has_bootnodes,
config.announce_block,
spawn_handle.spawn(
"system-rpc-handler",
Some("networking"),
build_system_rpc_future(
config.role.clone(),
network_mut.service().clone(),
chain_sync_service.clone(),
client.clone(),
system_rpc_rx,
has_bootnodes,
),
);
let future =
build_network_future(network_mut, client, chain_sync_service, config.announce_block);
// TODO: Normally, one is supposed to pass a list of notifications protocols supported by the
// node through the `NetworkConfiguration` struct. But because this function doesn't know in
// advance which components, such as GrandPa or Polkadot, will be plugged on top of the
+165 -106
View File
@@ -37,12 +37,16 @@ mod task_manager;
use std::{collections::HashMap, net::SocketAddr};
use codec::{Decode, Encode};
use futures::{channel::mpsc, FutureExt, StreamExt};
use futures::{channel::mpsc, pin_mut, FutureExt, StreamExt};
use jsonrpsee::{core::Error as JsonRpseeError, RpcModule};
use log::{debug, error, warn};
use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider};
use sc_network::PeerId;
use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock};
use sc_network::{NetworkStateInfo, PeerId};
use sc_network_common::{
config::MultiaddrWithPeerId,
service::{NetworkBlock, NetworkPeers},
};
use sc_network_sync::service::chain_sync::ChainSyncInterfaceHandle;
use sc_utils::mpsc::TracingUnboundedReceiver;
use sp_blockchain::HeaderMetadata;
use sp_consensus::SyncOracle;
@@ -138,9 +142,7 @@ pub struct PartialComponents<Client, Backend, SelectChain, ImportQueue, Transact
pub other: Other,
}
/// Builds a never-ending future that continuously polls the network.
///
/// The `status_sink` contain a list of senders to send a periodic network status to.
/// Builds a future that continuously polls the network.
async fn build_network_future<
B: BlockT,
C: BlockchainEvents<B>
@@ -153,21 +155,21 @@ async fn build_network_future<
+ 'static,
H: sc_network_common::ExHashT,
>(
role: Role,
mut network: sc_network::NetworkWorker<B, H, C>,
network: sc_network::NetworkWorker<B, H, C>,
client: Arc<C>,
mut rpc_rx: TracingUnboundedReceiver<sc_rpc::system::Request<B>>,
should_have_peers: bool,
chain_sync_service: ChainSyncInterfaceHandle<B>,
announce_imported_blocks: bool,
) {
let mut imported_blocks_stream = client.import_notification_stream().fuse();
// Current best block at initialization, to report to the RPC layer.
let starting_block = client.info().best_number;
// Stream of finalized blocks reported by the client.
let mut finality_notification_stream = client.finality_notification_stream().fuse();
let network_service = network.service().clone();
let network_run = network.run().fuse();
pin_mut!(network_run);
loop {
futures::select! {
// List of blocks that the client has imported.
@@ -176,15 +178,18 @@ async fn build_network_future<
Some(n) => n,
// If this stream is shut down, that means the client has shut down, and the
// most appropriate thing to do for the network future is to shut down too.
None => return,
None => {
debug!("Block import stream has terminated, shutting down the network future.");
return
},
};
if announce_imported_blocks {
network.service().announce_block(notification.hash, None);
network_service.announce_block(notification.hash, None);
}
if notification.is_new_best {
network.service().new_best_block_imported(
network_service.new_best_block_imported(
notification.hash,
*notification.header.number(),
);
@@ -193,106 +198,160 @@ async fn build_network_future<
// List of blocks that the client has finalized.
notification = finality_notification_stream.select_next_some() => {
network.on_block_finalized(notification.hash, notification.header);
chain_sync_service.on_block_finalized(notification.hash, *notification.header.number());
}
// Answer incoming RPC requests.
request = rpc_rx.select_next_some() => {
match request {
sc_rpc::system::Request::Health(sender) => {
let _ = sender.send(sc_rpc::system::Health {
peers: network.peers_debug_info().len(),
is_syncing: network.service().is_major_syncing(),
should_have_peers,
});
},
sc_rpc::system::Request::LocalPeerId(sender) => {
let _ = sender.send(network.local_peer_id().to_base58());
},
sc_rpc::system::Request::LocalListenAddresses(sender) => {
let peer_id = (*network.local_peer_id()).into();
let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id);
let addresses = network.listen_addresses()
.map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string())
.collect();
let _ = sender.send(addresses);
},
sc_rpc::system::Request::Peers(sender) => {
let _ = sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)|
sc_rpc::system::PeerInfo {
// Drive the network. Shut down the network future if `NetworkWorker` has terminated.
_ = network_run => {
debug!("`NetworkWorker` has terminated, shutting down the network future.");
return
}
}
}
}
/// Builds a future that processes system RPC requests.
async fn build_system_rpc_future<
B: BlockT,
C: BlockchainEvents<B>
+ HeaderBackend<B>
+ BlockBackend<B>
+ HeaderMetadata<B, Error = sp_blockchain::Error>
+ ProofProvider<B>
+ Send
+ Sync
+ 'static,
H: sc_network_common::ExHashT,
>(
role: Role,
network_service: Arc<sc_network::NetworkService<B, H>>,
chain_sync_service: ChainSyncInterfaceHandle<B>,
client: Arc<C>,
mut rpc_rx: TracingUnboundedReceiver<sc_rpc::system::Request<B>>,
should_have_peers: bool,
) {
// Current best block at initialization, to report to the RPC layer.
let starting_block = client.info().best_number;
loop {
// Answer incoming RPC requests.
let Some(req) = rpc_rx.next().await else {
debug!("RPC requests stream has terminated, shutting down the system RPC future.");
return;
};
match req {
sc_rpc::system::Request::Health(sender) => {
let peers = network_service.peers_debug_info().await;
if let Ok(peers) = peers {
let _ = sender.send(sc_rpc::system::Health {
peers: peers.len(),
is_syncing: network_service.is_major_syncing(),
should_have_peers,
});
} else {
break
}
},
sc_rpc::system::Request::LocalPeerId(sender) => {
let _ = sender.send(network_service.local_peer_id().to_base58());
},
sc_rpc::system::Request::LocalListenAddresses(sender) => {
let peer_id = network_service.local_peer_id().into();
let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id);
let addresses = network_service
.listen_addresses()
.iter()
.map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string())
.collect();
let _ = sender.send(addresses);
},
sc_rpc::system::Request::Peers(sender) => {
let peers = network_service.peers_debug_info().await;
if let Ok(peers) = peers {
let _ = sender.send(
peers
.into_iter()
.map(|(peer_id, p)| sc_rpc::system::PeerInfo {
peer_id: peer_id.to_base58(),
roles: format!("{:?}", p.roles),
best_hash: p.best_hash,
best_number: p.best_number,
}
).collect());
}
sc_rpc::system::Request::NetworkState(sender) => {
if let Ok(network_state) = serde_json::to_value(&network.network_state()) {
let _ = sender.send(network_state);
}
}
sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => {
let result = match MultiaddrWithPeerId::try_from(peer_addr) {
Ok(peer) => {
network.add_reserved_peer(peer)
},
Err(err) => {
Err(err.to_string())
},
};
let x = result.map_err(sc_rpc::system::error::Error::MalformattedPeerArg);
let _ = sender.send(x);
}
sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => {
let _ = match peer_id.parse::<PeerId>() {
Ok(peer_id) => {
network.remove_reserved_peer(peer_id);
sender.send(Ok(()))
}
Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg(
e.to_string(),
))),
};
}
sc_rpc::system::Request::NetworkReservedPeers(sender) => {
let reserved_peers = network.reserved_peers();
let reserved_peers = reserved_peers
.map(|peer_id| peer_id.to_base58())
.collect();
let _ = sender.send(reserved_peers);
}
sc_rpc::system::Request::NodeRoles(sender) => {
use sc_rpc::system::NodeRole;
let node_role = match role {
Role::Authority { .. } => NodeRole::Authority,
Role::Full => NodeRole::Full,
};
let _ = sender.send(vec![node_role]);
}
sc_rpc::system::Request::SyncState(sender) => {
use sc_rpc::system::SyncState;
let best_number = client.info().best_number;
let _ = sender.send(SyncState {
starting_block,
current_block: best_number,
highest_block: network.best_seen_block().unwrap_or(best_number),
});
}
})
.collect(),
);
} else {
break
}
}
},
sc_rpc::system::Request::NetworkState(sender) => {
let network_state = network_service.network_state().await;
if let Ok(network_state) = network_state {
if let Ok(network_state) = serde_json::to_value(network_state) {
let _ = sender.send(network_state);
}
} else {
break
}
},
sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => {
let result = match MultiaddrWithPeerId::try_from(peer_addr) {
Ok(peer) => network_service.add_reserved_peer(peer),
Err(err) => Err(err.to_string()),
};
let x = result.map_err(sc_rpc::system::error::Error::MalformattedPeerArg);
let _ = sender.send(x);
},
sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => {
let _ = match peer_id.parse::<PeerId>() {
Ok(peer_id) => {
network_service.remove_reserved_peer(peer_id);
sender.send(Ok(()))
},
Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg(
e.to_string(),
))),
};
},
sc_rpc::system::Request::NetworkReservedPeers(sender) => {
let reserved_peers = network_service.reserved_peers().await;
if let Ok(reserved_peers) = reserved_peers {
let reserved_peers =
reserved_peers.iter().map(|peer_id| peer_id.to_base58()).collect();
let _ = sender.send(reserved_peers);
} else {
break
}
},
sc_rpc::system::Request::NodeRoles(sender) => {
use sc_rpc::system::NodeRole;
// The network worker has done something. Nothing special to do, but could be
// used in the future to perform actions in response of things that happened on
// the network.
_ = (&mut network).fuse() => {}
let node_role = match role {
Role::Authority { .. } => NodeRole::Authority,
Role::Full => NodeRole::Full,
};
let _ = sender.send(vec![node_role]);
},
sc_rpc::system::Request::SyncState(sender) => {
use sc_rpc::system::SyncState;
let best_number = client.info().best_number;
let Ok(status) = chain_sync_service.status().await else {
debug!("`ChainSync` has terminated, shutting down the system RPC future.");
return
};
let _ = sender.send(SyncState {
starting_block,
current_block: best_number,
highest_block: status.best_seen_block.unwrap_or(best_number),
});
},
}
}
debug!("`NetworkWorker` has terminated, shutting down the system RPC future.");
}
// Wrapper for HTTP and WS servers that makes sure they are properly shut down.