Use async/await instead of manual polling of NetworkWorker (#13219)

* Convert `NetworkWorker::poll()` into async `next_action()`

* Use `NetworkWorker::next_action` instead of `poll` in `sc-network-test`

* Revert "Use `NetworkWorker::next_action` instead of `poll` in `sc-network-test`"

This reverts commit 4b5d851ec864f78f9d083a18a618fbe117c896d2.

* Fix `sc-network-test` to poll `NetworkWorker::next_action`

* Fix `sc_network::service` tests to poll `NetworkWorker::next_action`

* Fix docs

* kick CI

* Factor out `next_worker_message()` & `next_swarm_event()`

* Error handling: replace `futures::pending!()` with `expect()`

* Simplify stream polling in `select!`

* Replace `NetworkWorker::next_action()` with `run()`

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* minor: comment

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Print debug log when network future is shut down

* Evaluate `NetworkWorker::run()` future once before the loop

* Fix client code to match new `NetworkService` interfaces

* Make clippy happy

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <git@kchr.de>

* Revert "Apply suggestions from code review"

This reverts commit 9fa646d0ed613e5f8623d3d37d1d59ec0a535850.

* Make `NetworkWorker::run()` consume `self`

* Terminate system RPC future if RPC rx stream has terminated.

* Rewrite with let-else

* Fix comments

* Get `best_seen_block` and call `on_block_finalized` via `ChainSync` instead of `NetworkService`

* rustfmt

* make clippy happy

* Tests: schedule wake if `next_action()` returned true

* minor: comment

* minor: fix `NetworkWorker` rustdoc

* minor: amend the rustdoc

* Fix bug that caused `on_demand_beefy_justification_sync` test to hang

* rustfmt

* Apply review suggestions

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
This commit is contained in:
Dmitry Markin
2023-02-20 15:08:02 +03:00
committed by GitHub
parent fdd5203add
commit 8d033b6dfb
12 changed files with 861 additions and 747 deletions
@@ -180,13 +180,13 @@ pub trait NetworkPeers {
/// purposes.
fn deny_unreserved_peers(&self);
/// Adds a `PeerId` and its `Multiaddr` as reserved.
/// Adds a `PeerId` and its `Multiaddr` as reserved for a sync protocol (default peer set).
///
/// Returns an `Err` if the given string is not a valid multiaddress
/// or contains an invalid peer ID (which includes the local peer ID).
fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>;
/// Removes a `PeerId` from the list of reserved peers.
/// Removes a `PeerId` from the list of reserved peers for a sync protocol (default peer set).
fn remove_reserved_peer(&self, peer_id: PeerId);
/// Sets the reserved set of a protocol to the given set of peers.
@@ -359,6 +359,9 @@ pub trait NetworkStateInfo {
/// Returns the local external addresses.
fn external_addresses(&self) -> Vec<Multiaddr>;
/// Returns the listening addresses (without trailing `/p2p/` with our `PeerId`).
fn listen_addresses(&self) -> Vec<Multiaddr>;
/// Returns the local Peer ID.
fn local_peer_id(&self) -> PeerId;
}
@@ -372,6 +375,10 @@ where
T::external_addresses(self)
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
T::listen_addresses(self)
}
fn local_peer_id(&self) -> PeerId {
T::local_peer_id(self)
}
File diff suppressed because it is too large Load Diff
@@ -75,12 +75,8 @@ async fn normal_network_poll_no_peers() {
.with_chain_sync((chain_sync, chain_sync_service))
.build();
// poll the network once
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -110,11 +106,8 @@ async fn request_justification() {
// send "request justification" message and poll the network
network.service().request_justification(&hash, number);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -141,11 +134,8 @@ async fn clear_justification_requests() {
// send "request justification" message and poll the network
network.service().clear_justification_requests();
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -180,11 +170,8 @@ async fn set_sync_fork_request() {
// send "set sync fork request" message and poll the network
network.service().set_sync_fork_request(copy_peers, hash, number);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
#[tokio::test]
@@ -225,11 +212,8 @@ async fn on_block_finalized() {
// send "on block finalized" message and poll the network
network.network().on_block_finalized(hash, header);
futures::future::poll_fn(|cx| {
let _ = network.network().poll_unpin(cx);
Poll::Ready(())
})
.await;
// perform one action on network
let _ = network.network().next_action().await;
}
// report from mock import queue that importing a justification was not successful
@@ -80,10 +80,7 @@ impl TestNetwork {
let service = worker.service().clone();
let event_stream = service.event_stream("test");
tokio::spawn(async move {
futures::pin_mut!(worker);
let _ = worker.await;
});
tokio::spawn(worker.run());
(service, event_stream)
}
+6
View File
@@ -1358,6 +1358,12 @@ where
);
}
},
ToServiceCommand::BlockFinalized(hash, number) => {
self.on_block_finalized(&hash, number);
},
ToServiceCommand::Status { pending_response } => {
let _ = pending_response.send(self.status());
},
}
}
@@ -16,9 +16,10 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use futures::channel::oneshot;
use libp2p::PeerId;
use sc_consensus::{BlockImportError, BlockImportStatus, JustificationSyncLink, Link};
use sc_network_common::service::NetworkSyncForkRequest;
use sc_network_common::{service::NetworkSyncForkRequest, sync::SyncStatus};
use sc_utils::mpsc::TracingUnboundedSender;
use sp_runtime::traits::{Block as BlockT, NumberFor};
@@ -34,6 +35,10 @@ pub enum ToServiceCommand<B: BlockT> {
Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
),
JustificationImported(PeerId, B::Hash, NumberFor<B>, bool),
BlockFinalized(B::Hash, NumberFor<B>),
Status {
pending_response: oneshot::Sender<SyncStatus<B>>,
},
}
/// Handle for communicating with `ChainSync` asynchronously
@@ -47,6 +52,21 @@ impl<B: BlockT> ChainSyncInterfaceHandle<B> {
pub fn new(tx: TracingUnboundedSender<ToServiceCommand<B>>) -> Self {
Self { tx }
}
/// Notify ChainSync about finalized block
pub fn on_block_finalized(&self, hash: B::Hash, number: NumberFor<B>) {
let _ = self.tx.unbounded_send(ToServiceCommand::BlockFinalized(hash, number));
}
/// Get sync status
///
/// Returns an error if `ChainSync` has terminated.
pub async fn status(&self) -> Result<SyncStatus<B>, ()> {
let (tx, rx) = oneshot::channel();
let _ = self.tx.unbounded_send(ToServiceCommand::Status { pending_response: tx });
rx.await.map_err(|_| ())
}
}
impl<B: BlockT + 'static> NetworkSyncForkRequest<B::Hash, NumberFor<B>>
+13 -4
View File
@@ -31,7 +31,7 @@ use std::{
time::Duration,
};
use futures::{channel::oneshot, future::BoxFuture, prelude::*};
use futures::{channel::oneshot, future::BoxFuture, pin_mut, prelude::*};
use libp2p::{build_multiaddr, PeerId};
use log::trace;
use parking_lot::Mutex;
@@ -83,7 +83,7 @@ use sp_runtime::{
};
use substrate_test_runtime_client::AccountKeyring;
pub use substrate_test_runtime_client::{
runtime::{Block, Extrinsic, Hash, Transfer},
runtime::{Block, Extrinsic, Hash, Header, Transfer},
TestClient, TestClientBuilder, TestClientBuilderExt,
};
use tokio::time::timeout;
@@ -1078,8 +1078,17 @@ where
self.mut_peers(|peers| {
for (i, peer) in peers.iter_mut().enumerate() {
trace!(target: "sync", "-- Polling {}: {}", i, peer.id());
if let Poll::Ready(()) = peer.network.poll_unpin(cx) {
panic!("NetworkWorker has terminated unexpectedly.")
loop {
// The code below is not quite correct, because we are polling a different
// instance of the future every time. But as long as
// `NetworkWorker::next_action()` contains just streams polling not interleaved
// with other `.await`s, dropping the future and recreating it works the same as
// polling a single instance.
let net_poll_future = peer.network.next_action();
pin_mut!(net_poll_future);
if let Poll::Pending = net_poll_future.poll(cx) {
break
}
}
trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id());