Files
pezkuwi-subxt/substrate/client/network/sync/src/pending_responses.rs
T
Aaro Altonen 80616f6d03 Integrate litep2p into Polkadot SDK (#2944)
[litep2p](https://github.com/altonen/litep2p) is a libp2p-compatible P2P
networking library. It supports all of the features of `rust-libp2p`
that are currently being utilized by Polkadot SDK.

Compared to `rust-libp2p`, `litep2p` has a quite different architecture
which is why the new `litep2p` network backend is only able to use a
little of the existing code in `sc-network`. The design has been mainly
influenced by how we'd wish to structure our networking-related code in
Polkadot SDK: independent higher-level protocols directly communicating
with the network over links that support bidirectional backpressure. A
good example would be `NotificationHandle`/`RequestResponseHandle`
abstractions which allow, e.g., `SyncingEngine` to directly communicate
with peers to announce/request blocks.

I've tried running `polkadot --network-backend litep2p` with a few
different peer configurations and there is a noticeable reduction in
networking CPU usage. For high load (`--out-peers 200`), networking CPU
usage goes down from ~110% to ~30% (80 pp) and for normal load
(`--out-peers 40`), the usage goes down from ~55% to ~18% (37 pp).

These should not be taken as final numbers because:

a) there are still some low-hanging optimization fruits, such as
enabling [receive window
auto-tuning](https://github.com/libp2p/rust-yamux/pull/176), integrating
`Peerset` more closely with `litep2p` or improving memory usage of the
WebSocket transport
b) fixing bugs/instabilities that incorrectly cause `litep2p` to do less
work will increase the networking CPU usage
c) verification in a more diverse set of tests/conditions is needed

Nevertheless, these numbers should give an early estimate for CPU usage
of the new networking backend.

This PR consists of three separate changes:
* introduce a generic `PeerId` (wrapper around `Multihash`) so that we
don't have to use `NetworkService::PeerId` in every part of the code that
uses a `PeerId`
* introduce `NetworkBackend` trait, implement it for the libp2p network
stack and make Polkadot SDK generic over `NetworkBackend`
  * implement `NetworkBackend` for litep2p

The new library should be considered experimental which is why
`rust-libp2p` will remain as the default option for the time being. This
PR currently depends on the master branch of `litep2p` but I'll cut a
new release for the library once all review comments have been
addressed.

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
Co-authored-by: Dmitry Markin <dmitry@markin.tech>
Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io>
2024-04-08 16:44:13 +00:00

146 lines
4.2 KiB
Rust

// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! [`PendingResponses`] is responsible for keeping track of pending responses and
//! polling them. [`Stream`] implemented by [`PendingResponses`] never terminates.
use crate::{strategy::StrategyKey, types::PeerRequest, LOG_TARGET};
use futures::{
channel::oneshot,
future::BoxFuture,
stream::{BoxStream, FusedStream, Stream},
FutureExt, StreamExt,
};
use log::error;
use sc_network::{request_responses::RequestFailure, types::ProtocolName};
use sc_network_types::PeerId;
use sp_runtime::traits::Block as BlockT;
use std::task::{Context, Poll, Waker};
use tokio_stream::StreamMap;
/// Response result.
///
/// The outer `Result` reports whether the response channel was dropped before a
/// reply arrived ([`oneshot::Canceled`]); the inner `Result` carries either the
/// raw response payload together with the protocol that produced it, or a
/// [`RequestFailure`] from the networking layer.
type ResponseResult = Result<Result<(Vec<u8>, ProtocolName), RequestFailure>, oneshot::Canceled>;
/// A future yielding [`ResponseResult`].
type ResponseFuture = BoxFuture<'static, ResponseResult>;
/// An event we receive once a pending response future resolves.
pub(crate) struct ResponseEvent<B: BlockT> {
/// Peer the request was addressed to (the `StreamMap` key's first component).
pub peer_id: PeerId,
/// Syncing strategy that issued the request (the `StreamMap` key's second component).
pub key: StrategyKey,
/// The original request, handed back so the caller can match the response to it.
pub request: PeerRequest<B>,
/// Outcome of the request; see [`ResponseResult`].
pub response: ResponseResult,
}
/// Stream taking care of polling pending responses.
///
/// Entries are keyed by `(PeerId, StrategyKey)`, so each peer can have at most one
/// in-flight request per syncing strategy at a time.
pub(crate) struct PendingResponses<B: BlockT> {
/// Pending responses, each stored as a single-item stream yielding the original
/// request alongside its [`ResponseResult`].
pending_responses:
StreamMap<(PeerId, StrategyKey), BoxStream<'static, (PeerRequest<B>, ResponseResult)>>,
/// Waker parked by `poll_next` so that `insert` can re-schedule the stream;
/// this is what lets the stream stay pending forever instead of terminating.
waker: Option<Waker>,
}
impl<B: BlockT> PendingResponses<B> {
	/// Create an empty collection of pending responses.
	pub fn new() -> Self {
		Self { pending_responses: StreamMap::new(), waker: None }
	}

	/// Register a pending response for `(peer_id, key)`.
	///
	/// Inserting while a request for the same peer/strategy pair is still in flight
	/// is a logic error: the previous entry is discarded with an error log (and a
	/// `debug_assert!` failure in debug builds).
	pub fn insert(
		&mut self,
		peer_id: PeerId,
		key: StrategyKey,
		request: PeerRequest<B>,
		response_future: ResponseFuture,
	) {
		let request_type = request.get_type();

		// Wrap the oneshot-style future into a one-item stream so it fits into the
		// `StreamMap`, pairing the response with the request that triggered it.
		let entry = Box::pin(async move { (request, response_future.await) }.into_stream());

		let replaced_existing = self.pending_responses.insert((peer_id, key), entry).is_some();
		if replaced_existing {
			error!(
				target: LOG_TARGET,
				"Discarded pending response from peer {peer_id}, request type: {request_type:?}.",
			);
			debug_assert!(false);
		}

		// A fresh entry may make the stream ready again — notify the executor.
		if let Some(waker) = self.waker.take() {
			waker.wake();
		}
	}

	/// Remove the pending response for `(peer_id, key)`; returns `true` if one existed.
	pub fn remove(&mut self, peer_id: PeerId, key: StrategyKey) -> bool {
		self.pending_responses.remove(&(peer_id, key)).is_some()
	}

	/// Drop every pending response belonging to `peer_id`, across all strategies.
	pub fn remove_all(&mut self, peer_id: &PeerId) {
		// Collect the matching keys first: we cannot mutate the map while
		// borrowing it through `keys()`.
		let stale_keys: Vec<_> = self
			.pending_responses
			.keys()
			.filter(|(peer, _key)| peer == peer_id)
			.cloned()
			.collect();

		for stale_key in &stale_keys {
			self.pending_responses.remove(stale_key);
		}
	}

	/// Number of responses currently awaited.
	pub fn len(&self) -> usize {
		self.pending_responses.len()
	}
}
impl<B: BlockT> Stream for PendingResponses<B> {
	type Item = ResponseEvent<B>;

	fn poll_next(
		mut self: std::pin::Pin<&mut Self>,
		cx: &mut Context<'_>,
	) -> Poll<Option<Self::Item>> {
		if let Poll::Ready(Some(((peer_id, key), (request, response)))) =
			self.pending_responses.poll_next_unpin(cx)
		{
			// Eagerly evict the exhausted inner stream: `StreamMap` only learns that
			// it finished on a *later* poll, and by then a new request to the same
			// peer may already have been inserted under the same key.
			self.pending_responses.remove(&(peer_id, key));
			return Poll::Ready(Some(ResponseEvent { peer_id, key, request, response }))
		}

		// Nothing ready (or the map is empty). Park the waker so `insert` can wake
		// us up, and never report termination — the stream is infinite by design.
		self.waker = Some(cx.waker().clone());
		Poll::Pending
	}
}
// [`PendingResponses`] never yields `None` (see `poll_next`), so the
// [`FusedStream`] contract is satisfied trivially.
impl<B: BlockT> FusedStream for PendingResponses<B> {
	fn is_terminated(&self) -> bool {
		// The stream parks itself instead of finishing, so it is never terminated.
		false
	}
}