mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 20:27:58 +00:00
80616f6d03
[litep2p](https://github.com/altonen/litep2p) is a libp2p-compatible P2P networking library. It supports all of the features of `rust-libp2p` that are currently being utilized by Polkadot SDK. Compared to `rust-libp2p`, `litep2p` has a quite different architecture which is why the new `litep2p` network backend is only able to use a little of the existing code in `sc-network`. The design has been mainly influenced by how we'd wish to structure our networking-related code in Polkadot SDK: independent higher-levels protocols directly communicating with the network over links that support bidirectional backpressure. A good example would be `NotificationHandle`/`RequestResponseHandle` abstractions which allow, e.g., `SyncingEngine` to directly communicate with peers to announce/request blocks. I've tried running `polkadot --network-backend litep2p` with a few different peer configurations and there is a noticeable reduction in networking CPU usage. For high load (`--out-peers 200`), networking CPU usage goes down from ~110% to ~30% (80 pp) and for normal load (`--out-peers 40`), the usage goes down from ~55% to ~18% (37 pp). These should not be taken as final numbers because: a) there are still some low-hanging optimization fruits, such as enabling [receive window auto-tuning](https://github.com/libp2p/rust-yamux/pull/176), integrating `Peerset` more closely with `litep2p` or improving memory usage of the WebSocket transport b) fixing bugs/instabilities that incorrectly cause `litep2p` to do less work will increase the networking CPU usage c) verification in a more diverse set of tests/conditions is needed Nevertheless, these numbers should give an early estimate for CPU usage of the new networking backend. 
This PR consists of three separate changes: * introduce a generic `PeerId` (wrapper around `Multihash`) so that we don't have to use `NetworkService::PeerId` in every part of the code that uses a `PeerId` * introduce `NetworkBackend` trait, implement it for the libp2p network stack and make Polkadot SDK generic over `NetworkBackend` * implement `NetworkBackend` for litep2p The new library should be considered experimental which is why `rust-libp2p` will remain as the default option for the time being. This PR currently depends on the master branch of `litep2p` but I'll cut a new release for the library once all review comments have been addressed. --------- Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Dmitry Markin <dmitry@markin.tech> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io>
295 lines
8.5 KiB
Rust
295 lines
8.5 KiB
Rust
// Copyright (C) Parity Technologies (UK) Ltd.
|
|
// This file is part of Substrate.
|
|
|
|
// Substrate is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
|
|
// Substrate is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU General Public License for more details.
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
//! Helper for handling (i.e. answering) state requests from a remote peer via the
|
|
//! `crate::request_responses::RequestResponsesBehaviour`.
|
|
|
|
use crate::{
|
|
schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse},
|
|
LOG_TARGET,
|
|
};
|
|
|
|
use codec::{Decode, Encode};
|
|
use futures::{channel::oneshot, stream::StreamExt};
|
|
use log::{debug, trace};
|
|
use prost::Message;
|
|
use sc_network_types::PeerId;
|
|
use schnellru::{ByLength, LruMap};
|
|
|
|
use sc_client_api::{BlockBackend, ProofProvider};
|
|
use sc_network::{
|
|
config::ProtocolId,
|
|
request_responses::{IncomingRequest, OutgoingResponse},
|
|
NetworkBackend,
|
|
};
|
|
use sp_runtime::traits::Block as BlockT;
|
|
|
|
use std::{
|
|
hash::{Hash, Hasher},
|
|
sync::Arc,
|
|
time::Duration,
|
|
};
|
|
|
|
/// Soft byte-size target for a single state response: entries/proof nodes are
/// accumulated until this limit is reached, so the actual response may be bigger.
const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual response may be bigger.

/// How many times a peer may repeat the exact same request before further
/// repetitions are treated as spam (see [`rep::SAME_REQUEST`]).
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
|
|
|
|
/// Reputation changes issued by this handler.
mod rep {
	use sc_network::ReputationChange as Rep;

	/// Reputation change when a peer sent us the same request multiple times.
	pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times");
}
|
|
|
|
/// Generates a `RequestResponseProtocolConfig` for the state request protocol, refusing incoming
|
|
/// requests.
|
|
pub fn generate_protocol_config<
|
|
Hash: AsRef<[u8]>,
|
|
B: BlockT,
|
|
N: NetworkBackend<B, <B as BlockT>::Hash>,
|
|
>(
|
|
protocol_id: &ProtocolId,
|
|
genesis_hash: Hash,
|
|
fork_id: Option<&str>,
|
|
inbound_queue: async_channel::Sender<IncomingRequest>,
|
|
) -> N::RequestResponseProtocolConfig {
|
|
N::request_response_config(
|
|
generate_protocol_name(genesis_hash, fork_id).into(),
|
|
std::iter::once(generate_legacy_protocol_name(protocol_id).into()).collect(),
|
|
1024 * 1024,
|
|
16 * 1024 * 1024,
|
|
Duration::from_secs(40),
|
|
Some(inbound_queue),
|
|
)
|
|
}
|
|
|
|
/// Generate the state protocol name from the genesis hash and fork id.
|
|
fn generate_protocol_name<Hash: AsRef<[u8]>>(genesis_hash: Hash, fork_id: Option<&str>) -> String {
|
|
let genesis_hash = genesis_hash.as_ref();
|
|
if let Some(fork_id) = fork_id {
|
|
format!("/{}/{}/state/2", array_bytes::bytes2hex("", genesis_hash), fork_id)
|
|
} else {
|
|
format!("/{}/state/2", array_bytes::bytes2hex("", genesis_hash))
|
|
}
|
|
}
|
|
|
|
/// Generate the legacy state protocol name from chain specific protocol identifier.
|
|
fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String {
|
|
format!("/{}/state/2", protocol_id.as_ref())
|
|
}
|
|
|
|
/// The key of [`StateRequestHandler::seen_requests`].
///
/// Identifies a request by the requesting peer together with the request
/// contents (block hash and start keys), so that repeated identical requests
/// from the same peer can be detected.
#[derive(Eq, PartialEq, Clone)]
struct SeenRequestsKey<B: BlockT> {
	// The peer that sent the request.
	peer: PeerId,
	// The block hash the state was requested for.
	block: B::Hash,
	// The start keys of the request.
	start: Vec<Vec<u8>>,
}
|
|
|
|
#[allow(clippy::derived_hash_with_manual_eq)]
|
|
impl<B: BlockT> Hash for SeenRequestsKey<B> {
|
|
fn hash<H: Hasher>(&self, state: &mut H) {
|
|
self.peer.hash(state);
|
|
self.block.hash(state);
|
|
self.start.hash(state);
|
|
}
|
|
}
|
|
|
|
/// The value of [`StateRequestHandler::seen_requests`].
///
/// Tracks how often the request identified by the corresponding
/// [`SeenRequestsKey`] has been answered.
enum SeenRequestsValue {
	/// First time we have seen the request.
	First,
	/// We have fulfilled the request `n` times.
	Fulfilled(usize),
}
|
|
|
|
/// Handler for incoming state requests from a remote peer.
pub struct StateRequestHandler<B: BlockT, Client> {
	// Client used to read state proofs and storage collections when answering
	// requests.
	client: Arc<Client>,
	// Receiving end of the channel the network feeds incoming requests into.
	request_receiver: async_channel::Receiver<IncomingRequest>,
	/// Maps from request to number of times we have seen this request.
	///
	/// This is used to check if a peer is spamming us with the same request.
	seen_requests: LruMap<SeenRequestsKey<B>, SeenRequestsValue>,
}
|
|
|
|
impl<B, Client> StateRequestHandler<B, Client>
|
|
where
|
|
B: BlockT,
|
|
Client: BlockBackend<B> + ProofProvider<B> + Send + Sync + 'static,
|
|
{
|
|
/// Create a new [`StateRequestHandler`].
|
|
pub fn new<N: NetworkBackend<B, <B as BlockT>::Hash>>(
|
|
protocol_id: &ProtocolId,
|
|
fork_id: Option<&str>,
|
|
client: Arc<Client>,
|
|
num_peer_hint: usize,
|
|
) -> (Self, N::RequestResponseProtocolConfig) {
|
|
// Reserve enough request slots for one request per peer when we are at the maximum
|
|
// number of peers.
|
|
let capacity = std::cmp::max(num_peer_hint, 1);
|
|
let (tx, request_receiver) = async_channel::bounded(capacity);
|
|
|
|
let protocol_config = generate_protocol_config::<_, B, N>(
|
|
protocol_id,
|
|
client
|
|
.block_hash(0u32.into())
|
|
.ok()
|
|
.flatten()
|
|
.expect("Genesis block exists; qed"),
|
|
fork_id,
|
|
tx,
|
|
);
|
|
|
|
let capacity = ByLength::new(num_peer_hint.max(1) as u32 * 2);
|
|
let seen_requests = LruMap::new(capacity);
|
|
|
|
(Self { client, request_receiver, seen_requests }, protocol_config)
|
|
}
|
|
|
|
/// Run [`StateRequestHandler`].
|
|
pub async fn run(mut self) {
|
|
while let Some(request) = self.request_receiver.next().await {
|
|
let IncomingRequest { peer, payload, pending_response } = request;
|
|
|
|
match self.handle_request(payload, pending_response, &peer) {
|
|
Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer),
|
|
Err(e) => debug!(
|
|
target: LOG_TARGET,
|
|
"Failed to handle state request from {}: {}", peer, e,
|
|
),
|
|
}
|
|
}
|
|
}
|
|
|
|
fn handle_request(
|
|
&mut self,
|
|
payload: Vec<u8>,
|
|
pending_response: oneshot::Sender<OutgoingResponse>,
|
|
peer: &PeerId,
|
|
) -> Result<(), HandleRequestError> {
|
|
let request = StateRequest::decode(&payload[..])?;
|
|
let block: B::Hash = Decode::decode(&mut request.block.as_ref())?;
|
|
|
|
let key = SeenRequestsKey { peer: *peer, block, start: request.start.clone() };
|
|
|
|
let mut reputation_changes = Vec::new();
|
|
|
|
match self.seen_requests.get(&key) {
|
|
Some(SeenRequestsValue::First) => {},
|
|
Some(SeenRequestsValue::Fulfilled(ref mut requests)) => {
|
|
*requests = requests.saturating_add(1);
|
|
|
|
if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER {
|
|
reputation_changes.push(rep::SAME_REQUEST);
|
|
}
|
|
},
|
|
None => {
|
|
self.seen_requests.insert(key.clone(), SeenRequestsValue::First);
|
|
},
|
|
}
|
|
|
|
trace!(
|
|
target: LOG_TARGET,
|
|
"Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}",
|
|
peer,
|
|
request.block,
|
|
&request.start,
|
|
request.no_proof,
|
|
);
|
|
|
|
let result = if reputation_changes.is_empty() {
|
|
let mut response = StateResponse::default();
|
|
|
|
if !request.no_proof {
|
|
let (proof, _count) = self.client.read_proof_collection(
|
|
block,
|
|
request.start.as_slice(),
|
|
MAX_RESPONSE_BYTES,
|
|
)?;
|
|
response.proof = proof.encode();
|
|
} else {
|
|
let entries = self.client.storage_collection(
|
|
block,
|
|
request.start.as_slice(),
|
|
MAX_RESPONSE_BYTES,
|
|
)?;
|
|
response.entries = entries
|
|
.into_iter()
|
|
.map(|(state, complete)| KeyValueStateEntry {
|
|
state_root: state.state_root,
|
|
entries: state
|
|
.key_values
|
|
.into_iter()
|
|
.map(|(key, value)| StateEntry { key, value })
|
|
.collect(),
|
|
complete,
|
|
})
|
|
.collect();
|
|
}
|
|
|
|
trace!(
|
|
target: LOG_TARGET,
|
|
"StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}",
|
|
response.entries.len(),
|
|
response.proof.len(),
|
|
response.entries.get(0).and_then(|top| top
|
|
.entries
|
|
.first()
|
|
.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
|
|
response.entries.get(0).and_then(|top| top
|
|
.entries
|
|
.last()
|
|
.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
|
|
);
|
|
if let Some(value) = self.seen_requests.get(&key) {
|
|
// If this is the first time we have processed this request, we need to change
|
|
// it to `Fulfilled`.
|
|
if let SeenRequestsValue::First = value {
|
|
*value = SeenRequestsValue::Fulfilled(1);
|
|
}
|
|
}
|
|
|
|
let mut data = Vec::with_capacity(response.encoded_len());
|
|
response.encode(&mut data)?;
|
|
Ok(data)
|
|
} else {
|
|
Err(())
|
|
};
|
|
|
|
pending_response
|
|
.send(OutgoingResponse { result, reputation_changes, sent_feedback: None })
|
|
.map_err(|_| HandleRequestError::SendResponse)
|
|
}
|
|
}
|
|
|
|
/// Possible failures while answering a state request.
#[derive(Debug, thiserror::Error)]
enum HandleRequestError {
	/// The protobuf-encoded request payload could not be decoded.
	#[error("Failed to decode request: {0}.")]
	DecodeProto(#[from] prost::DecodeError),

	/// The protobuf response could not be encoded.
	#[error("Failed to encode response: {0}.")]
	EncodeProto(#[from] prost::EncodeError),

	/// The block hash inside the request failed to decode (`codec::Error`).
	#[error("Failed to decode block hash: {0}.")]
	InvalidHash(#[from] codec::Error),

	/// Error from the client while reading proofs/storage.
	#[error(transparent)]
	Client(#[from] sp_blockchain::Error),

	/// The response could not be delivered over the response channel.
	#[error("Failed to send response.")]
	SendResponse,
}
|