Integrate litep2p into Polkadot SDK (#2944)

[litep2p](https://github.com/altonen/litep2p) is a libp2p-compatible P2P
networking library. It supports all of the features of `rust-libp2p`
that are currently being utilized by Polkadot SDK.

Compared to `rust-libp2p`, `litep2p` has a quite different architecture
which is why the new `litep2p` network backend is only able to use a
little of the existing code in `sc-network`. The design has been mainly
influenced by how we'd wish to structure our networking-related code in
Polkadot SDK: independent higher-level protocols directly communicating
with the network over links that support bidirectional backpressure. A
good example would be `NotificationHandle`/`RequestResponseHandle`
abstractions which allow, e.g., `SyncingEngine` to directly communicate
with peers to announce/request blocks.

I've tried running `polkadot --network-backend litep2p` with a few
different peer configurations and there is a noticeable reduction in
networking CPU usage. For high load (`--out-peers 200`), networking CPU
usage goes down from ~110% to ~30% (80 pp) and for normal load
(`--out-peers 40`), the usage goes down from ~55% to ~18% (37 pp).

These should not be taken as final numbers because:

a) there are still some low-hanging optimization fruits, such as
enabling [receive window
auto-tuning](https://github.com/libp2p/rust-yamux/pull/176), integrating
`Peerset` more closely with `litep2p` or improving memory usage of the
WebSocket transport
b) fixing bugs/instabilities that incorrectly cause `litep2p` to do less
work will increase the networking CPU usage
c) verification in a more diverse set of tests/conditions is needed

Nevertheless, these numbers should give an early estimate for CPU usage
of the new networking backend.

This PR consists of three separate changes:
* introduce a generic `PeerId` (wrapper around `Multihash`) so that we
don't have to use `NetworkService::PeerId` in every part of the code that
uses a `PeerId`
* introduce `NetworkBackend` trait, implement it for the libp2p network
stack and make Polkadot SDK generic over `NetworkBackend`
  * implement `NetworkBackend` for litep2p

The new library should be considered experimental which is why
`rust-libp2p` will remain as the default option for the time being. This
PR currently depends on the master branch of `litep2p` but I'll cut a
new release for the library once all review comments have been
addressed.

---------

Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io>
Co-authored-by: Dmitry Markin <dmitry@markin.tech>
Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io>
This commit is contained in:
Aaro Altonen
2024-04-08 19:44:13 +03:00
committed by GitHub
parent 9543d31474
commit 80616f6d03
181 changed files with 11055 additions and 1862 deletions
+3 -3
View File
@@ -20,7 +20,7 @@ use crate::{
discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut},
event::DhtEvent,
peer_info,
peer_store::PeerStoreHandle,
peer_store::PeerStoreProvider,
protocol::{CustomMessageOutcome, NotificationsSink, Protocol},
protocol_controller::SetId,
request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure},
@@ -173,7 +173,7 @@ impl<B: BlockT> Behaviour<B> {
local_public_key: PublicKey,
disco_config: DiscoveryConfig,
request_response_protocols: Vec<ProtocolConfig>,
peer_store_handle: PeerStoreHandle,
peer_store_handle: Arc<dyn PeerStoreProvider>,
external_addresses: Arc<Mutex<HashSet<Multiaddr>>>,
) -> Result<Self, request_responses::RegisterError> {
Ok(Self {
@@ -186,7 +186,7 @@ impl<B: BlockT> Behaviour<B> {
discovery: disco_config.finish(),
request_responses: request_responses::RequestResponsesBehaviour::new(
request_response_protocols.into_iter(),
Box::new(peer_store_handle),
peer_store_handle,
)?,
})
}
+532
View File
@@ -0,0 +1,532 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Bitswap server for Substrate.
//!
//! Allows querying transactions by hash over standard bitswap protocol
//! Only supports bitswap 1.2.0.
//! CID is expected to reference 256-bit Blake2b transaction hash.
use crate::{
request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
types::ProtocolName,
};
use cid::{self, Version};
use futures::StreamExt;
use log::{debug, error, trace};
use prost::Message;
use sc_client_api::BlockBackend;
use sc_network_types::PeerId;
use schema::bitswap::{
message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType},
Message as BitswapMessage,
};
use sp_runtime::traits::Block as BlockT;
use std::{io, sync::Arc, time::Duration};
use unsigned_varint::encode as varint_encode;
mod schema;
const LOG_TARGET: &str = "bitswap";
// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes
// https://github.com/ipfs/js-ipfs-bitswap/blob/
// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16
// We set it to the same value as max substrate protocol message
const MAX_PACKET_SIZE: u64 = 16 * 1024 * 1024;
/// Max number of queued responses before denying requests.
const MAX_REQUEST_QUEUE: usize = 20;
/// Max number of blocks per wantlist
const MAX_WANTED_BLOCKS: usize = 16;
/// Bitswap protocol name
const PROTOCOL_NAME: &'static str = "/ipfs/bitswap/1.2.0";
/// Prefix represents all metadata of a CID, without the actual content.
///
/// Sent back alongside block data so the requesting peer can reconstruct
/// the full CID (see `Prefix::to_bytes`).
#[derive(PartialEq, Eq, Clone, Debug)]
struct Prefix {
	/// The version of CID.
	pub version: Version,
	/// The codec of CID.
	pub codec: u64,
	/// The multihash type of CID.
	pub mh_type: u64,
	/// The multihash length of CID.
	pub mh_len: u8,
}
impl Prefix {
	/// Convert the prefix to encoded bytes.
	///
	/// Serializes version, codec, multihash type and multihash length as a
	/// sequence of unsigned varints.
	pub fn to_bytes(&self) -> Vec<u8> {
		let version: u64 = self.version.into();
		let fields = [version, self.codec, self.mh_type, u64::from(self.mh_len)];

		let mut encoded = Vec::with_capacity(4);
		for field in fields {
			let mut buf = varint_encode::u64_buffer();
			encoded.extend_from_slice(varint_encode::u64(field, &mut buf));
		}
		encoded
	}
}
/// Bitswap request handler
pub struct BitswapRequestHandler<B> {
	/// Client used to look up indexed transactions by hash.
	client: Arc<dyn BlockBackend<B> + Send + Sync>,
	/// Receiver for inbound bitswap requests, bounded to `MAX_REQUEST_QUEUE`.
	request_receiver: async_channel::Receiver<IncomingRequest>,
}
impl<B: BlockT> BitswapRequestHandler<B> {
	/// Create a new [`BitswapRequestHandler`].
	///
	/// Returns the handler together with the request-response [`ProtocolConfig`]
	/// that must be registered with the network so inbound bitswap requests
	/// are delivered to the handler.
	pub fn new(client: Arc<dyn BlockBackend<B> + Send + Sync>) -> (Self, ProtocolConfig) {
		let (tx, request_receiver) = async_channel::bounded(MAX_REQUEST_QUEUE);

		let config = ProtocolConfig {
			name: ProtocolName::from(PROTOCOL_NAME),
			fallback_names: vec![],
			max_request_size: MAX_PACKET_SIZE,
			max_response_size: MAX_PACKET_SIZE,
			request_timeout: Duration::from_secs(15),
			inbound_queue: Some(tx),
		};

		(Self { client, request_receiver }, config)
	}

	/// Run [`BitswapRequestHandler`].
	///
	/// Processes inbound requests one at a time until the request channel is
	/// closed. A request that fails to decode or process is answered with an
	/// empty error response.
	pub async fn run(mut self) {
		while let Some(request) = self.request_receiver.next().await {
			let IncomingRequest { peer, payload, pending_response } = request;

			match self.handle_message(&peer, &payload) {
				Ok(response) => {
					let response = OutgoingResponse {
						result: Ok(response),
						reputation_changes: Vec::new(),
						sent_feedback: None,
					};

					match pending_response.send(response) {
						Ok(()) => {
							trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",)
						},
						// NOTE: fixed copy-pasted "light client request" wording.
						Err(_) => debug!(
							target: LOG_TARGET,
							"Failed to handle bitswap request from {peer}: {}",
							BitswapError::SendResponse,
						),
					}
				},
				Err(err) => {
					error!(target: LOG_TARGET, "Failed to process request from {peer}: {err}");

					// TODO: adjust reputation?

					let response = OutgoingResponse {
						result: Err(()),
						reputation_changes: vec![],
						sent_feedback: None,
					};

					if pending_response.send(response).is_err() {
						debug!(
							target: LOG_TARGET,
							"Failed to handle bitswap request from {peer}: {}",
							BitswapError::SendResponse,
						);
					}
				},
			}
		}
	}

	/// Handle received Bitswap request
	///
	/// Decodes the protobuf-encoded `payload`, looks up every wanted block —
	/// a CIDv1 referencing a 256-bit Blake2b transaction hash — in indexed
	/// transaction storage and returns the encoded response message.
	/// Unsupported or malformed CIDs are skipped; an absent or oversized WANT
	/// list is an error.
	fn handle_message(&mut self, peer: &PeerId, payload: &[u8]) -> Result<Vec<u8>, BitswapError> {
		let request = schema::bitswap::Message::decode(payload)?;

		trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer);

		let mut response = BitswapMessage::default();

		let wantlist = match request.wantlist {
			Some(wantlist) => wantlist,
			None => {
				debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer);
				return Err(BitswapError::InvalidWantList)
			},
		};

		if wantlist.entries.len() > MAX_WANTED_BLOCKS {
			trace!(target: LOG_TARGET, "Ignored request: too many entries");
			return Err(BitswapError::TooManyEntries)
		}

		for entry in wantlist.entries {
			let cid = match cid::Cid::read_bytes(entry.block.as_slice()) {
				Ok(cid) => cid,
				Err(e) => {
					trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e);
					continue
				},
			};

			// Only CIDv1 wrapping a 32-byte Blake2b-256 multihash can be mapped
			// onto a transaction hash.
			if cid.version() != cid::Version::V1 ||
				cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) ||
				cid.hash().size() != 32
			{
				debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid);
				continue
			}

			let mut hash = B::Hash::default();
			hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]);
			// A storage error is logged but treated the same as "not found".
			let transaction = match self.client.indexed_transaction(hash) {
				Ok(ex) => ex,
				Err(e) => {
					error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e);
					None
				},
			};

			match transaction {
				Some(transaction) => {
					trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash);

					if entry.want_type == WantType::Block as i32 {
						// Peer wants the block contents: send prefix + data.
						let prefix = Prefix {
							version: cid.version(),
							codec: cid.codec(),
							mh_type: cid.hash().code(),
							mh_len: cid.hash().size(),
						};
						response
							.payload
							.push(MessageBlock { prefix: prefix.to_bytes(), data: transaction });
					} else {
						// Peer only asked whether we have the block.
						response.block_presences.push(BlockPresence {
							r#type: BlockPresenceType::Have as i32,
							cid: cid.to_bytes(),
						});
					}
				},
				None => {
					trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash);

					if entry.send_dont_have {
						response.block_presences.push(BlockPresence {
							r#type: BlockPresenceType::DontHave as i32,
							cid: cid.to_bytes(),
						});
					}
				},
			}
		}

		Ok(response.encode_to_vec())
	}
}
/// Bitswap protocol error.
///
/// Errors produced while processing an inbound request are logged and
/// reported to the peer as an empty error response (see
/// `BitswapRequestHandler::run`).
#[derive(Debug, thiserror::Error)]
pub enum BitswapError {
	/// Protobuf decoding error.
	#[error("Failed to decode request: {0}.")]
	DecodeProto(#[from] prost::DecodeError),

	/// Protobuf encoding error.
	#[error("Failed to encode response: {0}.")]
	EncodeProto(#[from] prost::EncodeError),

	/// Client backend error.
	#[error(transparent)]
	Client(#[from] sp_blockchain::Error),

	/// Error parsing CID
	#[error(transparent)]
	BadCid(#[from] cid::Error),

	/// Packet read error.
	#[error(transparent)]
	Read(#[from] io::Error),

	/// Error sending response.
	#[error("Failed to send response.")]
	SendResponse,

	/// Message doesn't have a WANT list.
	#[error("Invalid WANT list.")]
	InvalidWantList,

	/// Too many blocks requested.
	#[error("Too many block entries in the request.")]
	TooManyEntries,
}
#[cfg(test)]
mod tests {
	use super::*;
	use futures::channel::oneshot;
	use sc_block_builder::BlockBuilderBuilder;
	use schema::bitswap::{
		message::{wantlist::Entry, Wantlist},
		Message as BitswapMessage,
	};
	use sp_consensus::BlockOrigin;
	use sp_runtime::codec::Encode;
	use substrate_test_runtime::ExtrinsicBuilder;
	use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder};

	/// A payload that is not a valid protobuf message must be answered with an
	/// empty error response and no reputation change.
	#[tokio::test]
	async fn undecodable_message() {
		let client = substrate_test_runtime_client::new();
		let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client));

		tokio::spawn(async move { bitswap.run().await });

		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				// Arbitrary bytes, not valid protobuf.
				payload: vec![0x13, 0x37, 0x13, 0x38],
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(result, Err(()));
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());
		} else {
			panic!("invalid event received");
		}
	}

	/// A message without a WANT list is rejected, but a message with an empty
	/// WANT list gets a successful (empty) response.
	#[tokio::test]
	async fn empty_want_list() {
		let client = substrate_test_runtime_client::new();
		let (bitswap, mut config) = BitswapRequestHandler::new(Arc::new(client));

		tokio::spawn(async move { bitswap.run().await });

		// `wantlist: None` must be answered with an error.
		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.as_mut()
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				payload: BitswapMessage { wantlist: None, ..Default::default() }.encode_to_vec(),
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(result, Err(()));
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());
		} else {
			panic!("invalid event received");
		}

		// Empty WANT list should not cause an error
		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				payload: BitswapMessage {
					wantlist: Some(Default::default()),
					..Default::default()
				}
				.encode_to_vec(),
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(result, Ok(BitswapMessage::default().encode_to_vec()));
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());
		} else {
			panic!("invalid event received");
		}
	}

	/// A WANT list with more than `MAX_WANTED_BLOCKS` entries is rejected.
	#[tokio::test]
	async fn too_long_want_list() {
		let client = substrate_test_runtime_client::new();
		let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client));

		tokio::spawn(async move { bitswap.run().await });

		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				payload: BitswapMessage {
					wantlist: Some(Wantlist {
						// One entry over the limit.
						entries: (0..MAX_WANTED_BLOCKS + 1)
							.map(|_| Entry::default())
							.collect::<Vec<_>>(),
						full: false,
					}),
					..Default::default()
				}
				.encode_to_vec(),
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(result, Err(()));
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());
		} else {
			panic!("invalid event received");
		}
	}

	/// A well-formed request for an unknown transaction succeeds with an
	/// empty response (no `send_dont_have` requested).
	#[tokio::test]
	async fn transaction_not_found() {
		let client = TestClientBuilder::with_tx_storage(u32::MAX).build();
		let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client));

		tokio::spawn(async move { bitswap.run().await });

		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				payload: BitswapMessage {
					wantlist: Some(Wantlist {
						entries: vec![Entry {
							// Valid CIDv1 with a Blake2b-256 multihash of zeroes,
							// which matches no stored transaction.
							block: cid::Cid::new_v1(
								0x70,
								cid::multihash::Multihash::wrap(
									u64::from(cid::multihash::Code::Blake2b256),
									&[0u8; 32],
								)
								.unwrap(),
							)
							.to_bytes(),
							..Default::default()
						}],
						full: false,
					}),
					..Default::default()
				}
				.encode_to_vec(),
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(result, Ok(vec![]));
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());
		} else {
			panic!("invalid event received");
		}
	}

	/// An indexed transaction imported in a block is found by the hash of its
	/// indexed data and returned as block payload.
	#[tokio::test]
	async fn transaction_found() {
		let mut client = TestClientBuilder::with_tx_storage(u32::MAX).build();
		let mut block_builder = BlockBuilderBuilder::new(&client)
			.on_parent_block(client.chain_info().genesis_hash)
			.with_parent_block_number(0)
			.build()
			.unwrap();

		// encoded extrinsic: [161, .. , 2, 6, 16, 19, 55, 19, 56]
		let ext = ExtrinsicBuilder::new_indexed_call(vec![0x13, 0x37, 0x13, 0x38]).build();
		// The indexed data is the last 4 bytes of the encoded extrinsic.
		let pattern_index = ext.encoded_size() - 4;

		block_builder.push(ext.clone()).unwrap();

		let block = block_builder.build().unwrap().block;
		client.import(BlockOrigin::File, block).await.unwrap();

		let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client));

		tokio::spawn(async move { bitswap.run().await });

		let (tx, rx) = oneshot::channel();
		config
			.inbound_queue
			.unwrap()
			.send(IncomingRequest {
				peer: PeerId::random(),
				payload: BitswapMessage {
					wantlist: Some(Wantlist {
						entries: vec![Entry {
							// CID built from the Blake2b-256 hash of the indexed data.
							block: cid::Cid::new_v1(
								0x70,
								cid::multihash::Multihash::wrap(
									u64::from(cid::multihash::Code::Blake2b256),
									&sp_crypto_hashing::blake2_256(&ext.encode()[pattern_index..]),
								)
								.unwrap(),
							)
							.to_bytes(),
							..Default::default()
						}],
						full: false,
					}),
					..Default::default()
				}
				.encode_to_vec(),
				pending_response: tx,
			})
			.await
			.unwrap();

		if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await {
			assert_eq!(reputation_changes, Vec::new());
			assert!(sent_feedback.is_none());

			let response =
				schema::bitswap::Message::decode(&result.expect("fetch to succeed")[..]).unwrap();
			assert_eq!(response.payload[0].data, vec![0x13, 0x37, 0x13, 0x38]);
		} else {
			panic!("invalid event received");
		}
	}
}
@@ -0,0 +1,23 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Include sources generated from protobuf definitions.
/// Protobuf-generated types for the bitswap wire format, produced by the
/// build script into `OUT_DIR`.
pub(crate) mod bitswap {
	include!(concat!(env!("OUT_DIR"), "/bitswap.message.rs"));
}
+166 -18
View File
@@ -23,21 +23,26 @@
pub use crate::{
discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR,
peer_store::PeerStoreProvider,
protocol::{notification_service, NotificationsSink, ProtocolHandlePair},
request_responses::{
IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
},
service::traits::NotificationService,
service::{
metrics::NotificationMetrics,
traits::{NotificationConfig, NotificationService, PeerStore},
},
types::ProtocolName,
};
pub use libp2p::{
build_multiaddr,
identity::{self, ed25519, Keypair},
multiaddr, Multiaddr, PeerId,
multiaddr, Multiaddr,
};
use sc_network_types::PeerId;
use crate::peer_store::PeerStoreHandle;
use crate::service::{ensure_addresses_consistent_with_transport, traits::NetworkBackend};
use codec::Encode;
use prometheus_endpoint::Registry;
use zeroize::Zeroize;
@@ -61,6 +66,7 @@ use std::{
path::{Path, PathBuf},
pin::Pin,
str::{self, FromStr},
sync::Arc,
};
/// Protocol name prefix, transmitted on the wire for legacy protocol names.
@@ -99,7 +105,7 @@ impl fmt::Debug for ProtocolId {
/// let (peer_id, addr) = parse_str_addr(
/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"
/// ).unwrap();
/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap());
/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::<PeerId>().unwrap().into());
/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::<Multiaddr>().unwrap());
/// ```
pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> {
@@ -569,6 +575,17 @@ impl NonDefaultSetConfig {
}
}
impl NotificationConfig for NonDefaultSetConfig {
fn set_config(&self) -> &SetConfig {
&self.set_config
}
/// Get reference to protocol name.
fn protocol_name(&self) -> &ProtocolName {
&self.protocol_name
}
}
/// Network service configuration.
#[derive(Clone, Debug)]
pub struct NetworkConfiguration {
@@ -655,6 +672,9 @@ pub struct NetworkConfiguration {
/// a modification of the way the implementation works. Different nodes with different
/// configured values remain compatible with each other.
pub yamux_window_size: Option<u32>,
/// Networking backend used for P2P communication.
pub network_backend: NetworkBackendType,
}
impl NetworkConfiguration {
@@ -687,6 +707,7 @@ impl NetworkConfiguration {
.expect("value is a constant; constant is non-zero; qed."),
yamux_window_size: None,
ipfs_server: false,
network_backend: NetworkBackendType::Libp2p,
}
}
@@ -722,18 +743,15 @@ impl NetworkConfiguration {
}
/// Network initialization parameters.
pub struct Params<Block: BlockT> {
pub struct Params<Block: BlockT, H: ExHashT, N: NetworkBackend<Block, H>> {
/// Assigned role for our node (full, light, ...).
pub role: Role,
/// How to spawn background tasks.
pub executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send>,
pub executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>,
/// Network layer configuration.
pub network_config: FullNetworkConfiguration,
/// Peer store with known nodes, peer reputations, etc.
pub peer_store: PeerStoreHandle,
pub network_config: FullNetworkConfiguration<Block, H, N>,
/// Legacy name of the protocol to use on the wire. Should be different for each chain.
pub protocol_id: ProtocolId,
@@ -749,25 +767,43 @@ pub struct Params<Block: BlockT> {
pub metrics_registry: Option<Registry>,
/// Block announce protocol configuration
pub block_announce_config: NonDefaultSetConfig,
pub block_announce_config: N::NotificationProtocolConfig,
/// Bitswap configuration, if the server has been enabled.
pub bitswap_config: Option<N::BitswapConfig>,
/// Notification metrics.
pub notification_metrics: NotificationMetrics,
}
/// Full network configuration.
pub struct FullNetworkConfiguration {
pub struct FullNetworkConfiguration<B: BlockT + 'static, H: ExHashT, N: NetworkBackend<B, H>> {
/// Installed notification protocols.
pub(crate) notification_protocols: Vec<NonDefaultSetConfig>,
pub(crate) notification_protocols: Vec<N::NotificationProtocolConfig>,
/// List of request-response protocols that the node supports.
pub(crate) request_response_protocols: Vec<RequestResponseConfig>,
pub(crate) request_response_protocols: Vec<N::RequestResponseProtocolConfig>,
/// Network configuration.
pub network_config: NetworkConfiguration,
/// [`PeerStore`](crate::peer_store::PeerStore),
peer_store: Option<N::PeerStore>,
/// Handle to [`PeerStore`](crate::peer_store::PeerStore).
peer_store_handle: Arc<dyn PeerStoreProvider>,
}
impl FullNetworkConfiguration {
impl<B: BlockT + 'static, H: ExHashT, N: NetworkBackend<B, H>> FullNetworkConfiguration<B, H, N> {
/// Create new [`FullNetworkConfiguration`].
pub fn new(network_config: &NetworkConfiguration) -> Self {
let bootnodes = network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect();
let peer_store = N::peer_store(bootnodes);
let peer_store_handle = peer_store.handle();
Self {
peer_store: Some(peer_store),
peer_store_handle,
notification_protocols: Vec::new(),
request_response_protocols: Vec::new(),
network_config: network_config.clone(),
@@ -775,19 +811,131 @@ impl FullNetworkConfiguration {
}
/// Add a notification protocol.
pub fn add_notification_protocol(&mut self, config: NonDefaultSetConfig) {
pub fn add_notification_protocol(&mut self, config: N::NotificationProtocolConfig) {
self.notification_protocols.push(config);
}
/// Get reference to installed notification protocols.
pub fn notification_protocols(&self) -> &Vec<NonDefaultSetConfig> {
pub fn notification_protocols(&self) -> &Vec<N::NotificationProtocolConfig> {
&self.notification_protocols
}
/// Add a request-response protocol.
pub fn add_request_response_protocol(&mut self, config: RequestResponseConfig) {
pub fn add_request_response_protocol(&mut self, config: N::RequestResponseProtocolConfig) {
self.request_response_protocols.push(config);
}
/// Get handle to [`PeerStore`].
pub fn peer_store_handle(&self) -> Arc<dyn PeerStoreProvider> {
Arc::clone(&self.peer_store_handle)
}
/// Take [`PeerStore`].
///
/// `PeerStore` is created when `FullNetworkConfig` is initialized so that `PeerStoreHandle`s
/// can be passed onto notification protocols. `PeerStore` itself should be started only once
/// and since technically it's not a libp2p task, it should be started with `SpawnHandle` in
/// `builder.rs` instead of using the libp2p/litep2p executor in the networking backend. This
/// function consumes `PeerStore` and starts its event loop in the appropriate place.
pub fn take_peer_store(&mut self) -> N::PeerStore {
self.peer_store
.take()
.expect("`PeerStore` can only be taken once when it's started; qed")
}
/// Verify addresses are consistent with enabled transports.
pub fn sanity_check_addresses(&self) -> Result<(), crate::error::Error> {
ensure_addresses_consistent_with_transport(
self.network_config.listen_addresses.iter(),
&self.network_config.transport,
)?;
ensure_addresses_consistent_with_transport(
self.network_config.boot_nodes.iter().map(|x| &x.multiaddr),
&self.network_config.transport,
)?;
ensure_addresses_consistent_with_transport(
self.network_config
.default_peers_set
.reserved_nodes
.iter()
.map(|x| &x.multiaddr),
&self.network_config.transport,
)?;
for notification_protocol in &self.notification_protocols {
ensure_addresses_consistent_with_transport(
notification_protocol.set_config().reserved_nodes.iter().map(|x| &x.multiaddr),
&self.network_config.transport,
)?;
}
ensure_addresses_consistent_with_transport(
self.network_config.public_addresses.iter(),
&self.network_config.transport,
)?;
Ok(())
}
/// Check for duplicate bootnodes.
pub fn sanity_check_bootnodes(&self) -> Result<(), crate::error::Error> {
self.network_config.boot_nodes.iter().try_for_each(|bootnode| {
if let Some(other) = self
.network_config
.boot_nodes
.iter()
.filter(|o| o.multiaddr == bootnode.multiaddr)
.find(|o| o.peer_id != bootnode.peer_id)
{
Err(crate::error::Error::DuplicateBootnode {
address: bootnode.multiaddr.clone(),
first_id: bootnode.peer_id.into(),
second_id: other.peer_id.into(),
})
} else {
Ok(())
}
})
}
/// Collect all reserved nodes and bootnodes addresses.
pub fn known_addresses(&self) -> Vec<(PeerId, Multiaddr)> {
let mut addresses: Vec<_> = self
.network_config
.default_peers_set
.reserved_nodes
.iter()
.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
.chain(self.notification_protocols.iter().flat_map(|protocol| {
protocol
.set_config()
.reserved_nodes
.iter()
.map(|reserved| (reserved.peer_id, reserved.multiaddr.clone()))
}))
.chain(
self.network_config
.boot_nodes
.iter()
.map(|bootnode| (bootnode.peer_id, bootnode.multiaddr.clone())),
)
.collect();
// Remove possible duplicates.
addresses.sort();
addresses.dedup();
addresses
}
}
/// Network backend type.
///
/// [`NetworkBackendType::Libp2p`] is the default used by
/// `NetworkConfiguration`.
#[derive(Debug, Clone)]
pub enum NetworkBackendType {
	/// Use libp2p for P2P networking.
	Libp2p,
	/// Use litep2p for P2P networking.
	Litep2p,
}
#[cfg(test)]
+3
View File
@@ -77,6 +77,9 @@ pub enum Error {
/// Connection closed.
#[error("Connection closed")]
ConnectionClosed,
/// Litep2p error.
#[error("Litep2p error: `{0}`")]
Litep2p(litep2p::Error),
}
// Make `Debug` use the `Display` implementation.
+8 -5
View File
@@ -243,6 +243,8 @@
//! More precise usage details are still being worked on and will likely change in the future.
mod behaviour;
mod bitswap;
mod litep2p;
mod protocol;
#[cfg(test)]
@@ -262,27 +264,28 @@ pub mod transport;
pub mod types;
pub mod utils;
pub use crate::litep2p::Litep2pNetworkBackend;
pub use event::{DhtEvent, Event};
#[doc(inline)]
pub use libp2p::{multiaddr, Multiaddr, PeerId};
pub use request_responses::{Config, IfDisconnected, RequestFailure};
pub use sc_network_common::{
role::{ObservedRole, Roles},
types::ReputationChange,
};
pub use service::{
metrics::NotificationMetrics,
signature::Signature,
traits::{
KademliaKey, MessageSink, NetworkBlock, NetworkDHTProvider, NetworkEventStream,
NetworkNotification, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo,
NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest,
KademliaKey, MessageSink, NetworkBackend, NetworkBlock, NetworkDHTProvider,
NetworkEventStream, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo,
NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, NotificationConfig,
NotificationSender as NotificationSenderT, NotificationSenderError,
NotificationSenderReady, NotificationService,
},
DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure,
PublicKey,
};
pub use types::ProtocolName;
pub use types::{multiaddr, Multiaddr, PeerId, ProtocolName};
/// The maximum allowed number of established connections per peer.
///
@@ -0,0 +1,528 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! libp2p-related discovery code for litep2p backend.
use crate::{
config::{NetworkConfiguration, ProtocolId},
multiaddr::Protocol,
peer_store::PeerStoreProvider,
Multiaddr,
};
use array_bytes::bytes2hex;
use futures::{FutureExt, Stream};
use futures_timer::Delay;
use ip_network::IpNetwork;
use libp2p::kad::record::Key as KademliaKey;
use litep2p::{
protocol::{
libp2p::{
identify::{Config as IdentifyConfig, IdentifyEvent},
kademlia::{
Config as KademliaConfig, ConfigBuilder as KademliaConfigBuilder, KademliaEvent,
KademliaHandle, QueryId, Quorum, Record, RecordKey,
},
ping::{Config as PingConfig, PingEvent},
},
mdns::{Config as MdnsConfig, MdnsEvent},
},
PeerId, ProtocolName,
};
use parking_lot::RwLock;
use schnellru::{ByLength, LruMap};
use std::{
cmp,
collections::{HashMap, HashSet, VecDeque},
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p::discovery";
/// Kademlia query interval.
const KADEMLIA_QUERY_INTERVAL: Duration = Duration::from_secs(5);
/// mDNS query interval.
const MDNS_QUERY_INTERVAL: Duration = Duration::from_secs(30);
/// Minimum number of confirmations received before an address is verified.
const MIN_ADDRESS_CONFIRMATIONS: usize = 5;
/// Discovery events.
#[derive(Debug)]
pub enum DiscoveryEvent {
	/// Ping RTT measured for peer.
	Ping {
		/// Remote peer ID.
		peer: PeerId,

		/// Ping round-trip time.
		rtt: Duration,
	},

	/// Peer identified over `/ipfs/identify/1.0.0` protocol.
	Identified {
		/// Peer ID.
		peer: PeerId,

		/// Identify protocol version.
		protocol_version: Option<String>,

		/// Identify user agent version.
		user_agent: Option<String>,

		/// Observed address.
		observed_address: Multiaddr,

		/// Listen addresses.
		listen_addresses: Vec<Multiaddr>,

		/// Supported protocols.
		supported_protocols: HashSet<ProtocolName>,
	},

	/// One or more addresses discovered.
	Discovered {
		/// Discovered addresses.
		addresses: Vec<Multiaddr>,
	},

	/// Routing table has been updated.
	RoutingTableUpdate {
		/// Peers that were added to routing table.
		peers: HashSet<PeerId>,
	},

	/// New external address discovered.
	ExternalAddressDiscovered {
		/// Discovered address.
		address: Multiaddr,
	},

	/// Record was found from the DHT.
	GetRecordSuccess {
		/// Query ID.
		query_id: QueryId,

		/// Record.
		record: Record,
	},

	/// Record was successfully stored on the DHT.
	PutRecordSuccess {
		/// Query ID.
		query_id: QueryId,
	},

	/// Query failed.
	QueryFailed {
		/// Query ID.
		query_id: QueryId,
	},
}
/// Discovery.
///
/// Drives peer discovery for the `litep2p` backend: periodic Kademlia random walks,
/// mDNS on the local network (if enabled), plus ping/identify event handling.
/// Consumed as a [`Stream`] of [`DiscoveryEvent`]s.
pub struct Discovery {
	/// Ping event stream.
	ping_event_stream: Box<dyn Stream<Item = PingEvent> + Send + Unpin>,

	/// Identify event stream.
	identify_event_stream: Box<dyn Stream<Item = IdentifyEvent> + Send + Unpin>,

	/// mDNS event stream, if enabled.
	mdns_event_stream: Option<Box<dyn Stream<Item = MdnsEvent> + Send + Unpin>>,

	/// Kademlia handle.
	kademlia_handle: KademliaHandle,

	/// `Peerstore` handle. Currently unused by this object.
	_peerstore_handle: Arc<dyn PeerStoreProvider>,

	/// Next Kademlia query for a random peer ID.
	///
	/// If `None`, there is currently a query pending.
	next_kad_query: Option<Delay>,

	/// Active `FIND_NODE` query if it exists.
	find_node_query_id: Option<QueryId>,

	/// Pending events, flushed before the inner streams are polled.
	pending_events: VecDeque<DiscoveryEvent>,

	/// Allow non-global addresses in the DHT.
	allow_non_global_addresses: bool,

	/// Protocols supported by the local node (the two Kademlia protocol names).
	local_protocols: HashSet<ProtocolName>,

	/// Public addresses.
	public_addresses: HashSet<Multiaddr>,

	/// Listen addresses, shared with the backend which populates them.
	listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,

	/// External address confirmations, an LRU of observed-address → count.
	address_confirmations: LruMap<Multiaddr, usize>,

	/// Delay to next `FIND_NODE` query; doubled on failure (see the `Stream` impl).
	duration_to_next_find_query: Duration,
}
/// Legacy (fallback) Kademlia protocol name based on `protocol_id`.
fn legacy_kademlia_protocol_name(id: &ProtocolId) -> ProtocolName {
	let name = format!("/{}/kad", id.as_ref());
	ProtocolName::from(name)
}
/// Kademlia protocol name based on `genesis_hash` and `fork_id`.
fn kademlia_protocol_name<Hash: AsRef<[u8]>>(
	genesis_hash: Hash,
	fork_id: Option<&str>,
) -> ProtocolName {
	// The fork identifier, when present, is inserted between the hex-encoded
	// genesis hash and the `/kad` suffix.
	let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref());
	let name = match fork_id {
		Some(fork_id) => format!("/{}/{}/kad", genesis_hash_hex, fork_id),
		None => format!("/{}/kad", genesis_hash_hex),
	};

	ProtocolName::from(name)
}
impl Discovery {
	/// Create new [`Discovery`].
	///
	/// Enables `/ipfs/ping/1.0.0` and `/ipfs/identify/1.0.0` by default and starts
	/// the mDNS peer discovery if it was enabled.
	///
	/// Returns the [`Discovery`] object together with the ping, identify, Kademlia
	/// and (optional) mDNS protocol configurations that the caller must install
	/// into the `litep2p` instance.
	pub fn new<Hash: AsRef<[u8]> + Clone>(
		config: &NetworkConfiguration,
		genesis_hash: Hash,
		fork_id: Option<&str>,
		protocol_id: &ProtocolId,
		known_peers: HashMap<PeerId, Vec<Multiaddr>>,
		listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
		_peerstore_handle: Arc<dyn PeerStoreProvider>,
	) -> (Self, PingConfig, IdentifyConfig, KademliaConfig, Option<MdnsConfig>) {
		let (ping_config, ping_event_stream) = PingConfig::default();
		let user_agent = format!("{} ({})", config.client_version, config.node_name);

		let (identify_config, identify_event_stream) = IdentifyConfig::new(
			"/substrate/1.0".to_string(),
			Some(user_agent),
			config.public_addresses.clone(),
		);

		// mDNS is only available with the `Normal` transport; the memory transport is
		// not supported by this backend.
		let (mdns_config, mdns_event_stream) = match config.transport {
			crate::config::TransportConfig::Normal { enable_mdns, .. } => match enable_mdns {
				true => {
					let (mdns_config, mdns_event_stream) = MdnsConfig::new(MDNS_QUERY_INTERVAL);
					(Some(mdns_config), Some(mdns_event_stream))
				},
				false => (None, None),
			},
			_ => panic!("memory transport not supported"),
		};

		// Kademlia speaks both the chain-specific protocol name and the legacy
		// `ProtocolId`-based name as a fallback.
		let (kademlia_config, kademlia_handle) = {
			let protocol_names = vec![
				kademlia_protocol_name(genesis_hash.clone(), fork_id),
				legacy_kademlia_protocol_name(protocol_id),
			];

			KademliaConfigBuilder::new()
				.with_known_peers(known_peers)
				.with_protocol_names(protocol_names)
				.build()
		};

		(
			Self {
				ping_event_stream,
				identify_event_stream,
				mdns_event_stream,
				kademlia_handle,
				_peerstore_handle,
				listen_addresses,
				find_node_query_id: None,
				pending_events: VecDeque::new(),
				// Initial backoff for failed `FIND_NODE` queries; doubled on each
				// failure by the `Stream` implementation.
				duration_to_next_find_query: Duration::from_secs(1),
				// Track up to 8 candidate external addresses at a time.
				address_confirmations: LruMap::new(ByLength::new(8)),
				allow_non_global_addresses: config.allow_non_globals_in_dht,
				public_addresses: config.public_addresses.iter().cloned().collect(),
				next_kad_query: Some(Delay::new(KADEMLIA_QUERY_INTERVAL)),
				local_protocols: HashSet::from_iter([
					kademlia_protocol_name(genesis_hash, fork_id),
					legacy_kademlia_protocol_name(protocol_id),
				]),
			},
			ping_config,
			identify_config,
			kademlia_config,
			mdns_config,
		)
	}

	/// Add known peer to `Kademlia`.
	#[allow(unused)]
	pub async fn add_known_peer(&mut self, peer: PeerId, addresses: Vec<Multiaddr>) {
		self.kademlia_handle.add_known_peer(peer, addresses).await;
	}

	/// Add self-reported addresses to routing table if `peer` supports
	/// at least one of the locally supported DHT protocol.
	///
	/// Non-global addresses are filtered out unless `allow_non_global_addresses`
	/// was enabled in the network configuration.
	pub async fn add_self_reported_address(
		&mut self,
		peer: PeerId,
		supported_protocols: HashSet<ProtocolName>,
		addresses: Vec<Multiaddr>,
	) {
		// Ignore peers that don't speak either of our Kademlia protocol names.
		if self.local_protocols.is_disjoint(&supported_protocols) {
			return
		}

		let addresses = addresses
			.into_iter()
			.filter_map(|address| {
				if !self.allow_non_global_addresses && !Discovery::can_add_to_dht(&address) {
					log::trace!(
						target: LOG_TARGET,
						"ignoring self-reported non-global address {address} from {peer}."
					);

					return None
				}

				Some(address)
			})
			.collect();

		log::trace!(
			target: LOG_TARGET,
			"add self-reported addresses for {peer:?}: {addresses:?}",
		);

		self.kademlia_handle.add_known_peer(peer, addresses).await;
	}

	/// Start Kademlia `GET_VALUE` query for `key`.
	///
	/// Returns the query ID which is later matched against
	/// [`DiscoveryEvent::GetRecordSuccess`]/[`DiscoveryEvent::QueryFailed`].
	pub async fn get_value(&mut self, key: KademliaKey) -> QueryId {
		self.kademlia_handle
			.get_record(RecordKey::new(&key.to_vec()), Quorum::One)
			.await
	}

	/// Publish value on the DHT using Kademlia `PUT_VALUE`.
	///
	/// Returns the query ID which is later matched against
	/// [`DiscoveryEvent::PutRecordSuccess`]/[`DiscoveryEvent::QueryFailed`].
	pub async fn put_value(&mut self, key: KademliaKey, value: Vec<u8>) -> QueryId {
		self.kademlia_handle
			.put_record(Record::new(RecordKey::new(&key.to_vec()), value))
			.await
	}

	/// Check if the observed address is a known address.
	///
	/// Compares the two multiaddresses component by component. A trailing
	/// `/p2p/<peer ID>` on either side is ignored, so `/ip4/1.2.3.4/tcp/30333`
	/// matches `/ip4/1.2.3.4/tcp/30333/p2p/<id>`.
	fn is_known_address(known: &Multiaddr, observed: &Multiaddr) -> bool {
		let mut known = known.iter();
		let mut observed = observed.iter();

		loop {
			match (known.next(), observed.next()) {
				// Both exhausted simultaneously: identical addresses.
				(None, None) => return true,
				// Only a trailing `/p2p/..` remains on one side: still a match.
				(None, Some(Protocol::P2p(_))) => return true,
				(Some(Protocol::P2p(_)), None) => return true,
				// Any other mismatch (including one side being exhausted) fails.
				(known, observed) if known != observed => return false,
				_ => {},
			}
		}
	}

	/// Can `address` be added to DHT.
	///
	/// DNS addresses are always accepted (they can't be classified without
	/// resolution); IP addresses must be global; anything else is rejected.
	fn can_add_to_dht(address: &Multiaddr) -> bool {
		let ip = match address.iter().next() {
			Some(Protocol::Ip4(ip)) => IpNetwork::from(ip),
			Some(Protocol::Ip6(ip)) => IpNetwork::from(ip),
			Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) =>
				return true,
			_ => return false,
		};

		ip.is_global()
	}

	/// Check if `address` can be considered a new external address.
	///
	/// An address is accepted immediately if it matches one of our listen or public
	/// addresses; otherwise it must be observed [`MIN_ADDRESS_CONFIRMATIONS`] times
	/// before being accepted. Confirmation counts live in an LRU map, so rarely-seen
	/// candidates are eventually evicted.
	fn is_new_external_address(&mut self, address: &Multiaddr) -> bool {
		log::trace!(target: LOG_TARGET, "verify new external address: {address}");

		// is the address one of our known addresses
		if self
			.listen_addresses
			.read()
			.iter()
			.chain(self.public_addresses.iter())
			.any(|known_address| Discovery::is_known_address(&known_address, &address))
		{
			return true
		}

		match self.address_confirmations.get(address) {
			Some(confirmations) => {
				*confirmations += 1usize;

				// NOTE(review): once confirmed, the entry stays in the map, so every
				// further observation also returns `true` — confirm this is intended.
				if *confirmations >= MIN_ADDRESS_CONFIRMATIONS {
					return true
				}
			},
			None => {
				self.address_confirmations.insert(address.clone(), 1usize);
			},
		}

		false
	}
}
impl Stream for Discovery {
	type Item = DiscoveryEvent;

	// Poll order: queued events, `FIND_NODE` timer, Kademlia, identify, ping, mDNS.
	// A `Poll::Ready(None)` from any inner stream terminates this stream.
	fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
		let this = Pin::into_inner(self);

		// Flush an event queued by a previous poll (e.g., an external address
		// discovered while handling an identify event).
		if let Some(event) = this.pending_events.pop_front() {
			return Poll::Ready(Some(event))
		}

		// Time to start the next random-walk `FIND_NODE` query?
		if let Some(mut delay) = this.next_kad_query.take() {
			match delay.poll_unpin(cx) {
				Poll::Pending => {
					// Not yet; put the timer back.
					this.next_kad_query = Some(delay);
				},
				Poll::Ready(()) => {
					let peer = PeerId::random();

					log::trace!(target: LOG_TARGET, "start next kademlia query for {peer:?}");

					match this.kademlia_handle.try_find_node(peer) {
						Ok(query_id) => {
							this.find_node_query_id = Some(query_id);
						},
						Err(()) => {
							// Kademlia couldn't accept the query; retry with
							// exponential backoff capped at 60 seconds.
							this.duration_to_next_find_query = cmp::min(
								this.duration_to_next_find_query * 2,
								Duration::from_secs(60),
							);
							this.next_kad_query =
								Some(Delay::new(this.duration_to_next_find_query));
						},
					}
				},
			}
		}

		match Pin::new(&mut this.kademlia_handle).poll_next(cx) {
			Poll::Pending => {},
			Poll::Ready(None) => return Poll::Ready(None),
			Poll::Ready(Some(KademliaEvent::FindNodeSuccess { peers, .. })) => {
				// the addresses are already inserted into the DHT and in `TransportManager` so
				// there is no need to add them again. The found peers must be registered to
				// `Peerstore` so other protocols are aware of them through `Peerset`.
				log::trace!(target: LOG_TARGET, "dht random walk yielded {} peers", peers.len());

				// NOTE(review): `find_node_query_id` is not cleared here, so a later
				// `QueryFailed` for this (now stale) ID would be swallowed as a
				// random-walk failure — confirm whether it should be reset to `None`.
				this.next_kad_query = Some(Delay::new(KADEMLIA_QUERY_INTERVAL));

				return Poll::Ready(Some(DiscoveryEvent::RoutingTableUpdate {
					peers: peers.into_iter().map(|(peer, _)| peer).collect(),
				}))
			},
			Poll::Ready(Some(KademliaEvent::RoutingTableUpdate { peers })) => {
				log::trace!(target: LOG_TARGET, "routing table update, discovered {} peers", peers.len());

				return Poll::Ready(Some(DiscoveryEvent::RoutingTableUpdate {
					peers: peers.into_iter().collect(),
				}))
			},
			Poll::Ready(Some(KademliaEvent::GetRecordSuccess { query_id, record })) => {
				log::trace!(
					target: LOG_TARGET,
					"`GET_RECORD` succeeded for {query_id:?}: {record:?}",
				);

				return Poll::Ready(Some(DiscoveryEvent::GetRecordSuccess { query_id, record }));
			},
			// `PutRecordSucess` (sic) is the variant name as exposed by `litep2p`.
			Poll::Ready(Some(KademliaEvent::PutRecordSucess { query_id, key: _ })) =>
				return Poll::Ready(Some(DiscoveryEvent::PutRecordSuccess { query_id })),
			Poll::Ready(Some(KademliaEvent::QueryFailed { query_id })) => {
				// A failed random-walk `FIND_NODE` is retried with backoff instead of
				// being reported; all other query failures are surfaced to the caller.
				match this.find_node_query_id == Some(query_id) {
					true => {
						this.find_node_query_id = None;
						this.duration_to_next_find_query =
							cmp::min(this.duration_to_next_find_query * 2, Duration::from_secs(60));
						this.next_kad_query = Some(Delay::new(this.duration_to_next_find_query));
					},
					false => return Poll::Ready(Some(DiscoveryEvent::QueryFailed { query_id })),
				}
			},
		}

		match Pin::new(&mut this.identify_event_stream).poll_next(cx) {
			Poll::Pending => {},
			Poll::Ready(None) => return Poll::Ready(None),
			Poll::Ready(Some(IdentifyEvent::PeerIdentified {
				peer,
				protocol_version,
				user_agent,
				listen_addresses,
				supported_protocols,
				observed_address,
			})) => {
				// The identify event is returned immediately; a confirmed external
				// address is queued and delivered on the next poll.
				if this.is_new_external_address(&observed_address) {
					this.pending_events.push_back(DiscoveryEvent::ExternalAddressDiscovered {
						address: observed_address.clone(),
					});
				}

				return Poll::Ready(Some(DiscoveryEvent::Identified {
					peer,
					protocol_version,
					user_agent,
					listen_addresses,
					observed_address,
					supported_protocols,
				}));
			},
		}

		match Pin::new(&mut this.ping_event_stream).poll_next(cx) {
			Poll::Pending => {},
			Poll::Ready(None) => return Poll::Ready(None),
			Poll::Ready(Some(PingEvent::Ping { peer, ping })) =>
				return Poll::Ready(Some(DiscoveryEvent::Ping { peer, rtt: ping })),
		}

		if let Some(ref mut mdns_event_stream) = &mut this.mdns_event_stream {
			match Pin::new(mdns_event_stream).poll_next(cx) {
				Poll::Pending => {},
				Poll::Ready(None) => return Poll::Ready(None),
				Poll::Ready(Some(MdnsEvent::Discovered(addresses))) =>
					return Poll::Ready(Some(DiscoveryEvent::Discovered { addresses })),
			}
		}

		Poll::Pending
	}
}
// NOTE(review): diff-viewer residue ("+989 / View File / @@ -0,0 +1,989 @@") removed.
// A separate file begins below: the `NetworkBackend` implementation for `litep2p`.
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! `NetworkBackend` implementation for `litep2p`.
use crate::{
config::{
FullNetworkConfiguration, IncomingRequest, NodeKeyConfig, NotificationHandshake, Params,
SetConfig, TransportConfig,
},
error::Error,
event::{DhtEvent, Event},
litep2p::{
discovery::{Discovery, DiscoveryEvent},
peerstore::Peerstore,
service::{Litep2pNetworkService, NetworkServiceCommand},
shim::{
bitswap::BitswapServer,
notification::{
config::{NotificationProtocolConfig, ProtocolControlHandle},
peerset::PeersetCommand,
},
request_response::{RequestResponseConfig, RequestResponseProtocol},
},
},
multiaddr::{Multiaddr, Protocol},
peer_store::PeerStoreProvider,
protocol,
service::{
metrics::{register_without_sources, MetricSources, Metrics, NotificationMetrics},
out_events,
traits::{BandwidthSink, NetworkBackend, NetworkService},
},
NetworkStatus, NotificationService, ProtocolName,
};
use codec::Encode;
use futures::StreamExt;
use libp2p::kad::RecordKey;
use litep2p::{
config::ConfigBuilder,
crypto::ed25519::{Keypair, SecretKey},
executor::Executor,
protocol::{
libp2p::{bitswap::Config as BitswapConfig, kademlia::QueryId},
request_response::ConfigBuilder as RequestResponseConfigBuilder,
},
transport::{
tcp::config::Config as TcpTransportConfig,
websocket::config::Config as WebSocketTransportConfig, Endpoint,
},
types::ConnectionId,
Error as Litep2pError, Litep2p, Litep2pEvent, ProtocolName as Litep2pProtocolName,
};
use parking_lot::RwLock;
use prometheus_endpoint::Registry;
use sc_client_api::BlockBackend;
use sc_network_common::{role::Roles, ExHashT};
use sc_network_types::PeerId;
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver};
use sp_runtime::traits::Block as BlockT;
use std::{
cmp,
collections::{hash_map::Entry, HashMap, HashSet},
fs,
future::Future,
io, iter,
pin::Pin,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::{Duration, Instant},
};
/// Peer discovery (Kademlia, mDNS, ping, identify) for the `litep2p` backend.
mod discovery;
/// `Peerstore` implementation used by the `litep2p` backend.
mod peerstore;
/// `Litep2pNetworkService` and the command channel it shares with the backend.
mod service;
/// Shims adapting notification, request-response and bitswap protocols to `litep2p`.
mod shim;
/// Litep2p bandwidth sink.
///
/// Wraps [`litep2p::BandwidthSink`] so it can be exposed through the
/// backend-agnostic `BandwidthSink` trait.
struct Litep2pBandwidthSink {
	/// Underlying `litep2p` bandwidth counter.
	sink: litep2p::BandwidthSink,
}
impl BandwidthSink for Litep2pBandwidthSink {
	/// Total number of bytes received, as reported by `litep2p`.
	fn total_inbound(&self) -> u64 {
		let inbound = self.sink.inbound();
		// `usize` always fits in `u64` on supported platforms.
		inbound as u64
	}

	/// Total number of bytes sent, as reported by `litep2p`.
	fn total_outbound(&self) -> u64 {
		let outbound = self.sink.outbound();
		outbound as u64
	}
}
/// Litep2p task executor.
///
/// Adapts the caller-provided task-spawning closure to `litep2p`'s `Executor` trait.
struct Litep2pExecutor {
	/// Executor closure used to spawn background futures.
	executor: Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send + Sync>,
}
impl Executor for Litep2pExecutor {
	/// Spawn `future` via the wrapped executor closure.
	fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
		let spawn = &self.executor;
		spawn(future)
	}

	/// Spawn `future`, discarding the task name — names are not tracked here.
	fn run_with_name(&self, _: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
		let spawn = &self.executor;
		spawn(future)
	}
}
/// Logging target for the file.
///
/// NOTE(review): this reuses the libp2p backend's `sub-libp2p` target rather than a
/// litep2p-specific one — presumably so existing log filters keep working; confirm.
const LOG_TARGET: &str = "sub-libp2p";
/// Peer context.
///
/// Per-peer bookkeeping of active connections maintained by the backend.
struct ConnectionContext {
	/// Peer endpoints, keyed by connection ID.
	endpoints: HashMap<ConnectionId, Endpoint>,

	/// Number of active connections.
	num_connections: usize,
}
/// Networking backend for `litep2p`.
pub struct Litep2pNetworkBackend {
	/// Main `litep2p` object.
	litep2p: Litep2p,

	/// `NetworkService` implementation for `Litep2pNetworkBackend`.
	network_service: Arc<dyn NetworkService>,

	/// RX channel for receiving commands from `Litep2pNetworkService`.
	cmd_rx: TracingUnboundedReceiver<NetworkServiceCommand>,

	/// `Peerset` handles to notification protocols, keyed by protocol name.
	peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,

	/// Pending `GET_VALUE` queries, with the key and the query start time.
	pending_get_values: HashMap<QueryId, (RecordKey, Instant)>,

	/// Pending `PUT_VALUE` queries, with the key and the query start time.
	pending_put_values: HashMap<QueryId, (RecordKey, Instant)>,

	/// Discovery.
	discovery: Discovery,

	/// Number of connected peers, shared with the metrics sources.
	num_connected: Arc<AtomicUsize>,

	/// Connected peers and their connection contexts.
	peers: HashMap<litep2p::PeerId, ConnectionContext>,

	/// Peerstore.
	peerstore_handle: Arc<dyn PeerStoreProvider>,

	/// Block announce protocol name.
	block_announce_protocol: ProtocolName,

	/// Sender for DHT events.
	event_streams: out_events::OutChannels,

	/// Prometheus metrics.
	metrics: Option<Metrics>,

	/// External addresses, shared with `Litep2pNetworkService`.
	external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
}
impl Litep2pNetworkBackend {
	/// From an iterator of multiaddress(es), parse and group all addresses of peers
	/// so that litep2p can consume the information easily.
	///
	/// Addresses without a valid `/p2p/<peer ID>` component are dropped. A bare
	/// `/p2p/<peer ID>` address registers the peer with an empty address list.
	fn parse_addresses(
		addresses: impl Iterator<Item = Multiaddr>,
	) -> HashMap<PeerId, Vec<Multiaddr>> {
		addresses
			.filter_map(|address| match address.iter().next() {
				// Dialable address: keep it only if it also carries a valid peer ID.
				Some(
					Protocol::Dns(_) |
					Protocol::Dns4(_) |
					Protocol::Dns6(_) |
					Protocol::Ip6(_) |
					Protocol::Ip4(_),
				) => match address.iter().find(|protocol| matches!(protocol, Protocol::P2p(_))) {
					Some(Protocol::P2p(multihash)) =>
						PeerId::from_multihash(multihash).ok().map(|peer| (peer, Some(address))),
					_ => None,
				},
				// Bare `/p2p/<peer ID>`: the peer is known but has no address.
				Some(Protocol::P2p(multihash)) =>
					PeerId::from_multihash(multihash).ok().map(|peer| (peer, None)),
				_ => None,
			})
			.fold(HashMap::new(), |mut acc, (peer, maybe_address)| {
				let entry = acc.entry(peer).or_default();
				if let Some(address) = maybe_address {
					entry.push(address);
				}

				acc
			})
	}

	/// Add new known addresses to `litep2p` and return the parsed peer IDs.
	///
	/// Peers whose addresses were all rejected by `litep2p` (e.g., unsupported
	/// transport) are logged and excluded from the returned set.
	fn add_addresses(&mut self, peers: impl Iterator<Item = Multiaddr>) -> HashSet<PeerId> {
		Self::parse_addresses(peers)
			.into_iter()
			.filter_map(|(peer, addresses)| {
				// `peers` contained multiaddress in the form `/p2p/<peer ID>`
				if addresses.is_empty() {
					return Some(peer)
				}

				if self.litep2p.add_known_address(peer.into(), addresses.into_iter()) == 0 {
					log::warn!(
						target: LOG_TARGET,
						"couldn't add any addresses for {peer:?} and it won't be added as reserved peer",
					);
					return None
				}

				self.peerstore_handle.add_known_peer(peer);
				Some(peer)
			})
			.collect()
	}
}
impl Litep2pNetworkBackend {
	/// Get `litep2p` keypair from `NodeKeyConfig`.
	///
	/// The node key is stored as a libp2p keypair; only ed25519 keys are supported
	/// and any other key type yields an `InvalidInput` I/O error.
	fn get_keypair(node_key: &NodeKeyConfig) -> Result<(Keypair, litep2p::PeerId), Error> {
		let secret = libp2p::identity::Keypair::try_into_ed25519(node_key.clone().into_keypair()?)
			.map_err(|error| {
				log::error!(target: LOG_TARGET, "failed to convert to ed25519: {error:?}");
				Error::Io(io::ErrorKind::InvalidInput.into())
			})?
			.secret();

		// `SecretKey::from_bytes()` takes a mutable buffer, so copy the secret key
		// bytes into a scratch `Vec` first.
		let mut secret = secret.as_ref().to_vec();
		let secret = SecretKey::from_bytes(&mut secret)
			.map_err(|_| Error::Io(io::ErrorKind::InvalidInput.into()))?;
		let local_identity = Keypair::from(secret);
		let local_public = local_identity.public();
		let local_peer_id = local_public.to_peer_id();

		Ok((local_identity, local_peer_id))
	}

	/// Configure transport protocols for `Litep2pNetworkBackend`.
	///
	/// Splits the configured listen addresses into TCP and WebSocket listeners and
	/// applies a shared yamux configuration sized for the largest protocol payload.
	fn configure_transport<B: BlockT + 'static, H: ExHashT>(
		config: &FullNetworkConfiguration<B, H, Self>,
	) -> ConfigBuilder {
		// Only the `Normal` transport is supported; the exhaustive match guarantees a
		// compile error if a new `TransportConfig` variant is ever added.
		match config.network_config.transport {
			TransportConfig::MemoryOnly => panic!("memory transport not supported"),
			TransportConfig::Normal { .. } => {},
		}
		let config_builder = ConfigBuilder::new();

		// The yamux buffer size limit is configured to be equal to the maximum frame size
		// of all protocols. 10 bytes are added to each limit for the length prefix that
		// is not included in the upper layer protocols limit but is still present in the
		// yamux buffer. These 10 bytes correspond to the maximum size required to encode
		// a variable-length-encoding 64bits number. In other words, we make the
		// assumption that no notification larger than 2^64 will ever be sent.
		let yamux_maximum_buffer_size = {
			let requests_max = config
				.request_response_protocols
				.iter()
				.map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX));
			let responses_max = config
				.request_response_protocols
				.iter()
				.map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX));
			let notifs_max = config
				.notification_protocols
				.iter()
				.map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX));

			// A "default" max is added to cover all the other protocols: ping, identify,
			// kademlia, block announces, and transactions.
			let default_max = cmp::max(
				1024 * 1024,
				usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE)
					.unwrap_or(usize::MAX),
			);

			iter::once(default_max)
				.chain(requests_max)
				.chain(responses_max)
				.chain(notifs_max)
				.max()
				.expect("iterator known to always yield at least one element; qed")
				.saturating_add(10)
		};

		let yamux_config = {
			let mut yamux_config = litep2p::yamux::Config::default();
			// Enable proper flow-control: window updates are only sent when
			// buffered data has been consumed.
			yamux_config.set_window_update_mode(litep2p::yamux::WindowUpdateMode::OnRead);
			yamux_config.set_max_buffer_size(yamux_maximum_buffer_size);

			if let Some(yamux_window_size) = config.network_config.yamux_window_size {
				yamux_config.set_receive_window(yamux_window_size);
			}

			yamux_config
		};

		// Partition listen addresses into plain-TCP and WebSocket listeners;
		// unsupported addresses are logged and dropped.
		let (tcp, websocket): (Vec<Option<_>>, Vec<Option<_>>) = config
			.network_config
			.listen_addresses
			.iter()
			.filter_map(|address| {
				let mut iter = address.iter();

				// First component must be an IP address.
				match iter.next() {
					Some(Protocol::Ip4(_) | Protocol::Ip6(_)) => {},
					protocol => {
						log::error!(
							target: LOG_TARGET,
							"unknown protocol {protocol:?}, ignoring {address:?}",
						);

						return None
					},
				}

				// Second component must be TCP; a `/ws`/`/wss` suffix selects WebSocket.
				match iter.next() {
					Some(Protocol::Tcp(_)) => match iter.next() {
						Some(Protocol::Ws(_) | Protocol::Wss(_)) =>
							Some((None, Some(address.clone()))),
						Some(Protocol::P2p(_)) | None => Some((Some(address.clone()), None)),
						protocol => {
							log::error!(
								target: LOG_TARGET,
								"unknown protocol {protocol:?}, ignoring {address:?}",
							);
							None
						},
					},
					protocol => {
						log::error!(
							target: LOG_TARGET,
							"unknown protocol {protocol:?}, ignoring {address:?}",
						);
						None
					},
				}
			})
			.unzip();

		config_builder
			.with_websocket(WebSocketTransportConfig {
				listen_addresses: websocket.into_iter().flatten().collect(),
				yamux_config: yamux_config.clone(),
				..Default::default()
			})
			.with_tcp(TcpTransportConfig {
				listen_addresses: tcp.into_iter().flatten().collect(),
				yamux_config,
				..Default::default()
			})
	}
}
#[async_trait::async_trait]
impl<B: BlockT + 'static, H: ExHashT> NetworkBackend<B, H> for Litep2pNetworkBackend {
type NotificationProtocolConfig = NotificationProtocolConfig;
type RequestResponseProtocolConfig = RequestResponseConfig;
type NetworkService<Block, Hash> = Arc<Litep2pNetworkService>;
type PeerStore = Peerstore;
type BitswapConfig = BitswapConfig;
fn new(mut params: Params<B, H, Self>) -> Result<Self, Error>
where
Self: Sized,
{
let (keypair, local_peer_id) =
Self::get_keypair(&params.network_config.network_config.node_key)?;
let (cmd_tx, cmd_rx) = tracing_unbounded("mpsc_network_worker", 100_000);
params.network_config.network_config.boot_nodes = params
.network_config
.network_config
.boot_nodes
.into_iter()
.filter(|boot_node| boot_node.peer_id != local_peer_id.into())
.collect();
params.network_config.network_config.default_peers_set.reserved_nodes = params
.network_config
.network_config
.default_peers_set
.reserved_nodes
.into_iter()
.filter(|reserved_node| {
if reserved_node.peer_id == local_peer_id.into() {
log::warn!(
target: LOG_TARGET,
"Local peer ID used in reserved node, ignoring: {reserved_node}",
);
false
} else {
true
}
})
.collect();
if let Some(path) = &params.network_config.network_config.net_config_path {
fs::create_dir_all(path)?;
}
log::info!(target: LOG_TARGET, "Local node identity is: {local_peer_id}");
log::info!(target: LOG_TARGET, "Running litep2p network backend");
params.network_config.sanity_check_addresses()?;
params.network_config.sanity_check_bootnodes()?;
let mut config_builder =
Self::configure_transport(&params.network_config).with_keypair(keypair.clone());
let known_addresses = params.network_config.known_addresses();
let peer_store_handle = params.network_config.peer_store_handle();
let executor = Arc::new(Litep2pExecutor { executor: params.executor });
let FullNetworkConfiguration {
notification_protocols,
request_response_protocols,
network_config,
..
} = params.network_config;
// initialize notification protocols
//
// pass the protocol configuration to `Litep2pConfigBuilder` and save the TX channel
// to the protocol's `Peerset` together with the protocol name to allow other subsystems
// of Polkadot SDK to control connectivity of the notification protocol
let block_announce_protocol = params.block_announce_config.protocol_name().clone();
let mut notif_protocols = HashMap::from_iter([(
params.block_announce_config.protocol_name().clone(),
params.block_announce_config.handle,
)]);
// handshake for all but the syncing protocol is set to node role
config_builder = notification_protocols
.into_iter()
.fold(config_builder, |config_builder, mut config| {
config.config.set_handshake(Roles::from(&params.role).encode());
notif_protocols.insert(config.protocol_name, config.handle);
config_builder.with_notification_protocol(config.config)
})
.with_notification_protocol(params.block_announce_config.config);
// initialize request-response protocols
let metrics = match &params.metrics_registry {
Some(registry) => Some(register_without_sources(registry)?),
None => None,
};
// create channels that are used to send request before initializing protocols so the
// senders can be passed onto all request-response protocols
//
// all protocols must have each others' senders so they can send the fallback request in
// case the main protocol is not supported by the remote peer and user specified a fallback
let (mut request_response_receivers, request_response_senders): (
HashMap<_, _>,
HashMap<_, _>,
) = request_response_protocols
.iter()
.map(|config| {
let (tx, rx) = tracing_unbounded("outbound-requests", 10_000);
((config.protocol_name.clone(), rx), (config.protocol_name.clone(), tx))
})
.unzip();
config_builder = request_response_protocols.into_iter().fold(
config_builder,
|config_builder, config| {
let (protocol_config, handle) = RequestResponseConfigBuilder::new(
Litep2pProtocolName::from(config.protocol_name.clone()),
)
.with_max_size(cmp::max(config.max_request_size, config.max_response_size) as usize)
.with_fallback_names(config.fallback_names.into_iter().map(From::from).collect())
.with_timeout(config.request_timeout)
.build();
let protocol = RequestResponseProtocol::new(
config.protocol_name.clone(),
handle,
Arc::clone(&peer_store_handle),
config.inbound_queue,
request_response_receivers
.remove(&config.protocol_name)
.expect("receiver exists as it was just added and there are no duplicate protocols; qed"),
request_response_senders.clone(),
metrics.clone(),
);
executor.run(Box::pin(async move {
protocol.run().await;
}));
config_builder.with_request_response_protocol(protocol_config)
},
);
// collect known addresses
let known_addresses: HashMap<litep2p::PeerId, Vec<Multiaddr>> =
known_addresses.into_iter().fold(HashMap::new(), |mut acc, (peer, address)| {
let address = match address.iter().last() {
Some(Protocol::Ws(_) | Protocol::Wss(_) | Protocol::Tcp(_)) =>
address.with(Protocol::P2p(peer.into())),
Some(Protocol::P2p(_)) => address,
_ => return acc,
};
acc.entry(peer.into()).or_default().push(address);
peer_store_handle.add_known_peer(peer);
acc
});
// enable ipfs ping, identify and kademlia, and potentially mdns if user enabled it
let listen_addresses = Arc::new(Default::default());
let (discovery, ping_config, identify_config, kademlia_config, maybe_mdns_config) =
Discovery::new(
&network_config,
params.genesis_hash,
params.fork_id.as_deref(),
&params.protocol_id,
known_addresses.clone(),
Arc::clone(&listen_addresses),
Arc::clone(&peer_store_handle),
);
config_builder = config_builder
.with_known_addresses(known_addresses.clone().into_iter())
.with_libp2p_ping(ping_config)
.with_libp2p_identify(identify_config)
.with_libp2p_kademlia(kademlia_config)
.with_executor(executor);
if let Some(config) = maybe_mdns_config {
config_builder = config_builder.with_mdns(config);
}
if let Some(config) = params.bitswap_config {
config_builder = config_builder.with_libp2p_bitswap(config);
}
let litep2p =
Litep2p::new(config_builder.build()).map_err(|error| Error::Litep2p(error))?;
let external_addresses: Arc<RwLock<HashSet<Multiaddr>>> = Arc::new(RwLock::new(
HashSet::from_iter(network_config.public_addresses.iter().cloned()),
));
litep2p.listen_addresses().for_each(|address| {
log::debug!(target: LOG_TARGET, "listening on: {address}");
listen_addresses.write().insert(address.clone());
});
let network_service = Arc::new(Litep2pNetworkService::new(
local_peer_id,
keypair.clone(),
cmd_tx,
Arc::clone(&peer_store_handle),
notif_protocols.clone(),
block_announce_protocol.clone(),
request_response_senders,
Arc::clone(&listen_addresses),
Arc::clone(&external_addresses),
));
// register rest of the metrics now that `Litep2p` has been created
let num_connected = Arc::new(Default::default());
let bandwidth: Arc<dyn BandwidthSink> =
Arc::new(Litep2pBandwidthSink { sink: litep2p.bandwidth_sink() });
if let Some(registry) = &params.metrics_registry {
MetricSources::register(registry, bandwidth, Arc::clone(&num_connected))?;
}
Ok(Self {
network_service,
cmd_rx,
metrics,
peerset_handles: notif_protocols,
num_connected,
discovery,
pending_put_values: HashMap::new(),
pending_get_values: HashMap::new(),
peerstore_handle: peer_store_handle,
block_announce_protocol,
event_streams: out_events::OutChannels::new(None)?,
peers: HashMap::new(),
litep2p,
external_addresses,
})
}
fn network_service(&self) -> Arc<dyn NetworkService> {
Arc::clone(&self.network_service)
}
fn peer_store(bootnodes: Vec<sc_network_types::PeerId>) -> Self::PeerStore {
Peerstore::new(bootnodes)
}
fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics {
NotificationMetrics::new(registry)
}
/// Create Bitswap server.
fn bitswap_server(
client: Arc<dyn BlockBackend<B> + Send + Sync>,
) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig) {
BitswapServer::new(client)
}
/// Create notification protocol configuration for `protocol`.
fn notification_config(
protocol_name: ProtocolName,
fallback_names: Vec<ProtocolName>,
max_notification_size: u64,
handshake: Option<NotificationHandshake>,
set_config: SetConfig,
metrics: NotificationMetrics,
peerstore_handle: Arc<dyn PeerStoreProvider>,
) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>) {
Self::NotificationProtocolConfig::new(
protocol_name,
fallback_names,
max_notification_size as usize,
handshake,
set_config,
metrics,
peerstore_handle,
)
}
/// Create request-response protocol configuration.
fn request_response_config(
protocol_name: ProtocolName,
fallback_names: Vec<ProtocolName>,
max_request_size: u64,
max_response_size: u64,
request_timeout: Duration,
inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
) -> Self::RequestResponseProtocolConfig {
Self::RequestResponseProtocolConfig::new(
protocol_name,
fallback_names,
max_request_size,
max_response_size,
request_timeout,
inbound_queue,
)
}
/// Start [`Litep2pNetworkBackend`] event loop.
///
/// Drives three event sources concurrently with `tokio::select!`:
/// commands from [`Litep2pNetworkService`], discovery events (Kademlia/mDNS/identify)
/// and raw `litep2p` connection events. Exits when the command or discovery stream
/// terminates.
async fn run(mut self) {
    log::debug!(target: LOG_TARGET, "starting litep2p network backend");

    loop {
        // Keep the externally visible connected-peer counter in sync with the
        // block announce protocol's peerset before handling the next event.
        let num_connected_peers = self
            .peerset_handles
            .get(&self.block_announce_protocol)
            .map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed));
        self.num_connected.store(num_connected_peers, Ordering::Relaxed);

        tokio::select! {
            // Commands sent by `Litep2pNetworkService`.
            command = self.cmd_rx.next() => match command {
                None => return,
                Some(command) => match command {
                    NetworkServiceCommand::GetValue { key } => {
                        // Track the query start time so the duration can be reported to metrics.
                        let query_id = self.discovery.get_value(key.clone()).await;
                        self.pending_get_values.insert(query_id, (key, Instant::now()));
                    }
                    NetworkServiceCommand::PutValue { key, value } => {
                        let query_id = self.discovery.put_value(key.clone(), value).await;
                        self.pending_put_values.insert(query_id, (key, Instant::now()));
                    }
                    NetworkServiceCommand::EventStream { tx } => {
                        self.event_streams.push(tx);
                    }
                    NetworkServiceCommand::Status { tx } => {
                        let _ = tx.send(NetworkStatus {
                            num_connected_peers: self
                                .peerset_handles
                                .get(&self.block_announce_protocol)
                                .map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed)),
                            total_bytes_inbound: self.litep2p.bandwidth_sink().inbound() as u64,
                            total_bytes_outbound: self.litep2p.bandwidth_sink().outbound() as u64,
                        });
                    }
                    NetworkServiceCommand::AddPeersToReservedSet {
                        protocol,
                        peers,
                    } => {
                        let peers = self.add_addresses(peers.into_iter());

                        match self.peerset_handles.get(&protocol) {
                            Some(handle) => {
                                let _ = handle.tx.unbounded_send(PeersetCommand::AddReservedPeers { peers });
                            }
                            // NOTE: fixed typo in log message ("doens't" -> "doesn't").
                            None => log::warn!(target: LOG_TARGET, "protocol {protocol} doesn't exist"),
                        };
                    }
                    NetworkServiceCommand::AddKnownAddress { peer, mut address } => {
                        // Ensure the address ends with `/p2p/<peer id>` so litep2p can
                        // associate it with the correct peer.
                        if !address.iter().any(|protocol| std::matches!(protocol, Protocol::P2p(_))) {
                            address.push(Protocol::P2p(peer.into()));
                        }

                        // `add_known_address()` returns the number of addresses that were accepted.
                        if self.litep2p.add_known_address(peer.into(), iter::once(address.clone())) == 0usize {
                            log::warn!(
                                target: LOG_TARGET,
                                "couldn't add known address ({address}) for {peer:?}, unsupported transport"
                            );
                        }
                    },
                    NetworkServiceCommand::SetReservedPeers { protocol, peers } => {
                        let peers = self.add_addresses(peers.into_iter());

                        match self.peerset_handles.get(&protocol) {
                            Some(handle) => {
                                let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedPeers { peers });
                            }
                            None => log::warn!(target: LOG_TARGET, "protocol {protocol} doesn't exist"),
                        }
                    },
                    NetworkServiceCommand::DisconnectPeer {
                        protocol,
                        peer,
                    } => {
                        let Some(handle) = self.peerset_handles.get(&protocol) else {
                            log::warn!(target: LOG_TARGET, "protocol {protocol} doesn't exist");
                            continue
                        };

                        let _ = handle.tx.unbounded_send(PeersetCommand::DisconnectPeer { peer });
                    }
                    NetworkServiceCommand::SetReservedOnly {
                        protocol,
                        reserved_only,
                    } => {
                        let Some(handle) = self.peerset_handles.get(&protocol) else {
                            log::warn!(target: LOG_TARGET, "protocol {protocol} doesn't exist");
                            continue
                        };

                        let _ = handle.tx.unbounded_send(PeersetCommand::SetReservedOnly { reserved_only });
                    }
                    NetworkServiceCommand::RemoveReservedPeers {
                        protocol,
                        peers,
                    } => {
                        let Some(handle) = self.peerset_handles.get(&protocol) else {
                            log::warn!(target: LOG_TARGET, "protocol {protocol} doesn't exist");
                            continue
                        };

                        let _ = handle.tx.unbounded_send(PeersetCommand::RemoveReservedPeers { peers });
                    }
                }
            },
            // Events from the discovery mechanism (Kademlia/mDNS/identify/ping).
            event = self.discovery.next() => match event {
                None => return,
                Some(DiscoveryEvent::Discovered { addresses }) => {
                    // if at least one address was added for the peer, report the peer to `Peerstore`
                    for (peer, addresses) in Litep2pNetworkBackend::parse_addresses(addresses.into_iter()) {
                        if self.litep2p.add_known_address(peer.into(), addresses.clone().into_iter()) > 0 {
                            self.peerstore_handle.add_known_peer(peer);
                        }
                    }
                }
                Some(DiscoveryEvent::RoutingTableUpdate { peers }) => {
                    for peer in peers {
                        self.peerstore_handle.add_known_peer(peer.into());
                    }
                }
                Some(DiscoveryEvent::GetRecordSuccess { query_id, record }) => {
                    match self.pending_get_values.remove(&query_id) {
                        None => log::warn!(
                            target: LOG_TARGET,
                            "`GET_VALUE` succeeded for a non-existent query",
                        ),
                        Some((_key, started)) => {
                            log::trace!(
                                target: LOG_TARGET,
                                "`GET_VALUE` for {:?} ({query_id:?}) succeeded",
                                record.key,
                            );

                            // The record key is converted back to the libp2p type expected
                            // by `DhtEvent` subscribers.
                            self.event_streams.send(Event::Dht(
                                DhtEvent::ValueFound(vec![
                                    (libp2p::kad::RecordKey::new(&record.key), record.value)
                                ])
                            ));

                            if let Some(ref metrics) = self.metrics {
                                metrics
                                    .kademlia_query_duration
                                    .with_label_values(&["value-get"])
                                    .observe(started.elapsed().as_secs_f64());
                            }
                        }
                    }
                }
                Some(DiscoveryEvent::PutRecordSuccess { query_id }) => {
                    match self.pending_put_values.remove(&query_id) {
                        None => log::warn!(
                            target: LOG_TARGET,
                            "`PUT_VALUE` succeeded for a non-existent query",
                        ),
                        Some((key, started)) => {
                            log::trace!(
                                target: LOG_TARGET,
                                "`PUT_VALUE` for {key:?} ({query_id:?}) succeeded",
                            );

                            if let Some(ref metrics) = self.metrics {
                                metrics
                                    .kademlia_query_duration
                                    .with_label_values(&["value-put"])
                                    .observe(started.elapsed().as_secs_f64());
                            }
                        }
                    }
                }
                Some(DiscoveryEvent::QueryFailed { query_id }) => {
                    // `QueryFailed` doesn't tell which kind of query failed, so look up the
                    // query ID in the `GET_VALUE` map first and fall back to `PUT_VALUE`.
                    match self.pending_get_values.remove(&query_id) {
                        None => match self.pending_put_values.remove(&query_id) {
                            None => log::warn!(
                                target: LOG_TARGET,
                                "non-existent query failed ({query_id:?})",
                            ),
                            Some((key, started)) => {
                                log::debug!(
                                    target: LOG_TARGET,
                                    "`PUT_VALUE` ({query_id:?}) failed for key {key:?}",
                                );

                                self.event_streams.send(Event::Dht(
                                    DhtEvent::ValuePutFailed(libp2p::kad::RecordKey::new(&key))
                                ));

                                if let Some(ref metrics) = self.metrics {
                                    metrics
                                        .kademlia_query_duration
                                        .with_label_values(&["value-put-failed"])
                                        .observe(started.elapsed().as_secs_f64());
                                }
                            }
                        }
                        Some((key, started)) => {
                            log::debug!(
                                target: LOG_TARGET,
                                "`GET_VALUE` ({query_id:?}) failed for key {key:?}",
                            );

                            self.event_streams.send(Event::Dht(
                                DhtEvent::ValueNotFound(libp2p::kad::RecordKey::new(&key))
                            ));

                            if let Some(ref metrics) = self.metrics {
                                metrics
                                    .kademlia_query_duration
                                    .with_label_values(&["value-get-failed"])
                                    .observe(started.elapsed().as_secs_f64());
                            }
                        }
                    }
                }
                Some(DiscoveryEvent::Identified { peer, listen_addresses, supported_protocols, .. }) => {
                    self.discovery.add_self_reported_address(peer, supported_protocols, listen_addresses).await;
                }
                Some(DiscoveryEvent::ExternalAddressDiscovered { address }) => {
                    let mut addresses = self.external_addresses.write();

                    // Only log when the address was not known before.
                    if addresses.insert(address.clone()) {
                        log::info!(target: LOG_TARGET, "discovered new external address for our node: {address}");
                    }
                }
                Some(DiscoveryEvent::Ping { peer, rtt }) => {
                    log::trace!(
                        target: LOG_TARGET,
                        "ping time with {peer:?}: {rtt:?}",
                    );
                }
            },
            // Raw connection events from litep2p, used only for metrics bookkeeping.
            event = self.litep2p.next_event() => match event {
                Some(Litep2pEvent::ConnectionEstablished { peer, endpoint }) => {
                    // `self.peers` is maintained solely for metrics, so skip the
                    // bookkeeping entirely when metrics are disabled.
                    let Some(metrics) = &self.metrics else {
                        continue;
                    };

                    let direction = match endpoint {
                        Endpoint::Dialer { .. } => "out",
                        Endpoint::Listener { .. } => "in",
                    };
                    metrics.connections_opened_total.with_label_values(&[direction]).inc();

                    match self.peers.entry(peer) {
                        Entry::Vacant(entry) => {
                            entry.insert(ConnectionContext {
                                endpoints: HashMap::from_iter([(endpoint.connection_id(), endpoint)]),
                                num_connections: 1usize,
                            });
                            metrics.distinct_peers_connections_opened_total.inc();
                        }
                        Entry::Occupied(entry) => {
                            let entry = entry.into_mut();
                            entry.num_connections += 1;
                            entry.endpoints.insert(endpoint.connection_id(), endpoint);
                        }
                    }
                }
                Some(Litep2pEvent::ConnectionClosed { peer, connection_id }) => {
                    let Some(metrics) = &self.metrics else {
                        continue;
                    };

                    let Some(context) = self.peers.get_mut(&peer) else {
                        log::debug!(target: LOG_TARGET, "unknown peer disconnected: {peer:?} ({connection_id:?})");
                        continue
                    };

                    let direction = match context.endpoints.remove(&connection_id) {
                        None => {
                            // NOTE: removed stray trailing space from the log message.
                            log::debug!(target: LOG_TARGET, "connection {connection_id:?} doesn't exist for {peer:?}");
                            continue
                        }
                        Some(endpoint) => {
                            context.num_connections -= 1;

                            match endpoint {
                                Endpoint::Dialer { .. } => "out",
                                Endpoint::Listener { .. } => "in",
                            }
                        }
                    };

                    metrics.connections_closed_total.with_label_values(&[direction, "actively-closed"]).inc();

                    // Drop the peer entry once its last connection is gone.
                    if context.num_connections == 0 {
                        self.peers.remove(&peer);
                        metrics.distinct_peers_connections_closed_total.inc();
                    }
                }
                Some(Litep2pEvent::DialFailure { address, error }) => {
                    log::trace!(
                        target: LOG_TARGET,
                        "failed to dial peer at {address:?}: {error:?}",
                    );

                    // Map litep2p errors onto the coarse reason labels used by metrics.
                    let reason = match error {
                        Litep2pError::PeerIdMismatch(_, _) => "invalid-peer-id",
                        Litep2pError::Timeout | Litep2pError::TransportError(_) |
                        Litep2pError::IoError(_) | Litep2pError::WebSocket(_) => "transport-error",
                        _ => "other",
                    };

                    if let Some(metrics) = &self.metrics {
                        metrics.pending_connections_errors_total.with_label_values(&[reason]).inc();
                    }
                }
                _ => {}
            },
        }
    }
}
}
@@ -0,0 +1,391 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! `Peerstore` implementation for `litep2p`.
//!
//! `Peerstore` is responsible for storing information about remote peers
//! such as their addresses, reputations, supported protocols etc.
use crate::{
peer_store::{PeerStoreProvider, ProtocolHandle},
service::traits::PeerStore,
ObservedRole, ReputationChange,
};
use parking_lot::Mutex;
use wasm_timer::Delay;
use sc_network_types::PeerId;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::{Duration, Instant},
};
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p::peerstore";
/// We don't accept nodes whose reputation is under this value.
pub const BANNED_THRESHOLD: i32 = 82 * (i32::MIN / 100);
/// Relative decrement of a reputation value that is applied every second. I.e., for inverse
/// decrement of 50 we decrease absolute value of the reputation by 1/50. This corresponds to a
/// factor of `k = 0.98`. It takes ~ `ln(0.5) / ln(k)` seconds to reduce the reputation by half,
/// or 34.3 seconds for the values above. In this setup the maximum allowed absolute value of
/// `i32::MAX` becomes 0 in ~1100 seconds (actually less due to integer arithmetic).
const INVERSE_DECREMENT: i32 = 50;
/// Amount of time between the moment we last updated the [`PeerStore`] entry and the moment we
/// remove it, once the reputation value reaches 0.
const FORGET_AFTER: Duration = Duration::from_secs(3600);
/// Peer information.
/// Peer information tracked by the peerstore.
///
/// `Copy` is cheap here: three words of plain data.
#[derive(Debug, Clone, Copy)]
struct PeerInfo {
    /// Reputation of the peer. Negative values below [`BANNED_THRESHOLD`] mean the
    /// peer is banned; the value decays towards zero over time (see `decay_reputation`).
    reputation: i32,

    /// Instant when the peer was last updated. Entries with zero reputation are
    /// garbage-collected [`FORGET_AFTER`] this instant.
    last_updated: Instant,

    /// Role of the peer, if known (learned from the protocol handshake).
    role: Option<ObservedRole>,
}
impl Default for PeerInfo {
fn default() -> Self {
Self { reputation: 0i32, last_updated: Instant::now(), role: None }
}
}
impl PeerInfo {
    /// Whether the reputation has dropped below the ban threshold.
    fn is_banned(&self) -> bool {
        self.reputation < BANNED_THRESHOLD
    }

    /// Apply `seconds_passed` decay steps, driving the reputation towards zero.
    ///
    /// Note that decaying the reputation value happens "on its own",
    /// so we don't do `bump_last_updated()`.
    fn decay_reputation(&mut self, seconds_passed: u64) {
        for _ in 0..seconds_passed {
            // Once the reputation hits zero there is nothing left to decay.
            if self.reputation == 0 {
                break
            }

            // Shrink by 1/INVERSE_DECREMENT per step, but always by at least 1
            // so small values still converge to zero.
            let step = match self.reputation / INVERSE_DECREMENT {
                0 if self.reputation < 0 => -1,
                0 => 1,
                quotient => quotient,
            };

            self.reputation = self.reputation.saturating_sub(step);
        }
    }
}
/// Shared mutable state behind a [`PeerstoreHandle`].
#[derive(Debug, Default)]
pub struct PeerstoreHandleInner {
    // Known peers and their reputation/role bookkeeping.
    peers: HashMap<PeerId, PeerInfo>,
    // Registered protocol handles, notified (`disconnect_peer`) when a peer gets banned.
    protocols: Vec<Arc<dyn ProtocolHandle>>,
}
/// Cloneable handle to the shared peerstore state; all clones observe the same
/// peers/protocols through the inner `Arc<Mutex<_>>`.
#[derive(Debug, Clone, Default)]
pub struct PeerstoreHandle(Arc<Mutex<PeerstoreHandleInner>>);
impl PeerstoreHandle {
    /// Add known peer to [`Peerstore`].
    ///
    /// If the peer is already known, only its `last_updated` timestamp is refreshed.
    /// Previously the entry was overwritten wholesale, which reset an existing
    /// reputation (silently un-banning banned peers re-discovered via the DHT) and
    /// forgot the peer's role. This now matches the behavior of
    /// `PeerStoreProvider::add_known_peer`.
    pub fn add_known_peer(&self, peer: PeerId) {
        self.0.lock().peers.entry(peer).or_default().last_updated = Instant::now();
    }

    /// Number of peers currently known to the peerstore.
    pub fn peer_count(&self) -> usize {
        self.0.lock().peers.len()
    }

    /// Decay all reputations by `seconds_passed` steps and garbage-collect entries
    /// whose reputation reached zero and that haven't been updated for [`FORGET_AFTER`].
    fn progress_time(&self, seconds_passed: u64) {
        if seconds_passed == 0 {
            return
        }

        let mut lock = self.0.lock();

        // Drive reputation values towards 0.
        lock.peers
            .iter_mut()
            .for_each(|(_, info)| info.decay_reputation(seconds_passed));

        // Retain only entries with non-zero reputation values or not expired ones.
        let now = Instant::now();
        lock.peers
            .retain(|_, info| info.reputation != 0 || info.last_updated + FORGET_AFTER > now);
    }
}
impl PeerStoreProvider for PeerstoreHandle {
    /// Check whether `peer` is banned, i.e., its reputation is below [`BANNED_THRESHOLD`].
    /// Unknown peers are not banned.
    fn is_banned(&self, peer: &PeerId) -> bool {
        self.0.lock().peers.get(peer).map_or(false, |info| info.is_banned())
    }

    /// Register a protocol handle to disconnect peers whose reputation drops below the threshold.
    fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>) {
        self.0.lock().protocols.push(protocol_handle);
    }

    /// Report peer disconnection for reputation adjustment.
    fn report_disconnect(&self, _peer: PeerId) {
        unimplemented!();
    }

    /// Adjust peer reputation.
    ///
    /// If the adjusted reputation falls below the ban threshold, every registered
    /// protocol is asked to disconnect the peer.
    fn report_peer(&self, peer: PeerId, reputation_change: ReputationChange) {
        let mut lock = self.0.lock();
        log::trace!(target: LOG_TARGET, "report peer {reputation_change:?}");

        // Single `entry` lookup instead of the previous `get_mut`/`insert`/
        // `get().expect()` triple. For a new peer, `or_default()` yields zero
        // reputation so the saturating add produces `reputation_change.value`,
        // matching the old insert path.
        let info = lock.peers.entry(peer).or_default();
        info.reputation = info.reputation.saturating_add(reputation_change.value);
        let is_banned = info.is_banned();

        if is_banned {
            log::warn!(target: LOG_TARGET, "{peer:?} banned, disconnecting, reason: {}", reputation_change.reason);

            for sender in &lock.protocols {
                sender.disconnect_peer(peer);
            }
        }
    }

    /// Set peer role.
    fn set_peer_role(&self, peer: &PeerId, role: ObservedRole) {
        self.0.lock().peers.entry(*peer).or_default().role = Some(role);
    }

    /// Get peer reputation. Unknown peers have a neutral (zero) reputation.
    fn peer_reputation(&self, peer: &PeerId) -> i32 {
        self.0.lock().peers.get(peer).map_or(0i32, |info| info.reputation)
    }

    /// Get peer role, if available.
    fn peer_role(&self, peer: &PeerId) -> Option<ObservedRole> {
        self.0.lock().peers.get(peer).and_then(|info| info.role)
    }

    /// Get candidates with highest reputations for initiating outgoing connections.
    ///
    /// Banned peers and peers in `ignored` are excluded.
    fn outgoing_candidates(&self, count: usize, ignored: HashSet<PeerId>) -> Vec<PeerId> {
        let handle = self.0.lock();
        let mut candidates = handle
            .peers
            .iter()
            .filter_map(|(peer, info)| {
                (!ignored.contains(peer) && !info.is_banned()).then_some((*peer, info.reputation))
            })
            .collect::<Vec<(PeerId, _)>>();

        // Sort in descending order of reputation.
        candidates.sort_by(|(_, a), (_, b)| b.cmp(a));

        candidates
            .into_iter()
            .take(count)
            .map(|(peer, _score)| peer)
            .collect::<Vec<_>>()
    }

    /// Get the number of known peers.
    ///
    /// This number might not include some connected peers in rare cases when their reputation
    /// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
    fn num_known_peers(&self) -> usize {
        self.0.lock().peers.len()
    }

    /// Add known peer, refreshing its `last_updated` timestamp but leaving an
    /// existing reputation/role untouched.
    fn add_known_peer(&self, peer: PeerId) {
        self.0.lock().peers.entry(peer).or_default().last_updated = Instant::now();
    }
}
/// `Peerstore` handle for testing.
///
/// This instance of `Peerstore` is not shared between protocols.
#[cfg(test)]
pub fn peerstore_handle_test() -> PeerstoreHandle {
    // `PeerstoreHandle` derives `Default`, which builds the same empty
    // `Arc<Mutex<PeerstoreHandleInner>>` as spelling it out by hand.
    PeerstoreHandle::default()
}
/// Peerstore implementation.
///
/// Owns the event loop (`run`) that periodically decays reputations; all actual
/// state lives in the shared [`PeerstoreHandle`].
pub struct Peerstore {
    /// Handle to `Peerstore`.
    peerstore_handle: PeerstoreHandle,
}
impl Peerstore {
/// Create new [`Peerstore`].
pub fn new(bootnodes: Vec<PeerId>) -> Self {
let peerstore_handle = PeerstoreHandle(Arc::new(Mutex::new(Default::default())));
for bootnode in bootnodes {
peerstore_handle.add_known_peer(bootnode);
}
Self { peerstore_handle }
}
/// Create new [`Peerstore`] from a [`PeerstoreHandle`].
pub fn from_handle(peerstore_handle: PeerstoreHandle, bootnodes: Vec<PeerId>) -> Self {
for bootnode in bootnodes {
peerstore_handle.add_known_peer(bootnode);
}
Self { peerstore_handle }
}
/// Get mutable reference to the underlying [`PeerstoreHandle`].
pub fn handle(&mut self) -> &mut PeerstoreHandle {
&mut self.peerstore_handle
}
/// Add known peer to [`Peerstore`].
pub fn add_known_peer(&mut self, peer: PeerId) {
self.peerstore_handle.add_known_peer(peer);
}
/// Start [`Peerstore`] event loop.
async fn run(self) {
let started = Instant::now();
let mut latest_time_update = started;
loop {
let now = Instant::now();
// We basically do `(now - self.latest_update).as_secs()`, except that by the way we do
// it we know that we're not going to miss seconds because of rounding to integers.
let seconds_passed = {
let elapsed_latest = latest_time_update - started;
let elapsed_now = now - started;
latest_time_update = now;
elapsed_now.as_secs() - elapsed_latest.as_secs()
};
self.peerstore_handle.progress_time(seconds_passed);
let _ = Delay::new(Duration::from_secs(1)).await;
}
}
}
#[async_trait::async_trait]
impl PeerStore for Peerstore {
    /// Get handle to `PeerStore`.
    ///
    /// The returned provider is a clone of the internal handle, sharing the same
    /// underlying state.
    fn handle(&self) -> Arc<dyn PeerStoreProvider> {
        Arc::new(self.peerstore_handle.clone())
    }

    /// Start running `PeerStore` event loop.
    async fn run(self) {
        self.run().await;
    }
}
#[cfg(test)]
mod tests {
    use super::PeerInfo;

    /// Build a `PeerInfo` whose reputation is preset to `reputation`.
    fn info_with_reputation(reputation: i32) -> PeerInfo {
        let mut info = PeerInfo::default();
        info.reputation = reputation;
        info
    }

    #[test]
    fn decaying_zero_reputation_yields_zero() {
        let mut info = PeerInfo::default();
        assert_eq!(info.reputation, 0);

        info.decay_reputation(1);
        assert_eq!(info.reputation, 0);

        info.decay_reputation(100_000);
        assert_eq!(info.reputation, 0);
    }

    #[test]
    fn decaying_positive_reputation_decreases_it() {
        const INITIAL_REPUTATION: i32 = 100;

        let mut info = info_with_reputation(INITIAL_REPUTATION);
        info.decay_reputation(1);

        // One decay step moves the value towards zero without overshooting.
        assert!(info.reputation >= 0);
        assert!(info.reputation < INITIAL_REPUTATION);
    }

    #[test]
    fn decaying_negative_reputation_increases_it() {
        const INITIAL_REPUTATION: i32 = -100;

        let mut info = info_with_reputation(INITIAL_REPUTATION);
        info.decay_reputation(1);

        assert!(info.reputation <= 0);
        assert!(info.reputation > INITIAL_REPUTATION);
    }

    #[test]
    fn decaying_max_reputation_finally_yields_zero() {
        const INITIAL_REPUTATION: i32 = i32::MAX;
        const SECONDS: u64 = 1000;

        let mut info = info_with_reputation(INITIAL_REPUTATION);

        // Halfway through, the value must still be positive...
        info.decay_reputation(SECONDS / 2);
        assert!(info.reputation > 0);

        // ...and fully decayed afterwards.
        info.decay_reputation(SECONDS / 2);
        assert_eq!(info.reputation, 0);
    }

    #[test]
    fn decaying_min_reputation_finally_yields_zero() {
        const INITIAL_REPUTATION: i32 = i32::MIN;
        const SECONDS: u64 = 1000;

        let mut info = info_with_reputation(INITIAL_REPUTATION);

        info.decay_reputation(SECONDS / 2);
        assert!(info.reputation < 0);

        info.decay_reputation(SECONDS / 2);
        assert_eq!(info.reputation, 0);
    }
}
@@ -0,0 +1,469 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! `NetworkService` implementation for `litep2p`.
use crate::{
config::MultiaddrWithPeerId,
litep2p::shim::{
notification::{config::ProtocolControlHandle, peerset::PeersetCommand},
request_response::OutboundRequest,
},
multiaddr::Protocol,
network_state::NetworkState,
peer_store::PeerStoreProvider,
service::out_events,
Event, IfDisconnected, NetworkDHTProvider, NetworkEventStream, NetworkPeers, NetworkRequest,
NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider, ProtocolName,
RequestFailure, Signature,
};
use codec::DecodeAll;
use futures::{channel::oneshot, stream::BoxStream};
use libp2p::{identity::SigningError, kad::record::Key as KademliaKey, Multiaddr};
use litep2p::crypto::ed25519::Keypair;
use parking_lot::RwLock;
use sc_network_common::{
role::{ObservedRole, Roles},
types::ReputationChange,
};
use sc_network_types::PeerId;
use sc_utils::mpsc::TracingUnboundedSender;
use std::{
collections::{HashMap, HashSet},
sync::{atomic::Ordering, Arc},
};
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p";
/// Commands sent by [`Litep2pNetworkService`] to
/// [`Litep2pNetworkBackend`](super::Litep2pNetworkBackend).
///
/// Commands travel over an unbounded channel (the service's `cmd_tx`), so sending
/// never blocks the caller; replies, where needed, come back via per-command
/// `oneshot`/event senders.
#[derive(Debug)]
pub enum NetworkServiceCommand {
    /// Get value from DHT.
    GetValue {
        /// Record key.
        key: KademliaKey,
    },

    /// Put value to DHT.
    PutValue {
        /// Record key.
        key: KademliaKey,

        /// Record value.
        value: Vec<u8>,
    },

    /// Query network status.
    Status {
        /// `oneshot::Sender` for sending the status.
        tx: oneshot::Sender<NetworkStatus>,
    },

    /// Add `peers` to `protocol`'s reserved set.
    AddPeersToReservedSet {
        /// Protocol.
        protocol: ProtocolName,

        /// Reserved peers (as multiaddresses; peer IDs are extracted by the backend).
        peers: HashSet<Multiaddr>,
    },

    /// Add known address for peer.
    AddKnownAddress {
        /// Peer ID.
        peer: PeerId,

        /// Address.
        address: Multiaddr,
    },

    /// Set reserved peers for `protocol`, replacing the previous set.
    SetReservedPeers {
        /// Protocol.
        protocol: ProtocolName,

        /// Reserved peers.
        peers: HashSet<Multiaddr>,
    },

    /// Disconnect peer from protocol.
    DisconnectPeer {
        /// Protocol.
        protocol: ProtocolName,

        /// Peer ID.
        peer: PeerId,
    },

    /// Set protocol to reserved only (true/false) mode.
    SetReservedOnly {
        /// Protocol.
        protocol: ProtocolName,

        /// Reserved only?
        reserved_only: bool,
    },

    /// Remove reserved peers from protocol.
    RemoveReservedPeers {
        /// Protocol.
        protocol: ProtocolName,

        /// Peers to remove from the reserved set.
        peers: HashSet<PeerId>,
    },

    /// Create event stream for DHT events.
    EventStream {
        /// Sender for the events.
        tx: out_events::Sender,
    },
}
/// `NetworkService` implementation for `litep2p`.
///
/// Cheap to clone: all fields are either `Copy`, channel senders, or behind
/// `Arc`/`RwLock`. Most operations are forwarded to the backend via `cmd_tx`.
#[derive(Debug, Clone)]
pub struct Litep2pNetworkService {
    /// Local peer ID.
    local_peer_id: litep2p::PeerId,

    /// The `KeyPair` that defines the `PeerId` of the local node.
    keypair: Keypair,

    /// TX channel for sending commands to [`Litep2pNetworkBackend`](super::Litep2pNetworkBackend).
    cmd_tx: TracingUnboundedSender<NetworkServiceCommand>,

    /// Handle to `PeerStore`. Reputation queries/reports go here directly,
    /// bypassing the backend event loop.
    peer_store_handle: Arc<dyn PeerStoreProvider>,

    /// Peerset handles, keyed by notification protocol name.
    peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,

    /// Name for the block announce protocol. Used as the implicit target of the
    /// authorized/reserved-peer operations of `NetworkPeers`.
    block_announce_protocol: ProtocolName,

    /// Installed request-response protocols; outbound requests are sent straight
    /// to the protocol handler, not through the backend.
    request_response_protocols: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,

    /// Listen addresses, kept up to date by the backend.
    listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,

    /// External addresses, kept up to date by the backend's discovery events.
    external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
}
impl Litep2pNetworkService {
    /// Create new [`Litep2pNetworkService`].
    ///
    /// Plain constructor: stores all handles/channels as-is. The caller
    /// (the backend setup code) is responsible for wiring the channel
    /// receivers and shared address sets to the backend event loop.
    pub fn new(
        local_peer_id: litep2p::PeerId,
        keypair: Keypair,
        cmd_tx: TracingUnboundedSender<NetworkServiceCommand>,
        peer_store_handle: Arc<dyn PeerStoreProvider>,
        peerset_handles: HashMap<ProtocolName, ProtocolControlHandle>,
        block_announce_protocol: ProtocolName,
        request_response_protocols: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
        listen_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
        external_addresses: Arc<RwLock<HashSet<Multiaddr>>>,
    ) -> Self {
        Self {
            local_peer_id,
            keypair,
            cmd_tx,
            peer_store_handle,
            peerset_handles,
            block_announce_protocol,
            request_response_protocols,
            listen_addresses,
            external_addresses,
        }
    }
}
impl NetworkSigner for Litep2pNetworkService {
    /// Sign `msg` with the node's ed25519 identity key.
    fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
        let bytes = self.keypair.sign(msg.as_ref());
        let public_key = crate::service::signature::PublicKey::Litep2p(
            litep2p::crypto::PublicKey::Ed25519(self.keypair.public()),
        );

        Ok(Signature { public_key, bytes })
    }

    /// Verify that `signature` over `message` was produced by `public_key` and that
    /// the key actually belongs to `peer`.
    fn verify(
        &self,
        peer: PeerId,
        public_key: &Vec<u8>,
        signature: &Vec<u8>,
        message: &Vec<u8>,
    ) -> Result<bool, String> {
        let parsed_key = litep2p::crypto::PublicKey::from_protobuf_encoding(public_key)
            .map_err(|error| error.to_string())?;
        let expected_peer: litep2p::PeerId = peer.into();

        Ok(expected_peer == parsed_key.to_peer_id() && parsed_key.verify(message, signature))
    }
}
impl NetworkDHTProvider for Litep2pNetworkService {
    /// Ask the backend to start a `GET_VALUE` query for `key`.
    fn get_value(&self, key: &KademliaKey) {
        let command = NetworkServiceCommand::GetValue { key: key.clone() };
        let _ = self.cmd_tx.unbounded_send(command);
    }

    /// Ask the backend to start a `PUT_VALUE` query for `key`/`value`.
    fn put_value(&self, key: KademliaKey, value: Vec<u8>) {
        let command = NetworkServiceCommand::PutValue { key, value };
        let _ = self.cmd_tx.unbounded_send(command);
    }
}
#[async_trait::async_trait]
impl NetworkStatusProvider for Litep2pNetworkService {
    /// Query current network status from the backend over a `oneshot` channel.
    /// Fails only if the backend has shut down.
    async fn status(&self) -> Result<NetworkStatus, ()> {
        let (status_tx, status_rx) = oneshot::channel();

        self.cmd_tx
            .unbounded_send(NetworkServiceCommand::Status { tx: status_tx })
            .map_err(|_| ())?;

        status_rx.await.map_err(|_| ())
    }

    /// Build a (partial) `NetworkState` snapshot from the locally cached addresses.
    async fn network_state(&self) -> Result<NetworkState, ()> {
        let listened_addresses = self.listen_addresses.read().iter().cloned().collect();
        let external_addresses = self.external_addresses.read().iter().cloned().collect();

        Ok(NetworkState {
            peer_id: self.local_peer_id.to_base58(),
            listened_addresses,
            external_addresses,
            connected_peers: HashMap::new(),
            not_connected_peers: HashMap::new(),
            // TODO: Check what info we can include here.
            // Issue reference: https://github.com/paritytech/substrate/issues/14160.
            peerset: serde_json::json!(
                "Unimplemented. See https://github.com/paritytech/substrate/issues/14160."
            ),
        })
    }
}
// Manual implementation to avoid extra boxing here
// TODO: functions modifying peerset state could be modified to call peerset directly if the
// `Multiaddr` only contains a `PeerId`
#[async_trait::async_trait]
impl NetworkPeers for Litep2pNetworkService {
    // NOTE: the `let _ =` on each `unbounded_send()` deliberately ignores send
    // failures: the channel is unbounded, so an error only occurs when the
    // backend has shut down, in which case there is nothing left to notify.

    // Authorized peers are implemented as the reserved set of the block
    // announce protocol; each peer ID is wrapped into a bare `/p2p/<id>`
    // multiaddress because the command carries `Multiaddr`s.
    fn set_authorized_peers(&self, peers: HashSet<PeerId>) {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedPeers {
            protocol: self.block_announce_protocol.clone(),
            peers: peers
                .into_iter()
                .map(|peer| Multiaddr::empty().with(Protocol::P2p(peer.into())))
                .collect(),
        });
    }

    // Toggle reserved-only mode for the block announce protocol.
    fn set_authorized_only(&self, reserved_only: bool) {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
            protocol: self.block_announce_protocol.clone(),
            reserved_only,
        });
    }

    fn add_known_address(&self, peer: PeerId, address: Multiaddr) {
        let _ = self
            .cmd_tx
            .unbounded_send(NetworkServiceCommand::AddKnownAddress { peer, address });
    }

    // Reputation queries/reports go directly to the peerstore, not through the backend.
    fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
        self.peer_store_handle.peer_reputation(peer_id)
    }

    fn report_peer(&self, peer: PeerId, cost_benefit: ReputationChange) {
        self.peer_store_handle.report_peer(peer, cost_benefit);
    }

    fn disconnect_peer(&self, peer: PeerId, protocol: ProtocolName) {
        let _ = self
            .cmd_tx
            .unbounded_send(NetworkServiceCommand::DisconnectPeer { protocol, peer });
    }

    // `accept_unreserved_peers`/`deny_unreserved_peers` are reserved-only mode
    // off/on for the block announce protocol.
    fn accept_unreserved_peers(&self) {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
            protocol: self.block_announce_protocol.clone(),
            reserved_only: false,
        });
    }

    fn deny_unreserved_peers(&self) {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::SetReservedOnly {
            protocol: self.block_announce_protocol.clone(),
            reserved_only: true,
        });
    }

    // `peer.concat()` produces the full `Multiaddr` including the `/p2p/<id>` suffix.
    fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::AddPeersToReservedSet {
            protocol: self.block_announce_protocol.clone(),
            peers: HashSet::from_iter([peer.concat()]),
        });

        Ok(())
    }

    fn remove_reserved_peer(&self, peer: PeerId) {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::RemoveReservedPeers {
            protocol: self.block_announce_protocol.clone(),
            peers: HashSet::from_iter([peer]),
        });
    }

    fn set_reserved_peers(
        &self,
        protocol: ProtocolName,
        peers: HashSet<Multiaddr>,
    ) -> Result<(), String> {
        let _ = self
            .cmd_tx
            .unbounded_send(NetworkServiceCommand::SetReservedPeers { protocol, peers });

        Ok(())
    }

    fn add_peers_to_reserved_set(
        &self,
        protocol: ProtocolName,
        peers: HashSet<Multiaddr>,
    ) -> Result<(), String> {
        let _ = self
            .cmd_tx
            .unbounded_send(NetworkServiceCommand::AddPeersToReservedSet { protocol, peers });

        Ok(())
    }

    fn remove_peers_from_reserved_set(
        &self,
        protocol: ProtocolName,
        peers: Vec<PeerId>,
    ) -> Result<(), String> {
        let _ = self.cmd_tx.unbounded_send(NetworkServiceCommand::RemoveReservedPeers {
            protocol,
            peers: peers.into_iter().map(From::from).collect(),
        });

        Ok(())
    }

    // Connected-peer count of the block announce protocol, read from the shared
    // atomic maintained by its peerset.
    fn sync_num_connected(&self) -> usize {
        self.peerset_handles
            .get(&self.block_announce_protocol)
            .map_or(0usize, |handle| handle.connected_peers.load(Ordering::Relaxed))
    }

    // Try to decode the role from the handshake; fall back to whatever the
    // peerstore remembers about the peer.
    fn peer_role(&self, peer: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
        match Roles::decode_all(&mut &handshake[..]) {
            Ok(role) => Some(role.into()),
            Err(_) => {
                log::debug!(target: LOG_TARGET, "handshake doesn't contain peer role: {handshake:?}");
                self.peer_store_handle.peer_role(&(peer.into()))
            },
        }
    }

    /// Get the list of reserved peers.
    ///
    /// Returns an error if the `NetworkWorker` is no longer running.
    async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
        let Some(handle) = self.peerset_handles.get(&self.block_announce_protocol) else {
            return Err(())
        };

        let (tx, rx) = oneshot::channel();

        handle
            .tx
            .unbounded_send(PeersetCommand::GetReservedPeers { tx })
            .map_err(|_| ())?;

        // the channel can only be closed if `Peerset` no longer exists
        rx.await.map_err(|_| ())
    }
}
impl NetworkEventStream for Litep2pNetworkService {
    /// Subscribe to network events.
    ///
    /// Registers a new bounded (100 000 entries) event sender with the backend
    /// and returns the matching receiver as a boxed stream.
    fn event_stream(&self, stream_name: &'static str) -> BoxStream<'static, Event> {
        let (event_tx, event_rx) = out_events::channel(stream_name, 100_000);
        let _ = self
            .cmd_tx
            .unbounded_send(NetworkServiceCommand::EventStream { tx: event_tx });

        Box::pin(event_rx)
    }
}
impl NetworkStateInfo for Litep2pNetworkService {
fn external_addresses(&self) -> Vec<Multiaddr> {
self.external_addresses.read().iter().cloned().collect()
}
fn listen_addresses(&self) -> Vec<Multiaddr> {
self.listen_addresses.read().iter().cloned().collect()
}
fn local_peer_id(&self) -> PeerId {
self.local_peer_id.into()
}
}
// Manual implementation to avoid extra boxing here
#[async_trait::async_trait]
impl NetworkRequest for Litep2pNetworkService {
    /// Blocking request API — not supported by this backend; use `start_request()`.
    async fn request(
        &self,
        _target: PeerId,
        _protocol: ProtocolName,
        _request: Vec<u8>,
        _fallback_request: Option<(Vec<u8>, ProtocolName)>,
        _connect: IfDisconnected,
    ) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
        unimplemented!();
    }

    /// Dispatch `request` to the handler of `protocol`; the result is delivered
    /// through `sender` once the exchange completes.
    fn start_request(
        &self,
        peer: PeerId,
        protocol: ProtocolName,
        request: Vec<u8>,
        fallback_request: Option<(Vec<u8>, ProtocolName)>,
        sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
        connect: IfDisconnected,
    ) {
        let Some(protocol_tx) = self.request_response_protocols.get(&protocol) else {
            log::warn!(
                target: LOG_TARGET,
                "{protocol} doesn't exist, cannot send request to {peer:?}"
            );
            return
        };

        let outbound_request =
            OutboundRequest::new(peer, request, sender, fallback_request, connect);
        let _ = protocol_tx.unbounded_send(outbound_request);
    }
}
@@ -0,0 +1,104 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Shim for litep2p's Bitswap implementation to make it work with `sc-network`.
use futures::StreamExt;
use litep2p::protocol::libp2p::bitswap::{
BitswapEvent, BitswapHandle, BlockPresenceType, Config, ResponseType, WantType,
};
use sc_client_api::BlockBackend;
use sp_runtime::traits::Block as BlockT;
use std::{future::Future, pin::Pin, sync::Arc};
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p::bitswap";
/// Bitswap request server: answers litep2p bitswap requests by looking up
/// indexed transactions in the blockchain client.
pub struct BitswapServer<Block: BlockT> {
    /// Bitswap handle.
    handle: BitswapHandle,

    /// Blockchain client, used to resolve CIDs to indexed transactions.
    client: Arc<dyn BlockBackend<Block> + Send + Sync>,
}
impl<Block: BlockT> BitswapServer<Block> {
	/// Create new [`BitswapServer`].
	///
	/// Returns the future that drives the server (must be spawned/polled by the caller)
	/// and the `litep2p` bitswap [`Config`] to be installed into the network backend.
	pub fn new(
		client: Arc<dyn BlockBackend<Block> + Send + Sync>,
	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Config) {
		let (config, handle) = Config::new();
		let bitswap = Self { client, handle };

		(Box::pin(async move { bitswap.run().await }), config)
	}

	/// Event loop of [`BitswapServer`].
	///
	/// For each requested `cid`, look up the indexed transaction from the blockchain
	/// client and respond with either the block data or a block-presence indication.
	async fn run(mut self) {
		log::debug!(target: LOG_TARGET, "starting bitswap server");

		while let Some(event) = self.handle.next().await {
			match event {
				BitswapEvent::Request { peer, cids } => {
					log::debug!(target: LOG_TARGET, "handle bitswap request from {peer:?} for {cids:?}");

					let response: Vec<ResponseType> = cids
						.into_iter()
						.map(|(cid, want_type)| {
							let mut hash = Block::Hash::default();
							let hash_len = hash.as_ref().len();
							let digest = cid.hash().digest();

							// `cid` comes from an untrusted remote peer so its multihash digest
							// may be arbitrarily short: answer `DontHave` instead of panicking
							// in the slicing/`copy_from_slice()` below.
							if digest.len() < hash_len {
								log::debug!(
									target: LOG_TARGET,
									"{peer:?} requested {cid:?} with too short digest {}",
									digest.len(),
								);

								return ResponseType::Presence {
									cid,
									presence: BlockPresenceType::DontHave,
								}
							}
							hash.as_mut().copy_from_slice(&digest[..hash_len]);

							let transaction = match self.client.indexed_transaction(hash) {
								Ok(ex) => ex,
								Err(error) => {
									log::error!(target: LOG_TARGET, "error retrieving transaction {hash}: {error}");
									None
								},
							};

							match transaction {
								Some(transaction) => {
									log::trace!(target: LOG_TARGET, "found cid {cid:?}, hash {hash:?}");

									match want_type {
										WantType::Block =>
											ResponseType::Block { cid, block: transaction },
										_ => ResponseType::Presence {
											cid,
											presence: BlockPresenceType::Have,
										},
									}
								},
								None => {
									log::trace!(target: LOG_TARGET, "missing cid {cid:?}, hash {hash:?}");

									ResponseType::Presence {
										cid,
										presence: BlockPresenceType::DontHave,
									}
								},
							}
						})
						.collect();

					self.handle.send_response(peer, response).await;
				},
			}
		}
	}
}
@@ -0,0 +1,23 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Shims for fitting `litep2p` APIs to `sc-network` APIs.
pub(crate) mod bitswap;
pub(crate) mod notification;
pub(crate) mod request_response;
@@ -0,0 +1,168 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! `litep2p` notification protocol configuration.
use crate::{
config::{MultiaddrWithPeerId, NonReservedPeerMode, NotificationHandshake, SetConfig},
litep2p::shim::notification::{
peerset::{Peerset, PeersetCommand},
NotificationProtocol,
},
peer_store::PeerStoreProvider,
service::{metrics::NotificationMetrics, traits::NotificationConfig},
NotificationService, ProtocolName,
};
use litep2p::protocol::notification::{Config, ConfigBuilder};
use sc_utils::mpsc::TracingUnboundedSender;
use std::sync::{atomic::AtomicUsize, Arc};
/// Handle for controlling the notification protocol.
///
/// Cheap to clone: the command channel and the connected-peer counter are shared.
#[derive(Debug, Clone)]
pub struct ProtocolControlHandle {
	/// TX channel for sending commands to `Peerset` of the notification protocol.
	pub tx: TracingUnboundedSender<PeersetCommand>,

	/// Peers currently connected to this protocol.
	pub connected_peers: Arc<AtomicUsize>,
}
impl ProtocolControlHandle {
/// Create new [`ProtocolControlHandle`].
pub fn new(
tx: TracingUnboundedSender<PeersetCommand>,
connected_peers: Arc<AtomicUsize>,
) -> Self {
Self { tx, connected_peers }
}
}
/// Configuration for the notification protocol.
///
/// Bundles the `sc-network` protocol options together with the `litep2p`
/// notification protocol [`Config`] and the handle for controlling the protocol.
#[derive(Debug)]
pub struct NotificationProtocolConfig {
	/// Name of the notifications protocols of this set. A substream on this set will be
	/// considered established once this protocol is open.
	pub protocol_name: ProtocolName,

	/// Maximum allowed size of single notifications.
	max_notification_size: usize,

	/// Base configuration.
	set_config: SetConfig,

	/// `litep2p` notification config.
	pub config: Config,

	/// Handle for controlling the notification protocol.
	pub handle: ProtocolControlHandle,
}
impl NotificationProtocolConfig {
// Create new [`NotificationProtocolConfig`].
pub fn new(
protocol_name: ProtocolName,
fallback_names: Vec<ProtocolName>,
max_notification_size: usize,
handshake: Option<NotificationHandshake>,
set_config: SetConfig,
metrics: NotificationMetrics,
peerstore_handle: Arc<dyn PeerStoreProvider>,
) -> (Self, Box<dyn NotificationService>) {
// create `Peerset`/`Peerstore` handle for the protocol
let connected_peers = Arc::new(Default::default());
let (peerset, peerset_tx) = Peerset::new(
protocol_name.clone(),
set_config.out_peers as usize,
set_config.in_peers as usize,
set_config.non_reserved_mode == NonReservedPeerMode::Deny,
set_config.reserved_nodes.iter().map(|address| address.peer_id).collect(),
Arc::clone(&connected_peers),
peerstore_handle,
);
// create `litep2p` notification protocol configuration for the protocol
//
// NOTE: currently only dummy value is given as the handshake as protocols (apart from
// syncing) are not configuring their own handshake and instead default to role being the
// handshake. As the time of writing this, most protocols are not aware of the role and
// that should be refactored in the future.
let (config, handle) = ConfigBuilder::new(protocol_name.clone().into())
.with_handshake(handshake.map_or(vec![1], |handshake| (*handshake).to_vec()))
.with_max_size(max_notification_size as usize)
.with_auto_accept_inbound(true)
.with_fallback_names(fallback_names.into_iter().map(From::from).collect())
.build();
// initialize the actual object implementing `NotificationService` and combine the
// `litep2p::NotificationHandle` with `Peerset` to implement a full and independent
// notification protocol runner
let protocol = NotificationProtocol::new(protocol_name.clone(), handle, peerset, metrics);
(
Self {
protocol_name,
max_notification_size,
set_config,
config,
handle: ProtocolControlHandle::new(peerset_tx, connected_peers),
},
Box::new(protocol),
)
}
/// Get reference to protocol name.
pub fn protocol_name(&self) -> &ProtocolName {
&self.protocol_name
}
/// Get reference to `SetConfig`.
pub fn set_config(&self) -> &SetConfig {
&self.set_config
}
/// Modifies the configuration to allow non-reserved nodes.
pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) {
self.set_config.in_peers = in_peers;
self.set_config.out_peers = out_peers;
self.set_config.non_reserved_mode = NonReservedPeerMode::Accept;
}
/// Add a node to the list of reserved nodes.
pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) {
self.set_config.reserved_nodes.push(peer);
}
/// Get maximum notification size.
pub fn max_notification_size(&self) -> usize {
self.max_notification_size
}
}
impl NotificationConfig for NotificationProtocolConfig {
	/// Get reference to `SetConfig`.
	fn set_config(&self) -> &SetConfig {
		&self.set_config
	}

	/// Get reference to protocol name.
	fn protocol_name(&self) -> &ProtocolName {
		&self.protocol_name
	}
}
@@ -0,0 +1,374 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Shim for `litep2p::NotificationHandle` to combine `Peerset`-like behavior
//! with `NotificationService`.
use crate::{
error::Error,
litep2p::shim::notification::peerset::{OpenResult, Peerset, PeersetNotificationCommand},
service::{
metrics::NotificationMetrics,
traits::{NotificationEvent as SubstrateNotificationEvent, ValidationResult},
},
MessageSink, NotificationService, ProtocolName,
};
use futures::{future::BoxFuture, stream::FuturesUnordered, StreamExt};
use litep2p::protocol::notification::{
NotificationEvent, NotificationHandle, NotificationSink,
ValidationResult as Litep2pValidationResult,
};
use tokio::sync::oneshot;
use sc_network_types::PeerId;
use std::{collections::HashSet, fmt};
pub mod config;
pub mod peerset;
#[cfg(test)]
mod tests;
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p::notification";
/// Wrapper over `litep2p`'s notification sink.
///
/// Implements [`MessageSink`] so a protocol can send notifications to a single
/// remote peer without going through the `NotificationService` itself.
pub struct Litep2pMessageSink {
	/// Protocol.
	protocol: ProtocolName,

	/// Remote peer ID.
	peer: PeerId,

	/// Notification sink.
	sink: NotificationSink,

	/// Notification metrics.
	metrics: NotificationMetrics,
}
impl Litep2pMessageSink {
/// Create new [`Litep2pMessageSink`].
fn new(
peer: PeerId,
protocol: ProtocolName,
sink: NotificationSink,
metrics: NotificationMetrics,
) -> Self {
Self { protocol, peer, sink, metrics }
}
}
#[async_trait::async_trait]
impl MessageSink for Litep2pMessageSink {
	/// Send synchronous `notification` to the peer associated with this [`MessageSink`].
	///
	/// Failures are logged but not propagated: a synchronous send cannot exercise
	/// backpressure.
	fn send_sync_notification(&self, notification: Vec<u8>) {
		let size = notification.len();

		match self.sink.send_sync_notification(notification) {
			Ok(_) => self.metrics.register_notification_sent(&self.protocol, size),
			Err(error) => log::trace!(
				target: LOG_TARGET,
				"{}: failed to send sync notification to {:?}: {error:?}",
				self.protocol,
				self.peer,
			),
		}
	}

	/// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
	/// allowing sender to exercise backpressure.
	///
	/// Returns an error if the peer does not exist.
	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), Error> {
		let size = notification.len();

		match self.sink.send_async_notification(notification).await {
			Ok(_) => {
				self.metrics.register_notification_sent(&self.protocol, size);
				Ok(())
			},
			Err(error) => {
				log::trace!(
					target: LOG_TARGET,
					"{}: failed to send async notification to {:?}: {error:?}",
					self.protocol,
					self.peer,
				);

				Err(Error::Litep2p(error))
			},
		}
	}
}
/// Notification protocol implementation.
///
/// Combines the `litep2p` notification handle with a [`Peerset`] and implements
/// [`NotificationService`] on top of them.
pub struct NotificationProtocol {
	/// Protocol name.
	protocol: ProtocolName,

	/// `litep2p` notification handle.
	handle: NotificationHandle,

	/// Peerset for the notification protocol.
	///
	/// Listens to peering-related events and either opens or closes substreams to remote peers.
	peerset: Peerset,

	/// Pending validations for inbound substreams.
	///
	/// Each future resolves once the protocol sends its verdict over the oneshot
	/// sender handed out in `ValidateInboundSubstream`.
	pending_validations: FuturesUnordered<
		BoxFuture<'static, (PeerId, Result<ValidationResult, oneshot::error::RecvError>)>,
	>,

	/// Pending cancels.
	///
	/// Peers whose substream was rejected locally after opening and whose close
	/// event has not yet been received from `litep2p`.
	pending_cancels: HashSet<litep2p::PeerId>,

	/// Notification metrics.
	metrics: NotificationMetrics,
}
impl fmt::Debug for NotificationProtocol {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		// `peerset`, `pending_validations`, `pending_cancels` and `metrics` are omitted;
		// `finish_non_exhaustive()` marks the omission in the output instead of
		// pretending the listed fields are the whole struct.
		f.debug_struct("NotificationProtocol")
			.field("protocol", &self.protocol)
			.field("handle", &self.handle)
			.finish_non_exhaustive()
	}
}
impl NotificationProtocol {
/// Create new [`NotificationProtocol`].
pub fn new(
protocol: ProtocolName,
handle: NotificationHandle,
peerset: Peerset,
metrics: NotificationMetrics,
) -> Self {
Self {
protocol,
handle,
peerset,
metrics,
pending_cancels: HashSet::new(),
pending_validations: FuturesUnordered::new(),
}
}
/// Handle `Peerset` command.
async fn on_peerset_command(&mut self, command: PeersetNotificationCommand) {
match command {
PeersetNotificationCommand::OpenSubstream { peers } => {
log::debug!(target: LOG_TARGET, "{}: open substreams to {peers:?}", self.protocol);
let _ = self.handle.open_substream_batch(peers.into_iter().map(From::from)).await;
},
PeersetNotificationCommand::CloseSubstream { peers } => {
log::debug!(target: LOG_TARGET, "{}: close substreams to {peers:?}", self.protocol);
self.handle.close_substream_batch(peers.into_iter().map(From::from)).await;
},
}
}
}
#[async_trait::async_trait]
impl NotificationService for NotificationProtocol {
	/// Explicitly opening substreams is not supported by this backend; substreams
	/// are opened and closed by `Peerset`.
	async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
		unimplemented!();
	}

	/// Explicitly closing substreams is not supported by this backend; substreams
	/// are opened and closed by `Peerset`.
	async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
		unimplemented!();
	}

	/// Send synchronous `notification` to `peer`, registering it in metrics if it
	/// was successfully handed over to `litep2p`.
	fn send_sync_notification(&mut self, peer: &PeerId, notification: Vec<u8>) {
		let size = notification.len();

		if self.handle.send_sync_notification(peer.into(), notification).is_ok() {
			self.metrics.register_notification_sent(&self.protocol, size);
		}
	}

	/// Send asynchronous `notification` to `peer`, allowing the sender to exercise
	/// backpressure.
	///
	/// Returns an error if the underlying channel to `litep2p` is closed.
	async fn send_async_notification(
		&mut self,
		peer: &PeerId,
		notification: Vec<u8>,
	) -> Result<(), Error> {
		let size = notification.len();

		match self.handle.send_async_notification(peer.into(), notification).await {
			Ok(_) => {
				self.metrics.register_notification_sent(&self.protocol, size);
				Ok(())
			},
			Err(_) => Err(Error::ChannelClosed),
		}
	}

	/// Set handshake for the notification protocol replacing the old handshake.
	async fn set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
		self.handle.set_handshake(handshake);

		Ok(())
	}

	/// Set handshake for the notification protocol replacing the old handshake.
	///
	/// For `litep2p` this is identical to `NotificationService::set_handshake()` since `litep2p`
	/// allows updating the handshake synchronously.
	fn try_set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()> {
		self.handle.set_handshake(handshake);

		Ok(())
	}

	/// Make a copy of the object so it can be shared between protocol components
	/// who wish to have access to the same underlying notification protocol.
	fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()> {
		unimplemented!("clonable `NotificationService` not supported by `litep2p`");
	}

	/// Get protocol name of the `NotificationService`.
	fn protocol(&self) -> &ProtocolName {
		&self.protocol
	}

	/// Get message sink of the peer.
	fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
		self.handle.notification_sink(peer.into()).map(|sink| {
			let sink: Box<dyn MessageSink> = Box::new(Litep2pMessageSink::new(
				*peer,
				self.protocol.clone(),
				sink,
				self.metrics.clone(),
			));
			sink
		})
	}

	/// Get next event from the `Notifications` event stream.
	///
	/// Multiplexes three event sources, in priority order: `litep2p` notification
	/// events, completed inbound-substream validations and `Peerset` commands.
	async fn next_event(&mut self) -> Option<SubstrateNotificationEvent> {
		loop {
			tokio::select! {
				biased;

				event = self.handle.next() => match event? {
					NotificationEvent::ValidateSubstream { peer, handshake, .. } => {
						// `Peerset` may reject the inbound substream outright; otherwise the
						// final decision is delegated to the protocol via
						// `ValidateInboundSubstream` and tracked in `pending_validations`.
						if let ValidationResult::Reject = self.peerset.report_inbound_substream(peer.into()) {
							self.handle.send_validation_result(peer, Litep2pValidationResult::Reject);
							continue;
						}

						let (tx, rx) = oneshot::channel();
						self.pending_validations.push(Box::pin(async move { (peer.into(), rx.await) }));

						log::trace!(target: LOG_TARGET, "{}: validate substream for {peer:?}", self.protocol);

						return Some(SubstrateNotificationEvent::ValidateInboundSubstream {
							peer: peer.into(),
							handshake,
							result_tx: tx,
						});
					}
					NotificationEvent::NotificationStreamOpened {
						peer,
						fallback,
						handshake,
						direction,
						..
					} => {
						self.metrics.register_substream_opened(&self.protocol);

						match self.peerset.report_substream_opened(peer.into(), direction.into()) {
							OpenResult::Reject => {
								// substream was rejected after opening: close it again and
								// remember the peer so the pending close event is filtered out
								let _ = self.handle.close_substream_batch(vec![peer].into_iter().map(From::from)).await;
								self.pending_cancels.insert(peer);

								continue
							}
							OpenResult::Accept { direction } => {
								log::trace!(target: LOG_TARGET, "{}: substream opened for {peer:?}", self.protocol);

								return Some(SubstrateNotificationEvent::NotificationStreamOpened {
									peer: peer.into(),
									handshake,
									direction,
									negotiated_fallback: fallback.map(From::from),
								});
							}
						}
					}
					NotificationEvent::NotificationStreamClosed {
						peer,
					} => {
						log::trace!(target: LOG_TARGET, "{}: substream closed for {peer:?}", self.protocol);

						self.metrics.register_substream_closed(&self.protocol);
						self.peerset.report_substream_closed(peer.into());

						// the close event of a canceled substream is not reported onwards
						if self.pending_cancels.remove(&peer) {
							log::debug!(
								target: LOG_TARGET,
								"{}: substream closed to canceled peer ({peer:?})",
								self.protocol
							);
							continue
						}

						return Some(SubstrateNotificationEvent::NotificationStreamClosed { peer: peer.into() })
					}
					NotificationEvent::NotificationStreamOpenFailure {
						peer,
						error,
					} => {
						log::trace!(target: LOG_TARGET, "{}: open failure for {peer:?}", self.protocol);
						self.peerset.report_substream_open_failure(peer.into(), error);
					}
					NotificationEvent::NotificationReceived {
						peer,
						notification,
					} => {
						self.metrics.register_notification_received(&self.protocol, notification.len());

						// don't forward notifications from peers whose substream is being canceled
						if !self.pending_cancels.contains(&peer) {
							return Some(SubstrateNotificationEvent::NotificationReceived {
								peer: peer.into(),
								notification: notification.to_vec(),
							});
						}
					}
				},
				result = self.pending_validations.next(), if !self.pending_validations.is_empty() => {
					let (peer, result) = result?;

					let validation_result = match result {
						Ok(ValidationResult::Accept) => Litep2pValidationResult::Accept,
						// a dropped sender counts as a rejection
						_ => {
							self.peerset.report_substream_rejected(peer);
							Litep2pValidationResult::Reject
						}
					};

					self.handle.send_validation_result(peer.into(), validation_result);
				}
				command = self.peerset.next() => self.on_peerset_command(command?).await,
			}
		}
	}
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,384 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Fuzz test emulates network events and peer connection handling by `Peerset`
//! and `PeerStore` to discover possible inconsistencies in peer management.
use crate::{
litep2p::{
peerstore::Peerstore,
shim::notification::peerset::{
OpenResult, Peerset, PeersetCommand, PeersetNotificationCommand,
},
},
service::traits::{Direction, PeerStore, ValidationResult},
ProtocolName,
};
use futures::{FutureExt, StreamExt};
use litep2p::protocol::notification::NotificationError;
use rand::{
distributions::{Distribution, Uniform, WeightedIndex},
seq::IteratorRandom,
};
use sc_network_common::types::ReputationChange;
use sc_network_types::PeerId;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
/// Run the `Peerset` fuzz test 50 times, each round with a freshly randomized
/// configuration and action sequence.
#[tokio::test]
#[cfg(debug_assertions)]
async fn run() {
	sp_tracing::try_init_simple();

	for _ in 0..50 {
		test_once().await;
	}
}
/// Run a single fuzzing round against `Peerset`/`Peerstore`.
///
/// Emulates randomized peering events (inbound substreams, substream
/// open/close/failure, bans, reserved-set updates) and after each action checks
/// that the locally tracked substream state (`opening`/`open`/`closing`/`closed`)
/// stays consistent with the state reported by `Peerset`.
#[cfg(debug_assertions)]
async fn test_once() {
	// PRNG to use.
	let mut rng = rand::thread_rng();

	// peers that the peerset knows about.
	let mut known_peers = HashSet::<PeerId>::new();

	// peers that we have reserved. Always a subset of `known_peers`.
	let mut reserved_peers = HashSet::<PeerId>::new();

	// reserved only mode
	let mut reserved_only = Uniform::new_inclusive(0, 10).sample(&mut rng) == 0;

	// Bootnodes for `PeerStore` initialization.
	let bootnodes = (0..Uniform::new_inclusive(0, 4).sample(&mut rng))
		.map(|_| {
			let id = PeerId::random();
			known_peers.insert(id);
			id
		})
		.collect();

	let peerstore = Peerstore::new(bootnodes);
	let peer_store_handle = peerstore.handle();

	// `Peerset` under test: random in/out slot counts and a random (possibly empty)
	// initial reserved set.
	let (mut peerset, to_peerset) = Peerset::new(
		ProtocolName::from("/notif/1"),
		Uniform::new_inclusive(0, 25).sample(&mut rng),
		Uniform::new_inclusive(0, 25).sample(&mut rng),
		reserved_only,
		(0..Uniform::new_inclusive(0, 2).sample(&mut rng))
			.map(|_| {
				let id = PeerId::random();
				known_peers.insert(id);
				reserved_peers.insert(id);
				id
			})
			.collect(),
		Default::default(),
		Arc::clone(&peer_store_handle),
	);
	tokio::spawn(peerstore.run());

	// opening substreams
	let mut opening = HashMap::<PeerId, Direction>::new();

	// open substreams
	let mut open = HashMap::<PeerId, Direction>::new();

	// closing substreams
	let mut closing = HashSet::<PeerId>::new();

	// closed substreams
	let mut closed = HashSet::<PeerId>::new();

	// perform a certain number of actions while checking that the state is consistent.
	//
	// if we reach the end of the loop, the run has succeeded
	let _ = tokio::task::spawn_blocking(move || {
		// PRNG to use in `spawn_blocking` context.
		let mut rng = rand::thread_rng();

		for _ in 0..2500 {
			// each of these weights corresponds to an action that we may perform
			let action_weights =
				[300, 110, 110, 110, 110, 90, 70, 30, 110, 110, 110, 110, 20, 110, 50, 110];

			match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) {
				// poll `Peerset` for the next substream command, if any
				0 => match peerset.next().now_or_never() {
					// open substreams to `peers`
					Some(Some(PeersetNotificationCommand::OpenSubstream { peers })) =>
						for peer in peers {
							opening.insert(peer, Direction::Outbound);
							closed.remove(&peer);

							assert!(!closing.contains(&peer));
							assert!(!open.contains_key(&peer));
						},
					// close substreams to `peers`
					Some(Some(PeersetNotificationCommand::CloseSubstream { peers })) =>
						for peer in peers {
							assert!(closing.insert(peer));
							assert!(open.remove(&peer).is_some());
							assert!(!opening.contains_key(&peer));
						},
					Some(None) => panic!("peerset exited"),
					None => {},
				},
				// get inbound connection from an unknown peer
				1 => {
					let new_peer = PeerId::random();
					peer_store_handle.add_known_peer(new_peer);
					match peerset.report_inbound_substream(new_peer) {
						ValidationResult::Accept => {
							opening.insert(new_peer, Direction::Inbound);
						},
						ValidationResult::Reject => {},
					}
				},
				// substream opened successfully
				//
				// remove peer from `opening` (which contains its direction), report the open
				// substream to `Peerset` and move peer state to `open`.
				//
				// if the substream was canceled while it was opening, move peer to `closing`
				2 =>
					if let Some(peer) = opening.keys().choose(&mut rng).copied() {
						let direction = opening.remove(&peer).unwrap();
						match peerset.report_substream_opened(peer, direction) {
							OpenResult::Accept { .. } => {
								assert!(open.insert(peer, direction).is_none());
							},
							OpenResult::Reject => {
								assert!(closing.insert(peer));
							},
						}
					},
				// substream failed to open
				3 =>
					if let Some(peer) = opening.keys().choose(&mut rng).copied() {
						let _ = opening.remove(&peer).unwrap();
						peerset.report_substream_open_failure(peer, NotificationError::Rejected);
					},
				// substream was closed by remote peer
				4 =>
					if let Some(peer) = open.keys().choose(&mut rng).copied() {
						let _ = open.remove(&peer).unwrap();
						peerset.report_substream_closed(peer);
						assert!(closed.insert(peer));
					},
				// substream was closed by local node
				5 =>
					if let Some(peer) = closing.iter().choose(&mut rng).copied() {
						assert!(closing.remove(&peer));
						assert!(closed.insert(peer));
						peerset.report_substream_closed(peer);
					},
				// random connected peer was disconnected by the protocol
				6 =>
					if let Some(peer) = open.keys().choose(&mut rng).copied() {
						to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
					},
				// ban random peer
				7 =>
					if let Some(peer) = known_peers.iter().choose(&mut rng).copied() {
						peer_store_handle.report_peer(peer, ReputationChange::new_fatal(""));
					},
				// inbound substream is received for a peer that was considered
				// outbound
				8 => {
					let outbound_peers = opening
						.iter()
						.filter_map(|(peer, direction)| {
							std::matches!(direction, Direction::Outbound).then_some(*peer)
						})
						.collect::<HashSet<_>>();

					if let Some(peer) = outbound_peers.iter().choose(&mut rng).copied() {
						match peerset.report_inbound_substream(peer) {
							ValidationResult::Accept => {
								opening.insert(peer, Direction::Inbound);
							},
							ValidationResult::Reject => {},
						}
					}
				},
				// set reserved peers
				//
				// choose peers from all available sets (open, opening, closing, closed) + some new
				// peers
				9 => {
					let num_open = Uniform::new_inclusive(0, open.len()).sample(&mut rng);
					let num_opening = Uniform::new_inclusive(0, opening.len()).sample(&mut rng);
					let num_closing = Uniform::new_inclusive(0, closing.len()).sample(&mut rng);
					let num_closed = Uniform::new_inclusive(0, closed.len()).sample(&mut rng);

					let peers = open
						.keys()
						.copied()
						.choose_multiple(&mut rng, num_open)
						.into_iter()
						.chain(
							opening
								.keys()
								.copied()
								.choose_multiple(&mut rng, num_opening)
								.into_iter(),
						)
						.chain(
							closing
								.iter()
								.copied()
								.choose_multiple(&mut rng, num_closing)
								.into_iter(),
						)
						.chain(
							closed
								.iter()
								.copied()
								.choose_multiple(&mut rng, num_closed)
								.into_iter(),
						)
						.chain((0..5).map(|_| {
							let peer = PeerId::random();
							known_peers.insert(peer);
							peer_store_handle.add_known_peer(peer);
							peer
						}))
						.filter(|peer| !reserved_peers.contains(peer))
						.collect::<HashSet<_>>();

					reserved_peers.extend(peers.clone().into_iter());
					to_peerset.unbounded_send(PeersetCommand::SetReservedPeers { peers }).unwrap();
				},
				// add reserved peers
				10 => {
					let num_open = Uniform::new_inclusive(0, open.len()).sample(&mut rng);
					let num_opening = Uniform::new_inclusive(0, opening.len()).sample(&mut rng);
					let num_closing = Uniform::new_inclusive(0, closing.len()).sample(&mut rng);
					let num_closed = Uniform::new_inclusive(0, closed.len()).sample(&mut rng);

					let peers = open
						.keys()
						.copied()
						.choose_multiple(&mut rng, num_open)
						.into_iter()
						.chain(
							opening
								.keys()
								.copied()
								.choose_multiple(&mut rng, num_opening)
								.into_iter(),
						)
						.chain(
							closing
								.iter()
								.copied()
								.choose_multiple(&mut rng, num_closing)
								.into_iter(),
						)
						.chain(
							closed
								.iter()
								.copied()
								.choose_multiple(&mut rng, num_closed)
								.into_iter(),
						)
						.chain((0..5).map(|_| {
							let peer = PeerId::random();
							known_peers.insert(peer);
							peer_store_handle.add_known_peer(peer);
							peer
						}))
						.filter(|peer| !reserved_peers.contains(peer))
						.collect::<HashSet<_>>();

					reserved_peers.extend(peers.clone().into_iter());
					to_peerset.unbounded_send(PeersetCommand::AddReservedPeers { peers }).unwrap();
				},
				// remove reserved peers
				11 => {
					let num_to_remove =
						Uniform::new_inclusive(0, reserved_peers.len()).sample(&mut rng);
					let peers = reserved_peers
						.iter()
						.copied()
						.choose_multiple(&mut rng, num_to_remove)
						.into_iter()
						.collect::<HashSet<_>>();

					peers.iter().for_each(|peer| {
						assert!(reserved_peers.remove(peer));
					});

					to_peerset
						.unbounded_send(PeersetCommand::RemoveReservedPeers { peers })
						.unwrap();
				},
				// set reserved only
				12 => {
					reserved_only = !reserved_only;

					let _ = to_peerset
						.unbounded_send(PeersetCommand::SetReservedOnly { reserved_only });
				},
				//
				// discover a new node.
				13 => {
					let new_peer = PeerId::random();
					known_peers.insert(new_peer);
					peer_store_handle.add_known_peer(new_peer);
				},
				// protocol rejected a substream that was accepted by `Peerset`
				14 => {
					let inbound_peers = opening
						.iter()
						.filter_map(|(peer, direction)| {
							std::matches!(direction, Direction::Inbound).then_some(*peer)
						})
						.collect::<HashSet<_>>();

					if let Some(peer) = inbound_peers.iter().choose(&mut rng).copied() {
						peerset.report_substream_rejected(peer);
						opening.remove(&peer);
					}
				},
				// inbound substream received for a peer in `closed`
				15 =>
					if let Some(peer) = closed.iter().choose(&mut rng).copied() {
						match peerset.report_inbound_substream(peer) {
							ValidationResult::Accept => {
								assert!(closed.remove(&peer));
								opening.insert(peer, Direction::Inbound);
							},
							ValidationResult::Reject => {},
						}
					},
				_ => unreachable!(),
			}
		}
	})
	.await
	.unwrap();
}
@@ -0,0 +1,22 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#[cfg(test)]
mod fuzz;
#[cfg(test)]
mod peerset;
@@ -0,0 +1,891 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
litep2p::{
peerstore::peerstore_handle_test,
shim::notification::peerset::{
Direction, OpenResult, PeerState, Peerset, PeersetCommand, PeersetNotificationCommand,
Reserved,
},
},
service::traits::{self, ValidationResult},
ProtocolName,
};
use futures::prelude::*;
use litep2p::protocol::notification::NotificationError;
use sc_network_types::PeerId;
use std::{
collections::HashSet,
sync::{atomic::Ordering, Arc},
task::Poll,
};
// outbound substream was initiated for a peer but an inbound substream from that same peer
// was received while the `Peerset` was waiting for the outbound substream to be opened
//
// verify that the peer state is updated correctly
#[tokio::test]
async fn inbound_substream_for_outbound_peer() {
	let peerstore_handle = Arc::new(peerstore_handle_test());

	// make three random peers known to the peer store
	let peers: Vec<_> = (0..3)
		.map(|_| {
			let peer = PeerId::random();
			peerstore_handle.add_known_peer(peer);
			peer
		})
		.collect();
	let inbound_peer = peers[0];

	let (mut peerset, _to_peerset) = Peerset::new(
		ProtocolName::from("/notif/1"),
		25,
		25,
		false,
		Default::default(),
		Default::default(),
		peerstore_handle,
	);
	assert_eq!(peerset.num_in(), 0usize);
	assert_eq!(peerset.num_out(), 0usize);

	// all three known peers must be scheduled for outbound substreams
	match peerset.next().await {
		Some(PeersetNotificationCommand::OpenSubstream { peers: outbound }) => {
			assert_eq!(outbound.len(), 3usize);
			assert_eq!(peerset.num_in(), 0usize);
			assert_eq!(peerset.num_out(), 3usize);
			assert_eq!(
				peerset.peers().get(&inbound_peer),
				Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
			);
		},
		event => panic!("invalid event: {event:?}"),
	}

	// inbound substream was received from peer who was marked outbound
	//
	// the substream must be accepted while the peer keeps its outbound `Opening`
	// state and the inbound/outbound counts stay unchanged
	assert_eq!(peerset.report_inbound_substream(inbound_peer), ValidationResult::Accept);
	assert_eq!(peerset.num_in(), 0usize);
	assert_eq!(peerset.num_out(), 3usize);
	assert_eq!(
		peerset.peers().get(&inbound_peer),
		Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
	);
}
// substream was opening to peer but then it was canceled and before the substream
// was fully closed, the peer got banned
#[tokio::test]
async fn canceled_peer_gets_banned() {
	sp_tracing::try_init_simple();

	let peerstore_handle = Arc::new(peerstore_handle_test());
	let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);

	// zero in/out slots and `reserved_only == true`: all three peers can only be
	// connected as reserved peers
	let (mut peerset, to_peerset) = Peerset::new(
		ProtocolName::from("/notif/1"),
		0,
		0,
		true,
		peers.clone(),
		Default::default(),
		peerstore_handle,
	);
	assert_eq!(peerset.num_in(), 0usize);
	assert_eq!(peerset.num_out(), 0usize);

	match peerset.next().await {
		Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
			assert_eq!(peerset.num_in(), 0usize);
			assert_eq!(peerset.num_out(), 0usize);

			for outbound_peer in &out_peers {
				assert!(peers.contains(outbound_peer));
				assert_eq!(
					peerset.peers().get(&outbound_peer),
					Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
				);
			}
		},
		event => panic!("invalid event: {event:?}"),
	}

	// remove all reserved peers
	to_peerset
		.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
		.unwrap();

	match peerset.next().await {
		Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
			// no substream has been reported open yet, so there is nothing to close
			assert!(out_peers.is_empty());
		},
		event => panic!("invalid event: {event:?}"),
	}

	// verify all reserved peers are canceled
	for (_, state) in peerset.peers() {
		assert_eq!(state, &PeerState::Canceled { direction: Direction::Outbound(Reserved::Yes) });
	}
}
#[tokio::test]
async fn peer_added_and_removed_from_peerset() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
// zero regular slots; only reserved peers participate in this test
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
0,
0,
true,
Default::default(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
// add peers to reserved set
let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
to_peerset
.unbounded_send(PeersetCommand::AddReservedPeers { peers: peers.clone() })
.unwrap();
// outbound substreams are opened to every added reserved peer; reserved peers never
// count towards the regular slot counts
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
for outbound_peer in &out_peers {
assert!(peers.contains(outbound_peer));
assert!(peerset.reserved_peers().contains(outbound_peer));
assert_eq!(
peerset.peers().get(&outbound_peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// report that all substreams were opened
for peer in &peers {
assert!(std::matches!(
peerset.report_substream_opened(*peer, traits::Direction::Outbound),
OpenResult::Accept { .. }
));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
}
// remove all reserved peers
to_peerset
.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
.unwrap();
// all connected reserved peers must be closed and dropped from the reserved set
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert!(!out_peers.is_empty());
for peer in &out_peers {
assert!(peers.contains(peer));
assert!(!peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
// add the peers again and verify that the command is ignored because the substreams are closing
to_peerset
.unbounded_send(PeersetCommand::AddReservedPeers { peers: peers.clone() })
.unwrap();
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert!(out_peers.is_empty());
for peer in &peers {
assert!(peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
// remove the peers again and verify the state remains as `Closing`
to_peerset
.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
.unwrap();
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert!(out_peers.is_empty());
for peer in &peers {
assert!(!peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
}
#[tokio::test]
async fn set_reserved_peers() {
sp_tracing::try_init_simple();
// start with three reserved peers and 25/25 regular slots
let reserved = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
true,
reserved.clone(),
Default::default(),
Arc::new(peerstore_handle_test()),
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
// outbound substreams are opened to all reserved peers without consuming regular slots
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
for outbound_peer in &out_peers {
assert!(reserved.contains(outbound_peer));
assert!(peerset.reserved_peers().contains(outbound_peer));
assert_eq!(
peerset.peers().get(&outbound_peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// report that all substreams were opened
for peer in &reserved {
assert!(std::matches!(
peerset.report_substream_opened(*peer, traits::Direction::Outbound),
OpenResult::Accept { .. }
));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
}
// add a totally new set of reserved peers
let new_reserved_peers =
HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
to_peerset
.unbounded_send(PeersetCommand::SetReservedPeers { peers: new_reserved_peers.clone() })
.unwrap();
// every previously-connected reserved peer is closed since none is in the new set,
// while the new set is already registered as reserved
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert!(!out_peers.is_empty());
assert_eq!(out_peers.len(), 3);
for peer in &out_peers {
assert!(reserved.contains(peer));
assert!(!peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
);
}
for peer in &new_reserved_peers {
assert!(peerset.reserved_peers().contains(peer));
}
},
event => panic!("invalid event: {event:?}"),
}
// the next poll opens outbound substreams to all peers of the new reserved set
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert!(!out_peers.is_empty());
assert_eq!(out_peers.len(), 3);
for peer in &new_reserved_peers {
assert!(peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
}
#[tokio::test]
async fn set_reserved_peers_one_peer_already_in_the_set() {
sp_tracing::try_init_simple();
let reserved = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
// `common_peer` is a member of both the original and the new reserved set
let common_peer = *reserved.iter().next().unwrap();
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
true,
reserved.clone(),
Default::default(),
Arc::new(peerstore_handle_test()),
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
for outbound_peer in &out_peers {
assert!(reserved.contains(outbound_peer));
assert!(peerset.reserved_peers().contains(outbound_peer));
assert_eq!(
peerset.peers().get(&outbound_peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// report that all substreams were opened
for peer in &reserved {
assert!(std::matches!(
peerset.report_substream_opened(*peer, traits::Direction::Outbound),
OpenResult::Accept { .. }
));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
}
// add a new set of reserved peers with one peer from the original set
let new_reserved_peers = HashSet::from_iter([PeerId::random(), PeerId::random(), common_peer]);
to_peerset
.unbounded_send(PeersetCommand::SetReservedPeers { peers: new_reserved_peers.clone() })
.unwrap();
// only the two peers absent from the new set are closed; `common_peer` must never
// appear in the close command (the `else` branch is a guard against that)
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert_eq!(out_peers.len(), 2);
for peer in &out_peers {
assert!(reserved.contains(peer));
if peer != &common_peer {
assert!(!peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::Yes) }),
);
} else {
panic!("common peer disconnected");
}
}
for peer in &new_reserved_peers {
assert!(peerset.reserved_peers().contains(peer));
}
},
event => panic!("invalid event: {event:?}"),
}
// verify the `common_peer` peer between the reserved sets is still in the state `Open`
assert_eq!(
peerset.peers().get(&common_peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
// only the two genuinely new reserved peers get an outbound substream opened
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert!(!out_peers.is_empty());
assert_eq!(out_peers.len(), 2);
for peer in &new_reserved_peers {
assert!(peerset.reserved_peers().contains(peer));
if peer != &common_peer {
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
);
}
}
},
event => panic!("invalid event: {event:?}"),
}
}
#[tokio::test]
async fn add_reserved_peers_one_peer_already_in_the_set() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
// register the reserved peers with the peerstore as well
let reserved = (0..3)
.map(|_| {
let peer = PeerId::random();
peerstore_handle.add_known_peer(peer);
peer
})
.collect::<Vec<_>>();
// `common_peer` is re-added with `AddReservedPeers` further below
let common_peer = *reserved.iter().next().unwrap();
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
true,
reserved.iter().cloned().collect(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
assert_eq!(out_peers.len(), 3);
for outbound_peer in &out_peers {
assert!(reserved.contains(outbound_peer));
assert!(peerset.reserved_peers().contains(outbound_peer));
assert_eq!(
peerset.peers().get(&outbound_peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// report that all substreams were opened
for peer in &reserved {
assert!(std::matches!(
peerset.report_substream_opened(*peer, traits::Direction::Outbound),
OpenResult::Accept { .. }
));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
}
// add a new set of reserved peers with one peer from the original set
let new_reserved_peers = HashSet::from_iter([PeerId::random(), PeerId::random(), common_peer]);
to_peerset
.unbounded_send(PeersetCommand::AddReservedPeers { peers: new_reserved_peers.clone() })
.unwrap();
// substreams are opened only to the two new peers; `common_peer` is already connected
// and must not be dialed again
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(out_peers.len(), 2),
assert!(!out_peers.iter().any(|peer| peer == &common_peer));
for peer in &out_peers {
assert!(!reserved.contains(peer));
if peer != &common_peer {
assert!(peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
);
}
}
},
event => panic!("invalid event: {event:?}"),
}
// verify the `common_peer` peer between the reserved sets is still in the state `Open`
assert_eq!(
peerset.peers().get(&common_peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::Yes) })
);
}
#[tokio::test]
async fn opening_peer_gets_canceled_and_disconnected() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
// exactly one known peer so the first poll dials exactly one peer
let _known_peers = (0..1)
.map(|_| {
let peer = PeerId::random();
peerstore_handle.add_known_peer(peer);
peer
})
.collect::<Vec<_>>();
// shared connection counter, used to verify `Peerset` bookkeeping below
let num_connected = Arc::new(Default::default());
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
false,
Default::default(),
Arc::clone(&num_connected),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0);
assert_eq!(peerset.num_out(), 0);
let peer = match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0);
assert_eq!(peerset.num_out(), 1);
assert_eq!(out_peers.len(), 1);
for peer in &out_peers {
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
);
}
out_peers[0]
},
event => panic!("invalid event: {event:?}"),
};
// disconnect the now-opening peer
to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
// poll `Peerset` to register the command and verify the peer is now in state `Canceled`
futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
Poll::Pending => Poll::Ready(()),
_ => panic!("unexpected event"),
})
.await;
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Canceled { direction: Direction::Outbound(Reserved::No) })
);
// a canceled peer keeps occupying its outbound slot until the substream is fully closed
assert_eq!(peerset.num_out(), 1);
// report to `Peerset` that the substream was opened, verify that it gets closed
assert!(std::matches!(
peerset.report_substream_opened(peer, traits::Direction::Outbound),
OpenResult::Reject { .. }
));
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::No) })
);
assert_eq!(num_connected.load(Ordering::SeqCst), 1);
assert_eq!(peerset.num_out(), 1);
// report close event to `Peerset` and verify state
peerset.report_substream_closed(peer);
assert_eq!(peerset.num_out(), 0);
assert_eq!(num_connected.load(Ordering::SeqCst), 0);
// the closed peer enters a backoff period before it can be dialed again
assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
}
#[tokio::test]
async fn open_failure_for_canceled_peer() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
// exactly one known peer so the first poll dials exactly one peer
let _known_peers = (0..1)
.map(|_| {
let peer = PeerId::random();
peerstore_handle.add_known_peer(peer);
peer
})
.collect::<Vec<_>>();
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
false,
Default::default(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
let peer = match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 1usize);
assert_eq!(out_peers.len(), 1);
for peer in &out_peers {
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
);
}
out_peers[0]
},
event => panic!("invalid event: {event:?}"),
};
// disconnect the now-opening peer
to_peerset.unbounded_send(PeersetCommand::DisconnectPeer { peer }).unwrap();
// poll `Peerset` to register the command and verify the peer is now in state `Canceled`
futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
Poll::Pending => Poll::Ready(()),
_ => panic!("unexpected event"),
})
.await;
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Canceled { direction: Direction::Outbound(Reserved::No) })
);
// the substream failed to open, verify that peer state is now `Backoff`
// and that `Peerset` doesn't emit any events
peerset.report_substream_open_failure(peer, NotificationError::NoConnection);
assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
futures::future::poll_fn(|cx| match peerset.poll_next_unpin(cx) {
Poll::Pending => Poll::Ready(()),
_ => panic!("unexpected event"),
})
.await;
}
#[tokio::test]
async fn peer_disconnected_when_being_validated_then_rejected() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
let (mut peerset, _to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
false,
Default::default(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
// inbound substream received
let peer = PeerId::random();
assert_eq!(peerset.report_inbound_substream(peer), ValidationResult::Accept);
// substream failed to open while it was being validated by the protocol
peerset.report_substream_open_failure(peer, NotificationError::NoConnection);
assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
// protocol rejected substream; the late rejection must not corrupt the state —
// the peer stays in `Backoff`
peerset.report_substream_rejected(peer);
assert_eq!(peerset.peers().get(&peer), Some(&PeerState::Backoff));
}
#[tokio::test]
async fn removed_reserved_peer_kept_due_to_free_slots() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
let peers = HashSet::from_iter([PeerId::random(), PeerId::random(), PeerId::random()]);
// 25/25 regular slots so the removed reserved peers can be kept as regular peers
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
true,
peers.clone(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
for outbound_peer in &out_peers {
assert!(peers.contains(outbound_peer));
assert_eq!(
peerset.peers().get(&outbound_peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// remove all reserved peers
to_peerset
.unbounded_send(PeersetCommand::RemoveReservedPeers { peers: peers.clone() })
.unwrap();
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert!(out_peers.is_empty());
},
event => panic!("invalid event: {event:?}"),
}
// verify that the removed reserved peers are NOT canceled: free outbound slots are
// available, so they are kept and demoted to regular (non-reserved) outbound peers
for (_, state) in peerset.peers() {
assert_eq!(state, &PeerState::Opening { direction: Direction::Outbound(Reserved::No) });
}
// the demoted peers now occupy regular outbound slots
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 3usize);
}
#[tokio::test]
async fn set_reserved_peers_but_available_slots() {
sp_tracing::try_init_simple();
let peerstore_handle = Arc::new(peerstore_handle_test());
let known_peers = (0..3)
.map(|_| {
let peer = PeerId::random();
peerstore_handle.add_known_peer(peer);
peer
})
.collect::<Vec<_>>();
// one peer is common across operations meaning an outbound substream will be opened to them
// when `Peerset` is polled (along with two random peers) and later on `SetReservedPeers`
// is called with the common peer and with two new random peers
let common_peer = *known_peers.iter().next().unwrap();
let disconnected_peers = known_peers.iter().skip(1).copied().collect::<HashSet<_>>();
assert_eq!(disconnected_peers.len(), 2);
let (mut peerset, to_peerset) = Peerset::new(
ProtocolName::from("/notif/1"),
25,
25,
false,
Default::default(),
Default::default(),
peerstore_handle,
);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 0usize);
// all three known peers are dialed as regular outbound peers
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers: out_peers }) => {
assert_eq!(out_peers.len(), 3);
for peer in &out_peers {
assert_eq!(
peerset.peers().get(&peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::No) })
);
}
},
event => panic!("invalid event: {event:?}"),
}
// verify all three peers are counted as outbound peers
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 3usize);
// report that all substreams were opened
for peer in &known_peers {
assert!(std::matches!(
peerset.report_substream_opened(*peer, traits::Direction::Outbound),
OpenResult::Accept { .. }
));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Connected { direction: Direction::Outbound(Reserved::No) })
);
}
// set reserved peers with `common_peer` being one of them
let reserved_peers = HashSet::from_iter([common_peer, PeerId::random(), PeerId::random()]);
to_peerset
.unbounded_send(PeersetCommand::SetReservedPeers { peers: reserved_peers.clone() })
.unwrap();
// the two non-reserved connected peers are closed; `common_peer` is promoted to
// reserved and stays connected
match peerset.next().await {
Some(PeersetNotificationCommand::CloseSubstream { peers: out_peers }) => {
assert_eq!(out_peers.len(), 2);
for peer in &out_peers {
assert!(disconnected_peers.contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Closing { direction: Direction::Outbound(Reserved::No) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
// verify that `Peerset` is aware of five peers, with two of them as outbound
// (the two disconnected peers)
assert_eq!(peerset.peers().len(), 5);
assert_eq!(peerset.num_in(), 0usize);
assert_eq!(peerset.num_out(), 2usize);
match peerset.next().await {
Some(PeersetNotificationCommand::OpenSubstream { peers }) => {
assert_eq!(peers.len(), 2);
assert!(!peers.contains(&common_peer));
for peer in &peers {
assert!(reserved_peers.contains(peer));
assert!(peerset.reserved_peers().contains(peer));
assert_eq!(
peerset.peers().get(peer),
Some(&PeerState::Opening { direction: Direction::Outbound(Reserved::Yes) }),
);
}
},
event => panic!("invalid event: {event:?}"),
}
assert_eq!(peerset.peers().len(), 5);
assert_eq!(peerset.num_in(), 0usize);
// the substreams of the two disconnected peers are still closing
assert_eq!(peerset.num_out(), 2usize);
}
@@ -0,0 +1,78 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Metrics for [`RequestResponseProtocol`](super::RequestResponseProtocol).
use crate::{service::metrics::Metrics, types::ProtocolName};
use std::time::Duration;
/// Request-response metrics.
///
/// Thin wrapper around the global network [`Metrics`] that pre-binds the protocol name used as
/// a metric label. All registration methods are no-ops when metrics are disabled.
pub struct RequestResponseMetrics {
/// Prometheus metrics, if enabled.
metrics: Option<Metrics>,
/// Protocol name, used as a label for the metrics.
protocol: ProtocolName,
}
impl RequestResponseMetrics {
/// Create new [`RequestResponseMetrics`] for `protocol`.
pub fn new(metrics: Option<Metrics>, protocol: ProtocolName) -> Self {
Self { metrics, protocol }
}
/// Register inbound request failure to Prometheus
pub fn register_inbound_request_failure(&self, reason: &str) {
if let Some(metrics) = &self.metrics {
metrics
.requests_in_failure_total
.with_label_values(&[&self.protocol, reason])
.inc();
}
}
/// Register inbound request success to Prometheus
pub fn register_inbound_request_success(&self, serve_time: Duration) {
if let Some(metrics) = &self.metrics {
metrics
.requests_in_success_total
.with_label_values(&[&self.protocol])
.observe(serve_time.as_secs_f64());
}
}
/// Register outbound request failure to Prometheus
pub fn register_outbound_request_failure(&self, reason: &str) {
if let Some(metrics) = &self.metrics {
metrics
.requests_out_failure_total
.with_label_values(&[&self.protocol, reason])
.inc();
}
}
/// Register outbound request success to Prometheus
pub fn register_outbound_request_success(&self, duration: Duration) {
if let Some(metrics) = &self.metrics {
metrics
.requests_out_success_total
.with_label_values(&[&self.protocol])
.observe(duration.as_secs_f64());
}
}
}
@@ -0,0 +1,529 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Shim for litep2p's request-response implementation to make it work with `sc_network`'s
//! request-response API.
use crate::{
litep2p::shim::request_response::metrics::RequestResponseMetrics,
peer_store::PeerStoreProvider,
request_responses::{IncomingRequest, OutgoingResponse},
service::{metrics::Metrics, traits::RequestResponseConfig as RequestResponseConfigT},
IfDisconnected, ProtocolName, RequestFailure,
};
use futures::{channel::oneshot, future::BoxFuture, stream::FuturesUnordered, StreamExt};
use litep2p::{
protocol::request_response::{
DialOptions, RequestResponseError, RequestResponseEvent, RequestResponseHandle,
},
types::RequestId,
};
use sc_network_types::PeerId;
use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender};
use std::{
collections::HashMap,
sync::Arc,
time::{Duration, Instant},
};
mod metrics;
#[cfg(test)]
mod tests;
/// Logging target for the file.
const LOG_TARGET: &str = "sub-libp2p::request-response";
/// Type containing information related to an outbound request.
#[derive(Debug)]
pub struct OutboundRequest {
/// Peer ID of the remote peer the request is sent to.
peer: PeerId,
/// Request payload.
request: Vec<u8>,
/// Fallback request, if provided.
fallback_request: Option<(Vec<u8>, ProtocolName)>,
/// `oneshot::Sender` for sending the received response, or failure.
sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
/// What should the node do if `peer` is disconnected.
dial_behavior: IfDisconnected,
}
impl OutboundRequest {
	/// Construct a new [`OutboundRequest`] from its parts.
	pub fn new(
		peer: PeerId,
		request: Vec<u8>,
		sender: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
		fallback_request: Option<(Vec<u8>, ProtocolName)>,
		dial_behavior: IfDisconnected,
	) -> Self {
		Self { peer, request, fallback_request, sender, dial_behavior }
	}
}
/// Pending request.
struct PendingRequest {
/// Channel for delivering the response (or failure) to the request originator.
tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
/// When the request was started, used for measuring request duration in metrics.
started: Instant,
/// Fallback request, if provided.
fallback_request: Option<(Vec<u8>, ProtocolName)>,
}
impl PendingRequest {
/// Create new [`PendingRequest`].
fn new(
tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
started: Instant,
fallback_request: Option<(Vec<u8>, ProtocolName)>,
) -> Self {
Self { tx, started, fallback_request }
}
}
/// Request-response protocol configuration.
///
/// See [`RequestResponseConfiguration`](crate::request_response::ProtocolConfig) for more details.
#[derive(Debug)]
pub struct RequestResponseConfig {
/// Name of the protocol on the wire. Should be something like `/foo/bar`.
pub protocol_name: ProtocolName,
/// Fallback on the wire protocol names to support.
pub fallback_names: Vec<ProtocolName>,
/// Maximum allowed size, in bytes, of a request.
pub max_request_size: u64,
/// Maximum allowed size, in bytes, of a response.
pub max_response_size: u64,
/// Duration after which emitted requests are considered timed out.
pub request_timeout: Duration,
/// Channel on which the networking service will send incoming requests.
///
/// `None` configures the protocol as outbound-only: inbound requests are rejected.
pub inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
}
impl RequestResponseConfig {
	/// Build a new [`RequestResponseConfig`] from its parts.
	pub(crate) fn new(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_request_size: u64,
		max_response_size: u64,
		request_timeout: Duration,
		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
	) -> Self {
		Self {
			inbound_queue,
			request_timeout,
			max_response_size,
			max_request_size,
			fallback_names,
			protocol_name,
		}
	}
}
impl RequestResponseConfigT for RequestResponseConfig {
// Expose the main on-the-wire protocol name of this configuration.
fn protocol_name(&self) -> &ProtocolName {
&self.protocol_name
}
}
/// Request-response protocol.
///
/// This is slightly different from the `RequestResponsesBehaviour` in that it is protocol-specific,
/// meaning there is an instance of `RequestResponseProtocol` for each installed request-response
/// protocol and that instance deals only with the requests and responses of that protocol, nothing
/// else. It also differs from the other implementation by combining both inbound and outbound
/// requests under one instance so all request-response-related behavior of any given protocol is
/// handled through one instance of `RequestResponseProtocol`.
pub struct RequestResponseProtocol {
/// Protocol name.
protocol: ProtocolName,
/// Handle to request-response protocol.
handle: RequestResponseHandle,
/// Inbound queue for sending received requests to protocol implementation in Polkadot SDK.
inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
/// Handle to `Peerstore`.
peerstore_handle: Arc<dyn PeerStoreProvider>,
/// Pending responses for locally-initiated (outbound) requests, keyed by request ID.
pending_inbound_responses: HashMap<RequestId, PendingRequest>,
/// Pending responses to received (inbound) requests, produced by the local protocol
/// implementation behind `inbound_queue`.
pending_outbound_responses: FuturesUnordered<
BoxFuture<'static, (litep2p::PeerId, RequestId, Result<OutgoingResponse, ()>, Instant)>,
>,
/// RX channel for receiving info for outbound requests.
request_rx: TracingUnboundedReceiver<OutboundRequest>,
/// Map of supported request-response protocols which are used to support fallback requests.
///
/// If negotiation for the main protocol fails and the request was sent with a fallback,
/// [`RequestResponseProtocol`] queries this map and sends the request to that protocol for
/// processing.
request_tx: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
/// Metrics, if enabled.
metrics: RequestResponseMetrics,
}
impl RequestResponseProtocol {
/// Create new [`RequestResponseProtocol`].
pub fn new(
	protocol: ProtocolName,
	handle: RequestResponseHandle,
	peerstore_handle: Arc<dyn PeerStoreProvider>,
	inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
	request_rx: TracingUnboundedReceiver<OutboundRequest>,
	request_tx: HashMap<ProtocolName, TracingUnboundedSender<OutboundRequest>>,
	metrics: Option<Metrics>,
) -> Self {
	// the metrics wrapper needs its own copy of the protocol name for labeling
	let metrics = RequestResponseMetrics::new(metrics, protocol.clone());

	Self {
		protocol,
		handle,
		inbound_queue,
		peerstore_handle,
		pending_inbound_responses: HashMap::new(),
		pending_outbound_responses: FuturesUnordered::new(),
		request_rx,
		request_tx,
		metrics,
	}
}
/// Send `request` to `peer`.
///
/// `connect` controls whether `peer` is dialed if it is currently disconnected. On success the
/// request is tracked in `pending_inbound_responses` until a response or failure arrives; on
/// failure the error is reported over `tx` right away.
async fn on_send_request(
	&mut self,
	peer: PeerId,
	request: Vec<u8>,
	fallback_request: Option<(Vec<u8>, ProtocolName)>,
	tx: oneshot::Sender<Result<(Vec<u8>, ProtocolName), RequestFailure>>,
	connect: IfDisconnected,
) {
	let dial_options = match connect {
		IfDisconnected::TryConnect => DialOptions::Dial,
		IfDisconnected::ImmediateError => DialOptions::Reject,
	};

	log::trace!(
		target: LOG_TARGET,
		"{}: send request to {:?} (fallback {:?}) (dial options: {:?})",
		self.protocol,
		peer,
		fallback_request,
		dial_options,
	);

	match self.handle.try_send_request(peer.into(), request, dial_options) {
		Ok(request_id) => {
			// `Instant::now()` is recorded so the round-trip time can be reported on success
			self.pending_inbound_responses
				.insert(request_id, PendingRequest::new(tx, Instant::now(), fallback_request));
		},
		Err(error) => {
			log::warn!(
				target: LOG_TARGET,
				"{}: failed to send request to {peer:?}: {error:?}",
				self.protocol,
			);

			let _ = tx.send(Err(RequestFailure::Refused));
			// a failed *outbound* request must be counted towards outbound failures
			// (previously misreported via `register_inbound_request_failure`)
			self.metrics.register_outbound_request_failure(error.to_string().as_ref());
		},
	}
}
/// Handle inbound request from `peer`
///
/// If the protocol is configured outbound only, reject the request immediately.
fn on_inbound_request(
&mut self,
peer: litep2p::PeerId,
fallback: Option<litep2p::ProtocolName>,
request_id: RequestId,
request: Vec<u8>,
) {
// no inbound queue means the local node serves no requests for this protocol
let Some(inbound_queue) = &self.inbound_queue else {
log::trace!(
target: LOG_TARGET,
"{}: rejecting inbound request from {peer:?}, protocol configured as outbound only",
self.protocol,
);
self.handle.reject_request(request_id);
return;
};
log::trace!(
target: LOG_TARGET,
"{}: request received from {peer:?} ({fallback:?} {request_id:?}), request size {:?}",
self.protocol,
request.len(),
);
// forward the request to the local protocol implementation; the response comes back
// over `tx`/`rx` and is driven to completion via `pending_outbound_responses`
let (tx, rx) = oneshot::channel();
match inbound_queue.try_send(IncomingRequest {
peer: peer.into(),
payload: request,
pending_response: tx,
}) {
Ok(_) => {
// the `Instant` is captured after the response future resolves, i.e., when the
// local implementation has produced (or dropped) the response
self.pending_outbound_responses.push(Box::pin(async move {
(peer, request_id, rx.await.map_err(|_| ()), Instant::now())
}));
},
Err(error) => {
// NOTE(review): `try_send` also fails when the channel is closed, not only when
// the queue is full — the log message assumes the latter; confirm intended
log::trace!(
target: LOG_TARGET,
"{:?}: dropping request from {peer:?} ({request_id:?}), inbound queue full",
self.protocol,
);
self.handle.reject_request(request_id);
self.metrics.register_inbound_request_failure(error.to_string().as_ref());
},
}
}
/// Handle received inbound response.
fn on_inbound_response(
	&mut self,
	peer: litep2p::PeerId,
	request_id: RequestId,
	fallback: Option<litep2p::ProtocolName>,
	response: Vec<u8>,
) {
	// Look up the oneshot sender stored when the outbound request was issued.
	let Some(PendingRequest { tx, started, .. }) =
		self.pending_inbound_responses.remove(&request_id)
	else {
		log::warn!(
			target: LOG_TARGET,
			"{:?}: response received for {peer:?} but {request_id:?} doesn't exist",
			self.protocol,
		);
		return
	};

	log::trace!(
		target: LOG_TARGET,
		"{:?}: response received for {peer:?} ({request_id:?}), response size {:?}",
		self.protocol,
		response.len(),
	);

	// If the response arrived over a fallback protocol, report that protocol
	// name to the caller instead of the main one.
	let protocol = fallback.map_or_else(|| self.protocol.clone(), Into::into);
	let _ = tx.send(Ok((response, protocol)));
	self.metrics.register_outbound_request_success(started.elapsed());
}
/// Handle failed outbound request.
///
/// Maps the `litep2p` error to a [`RequestFailure`] and notifies the caller. If the failure
/// was caused by the remote peer not supporting the protocol and the caller supplied a
/// fallback request, the request is re-sent over the fallback protocol instead of reporting
/// a failure.
fn on_request_failed(
	&mut self,
	peer: litep2p::PeerId,
	request_id: RequestId,
	error: RequestResponseError,
) {
	log::debug!(
		target: LOG_TARGET,
		"{:?}: request failed for {peer:?} ({request_id:?}): {error:?}",
		self.protocol
	);

	// Fetch the response channel and the possible fallback request that were stored
	// when the request was originally sent.
	let Some(PendingRequest { tx, fallback_request, .. }) =
		self.pending_inbound_responses.remove(&request_id)
	else {
		log::warn!(
			target: LOG_TARGET,
			"{:?}: request failed for peer {peer:?} but {request_id:?} doesn't exist",
			self.protocol,
		);

		return
	};

	// Map the transport-level error to the error type the caller expects.
	// `None` means the caller is not notified at all.
	let error = match error {
		RequestResponseError::NotConnected => Some(RequestFailure::NotConnected),
		RequestResponseError::Rejected | RequestResponseError::Timeout =>
			Some(RequestFailure::Refused),
		RequestResponseError::Canceled => {
			// The request was canceled locally, so there is nobody to notify.
			log::debug!(
				target: LOG_TARGET,
				"{}: request canceled by local node to {peer:?} ({request_id:?})",
				self.protocol,
			);
			None
		},
		RequestResponseError::TooLargePayload => {
			log::warn!(
				target: LOG_TARGET,
				"{}: tried to send too large request to {peer:?} ({request_id:?})",
				self.protocol,
			);
			Some(RequestFailure::Refused)
		},
		RequestResponseError::UnsupportedProtocol => match fallback_request {
			Some((request, protocol)) => match self.request_tx.get(&protocol) {
				Some(sender) => {
					log::debug!(
						target: LOG_TARGET,
						"{}: failed to negotiate protocol with {:?}, try fallback request: ({})",
						self.protocol,
						peer,
						protocol,
					);

					let outbound_request = OutboundRequest::new(
						peer.into(),
						request,
						tx,
						None,
						IfDisconnected::ImmediateError,
					);

					// since remote peer doesn't support the main protocol (`self.protocol`),
					// try to send the request over a fallback protocol by creating a new
					// `OutboundRequest` from the original data, now with the fallback request
					// payload, and send it over to the (fallback) request handler like it was
					// a normal request.
					let _ = sender.unbounded_send(outbound_request);

					// Early return: the caller's `tx` now belongs to the fallback request.
					return;
				},
				None => {
					log::warn!(
						target: LOG_TARGET,
						"{}: fallback request provided but protocol ({}) doesn't exist (peer {:?})",
						self.protocol,
						protocol,
						peer,
					);
					Some(RequestFailure::Refused)
				},
			},
			None => Some(RequestFailure::Refused),
		},
	};

	if let Some(error) = error {
		self.metrics.register_outbound_request_failure(error.to_string().as_ref());
		let _ = tx.send(Err(error));
	}
}
/// Handle outbound response.
fn on_outbound_response(
	&mut self,
	peer: litep2p::PeerId,
	request_id: RequestId,
	response: OutgoingResponse,
	started: Instant,
) {
	let OutgoingResponse { result, reputation_changes, sent_feedback } = response;

	// Apply any reputation adjustments requested by the protocol implementation.
	for change in reputation_changes {
		log::trace!(target: LOG_TARGET, "{}: report {peer:?}: {change:?}", self.protocol);
		self.peerstore_handle.report_peer(peer.into(), change);
	}

	match result {
		Ok(response) => {
			log::trace!(
				target: LOG_TARGET,
				"{}: send response ({request_id:?}) to {peer:?}, response size {}",
				self.protocol,
				response.len(),
			);

			// Attach the feedback channel if the implementation asked to be
			// notified once the response has been sent.
			if let Some(feedback) = sent_feedback {
				self.handle.send_response_with_feedback(request_id, response, feedback);
			} else {
				self.handle.send_response(request_id, response);
			}
			self.metrics.register_inbound_request_success(started.elapsed());
		},
		Err(()) => {
			// The implementation refused to answer; reject the request towards the peer.
			log::debug!(
				target: LOG_TARGET,
				"{}: response rejected ({request_id:?}) for {peer:?}",
				self.protocol,
			);

			self.handle.reject_request(request_id);
			self.metrics.register_inbound_request_failure("rejected");
		},
	}
}
/// Start running event loop of the request-response protocol.
///
/// The loop multiplexes three event sources and exits once any of them is closed:
/// events from `litep2p`, responses produced by the local request handler, and
/// outbound requests submitted by local protocol implementations.
pub async fn run(mut self) {
	loop {
		tokio::select! {
			// Events from the underlying `litep2p` request-response protocol.
			event = self.handle.next() => match event {
				None => return,
				Some(RequestResponseEvent::RequestReceived {
					peer,
					fallback,
					request_id,
					request,
				}) => self.on_inbound_request(peer, fallback, request_id, request),
				Some(RequestResponseEvent::ResponseReceived { peer, request_id, fallback, response }) => {
					self.on_inbound_response(peer, request_id, fallback, response);
				},
				Some(RequestResponseEvent::RequestFailed { peer, request_id, error }) => {
					self.on_request_failed(peer, request_id, error);
				},
			},
			// Responses produced by the local request handler for inbound requests.
			// The guard prevents polling the collection while it is empty, since an
			// empty `FuturesUnordered` resolves immediately with `None`.
			event = self.pending_outbound_responses.next(), if !self.pending_outbound_responses.is_empty() => match event {
				None => return,
				// `Err(())` means the handler dropped the response channel.
				Some((peer, request_id, Err(()), _)) => {
					log::debug!(target: LOG_TARGET, "{}: reject request ({request_id:?}) from {peer:?}", self.protocol);
					self.handle.reject_request(request_id);
					self.metrics.register_inbound_request_failure("rejected");
				}
				Some((peer, request_id, Ok(response), started)) => {
					self.on_outbound_response(peer, request_id, response, started);
				}
			},
			// Outbound requests submitted by local protocol implementations.
			event = self.request_rx.next() => match event {
				None => return,
				Some(outbound_request) => {
					let OutboundRequest { peer, request, sender, dial_behavior, fallback_request } = outbound_request;

					self.on_send_request(peer, request, fallback_request, sender, dial_behavior).await;
				}
			}
		}
	}
}
}
@@ -0,0 +1,901 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
litep2p::{
peerstore::peerstore_handle_test,
shim::request_response::{OutboundRequest, RequestResponseProtocol},
},
request_responses::{IfDisconnected, IncomingRequest, OutgoingResponse},
ProtocolName, RequestFailure,
};
use futures::{channel::oneshot, StreamExt};
use litep2p::{
config::ConfigBuilder as Litep2pConfigBuilder,
protocol::request_response::{
ConfigBuilder, DialOptions, RequestResponseError, RequestResponseEvent,
RequestResponseHandle,
},
transport::tcp::config::Config as TcpConfig,
Litep2p, Litep2pEvent,
};
use sc_network_types::PeerId;
use sc_utils::mpsc::tracing_unbounded;
use std::{collections::HashMap, sync::Arc, task::Poll};
/// Create `litep2p` for testing.
///
/// Registers a single request-response protocol (`/protocol/1`, max payload 1 KiB)
/// and listens on ephemeral TCP ports on both IPv4 and IPv6.
async fn make_litep2p() -> (Litep2p, RequestResponseHandle) {
	let (config, handle) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
		.with_max_size(1024)
		.build();

	let litep2p_config = Litep2pConfigBuilder::new()
		.with_request_response_protocol(config)
		.with_tcp(TcpConfig {
			listen_addresses: vec![
				"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
				"/ip6/::/tcp/0".parse().unwrap(),
			],
			..Default::default()
		})
		.build();

	(Litep2p::new(litep2p_config).unwrap(), handle)
}
/// Connect two `litep2p` instances together and wait until both sides have
/// observed the established connection.
async fn connect_peers(litep2p1: &mut Litep2p, litep2p2: &mut Litep2p) {
	let address = litep2p2.listen_addresses().next().unwrap().clone();
	litep2p1.dial_address(address).await.unwrap();

	let mut connected1 = false;
	let mut connected2 = false;

	while !(connected1 && connected2) {
		tokio::select! {
			event = litep2p1.next_event() => {
				if let Litep2pEvent::ConnectionEstablished { .. } = event.unwrap() {
					connected1 = true;
				}
			},
			event = litep2p2.next_event() => {
				if let Litep2pEvent::ConnectionEstablished { .. } = event.unwrap() {
					connected2 = true;
				}
			},
		}
	}
}
#[tokio::test]
async fn dial_failure() {
	let (mut litep2p, handle) = make_litep2p().await;
	let (inbound_tx, _inbound_rx) = async_channel::bounded(64);
	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx.clone())]);

	let protocol = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle,
		Arc::new(peerstore_handle_test()),
		Some(inbound_tx),
		outbound_rx,
		senders,
		None,
	);

	tokio::spawn(protocol.run());
	tokio::spawn(async move { while litep2p.next_event().await.is_some() {} });

	// Send a request to a random (unreachable) peer with `TryConnect` and verify
	// that the failed dial is reported back as `RequestFailure::Refused`.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx
		.unbounded_send(OutboundRequest {
			peer: PeerId::random(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: None,
			dial_behavior: IfDisconnected::TryConnect,
		})
		.unwrap();

	assert!(matches!(result_rx.await, Ok(Err(RequestFailure::Refused))));
}
#[tokio::test]
async fn send_request_to_disconnected_peer() {
	let (mut litep2p, handle) = make_litep2p().await;
	let (inbound_tx, _inbound_rx) = async_channel::bounded(64);
	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx.clone())]);

	let protocol = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle,
		Arc::new(peerstore_handle_test()),
		Some(inbound_tx),
		outbound_rx,
		senders,
		None,
	);

	tokio::spawn(protocol.run());
	tokio::spawn(async move { while litep2p.next_event().await.is_some() {} });

	// With `ImmediateError` the request must fail right away with `NotConnected`
	// because the target peer is not connected and no dial is attempted.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx
		.unbounded_send(OutboundRequest {
			peer: PeerId::random(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: None,
			dial_behavior: IfDisconnected::ImmediateError,
		})
		.unwrap();

	assert!(matches!(result_rx.await, Ok(Err(RequestFailure::NotConnected))));
}
#[tokio::test]
async fn send_request_to_disconnected_peer_and_dial() {
	let (mut litep2p1, handle1) = make_litep2p().await;
	let (mut litep2p2, handle2) = make_litep2p().await;
	let peer1 = *litep2p1.local_peer_id();
	let peer2 = *litep2p2.local_peer_id();

	// Make `peer2` dialable for `litep2p1` without actually connecting the two
	// instances up front.
	litep2p1.add_known_address(
		peer2,
		std::iter::once(litep2p2.listen_addresses().next().expect("listen address").clone()),
	);

	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
	let senders1 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx1.clone())]);
	let (inbound_tx1, _inbound_rx1) = async_channel::bounded(64);
	let protocol1 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1,
		Arc::new(peerstore_handle_test()),
		Some(inbound_tx1),
		outbound_rx1,
		senders1,
		None,
	);

	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
	let (inbound_tx2, inbound_rx2) = async_channel::bounded(64);
	let protocol2 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle2,
		Arc::new(peerstore_handle_test()),
		Some(inbound_tx2),
		outbound_rx2,
		senders2,
		None,
	);

	tokio::spawn(protocol1.run());
	tokio::spawn(protocol2.run());
	tokio::spawn(async move { while litep2p1.next_event().await.is_some() {} });
	tokio::spawn(async move { while litep2p2.next_event().await.is_some() {} });

	// `TryConnect` should make `litep2p1` dial the known address of `peer2` and
	// deliver the request once the connection is established.
	let (result_tx, _result_rx) = oneshot::channel();
	outbound_tx1
		.unbounded_send(OutboundRequest {
			peer: peer2.into(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: None,
			dial_behavior: IfDisconnected::TryConnect,
		})
		.unwrap();

	match inbound_rx2.recv().await {
		Ok(IncomingRequest { peer, payload, .. }) => {
			assert_eq!(peer, Into::<PeerId>::into(peer1));
			assert_eq!(payload, vec![1, 2, 3, 4]);
		},
		Err(error) => panic!("unexpected error: {error:?}"),
	}
}
#[tokio::test]
async fn too_many_inbound_requests() {
	let (mut litep2p1, handle1) = make_litep2p().await;
	let (mut litep2p2, mut handle2) = make_litep2p().await;
	let peer1 = *litep2p1.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx)]);
	// Inbound queue can hold only 4 requests, so the 5th must be rejected.
	let (tx, _rx) = async_channel::bounded(4);
	let protocol = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1,
		Arc::new(peerstore_handle_test()),
		Some(tx),
		outbound_rx,
		senders,
		None,
	);

	tokio::spawn(protocol.run());
	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });

	// send 5 requests and verify that one of the requests will fail
	for _ in 0..5 {
		handle2
			.send_request(peer1, vec![1, 2, 3, 4], DialOptions::Reject)
			.await
			.unwrap();
	}

	// verify that one of the requests is rejected
	match handle2.next().await {
		Some(RequestResponseEvent::RequestFailed { peer, error, .. }) => {
			assert_eq!(peer, peer1);
			assert_eq!(error, RequestResponseError::Rejected);
		},
		// Fixed typo: was "inavlid event", inconsistent with the other tests.
		event => panic!("invalid event: {event:?}"),
	}

	// verify that no other events are read from the handle
	futures::future::poll_fn(|cx| match handle2.poll_next_unpin(cx) {
		Poll::Pending => Poll::Ready(()),
		event => panic!("invalid event: {event:?}"),
	})
	.await;
}
#[tokio::test]
async fn feedback_works() {
	let (mut litep2p1, handle1) = make_litep2p().await;
	let (mut litep2p2, mut handle2) = make_litep2p().await;
	let peer1 = *litep2p1.local_peer_id();
	let peer2 = *litep2p2.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	let (outbound_tx, outbound_rx) = tracing_unbounded("outbound-request", 1000);
	let senders = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx)]);
	let (inbound_tx, inbound_rx) = async_channel::bounded(4);
	let protocol = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1,
		Arc::new(peerstore_handle_test()),
		Some(inbound_tx),
		outbound_rx,
		senders,
		None,
	);

	tokio::spawn(protocol.run());
	tokio::spawn(async move { while litep2p1.next_event().await.is_some() {} });
	tokio::spawn(async move { while litep2p2.next_event().await.is_some() {} });

	let request_id = handle2
		.send_request(peer1, vec![1, 2, 3, 4], DialOptions::Reject)
		.await
		.unwrap();

	// Answer the inbound request with a response that carries a feedback channel.
	let feedback_rx = match inbound_rx.recv().await {
		Ok(IncomingRequest { peer, payload, pending_response }) => {
			assert_eq!(peer, peer2.into());
			assert_eq!(payload, vec![1, 2, 3, 4]);

			let (feedback_tx, feedback_rx) = oneshot::channel();
			pending_response
				.send(OutgoingResponse {
					result: Ok(vec![5, 6, 7, 8]),
					reputation_changes: Vec::new(),
					sent_feedback: Some(feedback_tx),
				})
				.unwrap();
			feedback_rx
		},
		event => panic!("invalid event: {event:?}"),
	};

	// Once the remote has received the response, the feedback channel must fire.
	match handle2.next().await {
		Some(RequestResponseEvent::ResponseReceived {
			peer,
			request_id: received_id,
			response,
			..
		}) => {
			assert_eq!(peer, peer1);
			assert_eq!(request_id, received_id);
			assert_eq!(response, vec![5, 6, 7, 8]);
			assert!(feedback_rx.await.is_ok());
		},
		event => panic!("invalid event: {event:?}"),
	}
}
// Both peers support the main protocol (`/protocol/2`): the request must go
// through over the main protocol and the fallback payload stays unused.
#[tokio::test]
async fn fallback_request_compatible_peers() {
	// `litep2p1` supports both the new and the old protocol
	let (mut litep2p1, handle1_1, handle1_2) = {
		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
			.with_max_size(1024)
			.build();
		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
			.with_max_size(1024)
			.build();

		(
			Litep2p::new(
				Litep2pConfigBuilder::new()
					.with_request_response_protocol(config1)
					.with_request_response_protocol(config2)
					.with_tcp(TcpConfig {
						listen_addresses: vec![
							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
							"/ip6/::/tcp/0".parse().unwrap(),
						],
						..Default::default()
					})
					.build(),
			)
			.unwrap(),
			handle1,
			handle2,
		)
	};

	// `litep2p2` supports only the new protocol
	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
		.with_max_size(1024)
		.build();
	let mut litep2p2 = Litep2p::new(
		Litep2pConfigBuilder::new()
			.with_request_response_protocol(config2)
			.with_tcp(TcpConfig {
				listen_addresses: vec![
					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
					"/ip6/::/tcp/0".parse().unwrap(),
				],
				..Default::default()
			})
			.build(),
	)
	.unwrap();

	let peer1 = *litep2p1.local_peer_id();
	let peer2 = *litep2p2.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	// `senders1` is shared by both of `litep2p1`'s protocol handlers so the main
	// protocol can re-route a request to the fallback handler if negotiation fails.
	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);

	let senders1 = HashMap::from_iter([
		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
	]);

	let (tx1, _rx1) = async_channel::bounded(4);
	let protocol1 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/2"),
		handle1_1,
		Arc::new(peerstore_handle_test()),
		Some(tx1),
		outbound_rx1,
		senders1.clone(),
		None,
	);

	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
	let protocol_fallback = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1_2,
		Arc::new(peerstore_handle_test()),
		Some(tx_fallback),
		outbound_rx_fallback,
		senders1,
		None,
	);

	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/2"), outbound_tx2)]);
	let (tx2, rx2) = async_channel::bounded(4);
	let protocol2 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/2"),
		handle2,
		Arc::new(peerstore_handle_test()),
		Some(tx2),
		outbound_rx2,
		senders2,
		None,
	);

	tokio::spawn(protocol1.run());
	tokio::spawn(protocol2.run());
	tokio::spawn(protocol_fallback.run());
	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });

	// Send the request over the main protocol, with a fallback payload attached.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx1
		.unbounded_send(OutboundRequest {
			peer: peer2.into(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: Some((vec![1, 3, 3, 7], ProtocolName::from("/protocol/1"))),
			dial_behavior: IfDisconnected::ImmediateError,
		})
		.unwrap();

	// The remote receives the *main* payload, not the fallback one.
	match rx2.recv().await {
		Ok(IncomingRequest { peer, payload, pending_response }) => {
			assert_eq!(peer, peer1.into());
			assert_eq!(payload, vec![1, 2, 3, 4]);

			pending_response
				.send(OutgoingResponse {
					result: Ok(vec![5, 6, 7, 8]),
					reputation_changes: Vec::new(),
					sent_feedback: None,
				})
				.unwrap();
		},
		event => panic!("invalid event: {event:?}"),
	}

	// The response is attributed to the main protocol name.
	match result_rx.await {
		Ok(Ok((response, protocol))) => {
			assert_eq!(response, vec![5, 6, 7, 8]);
			assert_eq!(protocol, ProtocolName::from("/protocol/2"));
		},
		event => panic!("invalid event: {event:?}"),
	}
}
// The remote peer supports only the old protocol: protocol negotiation for the
// main protocol fails and the request must be re-sent over the fallback protocol
// with the fallback payload.
#[tokio::test]
async fn fallback_request_old_peer_receives() {
	sp_tracing::try_init_simple();

	// `litep2p1` supports both the new and the old protocol
	let (mut litep2p1, handle1_1, handle1_2) = {
		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
			.with_max_size(1024)
			.build();
		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
			.with_max_size(1024)
			.build();

		(
			Litep2p::new(
				Litep2pConfigBuilder::new()
					.with_request_response_protocol(config1)
					.with_request_response_protocol(config2)
					.with_tcp(TcpConfig {
						listen_addresses: vec![
							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
							"/ip6/::/tcp/0".parse().unwrap(),
						],
						..Default::default()
					})
					.build(),
			)
			.unwrap(),
			handle1,
			handle2,
		)
	};

	// `litep2p2` supports only the old protocol
	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
		.with_max_size(1024)
		.build();
	let mut litep2p2 = Litep2p::new(
		Litep2pConfigBuilder::new()
			.with_request_response_protocol(config2)
			.with_tcp(TcpConfig {
				listen_addresses: vec![
					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
					"/ip6/::/tcp/0".parse().unwrap(),
				],
				..Default::default()
			})
			.build(),
	)
	.unwrap();

	let peer1 = *litep2p1.local_peer_id();
	let peer2 = *litep2p2.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	// Shared sender map lets the main-protocol handler forward a failed request
	// to the fallback-protocol handler.
	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);

	let senders1 = HashMap::from_iter([
		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
	]);

	let (tx1, _rx1) = async_channel::bounded(4);
	let protocol1 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/2"),
		handle1_1,
		Arc::new(peerstore_handle_test()),
		Some(tx1),
		outbound_rx1,
		senders1.clone(),
		None,
	);

	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
	let protocol_fallback = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1_2,
		Arc::new(peerstore_handle_test()),
		Some(tx_fallback),
		outbound_rx_fallback,
		senders1,
		None,
	);

	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
	let (tx2, rx2) = async_channel::bounded(4);
	let protocol2 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle2,
		Arc::new(peerstore_handle_test()),
		Some(tx2),
		outbound_rx2,
		senders2,
		None,
	);

	tokio::spawn(protocol1.run());
	tokio::spawn(protocol2.run());
	tokio::spawn(protocol_fallback.run());
	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });

	// Send the request over the main protocol with a fallback payload attached.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx1
		.unbounded_send(OutboundRequest {
			peer: peer2.into(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: Some((vec![1, 3, 3, 7], ProtocolName::from("/protocol/1"))),
			dial_behavior: IfDisconnected::ImmediateError,
		})
		.unwrap();

	// The old peer receives the *fallback* payload, not the main one.
	match rx2.recv().await {
		Ok(IncomingRequest { peer, payload, pending_response }) => {
			assert_eq!(peer, peer1.into());
			assert_eq!(payload, vec![1, 3, 3, 7]);

			pending_response
				.send(OutgoingResponse {
					result: Ok(vec![1, 3, 3, 8]),
					reputation_changes: Vec::new(),
					sent_feedback: None,
				})
				.unwrap();
		},
		event => panic!("invalid event: {event:?}"),
	}

	// The response is attributed to the fallback protocol name.
	match result_rx.await {
		Ok(Ok((response, protocol))) => {
			assert_eq!(response, vec![1, 3, 3, 8]);
			assert_eq!(protocol, ProtocolName::from("/protocol/1"));
		},
		event => panic!("invalid event: {event:?}"),
	}
}
// The old peer initiates the request over the old protocol: the modern peer must
// receive it through its fallback-protocol handler and answer over that protocol.
#[tokio::test]
async fn fallback_request_old_peer_sends() {
	sp_tracing::try_init_simple();

	// `litep2p1` supports both the new and the old protocol
	let (mut litep2p1, handle1_1, handle1_2) = {
		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
			.with_max_size(1024)
			.build();
		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
			.with_max_size(1024)
			.build();

		(
			Litep2p::new(
				Litep2pConfigBuilder::new()
					.with_request_response_protocol(config1)
					.with_request_response_protocol(config2)
					.with_tcp(TcpConfig {
						listen_addresses: vec![
							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
							"/ip6/::/tcp/0".parse().unwrap(),
						],
						..Default::default()
					})
					.build(),
			)
			.unwrap(),
			handle1,
			handle2,
		)
	};

	// `litep2p2` supports only the old protocol
	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
		.with_max_size(1024)
		.build();
	let mut litep2p2 = Litep2p::new(
		Litep2pConfigBuilder::new()
			.with_request_response_protocol(config2)
			.with_tcp(TcpConfig {
				listen_addresses: vec![
					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
					"/ip6/::/tcp/0".parse().unwrap(),
				],
				..Default::default()
			})
			.build(),
	)
	.unwrap();

	let peer1 = *litep2p1.local_peer_id();
	let peer2 = *litep2p2.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);

	let senders1 = HashMap::from_iter([
		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
	]);

	let (tx1, _rx1) = async_channel::bounded(4);
	let protocol1 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/2"),
		handle1_1,
		Arc::new(peerstore_handle_test()),
		Some(tx1),
		outbound_rx1,
		senders1.clone(),
		None,
	);

	// `rx_fallback` is where the inbound request is expected to surface.
	let (tx_fallback, rx_fallback) = async_channel::bounded(4);
	let protocol_fallback = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1_2,
		Arc::new(peerstore_handle_test()),
		Some(tx_fallback),
		outbound_rx_fallback,
		senders1,
		None,
	);

	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2.clone())]);
	let (tx2, _rx2) = async_channel::bounded(4);
	let protocol2 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle2,
		Arc::new(peerstore_handle_test()),
		Some(tx2),
		outbound_rx2,
		senders2,
		None,
	);

	tokio::spawn(protocol1.run());
	tokio::spawn(protocol2.run());
	tokio::spawn(protocol_fallback.run());
	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });

	// The old peer sends the request over the old protocol.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx2
		.unbounded_send(OutboundRequest {
			peer: peer1.into(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: None,
			dial_behavior: IfDisconnected::ImmediateError,
		})
		.unwrap();

	// The modern peer receives it through the fallback (old) protocol handler.
	match rx_fallback.recv().await {
		Ok(IncomingRequest { peer, payload, pending_response }) => {
			assert_eq!(peer, peer2.into());
			assert_eq!(payload, vec![1, 2, 3, 4]);

			pending_response
				.send(OutgoingResponse {
					result: Ok(vec![1, 3, 3, 8]),
					reputation_changes: Vec::new(),
					sent_feedback: None,
				})
				.unwrap();
		},
		event => panic!("invalid event: {event:?}"),
	}

	match result_rx.await {
		Ok(Ok((response, protocol))) => {
			assert_eq!(response, vec![1, 3, 3, 8]);
			assert_eq!(protocol, ProtocolName::from("/protocol/1"));
		},
		event => panic!("invalid event: {event:?}"),
	}
}
// The remote peer supports only the old protocol and the caller supplies no
// fallback request: the request must fail with `RequestFailure::Refused`.
#[tokio::test]
async fn old_protocol_supported_but_no_fallback_provided() {
	sp_tracing::try_init_simple();

	// `litep2p1` supports both the new and the old protocol
	let (mut litep2p1, handle1_1, handle1_2) = {
		let (config1, handle1) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/2"))
			.with_max_size(1024)
			.build();
		let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
			.with_max_size(1024)
			.build();

		(
			Litep2p::new(
				Litep2pConfigBuilder::new()
					.with_request_response_protocol(config1)
					.with_request_response_protocol(config2)
					.with_tcp(TcpConfig {
						listen_addresses: vec![
							"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
							"/ip6/::/tcp/0".parse().unwrap(),
						],
						..Default::default()
					})
					.build(),
			)
			.unwrap(),
			handle1,
			handle2,
		)
	};

	// `litep2p2` supports only the old protocol
	let (config2, handle2) = ConfigBuilder::new(litep2p::ProtocolName::from("/protocol/1"))
		.with_max_size(1024)
		.build();
	let mut litep2p2 = Litep2p::new(
		Litep2pConfigBuilder::new()
			.with_request_response_protocol(config2)
			.with_tcp(TcpConfig {
				listen_addresses: vec![
					"/ip4/0.0.0.0/tcp/0".parse().unwrap(),
					"/ip6/::/tcp/0".parse().unwrap(),
				],
				..Default::default()
			})
			.build(),
	)
	.unwrap();

	let peer2 = *litep2p2.local_peer_id();

	connect_peers(&mut litep2p1, &mut litep2p2).await;

	let (outbound_tx1, outbound_rx1) = tracing_unbounded("outbound-request", 1000);
	let (outbound_tx_fallback, outbound_rx_fallback) = tracing_unbounded("outbound-request", 1000);

	let senders1 = HashMap::from_iter([
		(ProtocolName::from("/protocol/2"), outbound_tx1.clone()),
		(ProtocolName::from("/protocol/1"), outbound_tx_fallback),
	]);

	let (tx1, _rx1) = async_channel::bounded(4);
	let protocol1 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/2"),
		handle1_1,
		Arc::new(peerstore_handle_test()),
		Some(tx1),
		outbound_rx1,
		senders1.clone(),
		None,
	);

	let (tx_fallback, _rx_fallback) = async_channel::bounded(4);
	let protocol_fallback = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle1_2,
		Arc::new(peerstore_handle_test()),
		Some(tx_fallback),
		outbound_rx_fallback,
		senders1,
		None,
	);

	let (outbound_tx2, outbound_rx2) = tracing_unbounded("outbound-request", 1000);
	let senders2 = HashMap::from_iter([(ProtocolName::from("/protocol/1"), outbound_tx2)]);
	let (tx2, _rx2) = async_channel::bounded(4);
	let protocol2 = RequestResponseProtocol::new(
		ProtocolName::from("/protocol/1"),
		handle2,
		Arc::new(peerstore_handle_test()),
		Some(tx2),
		outbound_rx2,
		senders2,
		None,
	);

	tokio::spawn(protocol1.run());
	tokio::spawn(protocol2.run());
	tokio::spawn(protocol_fallback.run());
	tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
	tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });

	// Send over the main protocol with no fallback: negotiation failure must be
	// reported straight back to the caller.
	let (result_tx, result_rx) = oneshot::channel();
	outbound_tx1
		.unbounded_send(OutboundRequest {
			peer: peer2.into(),
			request: vec![1, 2, 3, 4],
			sender: result_tx,
			fallback_request: None,
			dial_behavior: IfDisconnected::ImmediateError,
		})
		.unwrap();

	match result_rx.await {
		Ok(Err(error)) => {
			assert!(std::matches!(error, RequestFailure::Refused));
		},
		event => panic!("invalid event: {event:?}"),
	}
}
+21 -8
View File
@@ -18,10 +18,15 @@
//! Mocked components for tests.
use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange};
use libp2p::PeerId;
use crate::{
peer_store::{PeerStoreProvider, ProtocolHandle},
ReputationChange,
};
use sc_network_common::role::ObservedRole;
use std::collections::HashSet;
use sc_network_types::PeerId;
use std::{collections::HashSet, sync::Arc};
/// No-op `PeerStore`.
#[derive(Debug)]
@@ -33,15 +38,15 @@ impl PeerStoreProvider for MockPeerStore {
false
}
fn register_protocol(&self, _protocol_handle: ProtocolHandle) {
fn register_protocol(&self, _protocol_handle: Arc<dyn ProtocolHandle>) {
// Make sure not to fail.
}
fn report_disconnect(&mut self, _peer_id: PeerId) {
fn report_disconnect(&self, _peer_id: PeerId) {
// Make sure not to fail.
}
fn report_peer(&mut self, _peer_id: PeerId, _change: ReputationChange) {
fn report_peer(&self, _peer_id: PeerId, _change: ReputationChange) {
// Make sure not to fail.
}
@@ -54,11 +59,19 @@ impl PeerStoreProvider for MockPeerStore {
None
}
fn set_peer_role(&mut self, _peer_id: &PeerId, _role: ObservedRole) {
fn set_peer_role(&self, _peer_id: &PeerId, _role: ObservedRole) {
unimplemented!();
}
fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec<PeerId> {
fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<PeerId>) -> Vec<PeerId> {
unimplemented!()
}
fn num_known_peers(&self) -> usize {
0usize
}
fn add_known_peer(&self, _peer_id: PeerId) {
unimplemented!()
}
}
+75 -41
View File
@@ -19,7 +19,8 @@
//! [`PeerStore`] manages peer reputations and provides connection candidates to
//! [`crate::protocol_controller::ProtocolController`].
use libp2p::PeerId;
use crate::{service::traits::PeerStore as PeerStoreT, PeerId};
use log::trace;
use parking_lot::Mutex;
use partial_sort::PartialSort;
@@ -33,8 +34,6 @@ use std::{
};
use wasm_timer::Delay;
use crate::protocol_controller::ProtocolHandle;
/// Log target for this file.
pub const LOG_TARGET: &str = "peerset";
@@ -52,31 +51,50 @@ const INVERSE_DECREMENT: i32 = 50;
/// remove it, once the reputation value reaches 0.
const FORGET_AFTER: Duration = Duration::from_secs(3600);
/// Trait describing the required functionality from a `Peerset` handle.
pub trait ProtocolHandle: Debug + Send + Sync {
/// Disconnect peer.
fn disconnect_peer(&self, peer_id: sc_network_types::PeerId);
}
/// Trait providing peer reputation management and connection candidates.
pub trait PeerStoreProvider: Debug + Send {
pub trait PeerStoreProvider: Debug + Send + Sync {
/// Check whether the peer is banned.
fn is_banned(&self, peer_id: &PeerId) -> bool;
fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool;
/// Register a protocol handle to disconnect peers whose reputation drops below the threshold.
fn register_protocol(&self, protocol_handle: ProtocolHandle);
fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>);
/// Report peer disconnection for reputation adjustment.
fn report_disconnect(&mut self, peer_id: PeerId);
fn report_disconnect(&self, peer_id: sc_network_types::PeerId);
/// Adjust peer reputation.
fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange);
/// Set peer role.
fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole);
/// Get peer reputation.
fn peer_reputation(&self, peer_id: &PeerId) -> i32;
fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32;
/// Get peer role, if available.
fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole>;
/// Get candidates with highest reputations for initiating outgoing connections.
fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId>;
fn outgoing_candidates(
&self,
count: usize,
ignored: HashSet<sc_network_types::PeerId>,
) -> Vec<sc_network_types::PeerId>;
/// Get the number of known peers.
///
/// This number might not include some connected peers in rare cases when their reputation
/// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
fn num_known_peers(&self) -> usize;
/// Add known peer.
fn add_known_peer(&self, peer_id: sc_network_types::PeerId);
}
/// Actual implementation of peer reputations and connection candidates provider.
@@ -86,51 +104,56 @@ pub struct PeerStoreHandle {
}
impl PeerStoreProvider for PeerStoreHandle {
fn is_banned(&self, peer_id: &PeerId) -> bool {
self.inner.lock().is_banned(peer_id)
fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool {
self.inner.lock().is_banned(&peer_id.into())
}
fn register_protocol(&self, protocol_handle: ProtocolHandle) {
fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandle>) {
self.inner.lock().register_protocol(protocol_handle);
}
fn report_disconnect(&mut self, peer_id: PeerId) {
self.inner.lock().report_disconnect(peer_id)
fn report_disconnect(&self, peer_id: sc_network_types::PeerId) {
let mut inner = self.inner.lock();
inner.report_disconnect(peer_id.into())
}
fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange) {
self.inner.lock().report_peer(peer_id, change)
fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange) {
let mut inner = self.inner.lock();
inner.report_peer(peer_id.into(), change)
}
fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) {
self.inner.lock().set_peer_role(peer_id, role)
fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole) {
let mut inner = self.inner.lock();
inner.set_peer_role(&peer_id.into(), role)
}
fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
self.inner.lock().peer_reputation(peer_id)
fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32 {
self.inner.lock().peer_reputation(&peer_id.into())
}
fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole> {
self.inner.lock().peer_role(peer_id)
fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole> {
self.inner.lock().peer_role(&peer_id.into())
}
fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
self.inner.lock().outgoing_candidates(count, ignored)
fn outgoing_candidates(
&self,
count: usize,
ignored: HashSet<sc_network_types::PeerId>,
) -> Vec<sc_network_types::PeerId> {
self.inner
.lock()
.outgoing_candidates(count, ignored.iter().map(|peer_id| (*peer_id).into()).collect())
.iter()
.map(|peer_id| peer_id.into())
.collect()
}
}
impl PeerStoreHandle {
/// Get the number of known peers.
///
/// This number might not include some connected peers in rare cases when their reputation
/// was not updated for one hour, because their entries in [`PeerStore`] were dropped.
pub fn num_known_peers(&self) -> usize {
fn num_known_peers(&self) -> usize {
self.inner.lock().peers.len()
}
/// Add known peer.
pub fn add_known_peer(&mut self, peer_id: PeerId) {
self.inner.lock().add_known_peer(peer_id);
fn add_known_peer(&self, peer_id: sc_network_types::PeerId) {
self.inner.lock().add_known_peer(peer_id.into());
}
}
@@ -210,7 +233,7 @@ impl PeerInfo {
#[derive(Debug)]
struct PeerStoreInner {
peers: HashMap<PeerId, PeerInfo>,
protocols: Vec<ProtocolHandle>,
protocols: Vec<Arc<dyn ProtocolHandle>>,
}
impl PeerStoreInner {
@@ -218,7 +241,7 @@ impl PeerStoreInner {
self.peers.get(peer_id).map_or(false, |info| info.is_banned())
}
fn register_protocol(&mut self, protocol_handle: ProtocolHandle) {
fn register_protocol(&mut self, protocol_handle: Arc<dyn ProtocolHandle>) {
self.protocols.push(protocol_handle);
}
@@ -240,7 +263,7 @@ impl PeerStoreInner {
peer_info.add_reputation(change.value);
if peer_info.reputation < BANNED_THRESHOLD {
self.protocols.iter().for_each(|handle| handle.disconnect_peer(peer_id));
self.protocols.iter().for_each(|handle| handle.disconnect_peer(peer_id.into()));
log::warn!(
target: LOG_TARGET,
@@ -283,7 +306,7 @@ impl PeerStoreInner {
self.peers.get(peer_id).map_or(None, |info| info.role)
}
fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec<PeerId> {
fn outgoing_candidates(&self, count: usize, ignored: HashSet<PeerId>) -> Vec<PeerId> {
let mut candidates = self
.peers
.iter()
@@ -378,6 +401,17 @@ impl PeerStore {
}
}
#[async_trait::async_trait]
impl PeerStoreT for PeerStore {
fn handle(&self) -> Arc<dyn PeerStoreProvider> {
Arc::new(self.handle())
}
async fn run(self) {
self.run().await;
}
}
#[cfg(test)]
mod tests {
use super::PeerInfo;
+10 -12
View File
@@ -18,9 +18,9 @@
use crate::{
config, error,
peer_store::{PeerStoreHandle, PeerStoreProvider},
peer_store::PeerStoreProvider,
protocol_controller::{self, SetId},
service::traits::Direction,
service::{metrics::NotificationMetrics, traits::Direction},
types::ProtocolName,
};
@@ -36,14 +36,13 @@ use libp2p::{
use log::warn;
use codec::DecodeAll;
use prometheus_endpoint::Registry;
use sc_network_common::role::Roles;
use sc_utils::mpsc::TracingUnboundedReceiver;
use sp_runtime::traits::Block as BlockT;
use std::{collections::HashSet, iter, task::Poll};
use std::{collections::HashSet, iter, sync::Arc, task::Poll};
use notifications::{metrics, Notifications, NotificationsOut};
use notifications::{Notifications, NotificationsOut};
pub(crate) use notifications::ProtocolHandle;
@@ -69,7 +68,7 @@ pub struct Protocol<B: BlockT> {
/// List of notifications protocols that have been registered.
notification_protocols: Vec<ProtocolName>,
/// Handle to `PeerStore`.
peer_store_handle: PeerStoreHandle,
peer_store_handle: Arc<dyn PeerStoreProvider>,
/// Streams for peers whose handshake couldn't be determined.
bad_handshake_streams: HashSet<PeerId>,
sync_handle: ProtocolHandle,
@@ -80,10 +79,10 @@ impl<B: BlockT> Protocol<B> {
/// Create a new instance.
pub(crate) fn new(
roles: Roles,
registry: &Option<Registry>,
notification_metrics: NotificationMetrics,
notification_protocols: Vec<config::NonDefaultSetConfig>,
block_announces_protocol: config::NonDefaultSetConfig,
peer_store_handle: PeerStoreHandle,
peer_store_handle: Arc<dyn PeerStoreProvider>,
protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
from_protocol_controllers: TracingUnboundedReceiver<protocol_controller::Message>,
) -> error::Result<(Self, Vec<ProtocolHandle>)> {
@@ -122,16 +121,15 @@ impl<B: BlockT> Protocol<B> {
}))
.unzip();
let metrics = registry.as_ref().and_then(|registry| metrics::register(&registry).ok());
handles.iter_mut().for_each(|handle| {
handle.set_metrics(metrics.clone());
handle.set_metrics(notification_metrics.clone());
});
(
Notifications::new(
protocol_controller_handles,
from_protocol_controllers,
metrics,
notification_metrics,
protocol_configs.into_iter(),
),
installed_protocols,
@@ -179,7 +177,7 @@ impl<B: BlockT> Protocol<B> {
fn role_available(&self, peer_id: &PeerId, handshake: &Vec<u8>) -> bool {
match Roles::decode_all(&mut &handshake[..]) {
Ok(_) => true,
Err(_) => self.peer_store_handle.peer_role(&peer_id).is_some(),
Err(_) => self.peer_store_handle.peer_role(&((*peer_id).into())).is_some(),
}
}
}
@@ -25,7 +25,7 @@ pub use self::{
service::{notification_service, ProtocolHandlePair},
};
pub(crate) use self::service::{metrics, ProtocolHandle};
pub(crate) use self::service::ProtocolHandle;
mod behaviour;
mod handler;
@@ -19,10 +19,13 @@
use crate::{
protocol::notifications::{
handler::{self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut},
service::{metrics, NotificationCommand, ProtocolHandle, ValidationCallResult},
service::{NotificationCommand, ProtocolHandle, ValidationCallResult},
},
protocol_controller::{self, IncomingIndex, Message, SetId},
service::traits::{Direction, ValidationResult},
service::{
metrics::NotificationMetrics,
traits::{Direction, ValidationResult},
},
types::ProtocolName,
};
@@ -167,7 +170,7 @@ pub struct Notifications {
pending_inbound_validations: FuturesUnordered<PendingInboundValidation>,
/// Metrics for notifications.
metrics: Option<metrics::Metrics>,
metrics: NotificationMetrics,
}
/// Configuration for a notifications protocol.
@@ -404,7 +407,7 @@ impl Notifications {
pub(crate) fn new(
protocol_controller_handles: Vec<protocol_controller::ProtocolHandle>,
from_protocol_controllers: TracingUnboundedReceiver<Message>,
metrics: Option<metrics::Metrics>,
metrics: NotificationMetrics,
notif_protocols: impl Iterator<
Item = (
ProtocolConfig,
@@ -1230,7 +1233,7 @@ impl NetworkBehaviour for Notifications {
send_back_addr: remote_addr.clone(),
},
self.notif_protocols.clone(),
self.metrics.clone(),
Some(self.metrics.clone()),
))
}
@@ -1245,7 +1248,7 @@ impl NetworkBehaviour for Notifications {
peer,
ConnectedPoint::Dialer { address: addr.clone(), role_override },
self.notif_protocols.clone(),
self.metrics.clone(),
Some(self.metrics.clone()),
))
}
@@ -2442,7 +2445,7 @@ mod tests {
reserved_only: false,
},
to_notifications,
Box::new(MockPeerStore {}),
Arc::new(MockPeerStore {}),
);
let (notif_handle, command_stream) = protocol_handle_pair.split();
@@ -2450,7 +2453,7 @@ mod tests {
Notifications::new(
vec![handle],
from_controller,
None,
NotificationMetrics::new(None),
iter::once((
ProtocolConfig {
name: "/foo".into(),
@@ -58,13 +58,11 @@
//! [`NotifsHandlerIn::Open`] has gotten an answer.
use crate::{
protocol::notifications::{
service::metrics,
upgrade::{
NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
UpgradeCollec,
},
protocol::notifications::upgrade::{
NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream,
UpgradeCollec,
},
service::metrics::NotificationMetrics,
types::ProtocolName,
};
@@ -131,7 +129,7 @@ pub struct NotifsHandler {
>,
/// Metrics.
metrics: Option<Arc<metrics::Metrics>>,
metrics: Option<Arc<NotificationMetrics>>,
}
impl NotifsHandler {
@@ -140,7 +138,7 @@ impl NotifsHandler {
peer_id: PeerId,
endpoint: ConnectedPoint,
protocols: Vec<ProtocolConfig>,
metrics: Option<metrics::Metrics>,
metrics: Option<NotificationMetrics>,
) -> Self {
Self {
protocols: protocols
@@ -345,7 +343,7 @@ pub enum NotifsHandlerOut {
#[derive(Debug, Clone)]
pub struct NotificationsSink {
inner: Arc<NotificationsSinkInner>,
metrics: Option<Arc<metrics::Metrics>>,
metrics: Option<Arc<NotificationMetrics>>,
}
impl NotificationsSink {
@@ -372,7 +370,7 @@ impl NotificationsSink {
}
/// Get reference to metrics.
pub fn metrics(&self) -> &Option<Arc<metrics::Metrics>> {
pub fn metrics(&self) -> &Option<Arc<NotificationMetrics>> {
&self.metrics
}
}
@@ -16,115 +16,40 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::types::ProtocolName;
use prometheus_endpoint::{
self as prometheus, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry,
U64,
};
use std::sync::Arc;
/// Notification metrics.
#[derive(Debug, Clone)]
pub struct Metrics {
// Total number of opened substreams.
pub notifications_streams_opened_total: CounterVec<U64>,
/// Total number of closed substreams.
pub notifications_streams_closed_total: CounterVec<U64>,
/// In/outbound notification sizes.
pub notifications_sizes: HistogramVec,
}
impl Metrics {
fn register(registry: &Registry) -> Result<Self, PrometheusError> {
Ok(Self {
notifications_sizes: prometheus::register(
HistogramVec::new(
HistogramOpts {
common_opts: Opts::new(
"substrate_sub_libp2p_notifications_sizes",
"Sizes of the notifications send to and received from all nodes",
),
buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
.expect("parameters are always valid values; qed"),
},
&["direction", "protocol"],
)?,
registry,
)?,
notifications_streams_closed_total: prometheus::register(
CounterVec::new(
Opts::new(
"substrate_sub_libp2p_notifications_streams_closed_total",
"Total number of notification substreams that have been closed",
),
&["protocol"],
)?,
registry,
)?,
notifications_streams_opened_total: prometheus::register(
CounterVec::new(
Opts::new(
"substrate_sub_libp2p_notifications_streams_opened_total",
"Total number of notification substreams that have been opened",
),
&["protocol"],
)?,
registry,
)?,
})
}
}
/// Register metrics.
pub fn register(registry: &Registry) -> Result<Metrics, PrometheusError> {
Metrics::register(registry)
}
use crate::{service::metrics::NotificationMetrics, types::ProtocolName};
/// Register opened substream to Prometheus.
pub fn register_substream_opened(metrics: &Option<Metrics>, protocol: &ProtocolName) {
pub fn register_substream_opened(metrics: &Option<NotificationMetrics>, protocol: &ProtocolName) {
if let Some(metrics) = metrics {
metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc();
metrics.register_substream_opened(&protocol);
}
}
/// Register closed substream to Prometheus.
pub fn register_substream_closed(metrics: &Option<Metrics>, protocol: &ProtocolName) {
pub fn register_substream_closed(metrics: &Option<NotificationMetrics>, protocol: &ProtocolName) {
if let Some(metrics) = metrics {
metrics
.notifications_streams_closed_total
.with_label_values(&[&protocol[..]])
.inc();
metrics.register_substream_closed(&protocol);
}
}
/// Register sent notification to Prometheus.
pub fn register_notification_sent(
metrics: &Option<Arc<Metrics>>,
metrics: &Option<std::sync::Arc<NotificationMetrics>>,
protocol: &ProtocolName,
size: usize,
) {
if let Some(metrics) = metrics {
metrics
.notifications_sizes
.with_label_values(&["out", protocol])
.observe(size as f64);
metrics.register_notification_sent(protocol, size);
}
}
/// Register received notification to Prometheus.
pub fn register_notification_received(
metrics: &Option<Metrics>,
metrics: &Option<NotificationMetrics>,
protocol: &ProtocolName,
size: usize,
) {
if let Some(metrics) = metrics {
metrics
.notifications_sizes
.with_label_values(&["in", protocol])
.observe(size as f64);
metrics.register_notification_received(protocol, size);
}
}
@@ -21,17 +21,20 @@
use crate::{
error,
protocol::notifications::handler::NotificationsSink,
service::traits::{
Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
service::{
metrics::NotificationMetrics,
traits::{
Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
},
},
types::ProtocolName,
PeerId,
};
use futures::{
stream::{FuturesUnordered, Stream},
StreamExt,
};
use libp2p::PeerId;
use parking_lot::Mutex;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
@@ -66,7 +69,7 @@ impl MessageSink for NotificationSink {
fn send_sync_notification(&self, notification: Vec<u8>) {
let sink = self.lock();
metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification.len());
metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification.len());
sink.0.send_sync_notification(notification);
}
@@ -87,7 +90,7 @@ impl MessageSink for NotificationSink {
.map_err(|_| error::Error::ConnectionClosed)?;
permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| {
metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification_len);
metrics::register_notification_sent(sink.0.metrics(), &sink.1, notification_len);
res
})
}
@@ -220,20 +223,20 @@ impl NotificationHandle {
#[async_trait::async_trait]
impl NotificationService for NotificationHandle {
/// Instruct `Notifications` to open a new substream for `peer`.
async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
async fn open_substream(&mut self, _peer: sc_network_types::PeerId) -> Result<(), ()> {
todo!("support for opening substreams not implemented yet");
}
/// Instruct `Notifications` to close substream for `peer`.
async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> {
async fn close_substream(&mut self, _peer: sc_network_types::PeerId) -> Result<(), ()> {
todo!("support for closing substreams not implemented yet, call `NetworkService::disconnect_peer()` instead");
}
/// Send synchronous `notification` to `peer`.
fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>) {
if let Some(info) = self.peers.get(&peer) {
fn send_sync_notification(&mut self, peer: &sc_network_types::PeerId, notification: Vec<u8>) {
if let Some(info) = self.peers.get(&((*peer).into())) {
metrics::register_notification_sent(
&info.sink.metrics(),
info.sink.metrics(),
&self.protocol,
notification.len(),
);
@@ -244,12 +247,16 @@ impl NotificationService for NotificationHandle {
/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
async fn send_async_notification(
&self,
peer: &PeerId,
&mut self,
peer: &sc_network_types::PeerId,
notification: Vec<u8>,
) -> Result<(), error::Error> {
let notification_len = notification.len();
let sink = &self.peers.get(&peer).ok_or_else(|| error::Error::PeerDoesntExist(*peer))?.sink;
let sink = &self
.peers
.get(&peer.into())
.ok_or_else(|| error::Error::PeerDoesntExist((*peer).into()))?
.sink;
sink.reserve_notification()
.await
@@ -258,7 +265,7 @@ impl NotificationService for NotificationHandle {
.map_err(|_| error::Error::ChannelClosed)
.map(|res| {
metrics::register_notification_sent(
&sink.metrics(),
sink.metrics(),
&self.protocol,
notification_len,
);
@@ -288,7 +295,7 @@ impl NotificationService for NotificationHandle {
match self.rx.next().await? {
InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } =>
return Some(NotificationEvent::ValidateInboundSubstream {
peer,
peer: peer.into(),
handshake,
result_tx,
}),
@@ -307,7 +314,7 @@ impl NotificationService for NotificationHandle {
},
);
return Some(NotificationEvent::NotificationStreamOpened {
peer,
peer: peer.into(),
handshake,
direction,
negotiated_fallback,
@@ -315,10 +322,13 @@ impl NotificationService for NotificationHandle {
},
InnerNotificationEvent::NotificationStreamClosed { peer } => {
self.peers.remove(&peer);
return Some(NotificationEvent::NotificationStreamClosed { peer })
return Some(NotificationEvent::NotificationStreamClosed { peer: peer.into() })
},
InnerNotificationEvent::NotificationReceived { peer, notification } =>
return Some(NotificationEvent::NotificationReceived { peer, notification }),
return Some(NotificationEvent::NotificationReceived {
peer: peer.into(),
notification,
}),
InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => {
match self.peers.get_mut(&peer) {
None => log::error!(
@@ -357,8 +367,8 @@ impl NotificationService for NotificationHandle {
}
/// Get message sink of the peer.
fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>> {
match self.peers.get(peer) {
fn message_sink(&self, peer: &sc_network_types::PeerId) -> Option<Box<dyn MessageSink>> {
match self.peers.get(&peer.into()) {
Some(context) => Some(Box::new(context.shared_sink.clone())),
None => None,
}
@@ -417,7 +427,7 @@ pub(crate) struct ProtocolHandle {
delegate_to_peerset: bool,
/// Prometheus metrics.
metrics: Option<metrics::Metrics>,
metrics: Option<NotificationMetrics>,
}
pub(crate) enum ValidationCallResult {
@@ -432,8 +442,8 @@ impl ProtocolHandle {
}
/// Set metrics.
pub fn set_metrics(&mut self, metrics: Option<metrics::Metrics>) {
self.metrics = metrics;
pub fn set_metrics(&mut self, metrics: NotificationMetrics) {
self.metrics = Some(metrics);
}
/// Delegate validation to `Peerset`.
@@ -38,7 +38,7 @@ async fn validate_and_accept_substream() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -66,7 +66,7 @@ async fn substream_opened() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -92,7 +92,7 @@ async fn send_sync_notification() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -112,7 +112,7 @@ async fn send_sync_notification() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -120,7 +120,7 @@ async fn send_sync_notification() {
panic!("invalid event received");
}
notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
assert_eq!(
sync_rx.next().await,
Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] })
@@ -144,7 +144,7 @@ async fn send_async_notification() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -164,7 +164,7 @@ async fn send_async_notification() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -172,7 +172,7 @@ async fn send_async_notification() {
panic!("invalid event received");
}
notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
assert_eq!(
async_rx.next().await,
Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] })
@@ -181,24 +181,24 @@ async fn send_async_notification() {
#[tokio::test]
async fn send_sync_notification_to_non_existent_peer() {
let (proto, notif) = notification_service("/proto/1".into());
let (proto, mut notif) = notification_service("/proto/1".into());
let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
let (_handle, _stream) = proto.split();
let peer = PeerId::random();
// as per the original implementation, the call doesn't fail
notif.send_sync_notification(&peer, vec![1, 3, 3, 7])
notif.send_sync_notification(&peer.into(), vec![1, 3, 3, 7])
}
#[tokio::test]
async fn send_async_notification_to_non_existent_peer() {
let (proto, notif) = notification_service("/proto/1".into());
let (proto, mut notif) = notification_service("/proto/1".into());
let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random());
let (_handle, _stream) = proto.split();
let peer = PeerId::random();
if let Err(error::Error::PeerDoesntExist(peer_id)) =
notif.send_async_notification(&peer, vec![1, 3, 3, 7]).await
notif.send_async_notification(&peer.into(), vec![1, 3, 3, 7]).await
{
assert_eq!(peer, peer_id);
} else {
@@ -223,7 +223,7 @@ async fn receive_notification() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -243,7 +243,7 @@ async fn receive_notification() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -257,7 +257,7 @@ async fn receive_notification() {
if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(notification, vec![1, 3, 3, 8]);
} else {
panic!("invalid event received");
@@ -281,7 +281,7 @@ async fn backpressure_works() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -301,7 +301,7 @@ async fn backpressure_works() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -311,12 +311,15 @@ async fn backpressure_works() {
// fill the message buffer with messages
for i in 0..=ASYNC_NOTIFICATIONS_BUFFER_SIZE {
assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, i as u8]))
.is_ready());
assert!(futures::poll!(
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, i as u8])
)
.is_ready());
}
// try to send one more message and verify that the call blocks
assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_pending());
assert!(futures::poll!(notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]))
.is_pending());
// release one slot from the buffer for new message
assert_eq!(
@@ -325,7 +328,9 @@ async fn backpressure_works() {
);
// verify that a message can be sent
assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_ready());
assert!(
futures::poll!(notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9])).is_ready()
);
}
#[tokio::test]
@@ -345,7 +350,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -365,7 +370,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -379,7 +384,7 @@ async fn peer_disconnects_then_sync_notification_is_sent() {
drop(sync_rx);
// as per documentation, error is not reported but the notification is silently dropped
notif.send_sync_notification(&peer_id, vec![1, 3, 3, 7]);
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 7]);
}
#[tokio::test]
@@ -399,7 +404,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -419,7 +424,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -434,7 +439,7 @@ async fn peer_disconnects_then_async_notification_is_sent() {
// as per documentation, error is not reported but the notification is silently dropped
if let Err(error::Error::ConnectionClosed) =
notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 7]).await
{
} else {
panic!("invalid state after calling `send_async_notification()` on closed connection")
@@ -460,7 +465,7 @@ async fn cloned_service_opening_substream_works() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif1.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -475,7 +480,7 @@ async fn cloned_service_opening_substream_works() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif2.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -505,7 +510,7 @@ async fn cloned_service_one_service_rejects_substream() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -519,7 +524,7 @@ async fn cloned_service_one_service_rejects_substream() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif3.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Reject).unwrap();
} else {
@@ -549,7 +554,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -571,7 +576,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -586,16 +591,16 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
if let Some(NotificationEvent::NotificationReceived { peer, notification }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(notification, vec![1, 3, 3, 8]);
} else {
panic!("invalid event received");
}
}
for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter().enumerate() {
for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter_mut().enumerate() {
// send notification from each service and verify peer receives it
notif.send_sync_notification(&peer_id, vec![1, 3, 3, i as u8]);
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, i as u8]);
assert_eq!(
sync_rx.next().await,
Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, i as u8] })
@@ -608,7 +613,7 @@ async fn cloned_service_opening_substream_sending_and_receiving_notifications_wo
for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
if let Some(NotificationEvent::NotificationStreamClosed { peer }) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
} else {
panic!("invalid event received");
}
@@ -632,7 +637,7 @@ async fn sending_notifications_using_notifications_sink_works() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -652,7 +657,7 @@ async fn sending_notifications_using_notifications_sink_works() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -661,7 +666,7 @@ async fn sending_notifications_using_notifications_sink_works() {
}
// get a copy of the notification sink and send a synchronous notification using.
let sink = notif.message_sink(&peer_id).unwrap();
let sink = notif.message_sink(&peer_id.into()).unwrap();
sink.send_sync_notification(vec![1, 3, 3, 6]);
// send an asynchronous notification using the acquired notifications sink.
@@ -677,8 +682,8 @@ async fn sending_notifications_using_notifications_sink_works() {
);
// send notifications using the stored notification sink as well.
notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
assert_eq!(
sync_rx.next().await,
@@ -693,7 +698,7 @@ async fn sending_notifications_using_notifications_sink_works() {
#[test]
fn try_to_get_notifications_sink_for_non_existent_peer() {
let (_proto, notif) = notification_service("/proto/1".into());
assert!(notif.message_sink(&PeerId::random()).is_none());
assert!(notif.message_sink(&sc_network_types::PeerId::random()).is_none());
}
#[tokio::test]
@@ -713,7 +718,7 @@ async fn notification_sink_replaced() {
if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(handshake, vec![1, 3, 3, 7]);
let _ = result_tx.send(ValidationResult::Accept).unwrap();
} else {
@@ -733,7 +738,7 @@ async fn notification_sink_replaced() {
direction,
}) = notif.next_event().await
{
assert_eq!(peer_id, peer);
assert_eq!(peer_id, peer.into());
assert_eq!(negotiated_fallback, None);
assert_eq!(handshake, vec![1, 3, 3, 7]);
assert_eq!(direction, Direction::Inbound);
@@ -742,7 +747,7 @@ async fn notification_sink_replaced() {
}
// get a copy of the notification sink and send a synchronous notification using.
let sink = notif.message_sink(&peer_id).unwrap();
let sink = notif.message_sink(&peer_id.into()).unwrap();
sink.send_sync_notification(vec![1, 3, 3, 6]);
// send an asynchronous notification using the acquired notifications sink.
@@ -758,8 +763,8 @@ async fn notification_sink_replaced() {
);
// send notifications using the stored notification sink as well.
notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
assert_eq!(
sync_rx.next().await,
@@ -788,8 +793,8 @@ async fn notification_sink_replaced() {
// verify that using the `NotificationService` API automatically results in using the correct
// sink
notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap();
notif.send_sync_notification(&peer_id.into(), vec![1, 3, 3, 8]);
notif.send_async_notification(&peer_id.into(), vec![1, 3, 3, 9]).await.unwrap();
assert_eq!(
new_sync_rx.next().await,
@@ -22,7 +22,10 @@ use crate::{
peer_store::PeerStore,
protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig},
protocol_controller::{ProtoSetConfig, ProtocolController, SetId},
service::traits::{NotificationEvent, ValidationResult},
service::{
metrics::NotificationMetrics,
traits::{NotificationEvent, ValidationResult},
},
};
use futures::{future::BoxFuture, prelude::*};
@@ -40,6 +43,7 @@ use sc_utils::mpsc::tracing_unbounded;
use std::{
iter,
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
@@ -91,7 +95,7 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
reserved_only: false,
},
to_notifications,
Box::new(peer_store.handle()),
Arc::new(peer_store.handle()),
);
let (notif_handle, command_stream) = protocol_handle_pair.split();
@@ -99,7 +103,7 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
inner: Notifications::new(
vec![controller_handle],
from_controller,
None,
NotificationMetrics::new(None),
iter::once((
ProtocolConfig {
name: "/foo".into(),
@@ -41,19 +41,22 @@
//! Even though this does not guarantee that `ProtocolController` and `Notifications` have the same
//! view of the peers' states at any given moment, the eventual consistency is maintained.
use crate::{
peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT},
PeerId,
};
use futures::{channel::oneshot, future::Either, FutureExt, StreamExt};
use libp2p::PeerId;
use log::{debug, error, trace, warn};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
use sp_arithmetic::traits::SaturatedConversion;
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::{Duration, Instant},
};
use wasm_timer::Delay;
use crate::peer_store::PeerStoreProvider;
/// Log target for this file.
pub const LOG_TARGET: &str = "peerset";
@@ -230,6 +233,12 @@ impl ProtocolHandle {
}
}
impl ProtocolHandleT for ProtocolHandle {
fn disconnect_peer(&self, peer_id: sc_network_types::PeerId) {
let _ = self.actions_tx.unbounded_send(Action::DisconnectPeer(peer_id.into()));
}
}
/// Direction of a connection
#[derive(Clone, Copy, Debug)]
enum Direction {
@@ -289,7 +298,7 @@ pub struct ProtocolController {
to_notifications: TracingUnboundedSender<Message>,
/// `PeerStore` handle for checking peer reputation values and getting connection candidates
/// with highest reputation.
peer_store: Box<dyn PeerStoreProvider>,
peer_store: Arc<dyn PeerStoreProvider>,
}
impl ProtocolController {
@@ -298,12 +307,12 @@ impl ProtocolController {
set_id: SetId,
config: ProtoSetConfig,
to_notifications: TracingUnboundedSender<Message>,
peer_store: Box<dyn PeerStoreProvider>,
peer_store: Arc<dyn PeerStoreProvider>,
) -> (ProtocolHandle, ProtocolController) {
let (actions_tx, actions_rx) = tracing_unbounded("mpsc_api_protocol", 10_000);
let (events_tx, events_rx) = tracing_unbounded("mpsc_notifications_protocol", 10_000);
let handle = ProtocolHandle { actions_tx, events_tx };
peer_store.register_protocol(handle.clone());
peer_store.register_protocol(Arc::new(handle.clone()));
let reserved_nodes =
config.reserved_nodes.iter().map(|p| (*p, PeerState::NotConnected)).collect();
let controller = ProtocolController {
@@ -445,12 +454,12 @@ impl ProtocolController {
/// Report peer disconnect event to `PeerStore` for it to update peer's reputation accordingly.
/// Should only be called if the remote node disconnected us, not the other way around.
fn report_disconnect(&mut self, peer_id: PeerId) {
self.peer_store.report_disconnect(peer_id);
self.peer_store.report_disconnect(peer_id.into());
}
/// Ask `Peerset` if the peer has a reputation value not sufficient for connection with it.
fn is_banned(&self, peer_id: &PeerId) -> bool {
self.peer_store.is_banned(peer_id)
self.peer_store.is_banned(&peer_id.into())
}
/// Add the peer to the set of reserved peers. [`ProtocolController`] will try to always
@@ -665,7 +674,7 @@ impl ProtocolController {
self.accept_connection(peer_id, incoming_index);
},
PeerState::NotConnected =>
if self.peer_store.is_banned(&peer_id) {
if self.peer_store.is_banned(&peer_id.into()) {
self.reject_connection(peer_id, incoming_index);
} else {
*state = PeerState::Connected(Direction::Inbound);
@@ -778,7 +787,7 @@ impl ProtocolController {
self.reserved_nodes
.iter_mut()
.filter_map(|(peer_id, state)| {
(!state.is_connected() && !self.peer_store.is_banned(peer_id)).then(|| {
(!state.is_connected() && !self.peer_store.is_banned(&peer_id.into())).then(|| {
*state = PeerState::Connected(Direction::Outbound);
peer_id
})
@@ -803,8 +812,11 @@ impl ProtocolController {
let ignored = self
.reserved_nodes
.keys()
.collect::<HashSet<&PeerId>>()
.union(&self.nodes.keys().collect::<HashSet<&PeerId>>())
.map(From::from)
.collect::<HashSet<sc_network_types::PeerId>>()
.union(
&self.nodes.keys().map(From::from).collect::<HashSet<sc_network_types::PeerId>>(),
)
.cloned()
.collect();
@@ -813,16 +825,17 @@ impl ProtocolController {
.outgoing_candidates(available_slots, ignored)
.into_iter()
.filter_map(|peer_id| {
(!self.reserved_nodes.contains_key(&peer_id) && !self.nodes.contains_key(&peer_id))
.then_some(peer_id)
.or_else(|| {
error!(
target: LOG_TARGET,
"`PeerStore` returned a node we asked to ignore: {peer_id}.",
);
debug_assert!(false, "`PeerStore` returned a node we asked to ignore.");
None
})
(!self.reserved_nodes.contains_key(&peer_id.into()) &&
!self.nodes.contains_key(&peer_id.into()))
.then_some(peer_id)
.or_else(|| {
error!(
target: LOG_TARGET,
"`PeerStore` returned a node we asked to ignore: {peer_id}.",
);
debug_assert!(false, "`PeerStore` returned a node we asked to ignore.");
None
})
})
.collect::<Vec<_>>();
@@ -836,8 +849,8 @@ impl ProtocolController {
candidates.into_iter().take(available_slots).for_each(|peer_id| {
self.num_out += 1;
self.nodes.insert(peer_id, Direction::Outbound);
self.start_connection(peer_id);
self.nodes.insert(peer_id.into(), Direction::Outbound);
self.start_connection(peer_id.into());
})
}
}
@@ -845,8 +858,10 @@ impl ProtocolController {
#[cfg(test)]
mod tests {
use super::*;
use crate::{peer_store::PeerStoreProvider, ReputationChange};
use libp2p::PeerId;
use crate::{
peer_store::{PeerStoreProvider, ProtocolHandle as ProtocolHandleT},
PeerId, ReputationChange,
};
use sc_network_common::role::ObservedRole;
use sc_utils::mpsc::{tracing_unbounded, TryRecvError};
use std::collections::HashSet;
@@ -856,14 +871,16 @@ mod tests {
pub PeerStoreHandle {}
impl PeerStoreProvider for PeerStoreHandle {
fn is_banned(&self, peer_id: &PeerId) -> bool;
fn register_protocol(&self, protocol_handle: ProtocolHandle);
fn report_disconnect(&mut self, peer_id: PeerId);
fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole);
fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange);
fn peer_reputation(&self, peer_id: &PeerId) -> i32;
fn peer_role(&self, peer_id: &PeerId) -> Option<ObservedRole>;
fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec<PeerId>;
fn is_banned(&self, peer_id: &sc_network_types::PeerId) -> bool;
fn register_protocol(&self, protocol_handle: Arc<dyn ProtocolHandleT>);
fn report_disconnect(&self, peer_id: sc_network_types::PeerId);
fn set_peer_role(&self, peer_id: &sc_network_types::PeerId, role: ObservedRole);
fn report_peer(&self, peer_id: sc_network_types::PeerId, change: ReputationChange);
fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32;
fn peer_role(&self, peer_id: &sc_network_types::PeerId) -> Option<ObservedRole>;
fn outgoing_candidates(&self, count: usize, ignored: HashSet<sc_network_types::PeerId>) -> Vec<sc_network_types::PeerId>;
fn num_known_peers(&self) -> usize;
fn add_known_peer(&self, peer_id: sc_network_types::PeerId);
}
}
@@ -887,7 +904,7 @@ mod tests {
peer_store.expect_report_disconnect().times(2).return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Add second reserved node at runtime (this currently calls `alloc_slots` internally).
controller.on_add_reserved_peer(reserved2);
@@ -948,7 +965,7 @@ mod tests {
peer_store.expect_is_banned().times(6).return_const(true);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Add second reserved node at runtime (this currently calls `alloc_slots` internally).
controller.on_add_reserved_peer(reserved2);
@@ -1000,7 +1017,7 @@ mod tests {
peer_store.expect_report_disconnect().times(2).return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Add second reserved node at runtime (this calls `alloc_slots` internally).
controller.on_add_reserved_peer(reserved2);
@@ -1042,7 +1059,7 @@ mod tests {
fn nodes_supplied_by_peer_store_are_connected() {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let candidates = vec![peer1, peer2];
let candidates = vec![peer1.into(), peer2.into()];
let config = ProtoSetConfig {
in_peers: 0,
@@ -1058,7 +1075,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1092,7 +1109,7 @@ mod tests {
let reserved2 = PeerId::random();
let regular1 = PeerId::random();
let regular2 = PeerId::random();
let outgoing_candidates = vec![regular1, regular2];
let outgoing_candidates = vec![regular1.into(), regular2.into()];
let reserved_nodes = [reserved1, reserved2].iter().cloned().collect();
let config =
@@ -1105,7 +1122,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1128,8 +1145,8 @@ mod tests {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let peer3 = PeerId::random();
let candidates1 = vec![peer1, peer2];
let candidates2 = vec![peer3];
let candidates1 = vec![peer1.into(), peer2.into()];
let candidates2 = vec![peer3.into()];
let config = ProtoSetConfig {
in_peers: 0,
@@ -1147,7 +1164,7 @@ mod tests {
peer_store.expect_report_disconnect().times(2).return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1214,7 +1231,7 @@ mod tests {
peer_store.expect_register_protocol().once().return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1240,7 +1257,7 @@ mod tests {
peer_store.expect_register_protocol().once().return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
let peer = PeerId::random();
let incoming_index = IncomingIndex(1);
@@ -1262,7 +1279,7 @@ mod tests {
fn disabling_reserved_only_mode_allows_to_connect_to_peers() {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let candidates = vec![peer1, peer2];
let candidates = vec![peer1.into(), peer2.into()];
let config = ProtoSetConfig {
in_peers: 0,
@@ -1278,7 +1295,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1309,7 +1326,7 @@ mod tests {
let reserved2 = PeerId::random();
let regular1 = PeerId::random();
let regular2 = PeerId::random();
let outgoing_candidates = vec![regular1];
let outgoing_candidates = vec![regular1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1325,7 +1342,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert_eq!(controller.num_out, 0);
assert_eq!(controller.num_in, 0);
@@ -1383,7 +1400,7 @@ mod tests {
peer_store.expect_register_protocol().once().return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert_eq!(controller.reserved_nodes.len(), 2);
assert_eq!(controller.nodes.len(), 0);
assert_eq!(controller.num_out, 0);
@@ -1416,7 +1433,7 @@ mod tests {
peer_store.expect_is_banned().times(2).return_const(false);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Initiate connections.
controller.alloc_slots();
@@ -1460,10 +1477,13 @@ mod tests {
let mut peer_store = MockPeerStoreHandle::new();
peer_store.expect_register_protocol().once().return_const(());
peer_store.expect_is_banned().times(2).return_const(false);
peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
peer_store
.expect_outgoing_candidates()
.once()
.return_const(Vec::<sc_network_types::PeerId>::new());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `peer1` as inbound, `peer2` as outbound.
controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1493,7 +1513,7 @@ mod tests {
fn regular_nodes_stop_occupying_slots_when_become_reserved() {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let outgoing_candidates = vec![peer1];
let outgoing_candidates = vec![peer1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1509,7 +1529,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `peer1` as outbound & `peer2` as inbound.
controller.alloc_slots();
@@ -1535,7 +1555,7 @@ mod tests {
fn disconnecting_regular_peers_work() {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let outgoing_candidates = vec![peer1];
let outgoing_candidates = vec![peer1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1551,7 +1571,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `peer1` as outbound & `peer2` as inbound.
controller.alloc_slots();
@@ -1610,7 +1630,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `reserved1` as inbound & `reserved2` as outbound.
controller.on_incoming_connection(reserved1, IncomingIndex(1));
@@ -1650,7 +1670,7 @@ mod tests {
fn dropping_regular_peers_work() {
let peer1 = PeerId::random();
let peer2 = PeerId::random();
let outgoing_candidates = vec![peer1];
let outgoing_candidates = vec![peer1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1667,7 +1687,7 @@ mod tests {
peer_store.expect_report_disconnect().times(2).return_const(());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `peer1` as outbound & `peer2` as inbound.
controller.alloc_slots();
@@ -1718,7 +1738,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `reserved1` as inbound & `reserved2` as outbound.
controller.on_incoming_connection(reserved1, IncomingIndex(1));
@@ -1762,7 +1782,7 @@ mod tests {
fn incoming_request_for_connected_regular_node_switches_it_to_inbound() {
let regular1 = PeerId::random();
let regular2 = PeerId::random();
let outgoing_candidates = vec![regular1];
let outgoing_candidates = vec![regular1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1778,7 +1798,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert_eq!(controller.num_out, 0);
assert_eq!(controller.num_in, 0);
@@ -1814,7 +1834,7 @@ mod tests {
fn incoming_request_for_connected_node_is_rejected_if_its_banned() {
let regular1 = PeerId::random();
let regular2 = PeerId::random();
let outgoing_candidates = vec![regular1];
let outgoing_candidates = vec![regular1.into()];
let config = ProtoSetConfig {
in_peers: 10,
@@ -1831,7 +1851,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert_eq!(controller.num_out, 0);
assert_eq!(controller.num_in, 0);
@@ -1867,7 +1887,7 @@ mod tests {
fn incoming_request_for_connected_node_is_rejected_if_no_slots_available() {
let regular1 = PeerId::random();
let regular2 = PeerId::random();
let outgoing_candidates = vec![regular1];
let outgoing_candidates = vec![regular1.into()];
let config = ProtoSetConfig {
in_peers: 1,
@@ -1883,7 +1903,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(outgoing_candidates);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert_eq!(controller.num_out, 0);
assert_eq!(controller.num_in, 0);
@@ -1935,7 +1955,7 @@ mod tests {
peer_store.expect_is_banned().once().return_const(false);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Connect `peer1` as inbound.
controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1965,7 +1985,7 @@ mod tests {
peer_store.expect_is_banned().once().return_const(true);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
// Incoming request.
controller.on_incoming_connection(peer1, IncomingIndex(1));
@@ -1990,7 +2010,7 @@ mod tests {
peer_store.expect_is_banned().once().return_const(true);
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert!(controller.reserved_nodes.contains_key(&reserved1));
// Incoming request.
@@ -2017,7 +2037,7 @@ mod tests {
peer_store.expect_outgoing_candidates().once().return_const(Vec::new());
let (_handle, mut controller) =
ProtocolController::new(SetId::from(0), config, tx, Box::new(peer_store));
ProtocolController::new(SetId::from(0), config, tx, Arc::new(peer_store));
assert!(matches!(controller.reserved_nodes.get(&reserved1), Some(PeerState::NotConnected)));
// Initiate connections
@@ -36,6 +36,7 @@
use crate::{
peer_store::{PeerStoreProvider, BANNED_THRESHOLD},
service::traits::RequestResponseConfig as RequestResponseConfigT,
types::ProtocolName,
ReputationChange,
};
@@ -58,6 +59,7 @@ use std::{
io, iter,
ops::Deref,
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::{Duration, Instant},
};
@@ -129,11 +131,17 @@ pub struct ProtocolConfig {
pub inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
}
impl RequestResponseConfigT for ProtocolConfig {
fn protocol_name(&self) -> &ProtocolName {
&self.name
}
}
/// A single request received by a peer on a request-response protocol.
#[derive(Debug)]
pub struct IncomingRequest {
/// Who sent the request.
pub peer: PeerId,
pub peer: sc_network_types::PeerId,
/// Request sent by the remote. Will always be smaller than
/// [`ProtocolConfig::max_request_size`].
@@ -288,7 +296,7 @@ pub struct RequestResponsesBehaviour {
send_feedback: HashMap<ProtocolRequestId, oneshot::Sender<()>>,
/// Primarily used to get a reputation of a node.
peer_store: Box<dyn PeerStoreProvider>,
peer_store: Arc<dyn PeerStoreProvider>,
}
/// Generated by the response builder and waiting to be processed.
@@ -305,7 +313,7 @@ impl RequestResponsesBehaviour {
/// the same protocol is passed twice.
pub fn new(
list: impl Iterator<Item = ProtocolConfig>,
peer_store: Box<dyn PeerStoreProvider>,
peer_store: Arc<dyn PeerStoreProvider>,
) -> Result<Self, RegisterError> {
let mut protocols = HashMap::new();
for protocol in list {
@@ -670,7 +678,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
self.pending_responses_arrival_time
.insert((protocol.clone(), request_id).into(), Instant::now());
let reputation = self.peer_store.peer_reputation(&peer);
let reputation = self.peer_store.peer_reputation(&peer.into());
if reputation < BANNED_THRESHOLD {
log::debug!(
@@ -694,7 +702,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour {
// because the latter allocates an extra slot for every cloned
// sender.
let _ = resp_builder.try_send(IncomingRequest {
peer,
peer: peer.into(),
payload: request,
pending_response: tx,
});
@@ -1093,7 +1101,7 @@ mod tests {
.multiplex(libp2p::yamux::Config::default())
.boxed();
let behaviour = RequestResponsesBehaviour::new(list, Box::new(MockPeerStore {})).unwrap();
let behaviour = RequestResponsesBehaviour::new(list, Arc::new(MockPeerStore {})).unwrap();
let runtime = tokio::runtime::Runtime::new().unwrap();
let mut swarm = SwarmBuilder::with_executor(
@@ -0,0 +1,43 @@
syntax = "proto3";
package bitswap.message;
message Message {
message Wantlist {
enum WantType {
Block = 0;
Have = 1;
}
message Entry {
bytes block = 1; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0)
int32 priority = 2; // the priority (normalized). default to 1
bool cancel = 3; // whether this revokes an entry
WantType wantType = 4; // Note: defaults to enum 0, ie Block
bool sendDontHave = 5; // Note: defaults to false
}
repeated Entry entries = 1; // a list of wantlist entries
bool full = 2; // whether this is the full wantlist. default to false
}
message Block {
bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)
bytes data = 2;
}
enum BlockPresenceType {
Have = 0;
DontHave = 1;
}
message BlockPresence {
bytes cid = 1;
BlockPresenceType type = 2;
}
Wantlist wantlist = 1;
repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0
repeated Block payload = 3; // used to send Blocks in bitswap 1.1.0
repeated BlockPresence blockPresences = 4;
int32 pendingBytes = 5;
}
+238 -99
View File
@@ -29,29 +29,33 @@
use crate::{
behaviour::{self, Behaviour, BehaviourOut},
config::{parse_addr, FullNetworkConfiguration, MultiaddrWithPeerId, Params, TransportConfig},
bitswap::BitswapRequestHandler,
config::{
parse_addr, FullNetworkConfiguration, IncomingRequest, MultiaddrWithPeerId,
NonDefaultSetConfig, NotificationHandshake, Params, SetConfig, TransportConfig,
},
discovery::DiscoveryConfig,
error::Error,
event::{DhtEvent, Event},
network_state::{
NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer,
},
peer_store::{PeerStoreHandle, PeerStoreProvider},
peer_store::{PeerStore, PeerStoreProvider},
protocol::{self, NotifsHandlerError, Protocol, Ready},
protocol_controller::{self, ProtoSetConfig, ProtocolController, SetId},
request_responses::{IfDisconnected, RequestFailure},
request_responses::{IfDisconnected, ProtocolConfig as RequestResponseConfig, RequestFailure},
service::{
signature::{Signature, SigningError},
traits::{
NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers,
NetworkRequest, NetworkSigner, NetworkStateInfo, NetworkStatus, NetworkStatusProvider,
NotificationSender as NotificationSenderT, NotificationSenderError,
NotificationSenderReady as NotificationSenderReadyT,
BandwidthSink, NetworkBackend, NetworkDHTProvider, NetworkEventStream, NetworkPeers,
NetworkRequest, NetworkService as NetworkServiceT, NetworkSigner, NetworkStateInfo,
NetworkStatus, NetworkStatusProvider, NotificationSender as NotificationSenderT,
NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT,
},
},
transport,
types::ProtocolName,
ReputationChange,
Multiaddr, NotificationService, PeerId, ReputationChange,
};
use codec::DecodeAll;
@@ -69,12 +73,13 @@ use libp2p::{
AddressScore, ConnectionError, ConnectionId, ConnectionLimits, DialError, Executor,
ListenError, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent, THandlerErr,
},
Multiaddr, PeerId,
};
use log::{debug, error, info, trace, warn};
use metrics::{Histogram, MetricSources, Metrics};
use parking_lot::Mutex;
use prometheus_endpoint::Registry;
use sc_client_api::BlockBackend;
use sc_network_common::{
role::{ObservedRole, Roles},
ExHashT,
@@ -94,18 +99,34 @@ use std::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::Duration,
};
pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure};
pub use libp2p::identity::{DecodingError, Keypair, PublicKey};
pub use metrics::NotificationMetrics;
pub use protocol::NotificationsSink;
mod metrics;
mod out_events;
pub(crate) mod metrics;
pub(crate) mod out_events;
pub mod signature;
pub mod traits;
struct Libp2pBandwidthSink {
sink: Arc<transport::BandwidthSinks>,
}
impl BandwidthSink for Libp2pBandwidthSink {
fn total_inbound(&self) -> u64 {
self.sink.total_inbound()
}
fn total_outbound(&self) -> u64 {
self.sink.total_outbound()
}
}
/// Substrate network service. Handles network IO and manages connectivity.
pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
/// Number of peers we're connected to.
@@ -119,9 +140,7 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
/// The `KeyPair` that defines the `PeerId` of the local node.
local_identity: Keypair,
/// Bandwidth logging system. Can be queried to know the average bandwidth consumed.
bandwidth: Arc<transport::BandwidthSinks>,
/// Used to query and report reputation changes.
peer_store_handle: PeerStoreHandle,
bandwidth: Arc<dyn BandwidthSink>,
/// Channel that sends messages to the actual worker.
to_worker: TracingUnboundedSender<ServiceToWorkerMsg>,
/// Protocol name -> `SetId` mapping for notification protocols. The map never changes after
@@ -132,6 +151,8 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
protocol_handles: Vec<protocol_controller::ProtocolHandle>,
/// Shortcut to sync protocol handle (`protocol_handles[0]`).
sync_protocol_handle: protocol_controller::ProtocolHandle,
/// Handle to `PeerStore`.
peer_store_handle: Arc<dyn PeerStoreProvider>,
/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
/// compatibility.
_marker: PhantomData<H>,
@@ -139,6 +160,91 @@ pub struct NetworkService<B: BlockT + 'static, H: ExHashT> {
_block: PhantomData<B>,
}
/// [`NetworkBackend`] implementation for the libp2p-based network worker.
///
/// Mostly thin delegations to the existing `NetworkWorker`/libp2p machinery so the
/// worker can be driven through the backend-agnostic interface.
#[async_trait::async_trait]
impl<B, H> NetworkBackend<B, H> for NetworkWorker<B, H>
where
	B: BlockT + 'static,
	H: ExHashT,
{
	type NotificationProtocolConfig = NonDefaultSetConfig;
	type RequestResponseProtocolConfig = RequestResponseConfig;
	// NOTE(review): the GAT parameters `Block`/`Hash` are ignored here; the service is
	// always parameterized over this impl's own `B`/`H`.
	type NetworkService<Block, Hash> = Arc<NetworkService<B, H>>;
	type PeerStore = PeerStore;
	type BitswapConfig = RequestResponseConfig;

	/// Create a new libp2p network backend by delegating to `NetworkWorker::new`.
	fn new(params: Params<B, H, Self>) -> Result<Self, Error>
	where
		Self: Sized,
	{
		NetworkWorker::new(params)
	}

	/// Get handle to `NetworkService` of the `NetworkBackend`.
	fn network_service(&self) -> Arc<dyn NetworkServiceT> {
		self.service.clone()
	}

	/// Create `PeerStore`.
	fn peer_store(bootnodes: Vec<sc_network_types::PeerId>) -> Self::PeerStore {
		// Convert the backend-agnostic peer IDs into the `PeerStore`'s own ID type.
		PeerStore::new(bootnodes.into_iter().map(From::from).collect())
	}

	/// Register notification metrics with the (optional) Prometheus registry.
	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics {
		NotificationMetrics::new(registry)
	}

	/// Create the Bitswap server: returns the handler's event-loop future (to be
	/// spawned by the caller) plus the request-response protocol config to register.
	fn bitswap_server(
		client: Arc<dyn BlockBackend<B> + Send + Sync>,
	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig) {
		let (handler, protocol_config) = BitswapRequestHandler::new(client.clone());
		(Box::pin(async move { handler.run().await }), protocol_config)
	}

	/// Create notification protocol configuration.
	///
	/// `_metrics` and `_peerstore_handle` are unused by this backend; they exist for
	/// parity with the [`NetworkBackend`] trait signature.
	fn notification_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_notification_size: u64,
		handshake: Option<NotificationHandshake>,
		set_config: SetConfig,
		_metrics: NotificationMetrics,
		_peerstore_handle: Arc<dyn PeerStoreProvider>,
	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>) {
		NonDefaultSetConfig::new(
			protocol_name,
			fallback_names,
			max_notification_size,
			handshake,
			set_config,
		)
	}

	/// Create request-response protocol configuration.
	fn request_response_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_request_size: u64,
		max_response_size: u64,
		request_timeout: Duration,
		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
	) -> Self::RequestResponseProtocolConfig {
		Self::RequestResponseProtocolConfig {
			name: protocol_name,
			fallback_names,
			max_request_size,
			max_response_size,
			request_timeout,
			inbound_queue,
		}
	}

	/// Start [`NetworkBackend`] event loop.
	async fn run(mut self) {
		// Resolves to the inherent `NetworkWorker::run` (inherent methods take
		// precedence over trait methods), so this is NOT a recursive call.
		self.run().await
	}
}
impl<B, H> NetworkWorker<B, H>
where
B: BlockT + 'static,
@@ -149,11 +255,13 @@ where
/// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order
/// for the network processing to advance. From it, you can extract a `NetworkService` using
/// `worker.service()`. The `NetworkService` can be shared through the codebase.
pub fn new(params: Params<B>) -> Result<Self, Error> {
pub fn new(params: Params<B, H, Self>) -> Result<Self, Error> {
let peer_store_handle = params.network_config.peer_store_handle();
let FullNetworkConfiguration {
notification_protocols,
request_response_protocols,
mut network_config,
..
} = params.network_config;
// Private and public keys configuration.
@@ -164,14 +272,14 @@ where
network_config.boot_nodes = network_config
.boot_nodes
.into_iter()
.filter(|boot_node| boot_node.peer_id != local_peer_id)
.filter(|boot_node| boot_node.peer_id != local_peer_id.into())
.collect();
network_config.default_peers_set.reserved_nodes = network_config
.default_peers_set
.reserved_nodes
.into_iter()
.filter(|reserved_node| {
if reserved_node.peer_id == local_peer_id {
if reserved_node.peer_id == local_peer_id.into() {
warn!(
target: "sub-libp2p",
"Local peer ID used in reserved node, ignoring: {}",
@@ -219,6 +327,7 @@ where
"🏷 Local node identity is: {}",
local_peer_id.to_base58(),
);
log::info!(target: "sub-libp2p", "Running libp2p network backend");
let (transport, bandwidth) = {
let config_mem = match network_config.transport {
@@ -284,7 +393,7 @@ where
reserved_nodes: set_config
.reserved_nodes
.iter()
.map(|node| node.peer_id)
.map(|node| node.peer_id.into())
.collect(),
reserved_only: set_config.non_reserved_mode.is_reserved_only(),
};
@@ -293,7 +402,7 @@ where
SetId::from(set_id),
proto_set_config,
to_notifications.clone(),
Box::new(params.peer_store.clone()),
Arc::clone(&peer_store_handle),
)
})
.unzip();
@@ -354,8 +463,8 @@ where
{
Err(Error::DuplicateBootnode {
address: bootnode.multiaddr.clone(),
first_id: bootnode.peer_id,
second_id: other.peer_id,
first_id: bootnode.peer_id.into(),
second_id: other.peer_id.into(),
})
} else {
Ok(())
@@ -367,7 +476,7 @@ where
for bootnode in network_config.boot_nodes.iter() {
boot_node_ids
.entry(bootnode.peer_id)
.entry(bootnode.peer_id.into())
.or_default()
.push(bootnode.multiaddr.clone());
}
@@ -379,10 +488,10 @@ where
let (protocol, notif_protocol_handles) = Protocol::new(
From::from(&params.role),
&params.metrics_registry,
params.notification_metrics,
notification_protocols,
params.block_announce_config,
params.peer_store.clone(),
Arc::clone(&peer_store_handle),
protocol_handles.clone(),
from_protocol_controllers,
)?;
@@ -394,7 +503,12 @@ where
let discovery_config = {
let mut config = DiscoveryConfig::new(local_public.to_peer_id());
config.with_permanent_addresses(known_addresses);
config.with_permanent_addresses(
known_addresses
.iter()
.map(|(peer, address)| (peer.into(), address.clone()))
.collect::<Vec<_>>(),
);
config.discovery_limit(u64::from(network_config.default_peers_set.out_peers) + 15);
config.with_kademlia(
params.genesis_hash,
@@ -433,7 +547,7 @@ where
local_public,
discovery_config,
request_response_protocols,
params.peer_store.clone(),
Arc::clone(&peer_store_handle),
external_addresses.clone(),
);
@@ -474,7 +588,7 @@ where
.per_connection_event_buffer_size(24)
.max_negotiating_inbound_streams(2048);
(builder.build(), bandwidth)
(builder.build(), Arc::new(Libp2pBandwidthSink { sink: bandwidth }))
};
// Initialize the metrics.
@@ -518,7 +632,7 @@ where
notification_protocol_ids,
protocol_handles,
sync_protocol_handle,
peer_store_handle: params.peer_store.clone(),
peer_store_handle: Arc::clone(&peer_store_handle),
_marker: PhantomData,
_block: Default::default(),
});
@@ -533,7 +647,7 @@ where
metrics,
boot_node_ids,
reported_invalid_boot_nodes: Default::default(),
peer_store_handle: params.peer_store,
peer_store_handle: Arc::clone(&peer_store_handle),
notif_protocol_handles,
_marker: Default::default(),
_block: Default::default(),
@@ -701,7 +815,7 @@ where
/// Removes a `PeerId` from the list of reserved peers.
pub fn remove_reserved_peer(&self, peer: PeerId) {
self.service.remove_reserved_peer(peer);
self.service.remove_reserved_peer(peer.into());
}
/// Adds a `PeerId` and its `Multiaddr` as reserved.
@@ -731,18 +845,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
}
}
/// Get the list of reserved peers.
///
/// Returns an error if the `NetworkWorker` is no longer running.
pub async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()> {
let (tx, rx) = oneshot::channel();
self.sync_protocol_handle.reserved_peers(tx);
// The channel can only be closed if `ProtocolController` no longer exists.
rx.await.map_err(|_| ())
}
/// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates.
///
/// Returns an `Err` if one of the given addresses is invalid or contains an
@@ -788,8 +890,8 @@ where
}
/// Returns the local Peer ID.
fn local_peer_id(&self) -> PeerId {
self.local_peer_id
fn local_peer_id(&self) -> sc_network_types::PeerId {
self.local_peer_id.into()
}
}
@@ -798,8 +900,29 @@ where
B: sp_runtime::traits::Block,
H: ExHashT,
{
fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError> {
Signature::sign_message(msg.as_ref(), &self.local_identity)
fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
let public_key = self.local_identity.public();
let bytes = self.local_identity.sign(msg.as_ref())?;
Ok(Signature {
public_key: crate::service::signature::PublicKey::Libp2p(public_key),
bytes,
})
}
fn verify(
&self,
peer_id: sc_network_types::PeerId,
public_key: &Vec<u8>,
signature: &Vec<u8>,
message: &Vec<u8>,
) -> Result<bool, String> {
let public_key =
PublicKey::try_decode_protobuf(&public_key).map_err(|error| error.to_string())?;
let peer_id: PeerId = peer_id.into();
let remote: libp2p::PeerId = public_key.to_peer_id();
Ok(peer_id == remote && public_key.verify(message, signature))
}
}
@@ -844,39 +967,55 @@ where
Err(_) => Err(()),
}
}
async fn network_state(&self) -> Result<NetworkState, ()> {
let (tx, rx) = oneshot::channel();
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx });
match rx.await {
Ok(v) => v.map_err(|_| ()),
// The channel can only be closed if the network worker no longer exists.
Err(_) => Err(()),
}
}
}
#[async_trait::async_trait]
impl<B, H> NetworkPeers for NetworkService<B, H>
where
B: BlockT + 'static,
H: ExHashT,
{
fn set_authorized_peers(&self, peers: HashSet<PeerId>) {
self.sync_protocol_handle.set_reserved_peers(peers);
fn set_authorized_peers(&self, peers: HashSet<sc_network_types::PeerId>) {
self.sync_protocol_handle
.set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect());
}
fn set_authorized_only(&self, reserved_only: bool) {
self.sync_protocol_handle.set_reserved_only(reserved_only);
}
fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) {
fn add_known_address(&self, peer_id: sc_network_types::PeerId, addr: Multiaddr) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.into(), addr));
}
fn report_peer(&self, peer_id: PeerId, cost_benefit: ReputationChange) {
fn report_peer(&self, peer_id: sc_network_types::PeerId, cost_benefit: ReputationChange) {
self.peer_store_handle.clone().report_peer(peer_id, cost_benefit);
}
fn peer_reputation(&self, peer_id: &PeerId) -> i32 {
fn peer_reputation(&self, peer_id: &sc_network_types::PeerId) -> i32 {
self.peer_store_handle.peer_reputation(peer_id)
}
fn disconnect_peer(&self, peer_id: PeerId, protocol: ProtocolName) {
fn disconnect_peer(&self, peer_id: sc_network_types::PeerId, protocol: ProtocolName) {
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer_id, protocol));
.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer_id.into(), protocol));
}
fn accept_unreserved_peers(&self) {
@@ -889,19 +1028,21 @@ where
fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> {
// Make sure the local peer ID is never added as a reserved peer.
if peer.peer_id == self.local_peer_id {
if peer.peer_id == self.local_peer_id.into() {
return Err("Local peer ID cannot be added as a reserved peer.".to_string())
}
let _ = self
.to_worker
.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr));
self.sync_protocol_handle.add_reserved_peer(peer.peer_id);
let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(
peer.peer_id.into(),
peer.multiaddr,
));
self.sync_protocol_handle.add_reserved_peer(peer.peer_id.into());
Ok(())
}
fn remove_reserved_peer(&self, peer_id: PeerId) {
self.sync_protocol_handle.remove_reserved_peer(peer_id);
fn remove_reserved_peer(&self, peer_id: sc_network_types::PeerId) {
self.sync_protocol_handle.remove_reserved_peer(peer_id.into());
}
fn set_reserved_peers(
@@ -915,7 +1056,8 @@ where
let peers_addrs = self.split_multiaddr_and_peer_id(peers)?;
let mut peers: HashSet<PeerId> = HashSet::with_capacity(peers_addrs.len());
let mut peers: HashSet<sc_network_types::PeerId> =
HashSet::with_capacity(peers_addrs.len());
for (peer_id, addr) in peers_addrs.into_iter() {
// Make sure the local peer ID is never added to the PSM.
@@ -923,7 +1065,7 @@ where
return Err("Local peer ID cannot be added as a reserved peer.".to_string())
}
peers.insert(peer_id);
peers.insert(peer_id.into());
if !addr.is_empty() {
let _ = self
@@ -932,7 +1074,8 @@ where
}
}
self.protocol_handles[usize::from(*set_id)].set_reserved_peers(peers);
self.protocol_handles[usize::from(*set_id)]
.set_reserved_peers(peers.iter().map(|peer| (*peer).into()).collect());
Ok(())
}
@@ -972,7 +1115,7 @@ where
fn remove_peers_from_reserved_set(
&self,
protocol: ProtocolName,
peers: Vec<PeerId>,
peers: Vec<sc_network_types::PeerId>,
) -> Result<(), String> {
let Some(set_id) = self.notification_protocol_ids.get(&protocol) else {
return Err(format!(
@@ -982,7 +1125,7 @@ where
};
for peer_id in peers.into_iter() {
self.protocol_handles[usize::from(*set_id)].remove_reserved_peer(peer_id);
self.protocol_handles[usize::from(*set_id)].remove_reserved_peer(peer_id.into());
}
Ok(())
@@ -992,15 +1135,33 @@ where
self.num_connected.load(Ordering::Relaxed)
}
fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
fn peer_role(
&self,
peer_id: sc_network_types::PeerId,
handshake: Vec<u8>,
) -> Option<ObservedRole> {
match Roles::decode_all(&mut &handshake[..]) {
Ok(role) => Some(role.into()),
Err(_) => {
log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}");
self.peer_store_handle.peer_role(&peer_id)
self.peer_store_handle.peer_role(&(peer_id.into()))
},
}
}
/// Get the list of reserved peers.
///
/// Returns an error if the `NetworkWorker` is no longer running.
async fn reserved_peers(&self) -> Result<Vec<sc_network_types::PeerId>, ()> {
let (tx, rx) = oneshot::channel();
self.sync_protocol_handle.reserved_peers(tx);
// The channel can only be closed if `ProtocolController` no longer exists.
rx.await
.map(|peers| peers.into_iter().map(From::from).collect())
.map_err(|_| ())
}
}
impl<B, H> NetworkEventStream for NetworkService<B, H>
@@ -1015,28 +1176,6 @@ where
}
}
impl<B, H> NetworkNotification for NetworkService<B, H>
where
B: BlockT + 'static,
H: ExHashT,
{
fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec<u8>) {
unimplemented!();
}
fn notification_sender(
&self,
_target: PeerId,
_protocol: ProtocolName,
) -> Result<Box<dyn NotificationSenderT>, NotificationSenderError> {
unimplemented!();
}
fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec<u8>) {
unimplemented!();
}
}
#[async_trait::async_trait]
impl<B, H> NetworkRequest for NetworkService<B, H>
where
@@ -1045,7 +1184,7 @@ where
{
async fn request(
&self,
target: PeerId,
target: sc_network_types::PeerId,
protocol: ProtocolName,
request: Vec<u8>,
fallback_request: Option<(Vec<u8>, ProtocolName)>,
@@ -1053,7 +1192,7 @@ where
) -> Result<(Vec<u8>, ProtocolName), RequestFailure> {
let (tx, rx) = oneshot::channel();
self.start_request(target, protocol, request, fallback_request, tx, connect);
self.start_request(target.into(), protocol, request, fallback_request, tx, connect);
match rx.await {
Ok(v) => v,
@@ -1066,7 +1205,7 @@ where
fn start_request(
&self,
target: PeerId,
target: sc_network_types::PeerId,
protocol: ProtocolName,
request: Vec<u8>,
fallback_request: Option<(Vec<u8>, ProtocolName)>,
@@ -1074,7 +1213,7 @@ where
connect: IfDisconnected,
) {
let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request {
target,
target: target.into(),
protocol: protocol.into(),
request,
fallback_request,
@@ -1204,7 +1343,7 @@ where
/// Boot nodes that we already have reported as invalid.
reported_invalid_boot_nodes: HashSet<PeerId>,
/// Peer reputation store handle.
peer_store_handle: PeerStoreHandle,
peer_store_handle: Arc<dyn PeerStoreProvider>,
/// Notification protocol handles.
notif_protocol_handles: Vec<protocol::ProtocolHandle>,
/// Marker to pin the `H` generic. Serves no purpose except to not break backwards
@@ -1394,7 +1533,7 @@ where
},
SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { peer, changes }) => {
for change in changes {
self.peer_store_handle.report_peer(peer, change);
self.peer_store_handle.report_peer(peer.into(), change);
}
},
SwarmEvent::Behaviour(BehaviourOut::PeerIdentify {
@@ -1417,10 +1556,10 @@ where
.behaviour_mut()
.add_self_reported_address_to_dht(&peer_id, &protocols, addr);
}
self.peer_store_handle.add_known_peer(peer_id);
self.peer_store_handle.add_known_peer(peer_id.into());
},
SwarmEvent::Behaviour(BehaviourOut::Discovered(peer_id)) => {
self.peer_store_handle.add_known_peer(peer_id);
self.peer_store_handle.add_known_peer(peer_id.into());
},
SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted) => {
if let Some(metrics) = self.metrics.as_ref() {
@@ -1720,7 +1859,7 @@ where
{
}
fn ensure_addresses_consistent_with_transport<'a>(
pub(crate) fn ensure_addresses_consistent_with_transport<'a>(
addresses: impl Iterator<Item = &'a Multiaddr>,
transport: &TransportConfig,
) -> Result<(), Error> {
+135 -4
View File
@@ -16,11 +16,13 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::transport::BandwidthSinks;
use crate::{service::traits::BandwidthSink, ProtocolName};
use prometheus_endpoint::{
self as prometheus, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, MetricSource, Opts,
PrometheusError, Registry, SourcedCounter, SourcedGauge, U64,
};
use std::{
str,
sync::{
@@ -38,13 +40,30 @@ pub fn register(registry: &Registry, sources: MetricSources) -> Result<Metrics,
Metrics::register(registry)
}
// Register `sc-network` metrics without bandwidth/connected peer sources.
pub fn register_without_sources(registry: &Registry) -> Result<Metrics, PrometheusError> {
Metrics::register(registry)
}
/// Predefined metric sources that are fed directly into prometheus.
pub struct MetricSources {
pub bandwidth: Arc<BandwidthSinks>,
pub bandwidth: Arc<dyn BandwidthSink>,
pub connected_peers: Arc<AtomicUsize>,
}
impl MetricSources {
pub fn register(
registry: &Registry,
bandwidth: Arc<dyn BandwidthSink>,
connected_peers: Arc<AtomicUsize>,
) -> Result<(), PrometheusError> {
BandwidthCounters::register(registry, bandwidth)?;
NumConnectedGauge::register(registry, connected_peers)
}
}
/// Dedicated metrics.
#[derive(Clone)]
pub struct Metrics {
// This list is ordered alphabetically
pub connections_closed_total: CounterVec<U64>,
@@ -208,12 +227,12 @@ impl Metrics {
/// The bandwidth counter metric.
#[derive(Clone)]
pub struct BandwidthCounters(Arc<BandwidthSinks>);
pub struct BandwidthCounters(Arc<dyn BandwidthSink>);
impl BandwidthCounters {
/// Registers the `BandwidthCounters` metric whose values are
/// obtained from the given sinks.
fn register(registry: &Registry, sinks: Arc<BandwidthSinks>) -> Result<(), PrometheusError> {
fn register(registry: &Registry, sinks: Arc<dyn BandwidthSink>) -> Result<(), PrometheusError> {
prometheus::register(
SourcedCounter::new(
&Opts::new("substrate_sub_libp2p_network_bytes_total", "Total bandwidth usage")
@@ -263,3 +282,115 @@ impl MetricSource for NumConnectedGauge {
set(&[], self.0.load(Ordering::Relaxed) as u64);
}
}
/// Notification metrics.
///
/// Wrapper over `Option<InnerNotificationMetrics>` to make metrics reporting code
/// cleaner: when metrics are disabled, every reporting method is a no-op.
#[derive(Debug, Clone)]
pub struct NotificationMetrics {
	/// Metrics, if enabled.
	metrics: Option<InnerNotificationMetrics>,
}

impl NotificationMetrics {
	/// Create new [`NotificationMetrics`].
	///
	/// Metrics are collected only if a Prometheus `Registry` was provided and
	/// registration succeeded.
	pub fn new(registry: Option<&Registry>) -> NotificationMetrics {
		Self {
			metrics: registry
				.and_then(|registry| InnerNotificationMetrics::register(registry).ok()),
		}
	}

	/// Register opened substream to Prometheus.
	pub fn register_substream_opened(&self, protocol: &ProtocolName) {
		let Some(metrics) = &self.metrics else { return };
		metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc();
	}

	/// Register closed substream to Prometheus.
	pub fn register_substream_closed(&self, protocol: &ProtocolName) {
		let Some(metrics) = &self.metrics else { return };
		metrics.notifications_streams_closed_total.with_label_values(&[&protocol[..]]).inc();
	}

	/// Register sent notification to Prometheus.
	pub fn register_notification_sent(&self, protocol: &ProtocolName, size: usize) {
		let Some(metrics) = &self.metrics else { return };
		metrics.notifications_sizes.with_label_values(&["out", protocol]).observe(size as f64);
	}

	/// Register received notification to Prometheus.
	pub fn register_notification_received(&self, protocol: &ProtocolName, size: usize) {
		let Some(metrics) = &self.metrics else { return };
		metrics.notifications_sizes.with_label_values(&["in", protocol]).observe(size as f64);
	}
}
/// Inner (successfully registered) notification metrics.
///
/// Present inside [`NotificationMetrics`] only when a Prometheus registry was
/// supplied and registration succeeded.
#[derive(Debug, Clone)]
struct InnerNotificationMetrics {
	/// Total number of opened substreams.
	pub notifications_streams_opened_total: CounterVec<U64>,
	/// Total number of closed substreams.
	pub notifications_streams_closed_total: CounterVec<U64>,
	/// In/outbound notification sizes.
	pub notifications_sizes: HistogramVec,
}

impl InnerNotificationMetrics {
	/// Register all notification metrics with `registry`.
	///
	/// Returns `PrometheusError` if any metric fails to register (e.g., duplicate
	/// registration).
	fn register(registry: &Registry) -> Result<Self, PrometheusError> {
		Ok(Self {
			notifications_sizes: prometheus::register(
				HistogramVec::new(
					HistogramOpts {
						common_opts: Opts::new(
							"substrate_sub_libp2p_notifications_sizes",
							"Sizes of the notifications send to and received from all nodes",
						),
						// 8 exponential buckets: 64 B up to 64 * 4^7 = 1 MiB.
						buckets: prometheus::exponential_buckets(64.0, 4.0, 8)
							.expect("parameters are always valid values; qed"),
					},
					&["direction", "protocol"],
				)?,
				registry,
			)?,
			notifications_streams_closed_total: prometheus::register(
				CounterVec::new(
					Opts::new(
						"substrate_sub_libp2p_notifications_streams_closed_total",
						"Total number of notification substreams that have been closed",
					),
					&["protocol"],
				)?,
				registry,
			)?,
			notifications_streams_opened_total: prometheus::register(
				CounterVec::new(
					Opts::new(
						"substrate_sub_libp2p_notifications_streams_opened_total",
						"Total number of notification substreams that have been opened",
					),
					&["protocol"],
				)?,
				registry,
			)?,
		})
	}
}
@@ -20,38 +20,94 @@
//! Signature-related code
use libp2p::{
identity::{Keypair, PublicKey},
PeerId,
};
pub use libp2p::identity::SigningError;
/// Public key.
///
/// Abstracts over the two supported networking backends' key types.
pub enum PublicKey {
	/// Libp2p public key.
	Libp2p(libp2p::identity::PublicKey),
	/// Litep2p public key.
	Litep2p(litep2p::crypto::PublicKey),
}
impl PublicKey {
	/// Protobuf-encode [`PublicKey`].
	pub fn encode_protobuf(&self) -> Vec<u8> {
		match self {
			Self::Libp2p(public) => public.encode_protobuf(),
			Self::Litep2p(public) => public.to_protobuf_encoding(),
		}
	}

	/// Get `PeerId` of the [`PublicKey`].
	///
	/// The backend-specific peer ID is converted into the backend-agnostic
	/// `sc_network_types::PeerId`.
	pub fn to_peer_id(&self) -> sc_network_types::PeerId {
		match self {
			Self::Libp2p(public) => public.to_peer_id().into(),
			Self::Litep2p(public) => public.to_peer_id().into(),
		}
	}
}
/// Keypair.
///
/// Abstracts over the two supported networking backends' keypair types.
pub enum Keypair {
	/// Libp2p keypair.
	Libp2p(libp2p::identity::Keypair),
	/// Litep2p keypair.
	Litep2p(litep2p::crypto::ed25519::Keypair),
}
impl Keypair {
	/// Generate ed25519 keypair.
	///
	/// Newly generated keys always use the litep2p variant.
	pub fn generate_ed25519() -> Self {
		Keypair::Litep2p(litep2p::crypto::ed25519::Keypair::generate())
	}

	/// Get [`Keypair`]'s public key.
	pub fn public(&self) -> PublicKey {
		match self {
			Keypair::Libp2p(keypair) => PublicKey::Libp2p(keypair.public()),
			Keypair::Litep2p(keypair) => PublicKey::Litep2p(keypair.public().into()),
		}
	}
}
/// A result of signing a message with a network identity. Since `PeerId` is potentially a hash of a
/// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check
/// if the signature was made by the entity that controls a given `PeerId`.
///
/// The key may originate from either networking backend (see [`PublicKey`]).
pub struct Signature {
	/// The public key derived from the network identity that signed the message.
	pub public_key: PublicKey,
	/// The actual signature made for the message signed.
	pub bytes: Vec<u8>,
}
impl Signature {
/// Create new [`Signature`].
pub fn new(public_key: PublicKey, bytes: Vec<u8>) -> Self {
Self { public_key, bytes }
}
/// Create a signature for a message with a given network identity.
pub fn sign_message(
message: impl AsRef<[u8]>,
keypair: &Keypair,
) -> Result<Self, SigningError> {
let public_key = keypair.public();
let bytes = keypair.sign(message.as_ref())?;
Ok(Self { public_key, bytes })
}
match keypair {
Keypair::Libp2p(keypair) => {
let public_key = keypair.public();
let bytes = keypair.sign(message.as_ref())?;
/// Verify whether the signature was made for the given message by the entity that controls the
/// given `PeerId`.
pub fn verify(&self, message: impl AsRef<[u8]>, peer_id: &PeerId) -> bool {
*peer_id == self.public_key.to_peer_id() &&
self.public_key.verify(message.as_ref(), &self.bytes)
Ok(Signature { public_key: PublicKey::Libp2p(public_key), bytes })
},
Keypair::Litep2p(keypair) => {
let public_key = keypair.public();
let bytes = keypair.sign(message.as_ref());
Ok(Signature { public_key: PublicKey::Litep2p(public_key.into()), bytes })
},
}
}
}
+211 -135
View File
@@ -21,28 +21,165 @@
//! Traits defined by `sc-network`.
use crate::{
config::MultiaddrWithPeerId,
error,
config::{IncomingRequest, MultiaddrWithPeerId, NotificationHandshake, Params, SetConfig},
error::{self, Error},
event::Event,
network_state::NetworkState,
request_responses::{IfDisconnected, RequestFailure},
service::signature::Signature,
service::{metrics::NotificationMetrics, signature::Signature, PeerStoreProvider},
types::ProtocolName,
ReputationChange,
Multiaddr, ReputationChange,
};
use futures::{channel::oneshot, Stream};
use libp2p::{Multiaddr, PeerId};
use prometheus_endpoint::Registry;
use sc_network_common::role::ObservedRole;
use sc_client_api::BlockBackend;
use sc_network_common::{role::ObservedRole, ExHashT};
use sc_network_types::PeerId;
use sp_runtime::traits::Block as BlockT;
use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc};
use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration};
pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey};
/// Supertrait defining the services provided by [`NetworkBackend`] service handle.
///
/// Used as `Arc<dyn NetworkService>` (see [`NetworkBackend::network_service`]) so
/// higher-level subsystems interact with the network independently of the backend.
pub trait NetworkService:
	NetworkSigner
	+ NetworkDHTProvider
	+ NetworkStatusProvider
	+ NetworkPeers
	+ NetworkEventStream
	+ NetworkStateInfo
	+ NetworkRequest
	+ Send
	+ Sync
	+ 'static
{
}
// Blanket implementation: any type implementing every constituent service trait is
// automatically a `NetworkService`.
impl<T> NetworkService for T where
	T: NetworkSigner
		+ NetworkDHTProvider
		+ NetworkStatusProvider
		+ NetworkPeers
		+ NetworkEventStream
		+ NetworkStateInfo
		+ NetworkRequest
		+ Send
		+ Sync
		+ 'static
{
}
/// Trait defining the required functionality from a notification protocol configuration.
///
/// Implemented by each backend's concrete notification config type (see
/// [`NetworkBackend::NotificationProtocolConfig`]).
pub trait NotificationConfig: Debug {
	/// Get access to the `SetConfig` of the notification protocol.
	fn set_config(&self) -> &SetConfig;

	/// Get protocol name.
	fn protocol_name(&self) -> &ProtocolName;
}
/// Trait defining the required functionality from a request-response protocol configuration.
///
/// Implemented by each backend's concrete request-response config type (see
/// [`NetworkBackend::RequestResponseProtocolConfig`]).
pub trait RequestResponseConfig: Debug {
	/// Get protocol name.
	fn protocol_name(&self) -> &ProtocolName;
}
/// Trait defining required functionality from `PeerStore`.
#[async_trait::async_trait]
pub trait PeerStore {
	/// Get handle to `PeerStore`.
	///
	/// The handle is a shareable `Arc<dyn PeerStoreProvider>` used to query/report
	/// peer reputations while the `PeerStore` event loop runs.
	fn handle(&self) -> Arc<dyn PeerStoreProvider>;

	/// Start running `PeerStore` event loop.
	///
	/// Consumes `self`; the returned future is driven by the caller.
	async fn run(self);
}
/// Networking backend.
///
/// Abstracts over a concrete networking implementation so the rest of the node can
/// be written against a single, backend-agnostic interface.
#[async_trait::async_trait]
pub trait NetworkBackend<B: BlockT + 'static, H: ExHashT>: Send + 'static {
	/// Type representing notification protocol-related configuration.
	type NotificationProtocolConfig: NotificationConfig;

	/// Type representing request-response protocol-related configuration.
	type RequestResponseProtocolConfig: RequestResponseConfig;

	/// Type implementing `NetworkService` for the networking backend.
	///
	/// `NetworkService` allows other subsystems of the blockchain to interact with `sc-network`
	/// using `NetworkService`.
	type NetworkService<Block, Hash>: NetworkService + Clone;

	/// Type implementing [`PeerStore`].
	type PeerStore: PeerStore;

	/// Bitswap config.
	type BitswapConfig;

	/// Create new `NetworkBackend`.
	fn new(params: Params<B, H, Self>) -> Result<Self, Error>
	where
		Self: Sized;

	/// Get handle to `NetworkService` of the `NetworkBackend`.
	fn network_service(&self) -> Arc<dyn NetworkService>;

	/// Create [`PeerStore`] seeded with the given bootnode peer IDs.
	fn peer_store(bootnodes: Vec<PeerId>) -> Self::PeerStore;

	/// Register metrics that are used by the notification protocols.
	fn register_notification_metrics(registry: Option<&Registry>) -> NotificationMetrics;

	/// Create Bitswap server.
	///
	/// Returns the server's event-loop future (to be spawned by the caller) and the
	/// backend-specific Bitswap configuration.
	fn bitswap_server(
		client: Arc<dyn BlockBackend<B> + Send + Sync>,
	) -> (Pin<Box<dyn Future<Output = ()> + Send>>, Self::BitswapConfig);

	/// Create notification protocol configuration and an associated `NotificationService`
	/// for the protocol.
	fn notification_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_notification_size: u64,
		handshake: Option<NotificationHandshake>,
		set_config: SetConfig,
		metrics: NotificationMetrics,
		peerstore_handle: Arc<dyn PeerStoreProvider>,
	) -> (Self::NotificationProtocolConfig, Box<dyn NotificationService>);

	/// Create request-response protocol configuration.
	fn request_response_config(
		protocol_name: ProtocolName,
		fallback_names: Vec<ProtocolName>,
		max_request_size: u64,
		max_response_size: u64,
		request_timeout: Duration,
		inbound_queue: Option<async_channel::Sender<IncomingRequest>>,
	) -> Self::RequestResponseProtocolConfig;

	/// Start [`NetworkBackend`] event loop.
	///
	/// Consumes the backend.
	async fn run(mut self);
}
/// Signer with network identity
pub trait NetworkSigner {
/// Signs the message with the `KeyPair` that defines the local [`PeerId`].
fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError>;
fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError>;
/// Verify signature using peer's public key.
///
/// `public_key` must be Protobuf-encoded ed25519 public key.
///
/// Returns `Err(())` if public cannot be parsed into a valid ed25519 public key.
fn verify(
&self,
peer_id: sc_network_types::PeerId,
public_key: &Vec<u8>,
signature: &Vec<u8>,
message: &Vec<u8>,
) -> Result<bool, String>;
}
impl<T> NetworkSigner for Arc<T>
@@ -50,9 +187,19 @@ where
T: ?Sized,
T: NetworkSigner,
{
fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result<Signature, SigningError> {
fn sign_with_local_identity(&self, msg: Vec<u8>) -> Result<Signature, SigningError> {
T::sign_with_local_identity(self, msg)
}
fn verify(
&self,
peer_id: sc_network_types::PeerId,
public_key: &Vec<u8>,
signature: &Vec<u8>,
message: &Vec<u8>,
) -> Result<bool, String> {
T::verify(self, peer_id, public_key, signature, message)
}
}
/// Provides access to the networking DHT.
@@ -117,6 +264,11 @@ pub trait NetworkStatusProvider {
///
/// Returns an error if the `NetworkWorker` is no longer running.
async fn status(&self) -> Result<NetworkStatus, ()>;
/// Get the network state.
///
/// Returns an error if the `NetworkWorker` is no longer running.
async fn network_state(&self) -> Result<NetworkState, ()>;
}
// Manual implementation to avoid extra boxing here
@@ -134,9 +286,20 @@ where
{
T::status(self)
}
fn network_state<'life0, 'async_trait>(
&'life0 self,
) -> Pin<Box<dyn Future<Output = Result<NetworkState, ()>> + Send + 'async_trait>>
where
'life0: 'async_trait,
Self: 'async_trait,
{
T::network_state(self)
}
}
/// Provides low-level API for manipulating network peers.
#[async_trait::async_trait]
pub trait NetworkPeers {
/// Set authorized peers.
///
@@ -237,9 +400,15 @@ pub trait NetworkPeers {
/// decoded into a role, the role queried from `PeerStore` and if the role is not stored
/// there either, `None` is returned and the peer should be discarded.
fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole>;
/// Get the list of reserved peers.
///
/// Returns an error if the `NetworkWorker` is no longer running.
async fn reserved_peers(&self) -> Result<Vec<PeerId>, ()>;
}
// Manual implementation to avoid extra boxing here
#[async_trait::async_trait]
impl<T> NetworkPeers for Arc<T>
where
T: ?Sized,
@@ -316,6 +485,16 @@ where
fn peer_role(&self, peer_id: PeerId, handshake: Vec<u8>) -> Option<ObservedRole> {
T::peer_role(self, peer_id, handshake)
}
fn reserved_peers<'life0, 'async_trait>(
&'life0 self,
) -> Pin<Box<dyn Future<Output = Result<Vec<PeerId>, ()>> + Send + 'async_trait>>
where
'life0: 'async_trait,
Self: 'async_trait,
{
T::reserved_peers(self)
}
}
/// Provides access to network-level event stream.
@@ -389,15 +568,15 @@ pub trait NotificationSender: Send + Sync + 'static {
-> Result<Box<dyn NotificationSenderReady + '_>, NotificationSenderError>;
}
/// Error returned by [`NetworkNotification::notification_sender`].
/// Error returned by the notification sink.
#[derive(Debug, thiserror::Error)]
pub enum NotificationSenderError {
/// The notification receiver has been closed, usually because the underlying connection
/// closed.
///
/// Some of the notifications most recently sent may not have been received. However,
/// the peer may still be connected and a new `NotificationSender` for the same
/// protocol obtained from [`NetworkNotification::notification_sender`].
/// the peer may still be connected and a new notification sink for the same
/// protocol obtained from [`NotificationService::message_sink()`].
#[error("The notification receiver has been closed")]
Closed,
/// Protocol name hasn't been registered.
@@ -405,127 +584,6 @@ pub enum NotificationSenderError {
BadProtocol,
}
/// Provides ability to send network notifications.
///
/// See [`NotificationSender`] and [`NotificationSenderReady`] for the
/// backpressure-aware two-step sending flow offered by
/// [`NetworkNotification::notification_sender`].
pub trait NetworkNotification {
	/// Appends a notification to the buffer of pending outgoing notifications with the given peer.
	/// Has no effect if the notifications channel with this protocol name is not open.
	///
	/// If the buffer of pending outgoing notifications with that peer is full, the notification
	/// is silently dropped and the connection to the remote will start being shut down. This
	/// happens if you call this method at a higher rate than the rate at which the peer processes
	/// these notifications, or if the available network bandwidth is too low.
	///
	/// For this reason, this method is considered soft-deprecated. You are encouraged to use
	/// [`NetworkNotification::notification_sender`] instead.
	///
	/// > **Note**: The reason why this is a no-op in the situation where we have no channel is
	/// >           that we don't guarantee message delivery anyway. Networking issues can cause
	/// >           connections to drop at any time, and higher-level logic shouldn't differentiate
	/// >           between the remote voluntarily closing a substream or a network error
	/// >           preventing the message from being delivered.
	///
	/// The protocol must have been registered with
	/// `crate::config::NetworkConfiguration::notifications_protocols`.
	fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>);

	/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
	///
	/// A `NotificationSender` is scoped to a particular connection to the peer that holds
	/// a receiver. With a `NotificationSender` at hand, sending a notification is done in two
	/// steps:
	///
	/// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready
	/// for another notification, yielding a [`NotificationSenderReady`] token.
	/// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation
	/// can only fail if the underlying notification substream or connection has suddenly closed.
	///
	/// An error is returned by [`NotificationSenderReady::send`] if there exists no open
	/// notifications substream with that combination of peer and protocol, or if the remote
	/// has asked to close the notifications substream. If that happens, it is guaranteed that an
	/// [`Event::NotificationStreamClosed`] has been generated on the stream returned by
	/// [`NetworkEventStream::event_stream`].
	///
	/// If the remote requests to close the notifications substream, all notifications successfully
	/// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the
	/// substream actually gets closed, but attempting to enqueue more notifications will now
	/// return an error. It is however possible for the entire connection to be abruptly closed,
	/// in which case enqueued notifications will be lost.
	///
	/// The protocol must have been registered with
	/// `crate::config::NetworkConfiguration::notifications_protocols`.
	///
	/// # Usage
	///
	/// This method returns a struct that allows waiting until there is space available in the
	/// buffer of messages towards the given peer. If the peer processes notifications at a slower
	/// rate than we send them, this buffer will quickly fill up.
	///
	/// As such, you should never do something like this:
	///
	/// ```ignore
	/// // Do NOT do this
	/// for peer in peers {
	///     if let Ok(n) = network.notification_sender(peer, ...) {
	///         if let Ok(s) = n.ready().await {
	///             let _ = s.send(...);
	///         }
	///     }
	/// }
	/// ```
	///
	/// Doing so would slow down all peers to the rate of the slowest one. A malicious or
	/// malfunctioning peer could intentionally process notifications at a very slow rate.
	///
	/// Instead, you are encouraged to maintain your own buffer of notifications on top of the one
	/// maintained by `sc-network`, and use `notification_sender` to progressively send out
	/// elements from your buffer. If this additional buffer is full (which will happen at some
	/// point if the peer is too slow to process notifications), appropriate measures can be taken,
	/// such as removing non-critical notifications from the buffer or disconnecting the peer
	/// using [`NetworkPeers::disconnect_peer`].
	///
	///
	/// Notifications              Per-peer buffer
	///   broadcast    +------->   of notifications   +-->  `notification_sender`  +-->  Internet
	///                    ^       (not covered by
	///                    |         sc-network)
	///                    +
	///      Notifications should be dropped
	///             if buffer is full
	///
	///
	/// See also the `sc-network-gossip` crate for a higher-level way to send notifications.
	fn notification_sender(
		&self,
		target: PeerId,
		protocol: ProtocolName,
	) -> Result<Box<dyn NotificationSender>, NotificationSenderError>;

	/// Set handshake for the notification protocol.
	///
	/// NOTE(review): presumably the new handshake only applies to substreams opened after this
	/// call — confirm against the implementation before relying on it.
	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>);
}
/// Forwarding implementation so an `Arc`-wrapped network handle can be used
/// anywhere a [`NetworkNotification`] is expected. Each method simply derefs
/// through the `Arc` and delegates to the inner `T`.
impl<T> NetworkNotification for Arc<T>
where
	T: ?Sized,
	T: NetworkNotification,
{
	fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec<u8>) {
		(**self).write_notification(target, protocol, message)
	}

	fn notification_sender(
		&self,
		target: PeerId,
		protocol: ProtocolName,
	) -> Result<Box<dyn NotificationSender>, NotificationSenderError> {
		(**self).notification_sender(target, protocol)
	}

	fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec<u8>) {
		(**self).set_notification_handshake(protocol, handshake)
	}
}
/// Provides ability to send network requests.
#[async_trait::async_trait]
pub trait NetworkRequest {
@@ -662,6 +720,15 @@ pub enum Direction {
Outbound,
}
impl From<litep2p::protocol::notification::Direction> for Direction {
	/// Map a `litep2p` notification direction onto the local [`Direction`],
	/// variant by variant.
	fn from(direction: litep2p::protocol::notification::Direction) -> Self {
		use litep2p::protocol::notification::Direction as Litep2pDirection;

		match direction {
			Litep2pDirection::Inbound => Self::Inbound,
			Litep2pDirection::Outbound => Self::Outbound,
		}
	}
}
impl Direction {
/// Is the direction inbound.
pub fn is_inbound(&self) -> bool {
@@ -771,13 +838,13 @@ pub trait NotificationService: Debug + Send {
async fn close_substream(&mut self, peer: PeerId) -> Result<(), ()>;
/// Send synchronous `notification` to `peer`.
fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>);
fn send_sync_notification(&mut self, peer: &PeerId, notification: Vec<u8>);
/// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
///
/// Returns an error if the peer doesn't exist.
async fn send_async_notification(
&self,
&mut self,
peer: &PeerId,
notification: Vec<u8>,
) -> Result<(), error::Error>;
@@ -827,3 +894,12 @@ pub trait MessageSink: Send + Sync {
/// Returns an error if the peer does not exist.
async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error>;
}
/// Trait defining the behavior of a bandwidth sink.
///
/// Exposes running totals of bytes transferred over the network in each
/// direction, e.g., for status/metrics reporting.
pub trait BandwidthSink: Send + Sync {
	/// Get the total number of bytes received.
	fn total_inbound(&self) -> u64;

	/// Get the total number of bytes sent.
	fn total_outbound(&self) -> u64;
}
+20
View File
@@ -28,6 +28,8 @@ use std::{
sync::Arc,
};
pub use libp2p::{multiaddr, Multiaddr, PeerId};
/// The protocol name transmitted on the wire.
#[derive(Debug, Clone)]
pub enum ProtocolName {
@@ -98,6 +100,24 @@ impl upgrade::ProtocolName for ProtocolName {
}
}
impl From<ProtocolName> for litep2p::ProtocolName {
	/// Convert an `sc-network` protocol name into its `litep2p` counterpart,
	/// forwarding the inner value of either variant.
	fn from(protocol: ProtocolName) -> Self {
		match protocol {
			ProtocolName::Static(name) => Self::from(name),
			ProtocolName::OnHeap(name) => Self::from(name),
		}
	}
}
impl From<litep2p::ProtocolName> for ProtocolName {
	/// Convert a `litep2p` protocol name into the `sc-network` representation,
	/// forwarding the inner value of either variant.
	fn from(protocol: litep2p::ProtocolName) -> Self {
		match protocol {
			litep2p::ProtocolName::Static(name) => name.into(),
			litep2p::ProtocolName::Allocated(name) => name.into(),
		}
	}
}
#[cfg(test)]
mod tests {
use super::ProtocolName;