Rework priority groups, take 2 (#7700)
* Rework priority groups
* Broken tests fix
* Fix warning causing CI to fail
* [Hack] Try restore backwards-compatibility
* Fix peerset bug
* Doc fixes and clean up
* Error on state mismatch
* Try debug CI
* CI debugging
* [CI debug] Can I please see this line
* Revert "[CI debug] Can I please see this line"
  This reverts commit 4b7cf7c1511f579cd818b21d46bd11642dfac5cb.
* Revert "CI debugging"
  This reverts commit 9011f1f564b860386dc7dd6ffa9fc34ea7107623.
* Fix error! which isn't actually an error
* Fix Ok() returned when actually Err()
* Tweaks and fixes
* Fix build
* Peerset bugfix
* [Debug] Try outbound GrandPa slots
* Another bugfix
* Revert "[Debug] Try outbound GrandPa slots"
  This reverts commit d175b9208c088faad77d9f0ce36ff6f48bd92dd3.
* [Debug] Try outbound GrandPa slots
* Apply suggestions from code review
  Co-authored-by: Max Inden <mail@max-inden.de>
* Use consts for hardcoded peersets
* Revert "Try debug CI"
  This reverts commit 62c4ad5e79c03d561c714a008022ecac463a597e.
* Renames
* Line widths
* Add doc

Co-authored-by: Max Inden <mail@max-inden.de>
@@ -30,7 +30,7 @@
 use crate::{
 	ExHashT, NetworkStateInfo, NetworkStatus,
 	behaviour::{self, Behaviour, BehaviourOut},
-	config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig},
+	config::{parse_str_addr, Params, Role, TransportConfig},
 	DhtEvent,
 	discovery::DiscoveryConfig,
 	error::Error,
@@ -147,9 +147,15 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 			&params.network_config.transport,
 		)?;
 		ensure_addresses_consistent_with_transport(
-			params.network_config.reserved_nodes.iter().map(|x| &x.multiaddr),
+			params.network_config.default_peers_set.reserved_nodes.iter().map(|x| &x.multiaddr),
 			&params.network_config.transport,
 		)?;
+		for extra_set in &params.network_config.extra_sets {
+			ensure_addresses_consistent_with_transport(
+				extra_set.set_config.reserved_nodes.iter().map(|x| &x.multiaddr),
+				&params.network_config.transport,
+			)?;
+		}
 		ensure_addresses_consistent_with_transport(
 			params.network_config.public_addresses.iter(),
 			&params.network_config.transport,
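The loop added above runs the same transport-consistency check over each extra set's reserved nodes. The body of `ensure_addresses_consistent_with_transport` is not part of this diff; as a rough, assumption-labelled sketch, a memory-only transport should only ever be given `/memory/…` addresses:

    // Illustrative only: a simplified stand-in for the real check, whose body
    // this diff does not show. Assumes a memory-only transport must only be
    // configured with `/memory/…` multiaddresses.
    use libp2p::{multiaddr::Protocol, Multiaddr};

    fn addresses_consistent_with_memory_only<'a>(
        mut addresses: impl Iterator<Item = &'a Multiaddr>,
    ) -> Result<(), String> {
        if addresses.all(|addr| matches!(addr.iter().next(), Some(Protocol::Memory(_)))) {
            Ok(())
        } else {
            Err("non-memory address configured with a memory-only transport".to_string())
        }
    }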
@@ -157,12 +163,35 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 
 		let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker");
 
-		if let Some(path) = params.network_config.net_config_path {
-			fs::create_dir_all(&path)?;
+		if let Some(path) = &params.network_config.net_config_path {
+			fs::create_dir_all(path)?;
 		}
 
-		// List of multiaddresses that we know in the network.
-		let mut known_addresses = Vec::new();
+		// Private and public keys configuration.
+		let local_identity = params.network_config.node_key.clone().into_keypair()?;
+		let local_public = local_identity.public();
+		let local_peer_id = local_public.clone().into_peer_id();
+		info!(
+			target: "sub-libp2p",
+			"🏷 Local node identity is: {}",
+			local_peer_id.to_base58(),
+		);
+
+		let (protocol, peerset_handle, mut known_addresses) = Protocol::new(
+			protocol::ProtocolConfig {
+				roles: From::from(&params.role),
+				max_parallel_downloads: params.network_config.max_parallel_downloads,
+			},
+			params.chain.clone(),
+			params.transaction_pool,
+			params.protocol_id.clone(),
+			&params.role,
+			&params.network_config,
+			params.block_announce_validator,
+			params.metrics_registry.as_ref(),
+		)?;
+
 		let mut bootnodes = Vec::new();
 		let mut boot_node_ids = HashSet::new();
@@ -192,71 +221,21 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 			}
 		)?;
 
-		// Initialize the peers we should always be connected to.
-		let priority_groups = {
-			let mut reserved_nodes = HashSet::new();
-			for reserved in params.network_config.reserved_nodes.iter() {
-				reserved_nodes.insert(reserved.peer_id.clone());
-				known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone()));
-			}
-
-			let print_deprecated_message = match &params.role {
-				Role::Sentry { .. } => true,
-				Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true,
-				_ => false,
-			};
-			if print_deprecated_message {
-				log::warn!(
-					"🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \
-					CLI options will eventually be removed in a future version. The Substrate \
-					and Polkadot networking protocol require validators to be \
-					publicly-accessible. Please do not block access to your validator nodes. \
-					For details, see https://github.com/paritytech/substrate/issues/6845."
-				);
-			}
-
-			let mut sentries_and_validators = HashSet::new();
-			match &params.role {
-				Role::Sentry { validators } => {
-					for validator in validators {
-						sentries_and_validators.insert(validator.peer_id.clone());
-						reserved_nodes.insert(validator.peer_id.clone());
-						known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone()));
-					}
-				}
-				Role::Authority { sentry_nodes } => {
-					for sentry_node in sentry_nodes {
-						sentries_and_validators.insert(sentry_node.peer_id.clone());
-						reserved_nodes.insert(sentry_node.peer_id.clone());
-						known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone()));
-					}
-				}
-				_ => {}
-			}
-
-			vec![
-				("reserved".to_owned(), reserved_nodes),
-				("sentries_and_validators".to_owned(), sentries_and_validators),
-			]
-		};
+		// Print a message about the deprecation of sentry nodes.
+		let print_deprecated_message = match &params.role {
+			Role::Sentry { .. } => true,
+			Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true,
+			_ => false,
+		};
 
-		let peerset_config = sc_peerset::PeersetConfig {
-			in_peers: params.network_config.in_peers,
-			out_peers: params.network_config.out_peers,
-			bootnodes,
-			reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny,
-			priority_groups,
-		};
-
-		// Private and public keys configuration.
-		let local_identity = params.network_config.node_key.clone().into_keypair()?;
-		let local_public = local_identity.public();
-		let local_peer_id = local_public.clone().into_peer_id();
-		info!(
-			target: "sub-libp2p",
-			"🏷 Local node identity is: {}",
-			local_peer_id.to_base58(),
-		);
+		if print_deprecated_message {
+			log::warn!(
+				"🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \
+				CLI options will eventually be removed in a future version. The Substrate \
+				and Polkadot networking protocol require validators to be \
+				publicly-accessible. Please do not block access to your validator nodes. \
+				For details, see https://github.com/paritytech/substrate/issues/6845."
+			);
+		}
 
 		let checker = params.on_demand.as_ref()
 			.map(|od| od.checker().clone())
@@ -264,20 +243,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 
 		let num_connected = Arc::new(AtomicUsize::new(0));
 		let is_major_syncing = Arc::new(AtomicBool::new(false));
-		let (protocol, peerset_handle) = Protocol::new(
-			protocol::ProtocolConfig {
-				roles: From::from(&params.role),
-				max_parallel_downloads: params.network_config.max_parallel_downloads,
-			},
-			local_peer_id.clone(),
-			params.chain.clone(),
-			params.transaction_pool,
-			params.protocol_id.clone(),
-			peerset_config,
-			params.block_announce_validator,
-			params.metrics_registry.as_ref(),
-			boot_node_ids.clone(),
-		)?;
 
 		// Build the swarm.
 		let (mut swarm, bandwidth): (Swarm<B, H>, _) = {
@@ -299,7 +264,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 		let discovery_config = {
 			let mut config = DiscoveryConfig::new(local_public.clone());
 			config.with_user_defined(known_addresses);
-			config.discovery_limit(u64::from(params.network_config.out_peers) + 15);
+			config.discovery_limit(u64::from(params.network_config.default_peers_set.out_peers) + 15);
 			config.add_protocol(params.protocol_id.clone());
 			config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht);
 			config.use_kademlia_disjoint_query_paths(params.network_config.kademlia_disjoint_query_paths);
@@ -318,7 +283,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 			config
 		};
 
-		let mut behaviour = {
+		let behaviour = {
 			let result = Behaviour::new(
 				protocol,
 				params.role,
@@ -340,9 +305,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 			}
 		};
 
-		for protocol in &params.network_config.notifications_protocols {
-			behaviour.register_notifications_protocol(protocol.clone());
-		}
 		let (transport, bandwidth) = {
 			let (config_mem, config_wasm) = match params.network_config.transport {
 				TransportConfig::MemoryOnly => (true, None),
@@ -551,8 +513,6 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkWorker<B, H> {
 					version_string: swarm.node(peer_id)
 						.and_then(|i| i.client_version().map(|s| s.to_owned())),
 					latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()),
-					enabled: swarm.user_protocol().is_enabled(&peer_id),
-					open: swarm.user_protocol().is_open(&peer_id),
 					known_addresses,
 				}))
 			}).collect()
@@ -622,7 +582,9 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 	/// Need a better solution to manage authorized peers, but now just use reserved peers for
 	/// prototyping.
 	pub fn set_authorized_peers(&self, peers: HashSet<PeerId>) {
-		self.peerset.set_reserved_peers(peers)
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::SetReserved(peers));
 	}
 
 	/// Set authorized_only flag.
@@ -630,7 +592,9 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 	/// Need a better solution to decide authorized_only, but now just use reserved_only flag for
 	/// prototyping.
 	pub fn set_authorized_only(&self, reserved_only: bool) {
-		self.peerset.set_reserved_only(reserved_only)
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only));
 	}
 
 	/// Appends a notification to the buffer of pending outgoing notifications with the given peer.
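Both setters now use the same fire-and-forget shape: the public `NetworkService` handle enqueues a `ServiceToWorkerMsg` on an unbounded channel, and the background `NetworkWorker` applies it to the peerset when polled. A minimal sketch of that pattern, with hypothetical `Handle` and `WorkerMsg` types standing in for the sc-network ones:

    // Minimal sketch of the service-to-worker pattern used above. `WorkerMsg`
    // and `Handle` are hypothetical stand-ins, not sc-network types.
    use futures::channel::mpsc;

    enum WorkerMsg {
        SetReservedOnly(bool),
    }

    struct Handle {
        to_worker: mpsc::UnboundedSender<WorkerMsg>,
    }

    impl Handle {
        // Mirrors `set_authorized_only`: the send result is deliberately
        // ignored, since a closed channel only means the worker has shut down.
        fn set_reserved_only(&self, reserved_only: bool) {
            let _ = self.to_worker.unbounded_send(WorkerMsg::SetReservedOnly(reserved_only));
        }
    }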
@@ -686,7 +650,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 			message.len()
 		);
 		trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target);
-		sink.send_sync_notification(protocol, message);
+		sink.send_sync_notification(message);
 	}
 
 	/// Obtains a [`NotificationSender`] for a connected peer, if it exists.
@@ -871,8 +835,12 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 	/// Disconnect from a node as soon as possible.
 	///
 	/// This triggers the same effects as if the connection had closed itself spontaneously.
-	pub fn disconnect_peer(&self, who: PeerId) {
-		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who));
+	///
+	/// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also
+	/// prevents the local node from re-establishing an outgoing substream to this peer until it
+	/// is added again.
+	pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into<Cow<'static, str>>) {
+		let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into()));
 	}
 
 	/// Request a justification for the given block from the network.
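With the rework, a disconnect is scoped to a single protocol, i.e. one peer set, rather than to the peer as a whole. A hedged usage sketch; the protocol name below is made up and would have to match a configured set:

    // Usage sketch only; the protocol name is illustrative.
    use libp2p::PeerId;
    use sc_network::{ExHashT, NetworkService};
    use sp_runtime::traits::Block as BlockT;

    fn drop_from_custom_set<B: BlockT + 'static, H: ExHashT>(
        network: &NetworkService<B, H>,
        who: PeerId,
    ) {
        // Closes the substream for this set only; the peer may presumably stay
        // connected on other sets, e.g. the default sync set.
        network.disconnect_peer(who, "/example/notifications/1");
    }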
@@ -910,19 +878,19 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 			.unbounded_send(ServiceToWorkerMsg::PutValue(key, value));
 	}
 
-	/// Connect to unreserved peers and allow unreserved peers to connect.
+	/// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes.
 	pub fn accept_unreserved_peers(&self) {
-		self.peerset.set_reserved_only(false);
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false));
 	}
 
-	/// Disconnect from unreserved peers and deny new unreserved peers to connect.
+	/// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing
+	/// purposes.
 	pub fn deny_unreserved_peers(&self) {
-		self.peerset.set_reserved_only(true);
-	}
-
-	/// Removes a `PeerId` from the list of reserved peers.
-	pub fn remove_reserved_peer(&self, peer: PeerId) {
-		self.peerset.remove_reserved_peer(peer);
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true));
 	}
 
 	/// Adds a `PeerId` and its address as reserved. The string should encode the address
@@ -936,10 +904,71 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 		if peer_id == self.local_peer_id {
 			return Err("Local peer ID cannot be added as a reserved peer.".to_string())
 		}
-		self.peerset.add_reserved_peer(peer_id.clone());
+
 		let _ = self
 			.to_worker
-			.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
+			.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr));
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id));
 		Ok(())
 	}
 
+	/// Removes a `PeerId` from the list of reserved peers.
+	pub fn remove_reserved_peer(&self, peer_id: PeerId) {
+		let _ = self
+			.to_worker
+			.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id));
+	}
+
+	/// Add peers to a peer set.
+	///
+	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also
+	/// consist of only `/p2p/<peerid>`.
+	///
+	/// Returns an `Err` if one of the given addresses is invalid or contains an
+	/// invalid peer ID (which includes the local peer ID).
+	pub fn add_peers_to_reserved_set(&self, protocol: Cow<'static, str>, peers: HashSet<Multiaddr>) -> Result<(), String> {
+		let peers = self.split_multiaddr_and_peer_id(peers)?;
+
+		for (peer_id, addr) in peers.into_iter() {
+			// Make sure the local peer ID is never added to the PSM.
+			if peer_id == self.local_peer_id {
+				return Err("Local peer ID cannot be added as a reserved peer.".to_string())
+			}
+
+			if !addr.is_empty() {
+				let _ = self
+					.to_worker
+					.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr));
+			}
+			let _ = self
+				.to_worker
+				.unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id));
+		}
+
+		Ok(())
+	}
+
+	/// Remove peers from a peer set.
+	///
+	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`.
+	///
+	/// Returns an `Err` if one of the given addresses is invalid or contains an
+	/// invalid peer ID (which includes the local peer ID).
+	//
+	// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for convenience.
+	pub fn remove_peers_from_reserved_set(
+		&self,
+		protocol: Cow<'static, str>,
+		peers: HashSet<Multiaddr>
+	) -> Result<(), String> {
+		let peers = self.split_multiaddr_and_peer_id(peers)?;
+		for (peer_id, _) in peers.into_iter() {
+			let _ = self
+				.to_worker
+				.unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id));
+		}
+		Ok(())
+	}
+
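A hedged usage sketch for the new reserved-set API above; the set name and address are supplied by the caller, and the address must carry its `/p2p/<peer id>` suffix as the doc comment requires:

    // Usage sketch only; assumes `addr_str` ends with `/p2p/<peer id>`.
    use std::{borrow::Cow, collections::HashSet};
    use libp2p::Multiaddr;
    use sc_network::{ExHashT, NetworkService};
    use sp_runtime::traits::Block as BlockT;

    fn reserve_peer<B: BlockT + 'static, H: ExHashT>(
        network: &NetworkService<B, H>,
        protocol: &'static str, // name of the peer set, e.g. a notifications protocol
        addr_str: &str,         // e.g. "/ip4/192.0.2.1/tcp/30333/p2p/<peer id>"
    ) -> Result<(), String> {
        let addr: Multiaddr = addr_str.parse().map_err(|e| format!("bad multiaddr: {}", e))?;
        let mut peers = HashSet::new();
        peers.insert(addr);
        network.add_peers_to_reserved_set(Cow::Borrowed(protocol), peers)
    }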
@@ -955,68 +984,53 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 			.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number));
 	}
 
-	/// Modify a peerset priority group.
+	/// Add a peer to a set of peers.
 	///
-	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`.
+	/// If the set has slots available, it will try to open a substream with this peer.
+	///
+	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also
+	/// consist of only `/p2p/<peerid>`.
 	///
 	/// Returns an `Err` if one of the given addresses is invalid or contains an
 	/// invalid peer ID (which includes the local peer ID).
-	//
-	// NOTE: even though this function is currently sync, it's marked as async for
-	// future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451.
-	pub async fn set_priority_group(&self, group_id: String, peers: HashSet<Multiaddr>) -> Result<(), String> {
+	pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet<Multiaddr>) -> Result<(), String> {
 		let peers = self.split_multiaddr_and_peer_id(peers)?;
 
-		let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect();
-
-		self.peerset.set_priority_group(group_id, peer_ids);
-
 		for (peer_id, addr) in peers.into_iter() {
+			// Make sure the local peer ID is never added to the PSM.
+			if peer_id == self.local_peer_id {
+				return Err("Local peer ID cannot be added as a reserved peer.".to_string())
+			}
+
+			if !addr.is_empty() {
+				let _ = self
+					.to_worker
+					.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr));
+			}
 			let _ = self
 				.to_worker
-				.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
+				.unbounded_send(ServiceToWorkerMsg::AddToPeersSet(protocol.clone(), peer_id));
 		}
 
 		Ok(())
 	}
 
-	/// Add peers to a peerset priority group.
+	/// Remove peers from a peer set.
 	///
+	/// If we currently have an open substream with this peer, it will soon be closed.
+	///
 	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`.
 	///
 	/// Returns an `Err` if one of the given addresses is invalid or contains an
 	/// invalid peer ID (which includes the local peer ID).
 	//
-	// NOTE: even though this function is currently sync, it's marked as async for
-	// future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451.
-	pub async fn add_to_priority_group(&self, group_id: String, peers: HashSet<Multiaddr>) -> Result<(), String> {
-		let peers = self.split_multiaddr_and_peer_id(peers)?;
-
-		for (peer_id, addr) in peers.into_iter() {
-			self.peerset.add_to_priority_group(group_id.clone(), peer_id.clone());
-
-			let _ = self
-				.to_worker
-				.unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr));
-		}
-
-		Ok(())
-	}
-
-	/// Remove peers from a peerset priority group.
-	///
-	/// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`.
-	///
-	/// Returns an `Err` if one of the given addresses is invalid or contains an
-	/// invalid peer ID (which includes the local peer ID).
-	//
-	// NOTE: even though this function is currently sync, it's marked as async for
-	// future-proofing, see https://github.com/paritytech/substrate/pull/7247#discussion_r502263451.
 	// NOTE: technically, this function only needs `Vec<PeerId>`, but we use `Multiaddr` here for convenience.
-	pub async fn remove_from_priority_group(&self, group_id: String, peers: HashSet<Multiaddr>) -> Result<(), String> {
+	pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet<Multiaddr>) -> Result<(), String> {
 		let peers = self.split_multiaddr_and_peer_id(peers)?;
 		for (peer_id, _) in peers.into_iter() {
-			self.peerset.remove_from_priority_group(group_id.clone(), peer_id);
+			let _ = self
+				.to_worker
+				.unbounded_send(ServiceToWorkerMsg::RemoveFromPeersSet(protocol.clone(), peer_id));
 		}
 		Ok(())
 	}
@@ -1033,7 +1047,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 			.unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number));
 	}
 
-	/// Utility function to extract `PeerId` from each `Multiaddr` for priority group updates.
+	/// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates.
 	///
 	/// Returns an `Err` if one of the given addresses is invalid or contains an
 	/// invalid peer ID (which includes the local peer ID).
@@ -1049,7 +1063,7 @@ impl<B: BlockT + 'static, H: ExHashT> NetworkService<B, H> {
 			// Make sure the local peer ID is never added to the PSM
 			// or added as a "known address", even if given.
 			if peer == self.local_peer_id {
-				Err("Local peer ID in priority group.".to_string())
+				Err("Local peer ID in peer set.".to_string())
 			} else {
 				Ok((peer, addr))
 			}
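The body of `split_multiaddr_and_peer_id` is outside this diff; the `/p2p/` handling it relies on can be sketched with plain libp2p types of that era (an assumption about its implementation, not a copy of it):

    // Assumption-labelled sketch: pop the trailing `/p2p/…` component off a
    // multiaddress and turn it into a `PeerId`, leaving the dial address behind.
    use libp2p::{multiaddr::Protocol, Multiaddr, PeerId};

    fn split(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), String> {
        match addr.pop() {
            Some(Protocol::P2p(multihash)) => {
                let peer = PeerId::from_multihash(multihash)
                    .map_err(|_| "invalid peer ID multihash".to_string())?;
                // `addr` may now be empty, i.e. the input was just `/p2p/<peer id>`.
                Ok((peer, addr))
            }
            _ => Err("address must end with a /p2p/ component".to_string()),
        }
    }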
@@ -1115,11 +1129,12 @@ impl NotificationSender {
 	/// Returns a future that resolves when the `NotificationSender` is ready to send a notification.
 	pub async fn ready<'a>(&'a self) -> Result<NotificationSenderReady<'a>, NotificationSenderError> {
 		Ok(NotificationSenderReady {
-			ready: match self.sink.reserve_notification(self.protocol_name.clone()).await {
+			ready: match self.sink.reserve_notification().await {
 				Ok(r) => r,
 				Err(()) => return Err(NotificationSenderError::Closed),
 			},
 			peer_id: self.sink.peer_id(),
+			protocol_name: &self.protocol_name,
 			notification_size_metric: self.notification_size_metric.clone(),
 		})
 	}
@@ -1133,6 +1148,9 @@ pub struct NotificationSenderReady<'a> {
 	/// Target of the notification.
 	peer_id: &'a PeerId,
 
+	/// Name of the protocol on the wire.
+	protocol_name: &'a Cow<'static, str>,
+
 	/// Field extracted from the [`Metrics`] struct and necessary to report the
 	/// notifications-related metrics.
 	notification_size_metric: Option<Histogram>,
@@ -1149,9 +1167,9 @@ impl<'a> NotificationSenderReady<'a> {
 
 		trace!(
 			target: "sub-libp2p",
-			"External API => Notification({:?}, {:?}, {} bytes)",
+			"External API => Notification({:?}, {}, {} bytes)",
 			self.peer_id,
-			self.ready.protocol_name(),
+			self.protocol_name,
 			notification.len()
 		);
 		trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id);
@@ -1186,6 +1204,14 @@ enum ServiceToWorkerMsg<B: BlockT, H: ExHashT> {
 	GetValue(record::Key),
 	PutValue(record::Key, Vec<u8>),
 	AddKnownAddress(PeerId, Multiaddr),
+	SetReservedOnly(bool),
+	AddReserved(PeerId),
+	RemoveReserved(PeerId),
+	SetReserved(HashSet<PeerId>),
+	AddSetReserved(Cow<'static, str>, PeerId),
+	RemoveSetReserved(Cow<'static, str>, PeerId),
+	AddToPeersSet(Cow<'static, str>, PeerId),
+	RemoveFromPeersSet(Cow<'static, str>, PeerId),
 	SyncFork(Vec<PeerId>, B::Hash, NumberFor<B>),
 	EventStream(out_events::Sender),
 	Request {
@@ -1194,7 +1220,7 @@ enum ServiceToWorkerMsg<B: BlockT, H: ExHashT> {
 		request: Vec<u8>,
 		pending_response: oneshot::Sender<Result<Vec<u8>, RequestFailure>>,
 	},
-	DisconnectPeer(PeerId),
+	DisconnectPeer(PeerId, Cow<'static, str>),
 	NewBestBlockImported(B::Hash, NumberFor<B>),
 }
@@ -1290,8 +1316,24 @@ impl<B: BlockT + 'static, H: ExHashT> Future for NetworkWorker<B, H> {
 					this.network_service.get_value(&key),
 				ServiceToWorkerMsg::PutValue(key, value) =>
 					this.network_service.put_value(key, value),
+				ServiceToWorkerMsg::SetReservedOnly(reserved_only) =>
+					this.network_service.user_protocol_mut().set_reserved_only(reserved_only),
+				ServiceToWorkerMsg::SetReserved(peers) =>
+					this.network_service.user_protocol_mut().set_reserved_peers(peers),
+				ServiceToWorkerMsg::AddReserved(peer_id) =>
+					this.network_service.user_protocol_mut().add_reserved_peer(peer_id),
+				ServiceToWorkerMsg::RemoveReserved(peer_id) =>
+					this.network_service.user_protocol_mut().remove_reserved_peer(peer_id),
+				ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) =>
+					this.network_service.user_protocol_mut().add_set_reserved_peer(protocol, peer_id),
+				ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) =>
+					this.network_service.user_protocol_mut().remove_set_reserved_peer(protocol, peer_id),
 				ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) =>
 					this.network_service.add_known_address(peer_id, addr),
+				ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) =>
+					this.network_service.user_protocol_mut().add_to_peers_set(protocol, peer_id),
+				ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) =>
+					this.network_service.user_protocol_mut().remove_from_peers_set(protocol, peer_id),
 				ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) =>
 					this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number),
 				ServiceToWorkerMsg::EventStream(sender) =>
@@ -1299,8 +1341,8 @@ impl<B: BlockT + 'static, H: ExHashT> Future for NetworkWorker<B, H> {
 				ServiceToWorkerMsg::Request { target, protocol, request, pending_response } => {
 					this.network_service.send_request(&target, &protocol, request, pending_response);
 				},
-				ServiceToWorkerMsg::DisconnectPeer(who) =>
-					this.network_service.user_protocol_mut().disconnect_peer(&who),
+				ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) =>
+					this.network_service.user_protocol_mut().disconnect_peer(&who, &protocol_name),
 				ServiceToWorkerMsg::NewBestBlockImported(hash, number) =>
 					this.network_service.user_protocol_mut().new_best_block_imported(hash, number),
 			}
@@ -1479,6 +1521,12 @@ impl<B: BlockT + 'static, H: ExHashT> Future for NetworkWorker<B, H> {
 						messages,
 					});
 				},
+				Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote))) => {
+					this.event_streams.send(Event::SyncConnected { remote });
+				},
+				Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncDisconnected(remote))) => {
+					this.event_streams.send(Event::SyncDisconnected { remote });
+				},
 				Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => {
 					if let Some(metrics) = this.metrics.as_ref() {
 						let query_type = match event {
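Downstream code can observe the two new events through the service's event stream. A hedged consumer sketch, assuming `NetworkService::event_stream(name)` yields `Event` values as elsewhere in sc-network:

    // Consumer sketch only; the stream name is an arbitrary label.
    use futures::StreamExt;
    use sc_network::{Event, ExHashT, NetworkService};
    use sp_runtime::traits::Block as BlockT;
    use std::sync::Arc;

    async fn watch_sync_peers<B: BlockT + 'static, H: ExHashT>(network: Arc<NetworkService<B, H>>) {
        let events = network.event_stream("sync-peer-watcher");
        futures::pin_mut!(events);
        while let Some(event) = events.next().await {
            match event {
                Event::SyncConnected { remote } => log::info!("syncing with {}", remote),
                Event::SyncDisconnected { remote } => log::info!("stopped syncing with {}", remote),
                _ => {}
            }
        }
    }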
@@ -1702,12 +1750,7 @@ impl<'a, B: BlockT, H: ExHashT> Link<B> for NetworkLink<'a, B, H> {
 		self.protocol.user_protocol_mut().on_blocks_processed(imported, count, results)
 	}
 	fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor<B>, success: bool) {
-		self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success);
-		if !success {
-			info!("💔 Invalid justification provided by {} for #{}", who, hash);
-			self.protocol.user_protocol_mut().disconnect_peer(&who);
-			self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification"));
-		}
+		self.protocol.user_protocol_mut().justification_import_result(who, hash.clone(), number, success);
 	}
 	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
 		self.protocol.user_protocol_mut().request_justification(hash, number)