mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 23:57:56 +00:00

Rewrite the libp2p networking (#742)

* Rewrite the libp2p networking
* Fix erroneous replacement during rebase
* Update libp2p
* Update libp2p
* Remove the logic error about useless substreams
* Use the new NodeHandler system of libp2p
* Commit the Cargo.lock
* Upgrade yamux
* Forward port latest changes
* Fix compilation issues
* Implement the external URL
* Update to latest networking fixes
* Forward port rest of v0.2
* Fix reserved peers being dropped when we're full

committed by Arkadiy Paronyan
parent abf799f78f
commit 52dbf0cace
@@ -72,7 +72,7 @@ impl<T> RegisteredProtocol<T> {
	/// passed inside the `RegisteredProtocolOutput`.
	pub fn new(custom_data: T, protocol: ProtocolId, versions: &[(u8, u8)])
		-> Self {
-		let mut proto_name = Bytes::from_static(b"/core/");
+		let mut proto_name = Bytes::from_static(b"/substrate/");
		proto_name.extend_from_slice(&protocol);
		proto_name.extend_from_slice(b"/");
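// Illustrative sketch (not part of this commit): assuming the version number is appended
// after the trailing slash built above, a protocol registered with id `b"dot"` and version 0
// would be advertised on the wire as "/substrate/dot/0". The helper below is hypothetical
// and only mirrors the concatenation done in `RegisteredProtocol::new`.
fn example_proto_name(protocol: &[u8], version: u8) -> String {
	let mut name = b"/substrate/".to_vec();
	name.extend_from_slice(protocol);
	name.push(b'/');
	name.extend_from_slice(version.to_string().as_bytes());
	String::from_utf8(name).expect("name is built from valid UTF-8 components")
}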
@@ -100,9 +100,8 @@ impl<T> RegisteredProtocol<T> {
	}

-// `Maf` is short for `MultiaddressFuture`
-impl<T, C, Maf> ConnectionUpgrade<C, Maf> for RegisteredProtocol<T>
+impl<T, C> ConnectionUpgrade<C> for RegisteredProtocol<T>
where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
-	Maf: Future<Item = Multiaddr, Error = IoError> + Send + 'static, // TODO: 'static :(
{
	type NamesIter = VecIntoIter<(Bytes, Self::UpgradeIdentifier)>;
	type UpgradeIdentifier = u8; // Protocol version

@@ -119,8 +118,7 @@ where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
	}

	type Output = RegisteredProtocolOutput<T>;
-	type MultiaddrFuture = Maf;
-	type Future = future::FutureResult<(Self::Output, Self::MultiaddrFuture), IoError>;
+	type Future = future::FutureResult<Self::Output, IoError>;

	#[allow(deprecated)]
	fn upgrade(
@@ -128,7 +126,7 @@ where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
		socket: C,
		protocol_version: Self::UpgradeIdentifier,
		endpoint: Endpoint,
-		remote_addr: Maf
+		_: &Multiaddr
	) -> Self::Future {
		let packet_count = self.supported_versions
			.iter()
@@ -224,7 +222,7 @@ where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
			incoming: Box::new(incoming),
		};

-		future::ok((out, remote_addr))
+		future::ok(out)
	}
}

@@ -233,6 +231,12 @@ where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
pub struct RegisteredProtocols<T>(pub Vec<RegisteredProtocol<T>>);

impl<T> RegisteredProtocols<T> {
+	/// Returns the number of protocols.
+	#[inline]
+	pub fn len(&self) -> usize {
+		self.0.len()
+	}
+
	/// Finds a protocol in the list by its id.
	pub fn find_protocol(&self, protocol: ProtocolId)
		-> Option<&RegisteredProtocol<T>> {
@@ -251,27 +255,24 @@ impl<T> Default for RegisteredProtocols<T> {
	}
}

-impl<T, C, Maf> ConnectionUpgrade<C, Maf> for RegisteredProtocols<T>
+impl<T, C> ConnectionUpgrade<C> for RegisteredProtocols<T>
where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
-	Maf: Future<Item = Multiaddr, Error = IoError> + Send + 'static, // TODO: 'static :(
{
	type NamesIter = VecIntoIter<(Bytes, Self::UpgradeIdentifier)>;
	type UpgradeIdentifier = (usize,
-		<RegisteredProtocol<T> as ConnectionUpgrade<C, Maf>>::UpgradeIdentifier);
+		<RegisteredProtocol<T> as ConnectionUpgrade<C>>::UpgradeIdentifier);

	fn protocol_names(&self) -> Self::NamesIter {
		// We concat the lists of `RegisteredProtocol::protocol_names` for
		// each protocol.
		self.0.iter().enumerate().flat_map(|(n, proto)|
-			ConnectionUpgrade::<C, Maf>::protocol_names(proto)
+			ConnectionUpgrade::<C>::protocol_names(proto)
				.map(move |(name, id)| (name, (n, id)))
		).collect::<Vec<_>>().into_iter()
	}

-	type Output = <RegisteredProtocol<T> as ConnectionUpgrade<C, Maf>>::Output;
-	type MultiaddrFuture = <RegisteredProtocol<T> as
-		ConnectionUpgrade<C, Maf>>::MultiaddrFuture;
-	type Future = <RegisteredProtocol<T> as ConnectionUpgrade<C, Maf>>::Future;
+	type Output = <RegisteredProtocol<T> as ConnectionUpgrade<C>>::Output;
+	type Future = <RegisteredProtocol<T> as ConnectionUpgrade<C>>::Future;

	#[inline]
	fn upgrade(
@@ -279,7 +280,7 @@ where C: AsyncRead + AsyncWrite + Send + 'static, // TODO: 'static :-/
		socket: C,
		upgrade_identifier: Self::UpgradeIdentifier,
		endpoint: Endpoint,
-		remote_addr: Maf
+		remote_addr: &Multiaddr
	) -> Self::Future {
		let (protocol_index, inner_proto_id) = upgrade_identifier;
		self.0.into_iter()
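// Illustrative sketch (not part of this commit): how `RegisteredProtocols` namespaces its
// upgrade identifiers. Each (name, version) pair advertised by the protocol at index `n` is
// re-tagged as (name, (n, version)), so `upgrade()` can route the negotiated name back to
// `self.0[n]`. The names and versions below are made-up:
fn example_namespaced_names() -> Vec<(&'static str, (usize, u8))> {
	vec![
		("/substrate/dot/1", (0, 1)),
		("/substrate/dot/2", (0, 2)),
		("/substrate/bbq-birch/1", (1, 1)),
	]
}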
@@ -25,15 +25,18 @@ extern crate parking_lot;
extern crate fnv;
extern crate futures;
extern crate tokio;
extern crate tokio_executor;
extern crate tokio_io;
extern crate tokio_timer;
extern crate libc;
#[macro_use]
extern crate libp2p;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate smallvec;
extern crate bytes;
extern crate unsigned_varint;

@@ -46,18 +49,25 @@ extern crate log;
#[cfg(test)] #[macro_use]
extern crate assert_matches;

use libp2p::PeerId;

pub use connection_filter::{ConnectionFilter, ConnectionDirection};
pub use error::{Error, ErrorKind, DisconnectReason};
-pub use libp2p::{Multiaddr, multiaddr::AddrComponent};
+pub use libp2p::{Multiaddr, multiaddr::Protocol};
pub use traits::*;

pub type TimerToken = usize;

// TODO: remove as it is unused ; however modifying `network` currently causes a clusterfuck
// of dependency resolution errors
mod connection_filter;
mod custom_proto;
mod error;
mod network_state;
mod node_handler;
mod secret;
mod service;
mod service_task;
mod swarm;
mod timeouts;
mod topology;
mod traits;

@@ -67,8 +77,19 @@ pub use service::NetworkService;

/// Checks if a node URL is valid.
pub fn validate_node_url(url: &str) -> Result<(), Error> {
-	match url.parse::<libp2p::multiaddr::Multiaddr>() {
+	match url.parse::<Multiaddr>() {
		Ok(_) => Ok(()),
		Err(_) => Err(ErrorKind::InvalidNodeId.into()),
	}
}

+/// Parses a string address and returns the components, if valid.
+pub(crate) fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), Error> {
+	let mut addr: Multiaddr = addr_str.parse().map_err(|_| ErrorKind::AddressParse)?;
+	let who = match addr.pop() {
+		Some(Protocol::P2p(key)) =>
+			PeerId::from_multihash(key).map_err(|_| ErrorKind::AddressParse)?,
+		_ => return Err(ErrorKind::AddressParse.into()),
+	};
+	Ok((who, addr))
+}
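// Illustrative sketch (not part of this commit): how `parse_str_addr` splits a bootnode
// string. The address and peer id below are made-up examples, and this assumes the multiaddr
// parser of this era accepts the `/p2p/` text form.
#[cfg(test)]
mod parse_str_addr_example {
	#[test]
	fn splits_peer_id_from_address() {
		let s = "/ip4/127.0.0.1/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV";
		let (who, addr) = ::parse_str_addr(s).expect("example address is well-formed");
		assert_eq!(addr.to_string(), "/ip4/127.0.0.1/tcp/30333");
		assert_eq!(who.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV");
	}
}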
@@ -1,953 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use bytes::Bytes;
use fnv::{FnvHashMap, FnvHashSet};
use futures::sync::mpsc;
use libp2p::core::{multiaddr::ToMultiaddr, Multiaddr, AddrComponent, Endpoint, UniqueConnec};
use libp2p::core::{UniqueConnecState, PeerId, PublicKey};
use libp2p::kad::KadConnecController;
use libp2p::ping::Pinger;
use libp2p::secio;
use {Error, ErrorKind, NetworkConfiguration, NonReservedPeerMode};
use {NodeIndex, ProtocolId, SessionInfo};
use parking_lot::{Mutex, RwLock};
use rand::{self, Rng};
use topology::{DisconnectReason, NetTopology};
use std::fs;
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read, Write};
use std::path::Path;
use std::sync::atomic;
use std::time::{Duration, Instant};

// File where the peers are stored.
const NODES_FILE: &str = "nodes.json";
// File where the private key is stored.
const SECRET_FILE: &str = "secret";
// Duration during which a peer is disabled.
const PEER_DISABLE_DURATION: Duration = Duration::from_secs(5 * 60);

// Common struct shared throughout all the components of the service.
pub struct NetworkState {
	/// Contains the information about the network.
	topology: RwLock<NetTopology>,

	/// Active connections.
	connections: RwLock<Connections>,

	/// Maximum incoming peers.
	max_incoming_peers: u32,
	/// Maximum outgoing peers.
	max_outgoing_peers: u32,

	/// If true, only reserved peers can connect.
	reserved_only: atomic::AtomicBool,
	/// List of the IDs of the reserved peers.
	reserved_peers: RwLock<FnvHashSet<PeerId>>,

	/// Each node we discover gets assigned a new unique ID. This ID increases linearly.
	next_node_index: atomic::AtomicUsize,

	/// List of the IDs of the disabled peers. These peers will see their
	/// connections refused. Includes the time when the disabling expires.
	disabled_nodes: Mutex<FnvHashMap<PeerId, Instant>>,

	/// Local private key.
	local_private_key: secio::SecioKeyPair,
	/// Local public key.
	local_public_key: PublicKey,
}

struct Connections {
	/// For each libp2p peer ID, the ID of the peer in the API we expose.
	/// Also corresponds to the index in `info_by_peer`.
	peer_by_nodeid: FnvHashMap<PeerId, usize>,

	/// For each peer ID, information about our connection to this peer.
	info_by_peer: FnvHashMap<NodeIndex, PeerConnectionInfo>,
}

struct PeerConnectionInfo {
	/// A list of protocols, and the potential corresponding connection.
	/// The `UniqueConnec` contains a sender and the protocol version.
	/// The sender can be used to transmit data for the remote. Note that the
	/// packet_id has to be inside the `Bytes`.
	protocols: Vec<(ProtocolId, UniqueConnec<(mpsc::UnboundedSender<Bytes>, u8)>)>,

	/// The Kademlia connection to this node.
	kad_connec: UniqueConnec<KadConnecController>,

	/// The ping connection to this node.
	ping_connec: UniqueConnec<Pinger>,

	/// Id of the peer.
	id: PeerId,

	/// True if this connection was initiated by us. `None` if we're not connected.
	/// Note that it is theoretically possible that we dial the remote at the
	/// same time they dial us, in which case the protocols may be dispatched
	/// between both connections, and in which case the value here will be racy.
	originated: Option<bool>,

	/// Latest known ping duration.
	ping: Option<Duration>,

	/// The client version of the remote, or `None` if not known.
	client_version: Option<String>,

	/// The multiaddresses of the remote. Empty if not known.
	remote_addresses: Vec<Multiaddr>,

	/// The local multiaddress used to communicate with the remote, or `None`
	/// if not known.
	// TODO: never filled ; also shouldn't be an `Option`
	local_address: Option<Multiaddr>,
}

/// Simplified, POD version of `PeerConnectionInfo`.
#[derive(Debug, Clone)]
pub struct PeerInfo {
	/// Id of the peer.
	pub id: PeerId,

	/// True if this connection was initiated by us.
	/// Note that it is theoretically possible that we dial the remote at the
	/// same time they dial us, in which case the protocols may be dispatched
	/// between both connections, and in which case the value here will be racy.
	pub originated: bool,

	/// Latest known ping duration.
	pub ping: Option<Duration>,

	/// The client version of the remote, or `None` if not known.
	pub client_version: Option<String>,

	/// The multiaddress of the remote.
	pub remote_address: Option<Multiaddr>,

	/// The local multiaddress used to communicate with the remote, or `None`
	/// if not known.
	pub local_address: Option<Multiaddr>,
}

impl<'a> From<&'a PeerConnectionInfo> for PeerInfo {
	fn from(i: &'a PeerConnectionInfo) -> PeerInfo {
		PeerInfo {
			id: i.id.clone(),
			originated: i.originated.unwrap_or(true),
			ping: i.ping,
			client_version: i.client_version.clone(),
			remote_address: i.remote_addresses.get(0).map(|a| a.clone()),
			local_address: i.local_address.clone(),
		}
	}
}

impl NetworkState {
	pub fn new(config: &NetworkConfiguration) -> Result<NetworkState, Error> {
		// Private and public keys configuration.
		let local_private_key = obtain_private_key(&config)?;
		let local_public_key = local_private_key.to_public_key();

		// Build the storage for peers, including the bootstrap nodes.
		let mut topology = if let Some(ref path) = config.net_config_path {
			let path = Path::new(path).join(NODES_FILE);
			debug!(target: "sub-libp2p", "Initializing peer store for JSON file {:?}", path);
			NetTopology::from_file(path)
		} else {
			debug!(target: "sub-libp2p", "No peers file configured ; peers won't be saved");
			NetTopology::memory()
		};

		let reserved_peers = {
			let mut reserved_peers = FnvHashSet::with_capacity_and_hasher(
				config.reserved_nodes.len(),
				Default::default()
			);
			for peer in config.reserved_nodes.iter() {
				let (id, _) = parse_and_add_to_topology(peer, &mut topology)?;
				reserved_peers.insert(id);
			}
			RwLock::new(reserved_peers)
		};

		let expected_max_peers = config.max_peers as usize + config.reserved_nodes.len();

		Ok(NetworkState {
			topology: RwLock::new(topology),
			max_outgoing_peers: config.min_peers,
			max_incoming_peers: config.max_peers.saturating_sub(config.min_peers),
			connections: RwLock::new(Connections {
				peer_by_nodeid: FnvHashMap::with_capacity_and_hasher(expected_max_peers, Default::default()),
				info_by_peer: FnvHashMap::with_capacity_and_hasher(expected_max_peers, Default::default()),
			}),
			reserved_only: atomic::AtomicBool::new(config.non_reserved_mode == NonReservedPeerMode::Deny),
			reserved_peers,
			next_node_index: atomic::AtomicUsize::new(0),
			disabled_nodes: Mutex::new(Default::default()),
			local_private_key,
			local_public_key,
		})
	}

	/// Returns the private key of the local node.
	pub fn local_private_key(&self) -> &secio::SecioKeyPair {
		&self.local_private_key
	}

	/// Returns the public key of the local node.
	pub fn local_public_key(&self) -> &PublicKey {
		&self.local_public_key
	}

	/// Returns a list of peers and addresses which we should try to connect to.
	///
	/// Because of expiration and back-off mechanisms, this list can change
	/// by itself over time. The `Instant` that is returned corresponds to
	/// the earliest known time when a new entry will be added automatically to
	/// the list.
	pub fn outgoing_connections_to_attempt(&self) -> (Vec<(PeerId, Multiaddr)>, Instant) {
		// TODO: handle better
		let connections = self.connections.read();

		let num_to_attempt = if self.reserved_only.load(atomic::Ordering::Relaxed) {
			0
		} else {
			let num_open_custom_connections = num_open_custom_connections(&connections, &self.reserved_peers.read());
			self.max_outgoing_peers.saturating_sub(num_open_custom_connections.unreserved_outgoing)
		};

		let topology = self.topology.read();
		let (list, change) = topology.addrs_to_attempt();
		let list = list
			.filter(|&(peer, _)| {
				// Filter out peers which we are already connected to.
				let cur = match connections.peer_by_nodeid.get(peer) {
					Some(e) => e,
					None => return true
				};

				let infos = match connections.info_by_peer.get(&cur) {
					Some(i) => i,
					None => return true
				};

				!infos.protocols.iter().any(|(_, conn)| conn.is_alive())
			})
			.take(num_to_attempt as usize)
			.map(|(peer, addr)| (peer.clone(), addr.clone()))
			.collect();
		(list, change)
	}
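	// Illustrative sketch (not part of this commit): the dial budget computed above, as a
	// standalone function. `reserved_only` zeroes the budget; otherwise it is the outgoing
	// cap minus the open/pending unreserved outgoing connections. Hypothetical helper name.
	fn example_dial_budget(reserved_only: bool, max_outgoing_peers: u32, unreserved_outgoing: u32) -> u32 {
		if reserved_only {
			0
		} else {
			max_outgoing_peers.saturating_sub(unreserved_outgoing)
		}
	}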
	/// Returns true if we are connected to any peer at all.
	pub fn has_connected_peer(&self) -> bool {
		!self.connections.read().peer_by_nodeid.is_empty()
	}

	/// Gets a list of all connected peers by id.
	pub fn connected_peers(&self) -> Vec<NodeIndex> {
		self.connections.read().peer_by_nodeid.values().cloned().collect()
	}

	/// Returns true if the given `NodeIndex` is valid.
	///
	/// `NodeIndex`s are never reused, so once this function returns `false` it
	/// will never return `true` again for the same `NodeIndex`.
	pub fn is_peer_connected(&self, peer: NodeIndex) -> bool {
		self.connections.read().info_by_peer.contains_key(&peer)
	}

	/// Reports the ping of the peer. Returned later by `session_info()`.
	/// No-op if `who` is invalid or expired.
	pub fn report_ping_duration(&self, who: NodeIndex, ping: Duration) {
		let mut connections = self.connections.write();
		let info = match connections.info_by_peer.get_mut(&who) {
			Some(info) => info,
			None => return,
		};
		info.ping = Some(ping);
	}

	/// If we're connected to a peer with the given protocol, returns
	/// information about the connection. Otherwise, returns `None`.
	pub fn session_info(&self, peer: NodeIndex, protocol: ProtocolId) -> Option<SessionInfo> {
		let connections = self.connections.read();
		let info = match connections.info_by_peer.get(&peer) {
			Some(info) => info,
			None => return None,
		};

		let protocol_version = match info.protocols.iter().find(|&(ref p, _)| p == &protocol) {
			Some(&(_, ref unique_connec)) =>
				if let Some(val) = unique_connec.poll() {
					val.1 as u32
				} else {
					return None
				},
			None => return None,
		};

		Some(SessionInfo {
			id: None, // TODO: ???? what to do??? wrong format!
			client_version: info.client_version.clone().unwrap_or(String::new()),
			protocol_version,
			capabilities: Vec::new(), // TODO: list of supported protocols ; hard
			peer_capabilities: Vec::new(), // TODO: what's the difference with `capabilities`?
			ping: info.ping,
			originated: info.originated.unwrap_or(true),
			remote_address: info.remote_addresses.get(0).map(|a| a.to_string()).unwrap_or_default(),
			local_address: info.local_address.as_ref().map(|a| a.to_string())
				.unwrap_or(String::new()),
		})
	}

	/// If we're connected to a peer with the given protocol, returns the
	/// protocol version. Otherwise, returns `None`.
	pub fn protocol_version(&self, peer: NodeIndex, protocol: ProtocolId) -> Option<u8> {
		let connections = self.connections.read();
		let peer = match connections.info_by_peer.get(&peer) {
			Some(peer) => peer,
			None => return None,
		};

		peer.protocols.iter()
			.find(|p| p.0 == protocol)
			.and_then(|p| p.1.poll())
			.map(|(_, version)| version)
	}

	/// Equivalent to `session_info(peer).map(|info| info.client_version)`.
	pub fn peer_client_version(&self, peer: NodeIndex, protocol: ProtocolId) -> Option<String> {
		// TODO: implement more directly, without going through `session_info`
		self.session_info(peer, protocol)
			.map(|info| info.client_version)
	}

	/// Adds an address discovered by Kademlia.
	/// Note that we don't have to be connected to a peer to add an address.
	/// If `connectable` is `true`, that means we have a hint from a remote that this node can be
	/// connected to.
	pub fn add_kad_discovered_addr(&self, node_id: &PeerId, addr: Multiaddr, connectable: bool) {
		self.topology.write().add_kademlia_discovered_addr(node_id, addr, connectable)
	}

	/// Returns the known multiaddresses of a peer.
	///
	/// The boolean associated to each address indicates whether we're connected to it.
	pub fn addrs_of_peer(&self, node_id: &PeerId) -> Vec<(Multiaddr, bool)> {
		let topology = self.topology.read();
		// Note: I have no idea why, but fusing the two lines below fails the
		// borrow check
		let out: Vec<_> = topology
			.addrs_of_peer(node_id).map(|(a, c)| (a.clone(), c)).collect();
		out
	}

	/// Sets information about a peer.
	///
	/// No-op if the node index is invalid.
	pub fn set_node_info(
		&self,
		node_index: NodeIndex,
		client_version: String
	) {
		let mut connections = self.connections.write();
		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return
		};

		infos.client_version = Some(client_version);
	}

	/// Adds a peer to the internal peer store.
	/// Returns an error if the peer address is invalid.
	pub fn add_bootstrap_peer(&self, peer: &str) -> Result<(PeerId, Multiaddr), Error> {
		parse_and_add_to_topology(peer, &mut self.topology.write())
	}

	/// Adds a reserved peer to the list of reserved peers.
	/// Returns an error if the peer address is invalid.
	pub fn add_reserved_peer(&self, peer: &str) -> Result<(), Error> {
		let (id, _) = parse_and_add_to_topology(peer, &mut self.topology.write())?;
		self.reserved_peers.write().insert(id);
		Ok(())
	}

	/// Removes the peer from the list of reserved peers. If we're in reserved mode, drops any
	/// active connection to this peer.
	/// Returns an error if the peer address is invalid.
	pub fn remove_reserved_peer(&self, peer: &str) -> Result<(), Error> {
		let (id, _) = parse_and_add_to_topology(peer, &mut self.topology.write())?;
		self.reserved_peers.write().remove(&id);

		// Drop the peer if we're in reserved mode.
		if self.reserved_only.load(atomic::Ordering::SeqCst) {
			let mut connections = self.connections.write();
			if let Some(who) = connections.peer_by_nodeid.remove(&id) {
				connections.info_by_peer.remove(&who);
				// TODO: use drop_peer instead
			}
		}

		Ok(())
	}

	/// Sets the non-reserved peer mode.
	pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode) {
		match mode {
			NonReservedPeerMode::Accept =>
				self.reserved_only.store(false, atomic::Ordering::SeqCst),
			NonReservedPeerMode::Deny =>
				// TODO: drop existing peers?
				self.reserved_only.store(true, atomic::Ordering::SeqCst),
		}
	}

	/// Reports that we tried to connect to the given address but failed.
	///
	/// This decreases the chance this address will be tried again in the future.
	#[inline]
	pub fn report_failed_to_connect(&self, addr: &Multiaddr) {
		trace!(target: "sub-libp2p", "Failed to connect to {:?}", addr);
		self.topology.write().report_failed_to_connect(addr);
	}

	/// Returns the `NodeIndex` corresponding to a node id, or assigns a `NodeIndex` if none
	/// exists.
	///
	/// Returns an error if this node is on the list of disabled/banned nodes.
	pub fn assign_node_index(
		&self,
		node_id: &PeerId
	) -> Result<NodeIndex, IoError> {
		// Check whether node is disabled.
		// TODO: figure out the locking strategy here to avoid possible deadlocks
		// TODO: put disabled_nodes in connections?
		let mut disabled_nodes = self.disabled_nodes.lock();
		if let Some(timeout) = disabled_nodes.get(node_id).cloned() {
			if timeout > Instant::now() {
				debug!(target: "sub-libp2p", "Refusing peer {:?} because it is disabled", node_id);
				return Err(IoError::new(IoErrorKind::ConnectionRefused, "peer is disabled"));
			} else {
				disabled_nodes.remove(node_id);
			}
		}
		drop(disabled_nodes);

		let mut connections = self.connections.write();
		let connections = &mut *connections;
		let peer_by_nodeid = &mut connections.peer_by_nodeid;
		let info_by_peer = &mut connections.info_by_peer;

		let who = *peer_by_nodeid.entry(node_id.clone()).or_insert_with(|| {
			let new_id = self.next_node_index.fetch_add(1, atomic::Ordering::Relaxed);
			trace!(target: "sub-libp2p", "Creating new peer #{:?} for {:?}", new_id, node_id);

			info_by_peer.insert(new_id, PeerConnectionInfo {
				protocols: Vec::new(), // TODO: Vec::with_capacity(num_registered_protocols),
				kad_connec: UniqueConnec::empty(),
				ping_connec: UniqueConnec::empty(),
				id: node_id.clone(),
				originated: None,
				ping: None,
				client_version: None,
				local_address: None,
				remote_addresses: Vec::with_capacity(1),
			});

			new_id
		});

		Ok(who)
	}

	/// Notifies that we're connected to a node through an address.
	///
	/// Returns an error if we refuse the connection.
	///
	/// Note that it is legal to connect multiple times to the same node id through different
	/// addresses and endpoints.
	pub fn report_connected(
		&self,
		node_index: NodeIndex,
		addr: &Multiaddr,
		endpoint: Endpoint
	) -> Result<(), IoError> {
		let mut connections = self.connections.write();

		// TODO: double locking in this function ; although this has been reviewed to not deadlock
		// as of the writing of this code, it is possible that a later change that isn't carefully
		// reviewed triggers one

		if endpoint == Endpoint::Listener {
			let stats = num_open_custom_connections(&connections, &self.reserved_peers.read());
			if stats.unreserved_incoming >= self.max_incoming_peers {
				debug!(target: "sub-libp2p", "Refusing incoming connection from {} because we \
					reached max incoming peers", addr);
				return Err(IoError::new(IoErrorKind::ConnectionRefused,
					"maximum incoming peers reached"));
			}
		}

		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return Ok(())
		};

		if !infos.remote_addresses.iter().any(|a| a == addr) {
			infos.remote_addresses.push(addr.clone());
		}

		if infos.originated.is_none() {
			infos.originated = Some(endpoint == Endpoint::Dialer);
		}

		self.topology.write().report_connected(addr, &infos.id);

		Ok(())
	}

	/// Returns the node id from a node index.
	///
	/// Returns `None` if the node index is invalid.
	pub fn node_id_from_index(
		&self,
		node_index: NodeIndex
	) -> Option<PeerId> {
		let mut connections = self.connections.write();
		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return None
		};
		Some(infos.id.clone())
	}

	/// Obtains the `UniqueConnec` corresponding to the Kademlia connection to a peer.
	///
	/// Returns `None` if the node index is invalid.
	pub fn kad_connection(
		&self,
		node_index: NodeIndex
	) -> Option<UniqueConnec<KadConnecController>> {
		let mut connections = self.connections.write();
		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return None
		};
		Some(infos.kad_connec.clone())
	}

	/// Obtains the `UniqueConnec` corresponding to the Ping connection to a peer.
	///
	/// Returns `None` if the node index is invalid.
	pub fn ping_connection(
		&self,
		node_index: NodeIndex
	) -> Option<UniqueConnec<Pinger>> {
		let mut connections = self.connections.write();
		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return None
		};
		Some(infos.ping_connec.clone())
	}

	/// Cleans up inactive connections and returns a list of
	/// connections to ping and identify.
	pub fn cleanup_and_prepare_updates(
		&self
	) -> Vec<PeriodicUpdate> {
		self.topology.write().cleanup();

		let mut connections = self.connections.write();
		let connections = &mut *connections;
		let peer_by_nodeid = &mut connections.peer_by_nodeid;
		let info_by_peer = &mut connections.info_by_peer;

		let mut ret = Vec::with_capacity(info_by_peer.len());
		info_by_peer.retain(|&who, infos| {
			// Remove the peer if neither Kad nor any protocol is alive.
			if !infos.kad_connec.is_alive() &&
				!infos.protocols.iter().any(|(_, conn)| conn.is_alive())
			{
				peer_by_nodeid.remove(&infos.id);
				trace!(target: "sub-libp2p", "Cleaning up expired peer \
					#{:?} ({:?})", who, infos.id);
				return false;
			}

			if let Some(addr) = infos.remote_addresses.get(0) {
				ret.push(PeriodicUpdate {
					node_index: who,
					peer_id: infos.id.clone(),
					address: addr.clone(),
					pinger: infos.ping_connec.clone(),
					identify: infos.client_version.is_none(),
				});
			}
			true
		});
		ret
	}

	/// Obtains the `UniqueConnec` corresponding to a custom protocol connection to a peer.
	///
	/// Returns `None` if the node index is invalid.
	pub fn custom_proto(
		&self,
		node_index: NodeIndex,
		protocol_id: ProtocolId,
	) -> Option<UniqueConnec<(mpsc::UnboundedSender<Bytes>, u8)>> {
		let mut connections = self.connections.write();
		let infos = match connections.info_by_peer.get_mut(&node_index) {
			Some(i) => i,
			None => return None
		};

		if let Some((_, ref uconn)) = infos.protocols.iter().find(|&(prot, _)| prot == &protocol_id) {
			return Some(uconn.clone())
		}

		let unique_connec = UniqueConnec::empty();
		infos.protocols.push((protocol_id.clone(), unique_connec.clone()));
		Some(unique_connec)
	}

	/// Sends some data to the given peer, using the sender that was passed
	/// to the `UniqueConnec` of `custom_proto`.
	pub fn send(&self, who: NodeIndex, protocol: ProtocolId, message: Bytes) -> Result<(), Error> {
		if let Some(peer) = self.connections.read().info_by_peer.get(&who) {
			let sender = peer.protocols.iter().find(|elem| elem.0 == protocol)
				.and_then(|e| e.1.poll())
				.map(|e| e.0);
			if let Some(sender) = sender {
				sender.unbounded_send(message)
					.map_err(|err| ErrorKind::Io(IoError::new(IoErrorKind::Other, err)))?;
				Ok(())
			} else {
				// We are connected to this peer, but not with the current
				// protocol.
				debug!(target: "sub-libp2p",
					"Tried to send a message to peer {}, but we are not connected with the requested protocol",
					who
				);
				return Err(ErrorKind::PeerNotFound.into())
			}
		} else {
			debug!(target: "sub-libp2p", "Tried to send message to invalid peer ID {}", who);
			return Err(ErrorKind::PeerNotFound.into())
		}
	}

	/// Gets the info on a peer, if there's an active connection.
	pub fn peer_info(&self, who: NodeIndex) -> Option<PeerInfo> {
		self.connections.read().info_by_peer.get(&who).map(Into::into)
	}

	/// Reports that an attempt to make a low-level ping of the peer failed.
	pub fn report_ping_failed(&self, who: NodeIndex) {
		self.drop_peer(who);
	}

	/// Disconnects a peer, if a connection exists (ie. drops the Kademlia
	/// controller, and the senders that were stored in the `UniqueConnec` of
	/// `custom_proto`).
	pub fn drop_peer(&self, who: NodeIndex) {
		let mut connections = self.connections.write();
		if let Some(peer_info) = connections.info_by_peer.remove(&who) {
			trace!(target: "sub-libp2p", "Destroying peer #{} {:?} ; kademlia = {:?} ; num_protos = {:?}",
				who,
				peer_info.id,
				peer_info.kad_connec.is_alive(),
				peer_info.protocols.iter().filter(|c| c.1.is_alive()).count());
			let old = connections.peer_by_nodeid.remove(&peer_info.id);
			debug_assert_eq!(old, Some(who));
			for addr in &peer_info.remote_addresses {
				self.topology.write().report_disconnected(addr,
					DisconnectReason::ClosedGracefully); // TODO: wrong reason
			}
		}
	}

	/// Disconnects all the peers.
	/// This destroys all the Kademlia controllers and the senders that were
	/// stored in the `UniqueConnec` of `custom_proto`.
	pub fn disconnect_all(&self) {
		let mut connec = self.connections.write();
		*connec = Connections {
			info_by_peer: FnvHashMap::with_capacity_and_hasher(
				connec.peer_by_nodeid.capacity(), Default::default()),
			peer_by_nodeid: FnvHashMap::with_capacity_and_hasher(
				connec.peer_by_nodeid.capacity(), Default::default()),
		};
	}

	/// Disables a peer for `PEER_DISABLE_DURATION`. This adds the peer to the
	/// list of disabled peers, and drops any existing connections if
	/// necessary (ie. drops the sender that was stored in the `UniqueConnec`
	/// of `custom_proto`).
	pub fn ban_peer(&self, who: NodeIndex, reason: &str) {
		// TODO: what do we do if the peer is reserved?
		// TODO: same logging as in drop_peer
		let mut connections = self.connections.write();
		let peer_info = if let Some(peer_info) = connections.info_by_peer.remove(&who) {
			if let &Some(ref client_version) = &peer_info.client_version {
				info!(target: "network", "Peer {} (version: {}, addresses: {:?}) disabled. {}", who, client_version, peer_info.remote_addresses, reason);
			} else {
				info!(target: "network", "Peer {} (addresses: {:?}) disabled. {}", who, peer_info.remote_addresses, reason);
			}
			let old = connections.peer_by_nodeid.remove(&peer_info.id);
			debug_assert_eq!(old, Some(who));
			peer_info
		} else {
			return
		};

		drop(connections);
		let timeout = Instant::now() + PEER_DISABLE_DURATION;
		self.disabled_nodes.lock().insert(peer_info.id.clone(), timeout);
	}

	/// Flushes the caches to the disk.
	///
	/// This is done in an atomic way, so that an error doesn't corrupt
	/// anything.
	pub fn flush_caches_to_disk(&self) -> Result<(), IoError> {
		match self.topology.read().flush_to_disk() {
			Ok(()) => {
				debug!(target: "sub-libp2p", "Flushed JSON peer store to disk");
				Ok(())
			}
			Err(err) => {
				warn!(target: "sub-libp2p", "Failed to flush changes to JSON peer store: {}", err);
				Err(err)
			}
		}
	}
}

impl Drop for NetworkState {
	fn drop(&mut self) {
		let _ = self.flush_caches_to_disk();
	}
}

/// Periodic update that should be performed by the user of the network state.
pub struct PeriodicUpdate {
	/// Index of the node in the network state.
	pub node_index: NodeIndex,
	/// Id of the peer.
	pub peer_id: PeerId,
	/// Address of the node to ping.
	pub address: Multiaddr,
	/// Object that allows pinging the node.
	pub pinger: UniqueConnec<Pinger>,
	/// The node should be identified as well.
	pub identify: bool,
}

struct OpenCustomConnectionsNumbers {
	/// Total number of open and pending connections.
	pub total: u32,
	/// Number of open and pending connections that are unreserved and incoming.
	pub unreserved_incoming: u32,
	/// Number of open and pending connections that are unreserved and outgoing.
	pub unreserved_outgoing: u32,
}

/// Returns the number of open and pending connections with
/// custom protocols.
fn num_open_custom_connections(connections: &Connections, reserved_peers: &FnvHashSet<PeerId>) -> OpenCustomConnectionsNumbers {
	let filtered = connections
		.info_by_peer
		.values()
		.filter(|info|
			info.protocols.iter().any(|&(_, ref connec)|
				match connec.state() {
					UniqueConnecState::Pending | UniqueConnecState::Full => true,
					_ => false
				}
			)
		);

	let mut total: u32 = 0;
	let mut unreserved_incoming: u32 = 0;
	let mut unreserved_outgoing: u32 = 0;

	for info in filtered {
		total += 1;
		let node_is_reserved = reserved_peers.contains(&info.id);
		if !node_is_reserved {
			if !info.originated.unwrap_or(true) {
				unreserved_incoming += 1;
			} else {
				unreserved_outgoing += 1;
			}
		}
	}

	OpenCustomConnectionsNumbers {
		total,
		unreserved_incoming,
		unreserved_outgoing,
	}
}
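// Illustrative sketch (not part of this commit): the admission rule that `report_connected`
// derives from these counters. Reserved peers are excluded from `unreserved_incoming`, which
// is what keeps them from being refused when the node is otherwise full. Hypothetical helper.
fn example_accept_incoming(
	stats: &OpenCustomConnectionsNumbers,
	peer_is_reserved: bool,
	max_incoming_peers: u32
) -> bool {
	peer_is_reserved || stats.unreserved_incoming < max_incoming_peers
}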
/// Parses an address of the form `/ip4/x.x.x.x/tcp/x/p2p/xxxxxx`, and adds it
/// to the given topology. Returns the corresponding peer ID and multiaddr.
fn parse_and_add_to_topology(
	addr_str: &str,
	topology: &mut NetTopology
) -> Result<(PeerId, Multiaddr), Error> {
	let mut addr = addr_str.to_multiaddr().map_err(|_| ErrorKind::AddressParse)?;
	let who = match addr.pop() {
		Some(AddrComponent::P2P(key)) =>
			PeerId::from_multihash(key).map_err(|_| ErrorKind::AddressParse)?,
		_ => return Err(ErrorKind::AddressParse.into()),
	};

	topology.add_bootstrap_addr(&who, addr.clone());
	Ok((who, addr))
}

/// Obtains or generates the local private key using the configuration.
fn obtain_private_key(config: &NetworkConfiguration)
	-> Result<secio::SecioKeyPair, IoError> {
	if let Some(ref secret) = config.use_secret {
		// Key was specified in the configuration.
		secio::SecioKeyPair::secp256k1_raw_key(&secret[..])
			.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))

	} else {
		if let Some(ref path) = config.net_config_path {
			fs::create_dir_all(Path::new(path))?;

			// Try to fetch the key from the file containing the secret.
			let secret_path = Path::new(path).join(SECRET_FILE);
			match load_private_key_from_file(&secret_path) {
				Ok(s) => Ok(s),
				Err(err) => {
					// Failed to load the existing file ; generate a new key.
					trace!(target: "sub-libp2p",
						"Failed to load existing secret key file {:?}, generating new key ; err = {:?}",
						secret_path,
						err
					);
					Ok(gen_key_and_try_write_to_file(&secret_path))
				}
			}

		} else {
			// No path in the configuration, nothing we can do except generate
			// a new key.
			let mut key: [u8; 32] = [0; 32];
			rand::rngs::EntropyRng::new().fill(&mut key);
			Ok(secio::SecioKeyPair::secp256k1_raw_key(&key)
				.expect("randomly-generated key with correct len should always be valid"))
		}
	}
}
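// Illustrative sketch (not part of this commit): the precedence implemented by
// `obtain_private_key`, with hypothetical names; only the order of the branches mirrors the
// function above.
enum ExampleKeySource { ConfiguredSecret, SecretFile, Ephemeral }

fn example_key_source(has_use_secret: bool, has_net_config_path: bool) -> ExampleKeySource {
	if has_use_secret {
		// 1. A raw secp256k1 key was given in the configuration; use it as-is.
		ExampleKeySource::ConfiguredSecret
	} else if has_net_config_path {
		// 2. Load `<net_config_path>/secret`, or generate a key and persist it there.
		ExampleKeySource::SecretFile
	} else {
		// 3. Generate an in-memory key; it is lost on restart.
		ExampleKeySource::Ephemeral
	}
}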
/// Tries to load a private key from a file located at the given path.
fn load_private_key_from_file<P>(path: P)
	-> Result<secio::SecioKeyPair, IoError>
	where P: AsRef<Path>
{
	fs::File::open(path)
		.and_then(|mut file| {
			// We are in 2018 and there is still no method on `std::io::Read`
			// that directly returns a `Vec`.
			let mut buf = Vec::new();
			file.read_to_end(&mut buf).map(|_| buf)
		})
		.and_then(|content|
			secio::SecioKeyPair::secp256k1_raw_key(&content)
				.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))
		)
}

/// Generates a new secret key and tries to write it to the given file.
/// Doesn't error if we couldn't open or write to the file.
fn gen_key_and_try_write_to_file<P>(path: P) -> secio::SecioKeyPair
	where P: AsRef<Path> {
	let raw_key: [u8; 32] = rand::rngs::EntropyRng::new().gen();
	let secio_key = secio::SecioKeyPair::secp256k1_raw_key(&raw_key)
		.expect("randomly-generated key with correct len should always be valid");

	// And store the newly-generated key in the file if possible.
	// Errors that happen while doing so are ignored.
	match open_priv_key_file(&path) {
		Ok(mut file) =>
			match file.write_all(&raw_key) {
				Ok(()) => (),
				Err(err) => warn!(target: "sub-libp2p",
					"Failed to write secret key in file {:?} ; err = {:?}",
					path.as_ref(),
					err
				),
			},
		Err(err) => warn!(target: "sub-libp2p",
			"Failed to store secret key in file {:?} ; err = {:?}",
			path.as_ref(),
			err
		),
	}

	secio_key
}

/// Opens a file containing a private key in write mode.
#[cfg(unix)]
fn open_priv_key_file<P>(path: P) -> Result<fs::File, IoError>
	where P: AsRef<Path>
{
	use std::os::unix::fs::OpenOptionsExt;
	fs::OpenOptions::new()
		.write(true)
		.create_new(true)
		.mode(256 | 128) // 256 | 128 == 384 == 0o600, i.e. owner read/write only
		.open(path)
}

/// Opens a file containing a private key in write mode.
#[cfg(not(unix))]
fn open_priv_key_file<P>(path: P) -> Result<fs::File, IoError>
	where P: AsRef<Path>
{
	fs::OpenOptions::new()
		.write(true)
		.create_new(true)
		.open(path)
}

#[cfg(test)]
mod tests {
	use libp2p::core::PublicKey;
	use network_state::NetworkState;

	#[test]
	fn refuse_disabled_peer() {
		let state = NetworkState::new(&Default::default()).unwrap();
		let example_peer = PublicKey::Rsa(vec![1, 2, 3, 4]).into_peer_id();

		let who = state.assign_node_index(&example_peer).unwrap();
		state.ban_peer(who, "Just a test");

		assert!(state.assign_node_index(&example_peer).is_err());
	}
}
@@ -0,0 +1,881 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use bytes::Bytes;
use custom_proto::{RegisteredProtocols, RegisteredProtocolOutput};
use futures::{prelude::*, task};
use libp2p::core::{ConnectionUpgrade, Endpoint, PeerId, PublicKey, upgrade};
use libp2p::core::nodes::handled_node::{NodeHandler, NodeHandlerEndpoint, NodeHandlerEvent};
use libp2p::core::nodes::swarm::ConnectedPoint;
use libp2p::kad::{KadConnecConfig, KadFindNodeRespond, KadIncomingRequest, KadConnecController};
use libp2p::{identify, ping};
use parking_lot::Mutex;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::{Delay, Interval};
use {Multiaddr, PacketId, ProtocolId};

/// Duration after which we consider that a ping failed.
const PING_TIMEOUT: Duration = Duration::from_secs(30);
/// After a ping succeeded, wait this long before the next ping.
const DELAY_TO_NEXT_PING: Duration = Duration::from_secs(15);
/// Period at which we identify the remote.
const PERIOD_IDENTIFY: Duration = Duration::from_secs(5 * 60);
/// Delay between the moment we connect and the first time we ping.
const DELAY_TO_FIRST_PING: Duration = Duration::from_secs(5);
/// Delay between the moment we connect and the first time we identify.
const DELAY_TO_FIRST_IDENTIFY: Duration = Duration::from_secs(2);
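// Illustrative sketch (not part of this commit): when the next outgoing ping fires, given
// the constants above. Hypothetical helper; the real scheduling lives in the handler's poll
// logic further down.
fn example_next_ping_deadline(just_connected: bool, now: Instant) -> Instant {
	if just_connected {
		now + DELAY_TO_FIRST_PING
	} else {
		// After each successful pong the timer is re-armed.
		now + DELAY_TO_NEXT_PING
	}
}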
/// This struct handles the open substreams of a specific node.
|
||||
///
|
||||
/// It doesn't handle opening the substreams, but only what to do with substreams that have been
|
||||
/// opened.
|
||||
///
|
||||
/// The node will be pinged at a regular interval to determine whether it's still alive. We will
|
||||
/// also regularly query the remote for identification information, for statistics purposes.
|
||||
pub struct SubstrateNodeHandler<TSubstream, TUserData> {
|
||||
/// List of registered custom protocols.
|
||||
registered_custom: Arc<RegisteredProtocols<TUserData>>,
|
||||
/// Substreams open for "custom" protocols (eg. dot).
|
||||
custom_protocols_substreams: Vec<RegisteredProtocolOutput<TUserData>>,
|
||||
|
||||
/// Address of the node.
|
||||
address: Multiaddr,
|
||||
|
||||
/// Substream open for Kademlia, if any.
|
||||
kademlia_substream: Option<(KadConnecController, Box<Stream<Item = KadIncomingRequest, Error = IoError> + Send>)>,
|
||||
/// If true, we need to send back a `KadOpen` event on the stream (if Kademlia is open).
|
||||
need_report_kad_open: bool,
|
||||
|
||||
/// Substream open for sending pings, if any.
|
||||
ping_out_substream: Option<ping::PingDialer<TSubstream, Instant>>,
|
||||
/// Active pinging attempt with the moment it expires.
|
||||
active_ping_out: Option<Delay>,
|
||||
/// Substreams open for receiving pings.
|
||||
ping_in_substreams: Vec<ping::PingListener<TSubstream>>,
|
||||
/// Future that fires when we need to ping the node again.
|
||||
///
|
||||
/// Every time we receive a pong, we reset the timer to the next time.
|
||||
next_ping: Delay,
|
||||
|
||||
/// Substreams for sending back our identify info to the remote.
|
||||
///
|
||||
/// This is in an `Arc` in order to avoid borrowing issues with the future.
|
||||
identify_send_back: Arc<Mutex<Vec<Box<Future<Item = (), Error = IoError> + Send>>>>,
|
||||
/// Stream that fires when we need to identify the node again.
|
||||
next_identify: Interval,
|
||||
|
||||
/// Substreams being upgraded on the listening side.
|
||||
upgrades_in_progress_listen: Vec<Box<Future<Item = FinalUpgrade<TSubstream, TUserData>, Error = IoError> + Send>>,
|
||||
/// Substreams being upgraded on the dialing side. Contrary to `upgrades_in_progress_listen`,
|
||||
/// these have a known purpose.
|
||||
upgrades_in_progress_dial: Vec<(UpgradePurpose, Box<Future<Item = FinalUpgrade<TSubstream, TUserData>, Error = IoError> + Send>)>,
|
||||
/// The substreams we want to open.
|
||||
queued_dial_upgrades: Vec<UpgradePurpose>,
|
||||
/// Number of outbound substreams the outside should open for us.
|
||||
num_out_user_must_open: usize,
|
||||
|
||||
/// The node has started its shutdown process.
|
||||
is_shutting_down: bool,
|
||||
|
||||
/// Task to notify if we add an element to one of the lists from the public API.
|
||||
to_notify: Option<task::Task>,
|
||||
}
|
||||
|
||||
/// Purpose of an upgrade in progress on the dialing side.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
enum UpgradePurpose {
|
||||
Custom(ProtocolId),
|
||||
Kad,
|
||||
Identify,
|
||||
Ping,
|
||||
}
|
||||
|
||||
/// Event that can happen on the `SubstrateNodeHandler`.
|
||||
pub enum SubstrateOutEvent<TSubstream> {
|
||||
/// The node has been determined to be unresponsive.
|
||||
Unresponsive,
|
||||
|
||||
/// The node works but we can't do anything useful with it.
|
||||
Useless,
|
||||
|
||||
/// Started pinging the remote. This can be used to print a diagnostic message in the logs.
|
||||
PingStart,
|
||||
|
||||
/// The node has successfully responded to a ping.
|
||||
PingSuccess(Duration),
|
||||
|
||||
/// Opened a custom protocol with the remote.
|
||||
CustomProtocolOpen {
|
||||
/// Identifier of the protocol.
|
||||
protocol_id: ProtocolId,
|
||||
/// Version of the protocol that has been opened.
|
||||
version: u8,
|
||||
},
|
||||
|
||||
/// Closed a custom protocol with the remote.
|
||||
CustomProtocolClosed {
|
||||
/// Identifier of the protocol.
|
||||
protocol_id: ProtocolId,
|
||||
/// Reason why the substream closed. If `Ok`, then it's a graceful exit (EOF).
|
||||
result: Result<(), IoError>,
|
||||
},
|
||||
|
||||
/// Receives a message on a custom protocol substream.
|
||||
CustomMessage {
|
||||
/// Protocol which generated the message.
|
||||
protocol_id: ProtocolId,
|
||||
/// Identifier of the packet.
|
||||
packet_id: u8,
|
||||
/// Data that has been received.
|
||||
data: Bytes,
|
||||
},
|
||||
|
||||
/// We obtained identification information from the remote
|
||||
Identified {
|
||||
/// Information of the remote.
|
||||
info: identify::IdentifyInfo,
|
||||
/// Address the remote observes us as.
|
||||
observed_addr: Multiaddr,
|
||||
},
|
||||
|
||||
/// The remote wants us to send back identification information.
|
||||
///
|
||||
/// The `IdentificationRequest` object should be used to send the information.
|
||||
IdentificationRequest(IdentificationRequest<TSubstream>),
|
||||
|
||||
/// Opened a Kademlia substream with the node.
|
||||
KadOpen(KadConnecController),
|
||||
|
||||
/// The remote wants us to answer a Kademlia `FIND_NODE` request.
|
||||
///
|
||||
/// The `responder` should be used to answer that query.
|
||||
// TODO: this API with the "responder" is bad, but changing it requires modifications in libp2p
|
||||
KadFindNode {
|
||||
/// The value being searched.
|
||||
searched: PeerId,
|
||||
/// Object to use to respond to the request.
|
||||
responder: KadFindNodeRespond,
|
||||
},
|
||||
|
||||
/// The Kademlia substream has been closed.
|
||||
///
|
||||
/// The parameter contains the reason why it has been closed. `Ok` means that it's been closed
|
||||
/// gracefully.
|
||||
KadClosed(Result<(), IoError>),
|
||||
|
||||
/// An error happened while upgrading a substream.
|
||||
///
|
||||
/// This can be used to print a diagnostic message.
|
||||
SubstreamUpgradeFail(IoError),
|
||||
}
|
||||
|
||||
/// The remote wants us to send back information.
|
||||
pub struct IdentificationRequest<TSubstream> {
|
||||
/// Where to store the future that sends back the information.
|
||||
identify_send_back: Arc<Mutex<Vec<Box<Future<Item = (), Error = IoError> + Send>>>>,
|
||||
/// Object that sends back the information.
|
||||
sender: identify::IdentifySender<TSubstream>,
|
||||
/// Protocol names that we support, to send back.
|
||||
protocols: Vec<String>,
|
||||
}
|
||||
|
||||
impl<TSubstream> IdentificationRequest<TSubstream> {
|
||||
/// Responds to the request.
|
||||
///
|
||||
/// - `local_key` must contain our local public key.
|
||||
/// - `listen_addrs` must contain the list of addresses we're listening on (preferably after
|
||||
/// NAT traversal).
|
||||
/// - `remote_addr` must be the address of the remote from our local point of view.
|
||||
///
|
||||
pub fn respond(
|
||||
self,
|
||||
local_key: PublicKey,
|
||||
listen_addrs: Vec<Multiaddr>,
|
||||
remote_addr: &Multiaddr
|
||||
) where TSubstream: AsyncRead + AsyncWrite + Send + 'static {
|
||||
// TODO: what to return for `protocol_version` and `agent_version`?
|
||||
let sender = self.sender.send(
|
||||
identify::IdentifyInfo {
|
||||
public_key: local_key,
|
||||
protocol_version: concat!("substrate/", env!("CARGO_PKG_VERSION")).to_owned(),
|
||||
agent_version: concat!("substrate/", env!("CARGO_PKG_VERSION")).to_owned(),
|
||||
listen_addrs,
|
||||
protocols: self.protocols,
|
||||
},
|
||||
remote_addr
|
||||
);
|
||||
|
||||
self.identify_send_back.lock().push(sender);
|
||||
}
|
||||
}
|
||||
|
||||
/// Event that can be received by a `SubstrateNodeHandler`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum SubstrateInEvent {
|
||||
/// Before anything happens on the node, we wait for an `Accept` event. This is used to deny
|
||||
/// nodes based on their peer ID.
|
||||
Accept,
|
||||
|
||||
/// Sends a message through a custom protocol substream.
|
||||
SendCustomMessage {
|
||||
protocol: ProtocolId,
|
||||
packet_id: PacketId,
|
||||
data: Vec<u8>,
|
||||
},
|
||||
|
||||
/// Requests to open a Kademlia substream.
|
||||
// TODO: document better
|
||||
OpenKademlia,
|
||||
}

/// Ideally we would have a method on `SubstrateNodeHandler` that builds this type, but in practice it's a
/// bit tedious to express, even with the `impl Trait` syntax.
/// Therefore we simply use a macro instead.
macro_rules! listener_upgrade {
	($self:expr) => (
		upgrade::or(upgrade::or(upgrade::or(
			upgrade::map((*$self.registered_custom).clone(), move |c| FinalUpgrade::Custom(c)),
			upgrade::map(KadConnecConfig::new(), move |(c, s)| FinalUpgrade::Kad(c, s))),
			upgrade::map(ping::Ping::default(), move |p| FinalUpgrade::from(p))),
			upgrade::map(identify::IdentifyProtocolConfig, move |i| FinalUpgrade::from(i)))
		// TODO: meh for cloning a Vec here
	)
}

impl<TSubstream, TUserData> SubstrateNodeHandler<TSubstream, TUserData>
where TSubstream: AsyncRead + AsyncWrite + Send + 'static,
	TUserData: Clone + Send + 'static,
{
	/// Creates a new node handler.
	#[inline]
	pub fn new(registered_custom: Arc<RegisteredProtocols<TUserData>>, endpoint: ConnectedPoint) -> Self {
		let registered_custom_len = registered_custom.len();
		let queued_dial_upgrades = registered_custom.0
			.iter()
			.map(|proto| UpgradePurpose::Custom(proto.id()))
			.collect();

		let address = match endpoint {
			ConnectedPoint::Dialer { address } => address.clone(),
			ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr.clone(),
		};

		SubstrateNodeHandler {
			address,
			custom_protocols_substreams: Vec::with_capacity(registered_custom_len),
			kademlia_substream: None,
			need_report_kad_open: false,
			identify_send_back: Arc::new(Mutex::new(Vec::with_capacity(1))),
			ping_in_substreams: Vec::with_capacity(1),
			ping_out_substream: None,
			active_ping_out: None,
			registered_custom,
			upgrades_in_progress_listen: Vec::with_capacity(registered_custom_len + 3),
			upgrades_in_progress_dial: Vec::with_capacity(registered_custom_len + 3),
			next_ping: Delay::new(Instant::now() + DELAY_TO_FIRST_PING),
			next_identify: Interval::new(Instant::now() + DELAY_TO_FIRST_IDENTIFY, PERIOD_IDENTIFY),
			queued_dial_upgrades,
			num_out_user_must_open: registered_custom_len,
			is_shutting_down: false,
			to_notify: None,
		}
	}
}
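
A hedged sketch of building a handler for an outbound connection; the dial address is a placeholder and the substream type is left to be inferred from how the handler is used.

// Hypothetical setup with no custom protocols registered.
let registered = Arc::new(RegisteredProtocols::default());
let endpoint = ConnectedPoint::Dialer {
	address: "/ip4/127.0.0.1/tcp/30333".parse().expect("hard-coded multiaddr is valid"),
};
let handler = SubstrateNodeHandler::new(registered, endpoint);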

impl<TSubstream, TUserData> NodeHandler<TSubstream> for SubstrateNodeHandler<TSubstream, TUserData>
where TSubstream: AsyncRead + AsyncWrite + Send + 'static,
	TUserData: Clone + Send + 'static,
{
	type InEvent = SubstrateInEvent;
	type OutEvent = SubstrateOutEvent<TSubstream>;
	type OutboundOpenInfo = ();

	fn inject_substream(&mut self, substream: TSubstream, endpoint: NodeHandlerEndpoint<Self::OutboundOpenInfo>) {
		// For listeners, propose all the possible upgrades.
		if endpoint == NodeHandlerEndpoint::Listener {
			let listener_upgrade = listener_upgrade!(self);
			let upgrade = upgrade::apply(substream, listener_upgrade, Endpoint::Listener, &self.address);
			self.upgrades_in_progress_listen.push(Box::new(upgrade) as Box<_>);
			// Since we pushed to `upgrades_in_progress_listen`, we have to notify the task.
			if let Some(task) = self.to_notify.take() {
				task.notify();
			}
			return;
		}

		// If we're the dialer, we have to decide which upgrade we want.
		let purpose = if self.queued_dial_upgrades.is_empty() {
			// Since we sometimes remove elements from `queued_dial_upgrades` before they succeed
			// but after the outbound substream has started opening, it is possible that the queue
			// is empty when we receive a substream. This is not an error.
			// Example: we want to open a Kademlia substream, we start opening one, but in the
			// meanwhile the remote opens a Kademlia substream. When we receive the new substream,
			// we don't need it anymore.
			return;
		} else {
			self.queued_dial_upgrades.remove(0)
		};

		match purpose {
			UpgradePurpose::Custom(id) => {
				let wanted = if let Some(proto) = self.registered_custom.find_protocol(id) {
					// TODO: meh for cloning
					upgrade::map(proto.clone(), move |c| FinalUpgrade::Custom(c))
				} else {
					error!(target: "sub-libp2p", "Logic error: wrong custom protocol id for \
						opened substream");
					return;
				};

				// TODO: shouldn't be &self.address ; requires a change in libp2p
				let upgrade = upgrade::apply(substream, wanted, Endpoint::Dialer, &self.address);
				self.upgrades_in_progress_dial.push((purpose, Box::new(upgrade) as Box<_>));
			}
			UpgradePurpose::Kad => {
				let wanted = upgrade::map(KadConnecConfig::new(), move |(c, s)| FinalUpgrade::Kad(c, s));
				// TODO: shouldn't be &self.address ; requires a change in libp2p
				let upgrade = upgrade::apply(substream, wanted, Endpoint::Dialer, &self.address);
				self.upgrades_in_progress_dial.push((purpose, Box::new(upgrade) as Box<_>));
			}
			UpgradePurpose::Identify => {
				let wanted = upgrade::map(identify::IdentifyProtocolConfig, move |i| FinalUpgrade::from(i));
				// TODO: shouldn't be &self.address ; requires a change in libp2p
				let upgrade = upgrade::apply(substream, wanted, Endpoint::Dialer, &self.address);
				self.upgrades_in_progress_dial.push((purpose, Box::new(upgrade) as Box<_>));
			}
			UpgradePurpose::Ping => {
				let wanted = upgrade::map(ping::Ping::default(), move |p| FinalUpgrade::from(p));
				// TODO: shouldn't be &self.address ; requires a change in libp2p
				let upgrade = upgrade::apply(substream, wanted, Endpoint::Dialer, &self.address);
				self.upgrades_in_progress_dial.push((purpose, Box::new(upgrade) as Box<_>));
			}
		};

		// Since we pushed to `upgrades_in_progress_dial`, we have to notify the task.
		if let Some(task) = self.to_notify.take() {
			task.notify();
		}
	}

	#[inline]
	fn inject_inbound_closed(&mut self) {
	}

	#[inline]
	fn inject_outbound_closed(&mut self, _: Self::OutboundOpenInfo) {
	}

	fn inject_event(&mut self, event: Self::InEvent) {
		match event {
			SubstrateInEvent::SendCustomMessage { protocol, packet_id, data } => {
				self.send_custom_message(protocol, packet_id, data);
			},
			SubstrateInEvent::OpenKademlia => self.open_kademlia(),
			SubstrateInEvent::Accept => {
				// TODO: implement
			},
		}
	}

	fn shutdown(&mut self) {
		// TODO: close gracefully
		self.is_shutting_down = true;
		if let Some(to_notify) = self.to_notify.take() {
			to_notify.notify();
		}
	}

	fn poll(&mut self) -> Poll<Option<NodeHandlerEvent<Self::OutboundOpenInfo, Self::OutEvent>>, IoError> {
		if self.is_shutting_down {
			return Ok(Async::Ready(None));
		}

		match self.poll_upgrades_in_progress()? {
			Async::Ready(value) => return Ok(Async::Ready(value.map(NodeHandlerEvent::Custom))),
			Async::NotReady => (),
		};

		match self.poll_custom_protocols()? {
			Async::Ready(value) => return Ok(Async::Ready(value.map(NodeHandlerEvent::Custom))),
			Async::NotReady => (),
		};

		match self.poll_kademlia()? {
			Async::Ready(value) => return Ok(Async::Ready(value.map(NodeHandlerEvent::Custom))),
			Async::NotReady => (),
		};

		match self.poll_ping()? {
			Async::Ready(value) => return Ok(Async::Ready(value.map(NodeHandlerEvent::Custom))),
			Async::NotReady => (),
		};

		match self.poll_identify()? {
			Async::Ready(value) => return Ok(Async::Ready(value.map(NodeHandlerEvent::Custom))),
			Async::NotReady => (),
		};

		// Request new outbound substreams from the user if necessary.
		if self.num_out_user_must_open >= 1 {
			self.num_out_user_must_open -= 1;
			return Ok(Async::Ready(Some(NodeHandlerEvent::OutboundSubstreamRequest(()))));
		}

		// Nothing happened. Register our task to be notified and return.
		self.to_notify = Some(task::current());
		Ok(Async::NotReady)
	}
}
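
To make the polling contract concrete, here is a hedged sketch of driving the handler by hand in a futures 0.1 task; in practice the swarm does this, and `handler` is assumed to be the value built above.

let drive = future::poll_fn(move || -> Poll<(), IoError> {
	loop {
		match try_ready!(handler.poll()) {
			Some(NodeHandlerEvent::OutboundSubstreamRequest(())) => {
				// Open a new outbound substream, then feed it back through
				// `handler.inject_substream(substream, NodeHandlerEndpoint::Dialer(()))`.
			},
			Some(NodeHandlerEvent::Custom(_event)) => {
				// React to pings, identify results, custom messages, ...
			},
			// The handler finished shutting down.
			None => return Ok(Async::Ready(())),
		}
	}
});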

impl<TSubstream, TUserData> SubstrateNodeHandler<TSubstream, TUserData>
where TSubstream: AsyncRead + AsyncWrite + Send + 'static,
	TUserData: Clone + Send + 'static,
{
	/// Sends a message on a custom protocol substream.
	fn send_custom_message(
		&mut self,
		protocol: ProtocolId,
		packet_id: PacketId,
		data: Vec<u8>,
	) {
		debug_assert!(self.registered_custom.has_protocol(protocol),
			"invalid protocol id requested in the API of the libp2p networking");
		let proto = match self.custom_protocols_substreams.iter().find(|p| p.protocol_id == protocol) {
			Some(proto) => proto,
			None => {
				// We are processing a message event before we could report to the outside that
				// we disconnected from the protocol. This is not an error.
				return
			},
		};

		let mut message = Bytes::with_capacity(1 + data.len());
		message.extend_from_slice(&[packet_id]);
		message.extend_from_slice(&data);

		if let Err(_) = proto.outgoing.unbounded_send(message) {
			error!(target: "sub-libp2p", "Error while sending custom message to channel");
		}
	}

	/// The node will try to open a Kademlia substream and produce a `KadOpen` event containing the
	/// controller. If a Kademlia substream is already open, produces the event immediately.
	fn open_kademlia(&mut self) {
		if self.kademlia_substream.is_some() {
			self.need_report_kad_open = true;
			if let Some(to_notify) = self.to_notify.take() {
				to_notify.notify();
			}
		} else if self.has_upgrade_purpose(&UpgradePurpose::Kad) {
			// We are currently upgrading a substream to Kademlia ; nothing more to do except wait.
		} else {
			// Opening a new substream for Kademlia.
			self.queued_dial_upgrades.push(UpgradePurpose::Kad);
			self.num_out_user_must_open += 1;
			if let Some(to_notify) = self.to_notify.take() {
				to_notify.notify();
			}
		}
	}

	/// Returns true if we are currently upgrading to the given protocol.
	fn has_upgrade_purpose(&self, purpose: &UpgradePurpose) -> bool {
		self.upgrades_in_progress_dial.iter().any(|&(ref p, _)| p == purpose) ||
			self.queued_dial_upgrades.iter().any(|p| p == purpose)
	}

	/// Cancels a dialing upgrade in progress.
	///
	/// Useful when the listener opened the protocol we wanted.
	fn cancel_dial_upgrade(&mut self, purpose: &UpgradePurpose) {
		self.upgrades_in_progress_dial.retain(|&(purp, _)| &purp != purpose);
		self.queued_dial_upgrades.retain(|u| u != purpose);
	}

	/// Returns the names of the protocols that we support.
	fn supported_protocol_names(&self) -> Vec<String> {
		let list = listener_upgrade!(self);
		ConnectionUpgrade::<TSubstream>::protocol_names(&list)
			.filter_map(|(n, _)| String::from_utf8(n.to_vec()).ok())
			.collect()
	}

	/// Injects a fully negotiated substream into the state.
	///
	/// Optionally produces an event to dispatch.
	fn inject_fully_negotiated(
		&mut self,
		upgrade: FinalUpgrade<TSubstream, TUserData>
	) -> Option<SubstrateOutEvent<TSubstream>> {
		match upgrade {
			FinalUpgrade::IdentifyListener(sender) =>
				Some(SubstrateOutEvent::IdentificationRequest(IdentificationRequest {
					sender,
					identify_send_back: self.identify_send_back.clone(),
					protocols: self.supported_protocol_names(),
				})),
			FinalUpgrade::IdentifyDialer(info, observed_addr) => {
				self.cancel_dial_upgrade(&UpgradePurpose::Identify);
				Some(SubstrateOutEvent::Identified { info, observed_addr })
			},
			FinalUpgrade::PingDialer(ping_dialer) => {
				self.cancel_dial_upgrade(&UpgradePurpose::Ping);
				// We always open the ping substream for a reason, which is to immediately ping.
				self.ping_out_substream = Some(ping_dialer);
				self.active_ping_out = None;
				if self.ping_remote() {
					Some(SubstrateOutEvent::PingStart)
				} else {
					None
				}
			},
			FinalUpgrade::PingListener(ping_listener) => {
				self.ping_in_substreams.push(ping_listener);
				None
			},
			FinalUpgrade::Kad(controller, stream) => {
				// Remove all upgrades in progress for Kademlia.
				self.cancel_dial_upgrade(&UpgradePurpose::Kad);
				// Refuse the substream if we already have a Kademlia substream open.
				if self.kademlia_substream.is_none() {
					self.kademlia_substream = Some((controller.clone(), stream));
					Some(SubstrateOutEvent::KadOpen(controller))
				} else {
					None
				}
			},
			FinalUpgrade::Custom(proto) => {
				self.cancel_dial_upgrade(&UpgradePurpose::Custom(proto.protocol_id));
				if self.custom_protocols_substreams.iter().any(|p| p.protocol_id == proto.protocol_id) {
					// Skipping protocol that's already open.
					return None;
				}

				let event = SubstrateOutEvent::CustomProtocolOpen {
					protocol_id: proto.protocol_id,
					version: proto.protocol_version,
				};

				self.custom_protocols_substreams.push(proto);
				Some(event)
			},
		}
	}

	/// Starts the process of identifying the remote.
	fn identify_remote(&mut self) {
		if !self.has_upgrade_purpose(&UpgradePurpose::Identify) {
			self.queued_dial_upgrades.push(UpgradePurpose::Identify);
			self.num_out_user_must_open += 1;
			if let Some(to_notify) = self.to_notify.take() {
				to_notify.notify();
			}
		}
	}

	/// Starts the process of pinging the remote.
	///
	/// Doesn't do anything if a ping attempt is already in progress.
	///
	/// Returns true if this actually starts a ping, false if this just opens a substream or does
	/// nothing.
	fn ping_remote(&mut self) -> bool {
		// Ignore if we are already actively pinging.
		if self.active_ping_out.is_some() {
			return false;
		}

		// If we have a ping open, ping it!
		if let Some(ref mut pinger) = self.ping_out_substream {
			let now = Instant::now();
			pinger.ping(now);
			let future = Delay::new(now + PING_TIMEOUT);
			self.active_ping_out = Some(future);
			if let Some(to_notify) = self.to_notify.take() {
				to_notify.notify();
			}
			return true;
		}

		// Otherwise, ensure we have an upgrade for a ping substream in queue.
		if !self.has_upgrade_purpose(&UpgradePurpose::Ping) {
			self.queued_dial_upgrades.push(UpgradePurpose::Ping);
			self.num_out_user_must_open += 1;
			// We also start the unresponsiveness counter when opening the substream, as a
			// peer may not respond to our opening request.
			let future = Delay::new(Instant::now() + PING_TIMEOUT);
			self.active_ping_out = Some(future);
			if let Some(to_notify) = self.to_notify.take() {
				to_notify.notify();
			}
		}

		false
	}
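
	// The unresponsiveness detection above reduces to a single `Delay` racing
	// the pong; a hedged restatement of just that state machine:
	//
	//     // Armed when a ping (or a request to open the ping substream) starts:
	//     let deadline = Delay::new(Instant::now() + PING_TIMEOUT);
	//     // Dropped when `PingSuccess` is produced; if it fires first,
	//     // `poll_ping` reports `SubstrateOutEvent::Unresponsive` instead.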

	/// Polls the upgrades in progress.
	fn poll_upgrades_in_progress(&mut self) -> Poll<Option<SubstrateOutEvent<TSubstream>>, IoError> {
		// Continue negotiation of newly-opened substreams on the listening side.
		// We remove each element from `upgrades_in_progress_listen` one by one and add them back
		// if not ready.
		for n in (0 .. self.upgrades_in_progress_listen.len()).rev() {
			let mut in_progress = self.upgrades_in_progress_listen.swap_remove(n);
			match in_progress.poll() {
				Ok(Async::Ready(upgrade)) => {
					if let Some(event) = self.inject_fully_negotiated(upgrade) {
						return Ok(Async::Ready(Some(event)));
					}
				},
				Ok(Async::NotReady) => {
					self.upgrades_in_progress_listen.push(in_progress);
				},
				Err(err) => {
					return Ok(Async::Ready(Some(SubstrateOutEvent::SubstreamUpgradeFail(err))));
				},
			}
		}

		// Continue negotiation of newly-opened substreams.
		// We remove each element from `upgrades_in_progress_dial` one by one and add them back if
		// not ready.
		for n in (0 .. self.upgrades_in_progress_dial.len()).rev() {
			let (purpose, mut in_progress) = self.upgrades_in_progress_dial.swap_remove(n);
			match in_progress.poll() {
				Ok(Async::Ready(upgrade)) => {
					if let Some(event) = self.inject_fully_negotiated(upgrade) {
						return Ok(Async::Ready(Some(event)));
					}
				},
				Ok(Async::NotReady) =>
					self.upgrades_in_progress_dial.push((purpose, in_progress)),
				Err(err) => {
					// TODO: dispatch depending on actual error ; right now we assume that
					// error == not supported, which is not necessarily true in theory
					if let UpgradePurpose::Custom(_) = purpose {
						return Ok(Async::Ready(Some(SubstrateOutEvent::Useless)));
					} else {
						let msg = format!("While upgrading to {:?}: {:?}", purpose, err);
						let err = IoError::new(IoErrorKind::Other, msg);
						return Ok(Async::Ready(Some(SubstrateOutEvent::SubstreamUpgradeFail(err))));
					}
				},
			}
		}

		Ok(Async::NotReady)
	}

	/// Polls the custom protocol substreams.
	fn poll_custom_protocols(&mut self) -> Poll<Option<SubstrateOutEvent<TSubstream>>, IoError> {
		// Poll for messages on the custom protocol stream.
		for n in (0 .. self.custom_protocols_substreams.len()).rev() {
			let mut custom_proto = self.custom_protocols_substreams.swap_remove(n);
			match custom_proto.incoming.poll() {
				Ok(Async::NotReady) => self.custom_protocols_substreams.push(custom_proto),
				Ok(Async::Ready(Some((packet_id, data)))) => {
					let protocol_id = custom_proto.protocol_id;
					self.custom_protocols_substreams.push(custom_proto);
					return Ok(Async::Ready(Some(SubstrateOutEvent::CustomMessage {
						protocol_id,
						packet_id,
						data,
					})));
				},
				Ok(Async::Ready(None)) => {
					// Trying to reopen the protocol.
					self.queued_dial_upgrades.push(UpgradePurpose::Custom(custom_proto.protocol_id));
					self.num_out_user_must_open += 1;
					return Ok(Async::Ready(Some(SubstrateOutEvent::CustomProtocolClosed {
						protocol_id: custom_proto.protocol_id,
						result: Ok(()),
					})))
				},
				Err(err) => {
					// Trying to reopen the protocol.
					self.queued_dial_upgrades.push(UpgradePurpose::Custom(custom_proto.protocol_id));
					self.num_out_user_must_open += 1;
					return Ok(Async::Ready(Some(SubstrateOutEvent::CustomProtocolClosed {
						protocol_id: custom_proto.protocol_id,
						result: Err(err),
					})))
				},
			}
		}

		Ok(Async::NotReady)
	}

	/// Polls the open Kademlia substream, if any.
	fn poll_kademlia(&mut self) -> Poll<Option<SubstrateOutEvent<TSubstream>>, IoError> {
		// Produce a `KadOpen` event if necessary.
		if self.need_report_kad_open {
			self.need_report_kad_open = false;
			if let Some((ref kad_ctrl, _)) = self.kademlia_substream {
				return Ok(Async::Ready(Some(SubstrateOutEvent::KadOpen(kad_ctrl.clone()))));
			}
		}

		// Poll for Kademlia events.
		if let Some((controller, mut stream)) = self.kademlia_substream.take() {
			loop {
				match stream.poll() {
					Ok(Async::Ready(Some(KadIncomingRequest::FindNode { searched, responder }))) => {
						self.kademlia_substream = Some((controller, stream));
						return Ok(Async::Ready(Some(SubstrateOutEvent::KadFindNode { searched, responder })));
					},
					// We don't care about Kademlia pings, they are unused.
					Ok(Async::Ready(Some(KadIncomingRequest::PingPong))) => {},
					Ok(Async::NotReady) => {
						self.kademlia_substream = Some((controller, stream));
						break;
					},
					Ok(Async::Ready(None)) => return Ok(Async::Ready(Some(SubstrateOutEvent::KadClosed(Ok(()))))),
					Err(err) => return Ok(Async::Ready(Some(SubstrateOutEvent::KadClosed(Err(err))))),
				}
			}
		}

		Ok(Async::NotReady)
	}

	/// Polls the ping substreams.
	fn poll_ping(&mut self) -> Poll<Option<SubstrateOutEvent<TSubstream>>, IoError> {
		// Poll the future that fires when we need to ping the node again.
		match self.next_ping.poll() {
			Ok(Async::NotReady) => {},
			Ok(Async::Ready(())) => {
				// We reset `next_ping` to a very long time in the future so that we can poll
				// it again without having an accident.
				self.next_ping.reset(Instant::now() + Duration::from_secs(5 * 60));
				if self.ping_remote() {
					return Ok(Async::Ready(Some(SubstrateOutEvent::PingStart)));
				}
			},
			Err(err) => {
				warn!(target: "sub-libp2p", "Ping timer errored: {:?}", err);
				return Err(IoError::new(IoErrorKind::Other, err));
			}
		}

		// Poll for answering pings.
		for n in (0 .. self.ping_in_substreams.len()).rev() {
			let mut ping = self.ping_in_substreams.swap_remove(n);
			match ping.poll() {
				Ok(Async::Ready(())) => {},
				Ok(Async::NotReady) => self.ping_in_substreams.push(ping),
				Err(err) => warn!(target: "sub-libp2p", "Remote ping substream errored: {:?}", err),
			}
		}

		// Poll the ping substream.
		if let Some(mut ping_dialer) = self.ping_out_substream.take() {
			match ping_dialer.poll() {
				Ok(Async::Ready(Some(started))) => {
					self.active_ping_out = None;
					self.next_ping.reset(Instant::now() + DELAY_TO_NEXT_PING);
					return Ok(Async::Ready(Some(SubstrateOutEvent::PingSuccess(started.elapsed()))));
				},
				Ok(Async::Ready(None)) => {
					// Try to re-open the ping substream if it got closed.
					self.queued_dial_upgrades.push(UpgradePurpose::Ping);
					self.num_out_user_must_open += 1;
				},
				Ok(Async::NotReady) => self.ping_out_substream = Some(ping_dialer),
				Err(_) => {},
			}
		}

		// Poll the active ping attempt.
		if let Some(mut deadline) = self.active_ping_out.take() {
			match deadline.poll() {
				Ok(Async::Ready(())) =>
					return Ok(Async::Ready(Some(SubstrateOutEvent::Unresponsive))),
				Ok(Async::NotReady) => self.active_ping_out = Some(deadline),
				Err(err) => {
					warn!(target: "sub-libp2p", "Active ping deadline errored: {:?}", err);
					return Err(IoError::new(IoErrorKind::Other, err));
				},
			}
		}

		Ok(Async::NotReady)
	}

	/// Polls the identify substreams.
	fn poll_identify(&mut self) -> Poll<Option<SubstrateOutEvent<TSubstream>>, IoError> {
		// Poll the future that fires when we need to identify the node again.
		loop {
			match self.next_identify.poll() {
				Ok(Async::NotReady) => break,
				Ok(Async::Ready(Some(_))) => self.identify_remote(),
				Ok(Async::Ready(None)) => {
					warn!(target: "sub-libp2p", "Identify timer closed unexpectedly");
					return Ok(Async::Ready(None));
				}
				Err(err) => {
					warn!(target: "sub-libp2p", "Identify timer errored: {:?}", err);
					return Err(IoError::new(IoErrorKind::Other, err));
				}
			}
		}

		// Poll for sending identify information to the remote.
		let mut identify_send_back = self.identify_send_back.lock();
		for n in (0 .. identify_send_back.len()).rev() {
			let mut id_send_back = identify_send_back.swap_remove(n);
			match id_send_back.poll() {
				Ok(Async::Ready(())) => {},
				Ok(Async::NotReady) => identify_send_back.push(id_send_back),
				Err(err) => warn!(target: "sub-libp2p", "Sending back identify info errored: {:?}", err),
			}
		}

		Ok(Async::NotReady)
	}
}

/// Enum of all the possible protocols our service handles.
enum FinalUpgrade<TSubstream, TUserData> {
	Kad(KadConnecController, Box<Stream<Item = KadIncomingRequest, Error = IoError> + Send>),
	IdentifyListener(identify::IdentifySender<TSubstream>),
	IdentifyDialer(identify::IdentifyInfo, Multiaddr),
	PingDialer(ping::PingDialer<TSubstream, Instant>),
	PingListener(ping::PingListener<TSubstream>),
	Custom(RegisteredProtocolOutput<TUserData>),
}

impl<TSubstream, TUserData> From<ping::PingOutput<TSubstream, Instant>> for FinalUpgrade<TSubstream, TUserData> {
	fn from(out: ping::PingOutput<TSubstream, Instant>) -> Self {
		match out {
			ping::PingOutput::Ponger(ponger) => FinalUpgrade::PingListener(ponger),
			ping::PingOutput::Pinger(pinger) => FinalUpgrade::PingDialer(pinger),
		}
	}
}

impl<TSubstream, TUserData> From<identify::IdentifyOutput<TSubstream>> for FinalUpgrade<TSubstream, TUserData> {
	fn from(out: identify::IdentifyOutput<TSubstream>) -> Self {
		match out {
			identify::IdentifyOutput::RemoteInfo { info, observed_addr } =>
				FinalUpgrade::IdentifyDialer(info, observed_addr),
			identify::IdentifyOutput::Sender { sender } =>
				FinalUpgrade::IdentifyListener(sender),
		}
	}
}

@@ -0,0 +1,132 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use libp2p::secio;
use rand::{self, Rng};
use std::fs;
use std::io::{Error as IoError, ErrorKind as IoErrorKind, Read, Write};
use std::path::Path;
use NetworkConfiguration;

// File where the private key is stored.
const SECRET_FILE: &str = "secret";

/// Obtains or generates the local private key using the configuration.
pub(crate) fn obtain_private_key(
	config: &NetworkConfiguration
) -> Result<secio::SecioKeyPair, IoError> {
	if let Some(ref secret) = config.use_secret {
		// Key was specified in the configuration.
		secio::SecioKeyPair::secp256k1_raw_key(&secret[..])
			.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))

	} else {
		if let Some(ref path) = config.net_config_path {
			fs::create_dir_all(Path::new(path))?;

			// Try to fetch the key from the file containing the secret.
			let secret_path = Path::new(path).join(SECRET_FILE);
			match load_private_key_from_file(&secret_path) {
				Ok(s) => Ok(s),
				Err(err) => {
					// Failed to fetch existing file ; generate a new key
					trace!(target: "sub-libp2p",
						"Failed to load existing secret key file {:?}, generating new key ; err = {:?}",
						secret_path,
						err
					);
					Ok(gen_key_and_try_write_to_file(&secret_path))
				}
			}

		} else {
			// No path in the configuration, nothing we can do except generate
			// a new key.
			let mut key: [u8; 32] = [0; 32];
			rand::rngs::EntropyRng::new().fill(&mut key);
			Ok(secio::SecioKeyPair::secp256k1_raw_key(&key)
				.expect("randomly-generated key with correct len should always be valid"))
		}
	}
}
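
A hedged usage sketch; `config` is assumed to be a `NetworkConfiguration` whose `use_secret` and `net_config_path` fields are the ones consulted above.

// Derive the local peer id from the obtained key pair.
let key_pair = obtain_private_key(&config)?;
let local_peer_id = key_pair.to_public_key().into_peer_id();
println!("local peer id: {:?}", local_peer_id);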

/// Tries to load a private key from a file located at the given path.
fn load_private_key_from_file<P>(path: P)
	-> Result<secio::SecioKeyPair, IoError>
	where P: AsRef<Path> {
	fs::File::open(path)
		.and_then(|mut file| {
			// We are in 2018 and there is still no method on `std::io::Read`
			// that directly returns a `Vec`.
			let mut buf = Vec::new();
			file.read_to_end(&mut buf).map(|_| buf)
		})
		.and_then(|content|
			secio::SecioKeyPair::secp256k1_raw_key(&content)
				.map_err(|err| IoError::new(IoErrorKind::InvalidData, err))
		)
}

/// Generates a new secret key and tries to write it to the given file.
/// Doesn't error if we couldn't open or write to the file.
fn gen_key_and_try_write_to_file<P>(path: P) -> secio::SecioKeyPair
	where P: AsRef<Path> {
	let raw_key: [u8; 32] = rand::rngs::EntropyRng::new().gen();
	let secio_key = secio::SecioKeyPair::secp256k1_raw_key(&raw_key)
		.expect("randomly-generated key with correct len should always be valid");

	// And store the newly-generated key in the file if possible.
	// Errors that happen while doing so are ignored.
	match open_priv_key_file(&path) {
		Ok(mut file) =>
			match file.write_all(&raw_key) {
				Ok(()) => (),
				Err(err) => warn!(target: "sub-libp2p",
					"Failed to write secret key in file {:?} ; err = {:?}",
					path.as_ref(),
					err
				),
			},
		Err(err) => warn!(target: "sub-libp2p",
			"Failed to store secret key in file {:?} ; err = {:?}",
			path.as_ref(),
			err
		),
	}

	secio_key
}

/// Opens a file containing a private key in write mode.
#[cfg(unix)]
fn open_priv_key_file<P>(path: P) -> Result<fs::File, IoError>
	where P: AsRef<Path> {
	use std::os::unix::fs::OpenOptionsExt;
	fs::OpenOptions::new()
		.write(true)
		.create_new(true)
		.mode(256 | 128) // = 384 = 0o600: read and write for the owner only
		.open(path)
}

/// Opens a file containing a private key in write mode.
#[cfg(not(unix))]
fn open_priv_key_file<P>(path: P) -> Result<fs::File, IoError>
	where P: AsRef<Path> {
	fs::OpenOptions::new()
		.write(true)
		.create_new(true)
		.open(path)
}

File diff suppressed because it is too large
@@ -0,0 +1,987 @@

// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use bytes::Bytes;
use custom_proto::RegisteredProtocols;
use fnv::{FnvHashMap, FnvHashSet};
use futures::{prelude::*, task, Stream};
use futures::sync::{oneshot, mpsc};
use libp2p::{Multiaddr, PeerId};
use libp2p::core::{Endpoint, PublicKey};
use libp2p::core::nodes::swarm::ConnectedPoint;
use libp2p::kad::{KadSystem, KadSystemConfig, KadConnecController, KadPeer};
use libp2p::kad::{KadConnectionType, KadQueryEvent};
use parking_lot::Mutex;
use rand;
use secret::obtain_private_key;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::iter;
use std::net::SocketAddr;
use std::path::Path;
use std::sync::Arc;
use std::time::{Duration, Instant};
use swarm::{self, Swarm, SwarmEvent};
use topology::{DisconnectReason, NetTopology};
use tokio_timer::{Delay, Interval};
use {Error, ErrorKind, NetworkConfiguration, NetworkProtocolHandler, NodeIndex, parse_str_addr};
use {NonReservedPeerMode, PacketId, ProtocolId};

// File where the network topology is stored.
const NODES_FILE: &str = "nodes.json";
// Duration during which a peer is disabled.
const PEER_DISABLE_DURATION: Duration = Duration::from_secs(5 * 60);

/// Starts the substrate libp2p service.
///
/// Returns a stream that must be polled regularly in order for the networking to function.
pub fn start_service(
	config: NetworkConfiguration,
	registered_custom: Arc<RegisteredProtocols<Arc<NetworkProtocolHandler + Send + Sync>>>,
) -> Result<Service, Error> {
	// Private and public keys configuration.
	let local_private_key = obtain_private_key(&config)?;
	let local_public_key = local_private_key.to_public_key();
	let local_peer_id = local_public_key.clone().into_peer_id();

	// Build the swarm.
	let mut swarm = swarm::start_swarm(registered_custom, local_private_key)?;

	// Listen on multiaddresses.
	for addr in &config.listen_addresses {
		match swarm.listen_on(addr.clone()) {
			Ok(new_addr) => debug!(target: "sub-libp2p", "Libp2p listening on {}", new_addr),
			Err(_) => {
				warn!(target: "sub-libp2p", "Can't listen on {}, protocol not supported", addr);
				return Err(ErrorKind::BadProtocol.into())
			},
		}
	}

	// Register the external addresses provided by the user.
	for addr in &config.public_addresses {
		swarm.add_external_address(addr.clone());
	}

	// Initialize the topology of the network.
	let mut topology = if let Some(ref path) = config.net_config_path {
		let path = Path::new(path).join(NODES_FILE);
		debug!(target: "sub-libp2p", "Initializing peer store for JSON file {:?}", path);
		NetTopology::from_file(path)
	} else {
		debug!(target: "sub-libp2p", "No peers file configured ; peers won't be saved");
		NetTopology::memory()
	};

	// Create the Kademlia system, containing the kbuckets.
	let kad_system = KadSystem::without_init(KadSystemConfig {
		parallelism: 3,
		local_peer_id,
		kbuckets_timeout: Duration::from_secs(600),
		request_timeout: Duration::from_secs(10),
		known_initial_peers: iter::empty(),
	});

	// Add the bootstrap nodes to the topology and connect to them.
	for bootnode in config.boot_nodes.iter() {
		match parse_str_addr(bootnode) {
			Ok((peer_id, addr)) => {
				topology.add_bootstrap_addr(&peer_id, addr.clone());
				kad_system.update_kbuckets(peer_id.clone());
				if let Err(_) = swarm.ensure_connection(peer_id, addr) {
					warn!(target: "sub-libp2p", "Failed to dial boot node: {}", bootnode);
				}
			},
			Err(_) => {
				// If the format of the bootstrap node is not a multiaddr, try to parse it as
				// a `SocketAddr`. This corresponds to the format `IP:PORT`.
				let addr = match bootnode.parse::<SocketAddr>() {
					Ok(SocketAddr::V4(socket)) => multiaddr![Ip4(*socket.ip()), Tcp(socket.port())],
					Ok(SocketAddr::V6(socket)) => multiaddr![Ip6(*socket.ip()), Tcp(socket.port())],
					_ => {
						warn!(target: "sub-libp2p", "Not a valid bootnode address: {}", bootnode);
						continue;
					}
				};

				debug!(target: "sub-libp2p", "Dialing {} with no peer id", addr);
				if let Err(addr) = swarm.dial(addr) {
					warn!(target: "sub-libp2p", "Bootstrap address not supported: {}", addr);
				}
			},
		}
	}

	// Initialize the reserved peers.
	let mut reserved_peers = FnvHashSet::default();
	for reserved in config.reserved_nodes.iter() {
		match parse_str_addr(reserved) {
			Ok((peer_id, addr)) => {
				reserved_peers.insert(peer_id.clone());
				topology.add_bootstrap_addr(&peer_id, addr.clone());
				if let Err(_) = swarm.ensure_connection(peer_id, addr) {
					warn!(target: "sub-libp2p", "Failed to dial reserved node: {}", reserved);
				}
			},
			Err(_) =>
				// TODO: also handle the `IP:PORT` format ; however we need to somehow add the
				// reserved ID to `reserved_peers` at some point
				warn!(target: "sub-libp2p", "Not a valid reserved node address: {}", reserved),
		}
	}

	debug!(target: "sub-libp2p", "Topology started with {} entries", topology.num_peers());

	let (kad_new_ctrl_req_tx, kad_new_ctrl_req_rx) = mpsc::unbounded();

	Ok(Service {
		swarm,
		max_incoming_connections: config.max_peers.saturating_sub(config.min_peers) as usize,
		max_outgoing_connections: config.min_peers as usize,
		topology,
		nodes_addresses: Default::default(),
		disabled_peers: Default::default(),
		reserved_peers,
		reserved_only: config.non_reserved_mode == NonReservedPeerMode::Deny,
		kad_system,
		kad_pending_ctrls: Default::default(),
		kad_new_ctrl_req_tx,
		kad_new_ctrl_req_rx,
		kad_queries: Vec::with_capacity(1),
		next_connect_to_nodes: Delay::new(Instant::now()),
		next_kad_random_query: Interval::new(Instant::now() + Duration::from_secs(5), Duration::from_secs(45)),
		cleanup: Interval::new_interval(Duration::from_secs(60)),
		injected_events: Vec::new(),
		to_notify: None,
	})
}
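
A hedged sketch of bringing the service up; `config` is assumed to be a `NetworkConfiguration` built elsewhere, and no custom protocols are registered.

let service = start_service(config, Arc::new(RegisteredProtocols::default()))?;
for addr in service.listeners() {
	println!("listening on {}", addr);
}
// `service` must now be polled regularly; see the event-loop sketch below.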

/// Event produced by the service.
pub enum ServiceEvent {
	/// We have successfully connected to a new node.
	NewNode {
		/// Index that was attributed to this node. Will be used for all further interaction with
		/// it.
		node_index: NodeIndex,
		/// Public key of the node as a peer id.
		peer_id: PeerId,
		/// Whether we dialed the node or if it came to us. Should be used only for statistics
		/// purposes.
		endpoint: ConnectedPoint,
	},

	/// Closed connection to a node.
	///
	/// It is guaranteed that this node has been opened with a `NewNode` event beforehand. However,
	/// not all `ClosedCustomProtocol` events may have been dispatched.
	NodeClosed {
		/// Index of the node.
		node_index: NodeIndex,
		/// List of custom protocols that were still open.
		closed_custom_protocols: Vec<ProtocolId>,
	},

	/// Report the duration of the ping for the given node.
	PingDuration(NodeIndex, Duration),

	/// Report information about the node.
	NodeInfos {
		/// Index of the node.
		node_index: NodeIndex,
		/// The client version. Note that it can be anything and should not be trusted.
		client_version: String,
	},

	/// A custom protocol substream has been opened with a node.
	OpenedCustomProtocol {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol that has been opened.
		protocol: ProtocolId,
		/// Version of the protocol that was opened.
		version: u8,
	},

	/// A custom protocol substream has been closed.
	ClosedCustomProtocol {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol that has been closed.
		protocol: ProtocolId,
	},

	/// Custom protocol substreams have been closed.
	///
	/// Same as `ClosedCustomProtocol` but with multiple protocols.
	ClosedCustomProtocols {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocols that have been closed.
		protocols: Vec<ProtocolId>,
	},

	/// Receives a message on a custom protocol stream.
	CustomMessage {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol which generated the message.
		protocol_id: ProtocolId,
		/// Identifier of the packet.
		packet_id: u8,
		/// Data that has been received.
		data: Bytes,
	},
}
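
A hedged sketch of consuming these events, assuming the `Stream` implementation mentioned in `start_service`'s documentation yields `ServiceEvent`s:

let event_loop = service.for_each(|event| {
	match event {
		ServiceEvent::NewNode { node_index, peer_id, .. } =>
			println!("connected to {:?} (node {:?})", peer_id, node_index),
		ServiceEvent::CustomMessage { node_index, packet_id, data, .. } =>
			println!("packet {} ({} bytes) from node {:?}", packet_id, data.len(), node_index),
		_ => {},
	}
	Ok(())
});
// `event_loop` is a future to be spawned on the executor driving the network.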

/// Network service. Must be polled regularly in order for the networking to work.
pub struct Service {
	/// Stream of events of the swarm.
	swarm: Swarm<Arc<NetworkProtocolHandler + Send + Sync>>,

	/// Maximum number of incoming non-reserved connections, taken from the config.
	max_incoming_connections: usize,

	/// Maximum number of outgoing non-reserved connections, taken from the config.
	max_outgoing_connections: usize,

	/// For each node we're connected to, its address if known.
	///
	/// This is used purely to report disconnections to the topology.
	nodes_addresses: FnvHashMap<NodeIndex, Multiaddr>,

	/// If true, only reserved peers can connect.
	reserved_only: bool,

	/// List of the IDs of the reserved peers.
	reserved_peers: FnvHashSet<PeerId>,

	/// List of the IDs of disabled peers, and when the ban expires.
	/// Purged at a regular interval.
	disabled_peers: FnvHashMap<PeerId, Instant>,

	/// Topology of the network.
	topology: NetTopology,

	/// Handles the Kademlia queries.
	// TODO: put the kbuckets in the topology instead
	kad_system: KadSystem,

	/// List of Kademlia controllers we want to open.
	///
	/// A clone of this `Arc` is stored in each Kademlia query stream.
	// TODO: use a better container?
	kad_pending_ctrls: Arc<Mutex<FnvHashMap<PeerId, Vec<oneshot::Sender<KadConnecController>>>>>,

	/// Sender notified whenever we insert an entry in `kad_pending_ctrls`, so that it can be
	/// processed.
	kad_new_ctrl_req_tx: mpsc::UnboundedSender<PeerId>,
	/// Receiver side of `kad_new_ctrl_req_tx`.
	kad_new_ctrl_req_rx: mpsc::UnboundedReceiver<PeerId>,

	/// Active Kademlia queries.
	kad_queries: Vec<Box<Stream<Item = KadQueryEvent<Vec<PeerId>>, Error = IoError> + Send>>,

	/// Future that will fire when we need to connect to new nodes.
	next_connect_to_nodes: Delay,

	/// Stream that fires when we need to perform the next Kademlia query.
	next_kad_random_query: Interval,

	/// Stream that fires when we need to cleanup and flush the topology, and cleanup the disabled
	/// peers.
	cleanup: Interval,

	/// Events to produce on the Stream.
	injected_events: Vec<ServiceEvent>,

	/// Task to notify when elements are added to `injected_events`.
	to_notify: Option<task::Task>,
}

impl Service {
	/// Returns an iterator that produces the list of addresses we're listening on.
	#[inline]
	pub fn listeners(&self) -> impl Iterator<Item = &Multiaddr> {
		self.swarm.listeners()
	}

	/// Returns the peer id of the local node.
	#[inline]
	pub fn peer_id(&self) -> &PeerId {
		self.kad_system.local_peer_id()
	}

	/// Try to add a reserved peer.
	pub fn add_reserved_peer(&mut self, peer_id: PeerId, addr: Multiaddr) {
		self.reserved_peers.insert(peer_id.clone());
		self.topology.add_bootstrap_addr(&peer_id, addr.clone());
		let _ = self.swarm.ensure_connection(peer_id, addr);
	}

	/// Try to remove a reserved peer.
	// TODO: remove `_addr` parameter?
	pub fn remove_reserved_peer(&mut self, peer_id: PeerId, _addr: Multiaddr) {
		self.reserved_peers.remove(&peer_id);
		if self.reserved_only {
			if let Some(node_index) = self.swarm.latest_node_by_peer_id(&peer_id) {
				self.drop_node_inner(node_index, DisconnectReason::NoSlot, None);
			}
		}
	}

	/// Sets the non-reserved peer mode.
	pub fn set_non_reserved_mode(&mut self, mode: NonReservedPeerMode) {
		self.reserved_only = mode == NonReservedPeerMode::Deny;
		if self.reserved_only {
			// Disconnect the nodes that are not reserved.
			let to_disconnect: Vec<NodeIndex> = self.swarm
				.nodes()
				.filter(|&n| {
					let peer_id = self.swarm.peer_id_of_node(n)
						.expect("swarm.nodes() always returns valid node indices");
					!self.reserved_peers.contains(peer_id)
				})
				.collect();
			for node_index in to_disconnect {
				self.drop_node_inner(node_index, DisconnectReason::NoSlot, None);
			}
		} else {
			self.connect_to_nodes();
		}
	}

	/// Sends a message to a peer using the custom protocol.
	// TODO: report invalid node index or protocol?
	pub fn send_custom_message(
		&mut self,
		node_index: NodeIndex,
		protocol: ProtocolId,
		packet_id: PacketId,
		data: Vec<u8>
	) {
		self.swarm.send_custom_message(node_index, protocol, packet_id, data)
	}

	/// Disconnects a peer and bans it for a little while.
	///
	/// Same as `drop_node`, except that the same peer will not be able to reconnect later.
	#[inline]
	pub fn ban_node(&mut self, node_index: NodeIndex) {
		self.drop_node_inner(node_index, DisconnectReason::Banned, Some(PEER_DISABLE_DURATION));
	}

	/// Disconnects a peer.
	///
	/// This is asynchronous and will not immediately close the peer.
	/// Corresponding closing events will be generated once the closing actually happens.
	#[inline]
	pub fn drop_node(&mut self, node_index: NodeIndex) {
		self.drop_node_inner(node_index, DisconnectReason::Useless, None);
	}
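
	// For contrast, the two public disconnection entry points side by side;
	// `node_index` is assumed to come from an earlier `NewNode` event.
	//
	//     // Misbehaving peer: disconnect and refuse reconnection for
	//     // PEER_DISABLE_DURATION (five minutes, see above).
	//     service.ban_node(node_index);
	//     // Uninteresting peer: disconnect, but let it come back.
	//     service.drop_node(node_index);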

	/// Common implementation of `drop_node` and `ban_node`.
	fn drop_node_inner(
		&mut self,
		node_index: NodeIndex,
		reason: DisconnectReason,
		disable_duration: Option<Duration>
	) {
		let peer_id = match self.swarm.peer_id_of_node(node_index) {
			Some(pid) => pid.clone(),
			None => return, // TODO: report?
		};

		// Kill the node from the swarm, and inject an event about it.
		let closed_custom_protocols = self.swarm.drop_node(node_index)
			.expect("we checked right above that node is valid");
		self.injected_events.push(ServiceEvent::NodeClosed {
			node_index,
			closed_custom_protocols,
		});

		if let Some(to_notify) = self.to_notify.take() {
			to_notify.notify();
		}

		if let Some(addr) = self.nodes_addresses.remove(&node_index) {
			self.topology.report_disconnected(&addr, reason);
		}

		if let Some(disable_duration) = disable_duration {
			let timeout = Instant::now() + disable_duration;
			self.disabled_peers.insert(peer_id, timeout);
		}

		self.connect_to_nodes();
	}

	/// Counts the number of non-reserved incoming connections.
	fn num_ingoing_connections(&self) -> usize {
		self.swarm.nodes()
			.filter(|&i| self.swarm.node_endpoint(i) == Some(Endpoint::Listener) &&
				!self.reserved_peers.contains(&self.swarm.peer_id_of_node(i).unwrap()))
			.count()
	}

	/// Counts the number of non-reserved outgoing connections.
	fn num_outgoing_connections(&self) -> usize {
		self.swarm.nodes()
			.filter(|&i| self.swarm.node_endpoint(i) == Some(Endpoint::Dialer) &&
				!self.reserved_peers.contains(&self.swarm.peer_id_of_node(i).unwrap()))
			.count()
	}

	/// Updates the attempted connections to nodes.
	///
	/// Also updates `next_connect_to_nodes` with the earliest known moment when we need to
	/// update connections again.
	fn connect_to_nodes(&mut self) {
		// Make sure we are connected or connecting to all the reserved nodes.
		for reserved in self.reserved_peers.iter() {
			let addrs = self.topology.addrs_of_peer(&reserved);
			for (addr, _) in addrs {
				let _ = self.swarm.ensure_connection(reserved.clone(), addr.clone());
			}
		}

		// Counter of number of connections to open, decreased when we open one.
		let mut num_to_open = self.max_outgoing_connections - self.num_outgoing_connections();

		let (to_try, will_change) = self.topology.addrs_to_attempt();
		for (peer_id, addr) in to_try {
			if num_to_open == 0 {
				break;
			}

			if peer_id == self.kad_system.local_peer_id() {
				continue;
			}

			if self.disabled_peers.contains_key(&peer_id) {
				continue;
			}

			// TODO: it is possible that we are connected to this peer, but the topology
			// doesn't know about that because we don't know its multiaddress yet
			// TODO: after some changes in libp2p, we can avoid this situation and also remove
			// the `num_to_open` variable
			match self.swarm.ensure_connection(peer_id.clone(), addr.clone()) {
				Ok(true) => (),
				Ok(false) => num_to_open -= 1,
				Err(_) => ()
			}
		}

		self.next_connect_to_nodes.reset(will_change);
	}

	/// Starts a random Kademlia query in order to fill the topology.
	///
	/// Queries the node IDs that are closest to a random ID.
	/// Note that the randomness doesn't have to be secure, as this only influences which nodes we
	/// end up being connected to.
	fn perform_kad_random_query(&mut self) {
		let random_key = PublicKey::Ed25519((0 .. 32)
			.map(|_| -> u8 { rand::random() }).collect());
		let random_peer_id = random_key.into_peer_id();
		debug!(target: "sub-libp2p", "Start random Kademlia query for {:?}", random_peer_id);

		let kad_pending_ctrls = self.kad_pending_ctrls.clone();
		let kad_new_ctrl_req_tx = self.kad_new_ctrl_req_tx.clone();
		let stream = self.kad_system
			.find_node(random_peer_id, move |who| {
				let (tx, rx) = oneshot::channel();
				let mut kad_pending_ctrls = kad_pending_ctrls.lock();
				kad_pending_ctrls.entry(who.clone()).or_insert(Vec::new()).push(tx);
				let _ = kad_new_ctrl_req_tx.unbounded_send(who.clone());
				rx.map_err(|_| IoError::new(IoErrorKind::Other, "Couldn't reach peer"))
			});

		self.kad_queries.push(Box::new(stream));
	}

	/// If a remote performs a `FIND_NODE` Kademlia request for `searched`, this function builds
	/// the response to send back.
	fn build_kademlia_response(&self, searched: &PeerId) -> Vec<KadPeer> {
		self.kad_system
			.known_closest_peers(searched)
			.map(|who| {
				if who == *self.kad_system.local_peer_id() {
					KadPeer {
						node_id: who.clone(),
						multiaddrs: self.swarm.external_addresses().cloned().collect(),
						connection_ty: KadConnectionType::Connected,
					}
				} else {
					let mut addrs = self.topology.addrs_of_peer(&who)
						.map(|(a, c)| (a.clone(), c))
						.collect::<Vec<_>>();
					let connected = addrs.iter().any(|&(_, conn)| conn);
					// The Kademlia protocol of libp2p doesn't allow specifying which address is valid
					// and which is outdated, therefore in order to stay honest towards the network
					// we only report the addresses we're connected to if we're connected to any.
					if connected {
						addrs.retain(|&(_, connected)| connected);
					}

					KadPeer {
						node_id: who.clone(),
						multiaddrs: addrs.into_iter().map(|(a, _)| a).collect(),
						connection_ty: if connected {
							KadConnectionType::Connected
						} else {
							KadConnectionType::NotConnected
						},
					}
				}
			})
			// TODO: we really want to remove nodes with no multiaddress from
			// the results, but a flaw in the Kad protocol of libp2p makes it
			// impossible to return empty results ; therefore we must at least
			// return ourselves
			.filter(|p| p.node_id == *self.kad_system.local_peer_id() || !p.multiaddrs.is_empty())
			.take(20)
			.collect::<Vec<_>>()
	}
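
	// The address-retention rule above is easy to misread, so here is a
	// self-contained restatement of just that rule: if at least one known
	// address is currently connected, report only the connected ones;
	// otherwise report everything we know.
	//
	//     let mut addrs = vec![("/ip4/10.0.0.1/tcp/30333", false), ("/ip4/10.0.0.2/tcp/30333", true)];
	//     let connected = addrs.iter().any(|&(_, c)| c);
	//     if connected {
	//         addrs.retain(|&(_, c)| c);
	//     }
	//     assert_eq!(addrs, vec![("/ip4/10.0.0.2/tcp/30333", true)]);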
|
||||
|
||||
/// Adds a list of peers to the network topology.
|
||||
fn add_discovered_peers(&mut self, list: impl IntoIterator<Item = KadPeer>) {
|
||||
for peer in list {
|
||||
let connected = match peer.connection_ty {
|
||||
KadConnectionType::NotConnected => false,
|
||||
KadConnectionType::Connected => true,
|
||||
KadConnectionType::CanConnect => true,
|
||||
KadConnectionType::CannotConnect => continue,
|
||||
};
|
||||
|
||||
self.topology.add_kademlia_discovered_addrs(
|
||||
&peer.node_id,
|
||||
peer.multiaddrs.iter().map(|a| (a.clone(), connected))
|
||||
);
|
||||
}
|
||||
|
||||
// Potentially connect to the newly-discovered nodes.
|
||||
// TODO: only do so if the topology reports that something new has been added
|
||||
self.connect_to_nodes();
|
||||
}
|
||||
|
||||
/// Handles the swarm opening a connection to the given peer.
|
||||
///
|
||||
/// Returns the `NewNode` event to produce.
|
||||
///
|
||||
/// > **Note**: Must be called from inside `poll()`, otherwise it will panic.
|
||||
fn handle_connection(
|
||||
&mut self,
|
||||
node_index: NodeIndex,
|
||||
peer_id: PeerId,
|
||||
endpoint: ConnectedPoint
|
||||
) -> Option<ServiceEvent> {
|
||||
// Reject connections to our own node, which can happen if the DHT contains `127.0.0.1`
|
||||
// for example.
|
||||
if &peer_id == self.kad_system.local_peer_id() {
|
||||
debug!(target: "sub-libp2p", "Rejected connection to/from ourself: {:?}", endpoint);
|
||||
assert_eq!(self.swarm.drop_node(node_index), Ok(Vec::new()));
|
||||
if let ConnectedPoint::Dialer { ref address } = endpoint {
|
||||
self.topology.report_failed_to_connect(address);
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
// Reject non-reserved nodes if we're in reserved mode.
|
||||
let is_reserved = self.reserved_peers.contains(&peer_id);
|
||||
if self.reserved_only && !is_reserved {
|
||||
debug!(target: "sub-libp2p", "Rejected non-reserved peer {:?}", peer_id);
|
||||
assert_eq!(self.swarm.drop_node(node_index), Ok(Vec::new()));
|
||||
if let ConnectedPoint::Dialer { ref address } = endpoint {
|
||||
self.topology.report_failed_to_connect(address);
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
// Reject connections from disabled peers.
|
||||
if let Some(expires) = self.disabled_peers.get(&peer_id) {
|
||||
if expires > &Instant::now() {
|
||||
info!(target: "sub-libp2p", "Rejected connection from disabled peer: {:?}", peer_id);
|
||||
assert_eq!(self.swarm.drop_node(node_index), Ok(Vec::new()));
|
||||
if let ConnectedPoint::Dialer { ref address } = endpoint {
|
||||
self.topology.report_failed_to_connect(address);
|
||||
}
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
match endpoint {
|
||||
ConnectedPoint::Listener { ref listen_addr, ref send_back_addr } => {
|
||||
if is_reserved || self.num_ingoing_connections() < self.max_incoming_connections {
|
||||
debug!(target: "sub-libp2p", "Connected to {:?} through {} on listener {}",
|
||||
peer_id, send_back_addr, listen_addr);
|
||||
} else {
|
||||
info!(target: "sub-libp2p", "Rejected incoming peer {:?} because we are full", peer_id);
|
||||
assert_eq!(self.swarm.drop_node(node_index), Ok(Vec::new()));
|
||||
return None;
|
||||
}
|
||||
},
|
||||
ConnectedPoint::Dialer { ref address } => {
|
||||
if is_reserved || self.num_outgoing_connections() < self.max_outgoing_connections {
|
||||
debug!(target: "sub-libp2p", "Connected to {:?} through {}", peer_id, address);
|
||||
self.topology.report_connected(address, &peer_id);
|
||||
self.nodes_addresses.insert(node_index, address.clone());
|
||||
} else {
|
||||
debug!(target: "sub-libp2p", "Rejected dialed peer {:?} because we are full", peer_id);
|
||||
assert_eq!(self.swarm.drop_node(node_index), Ok(Vec::new()));
|
||||
return None;
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
if let Err(_) = self.swarm.accept_node(node_index) {
|
||||
error!(target: "sub-libp2p", "accept_node returned an error");
|
||||
}
|
||||
|
||||
// If we're waiting for a Kademlia substream for this peer id, open one.
|
||||
let kad_pending_ctrls = self.kad_pending_ctrls.lock();
|
||||
if kad_pending_ctrls.contains_key(&peer_id) {
|
||||
let res = self.swarm.open_kademlia(node_index);
|
||||
debug_assert!(res.is_ok());
|
||||
}
|
||||
drop(kad_pending_ctrls);
|
||||
|
||||
Some(ServiceEvent::NewNode {
|
||||
node_index,
|
||||
peer_id,
|
||||
endpoint
|
||||
})
|
||||
}
|
||||
|
||||
	/// Processes an event received by the swarm.
	///
	/// Optionally returns an event to report back to the outside.
	///
	/// > **Note**: Must be called from inside `poll()`, otherwise it will panic.
	fn process_network_event(
		&mut self,
		event: SwarmEvent
	) -> Option<ServiceEvent> {
		match event {
			SwarmEvent::NodePending { node_index, peer_id, endpoint } =>
				if let Some(event) = self.handle_connection(node_index, peer_id, endpoint) {
					Some(event)
				} else {
					None
				},
			SwarmEvent::Reconnected { node_index, endpoint, closed_custom_protocols } => {
				if let Some(addr) = self.nodes_addresses.remove(&node_index) {
					self.topology.report_disconnected(&addr, DisconnectReason::FoundBetterAddr);
				}
				if let ConnectedPoint::Dialer { address } = endpoint {
					let peer_id = self.swarm.peer_id_of_node(node_index)
						.expect("the swarm always produces events containing valid node indices");
					self.nodes_addresses.insert(node_index, address.clone());
					self.topology.report_connected(&address, peer_id);
				}
				Some(ServiceEvent::ClosedCustomProtocols {
					node_index,
					protocols: closed_custom_protocols,
				})
			},
			SwarmEvent::NodeClosed { node_index, peer_id, closed_custom_protocols } => {
				debug!(target: "sub-libp2p", "Connection to {:?} closed gracefully", peer_id);
				if let Some(addr) = self.nodes_addresses.get(&node_index) {
					self.topology.report_disconnected(addr, DisconnectReason::RemoteClosed);
				}
				self.connect_to_nodes();
				Some(ServiceEvent::NodeClosed {
					node_index,
					closed_custom_protocols,
				})
			},
			SwarmEvent::DialFail { address, error } => {
				debug!(target: "sub-libp2p", "Failed to dial address {}: {:?}", address, error);
				self.topology.report_failed_to_connect(&address);
				self.connect_to_nodes();
				None
			},
			SwarmEvent::UnresponsiveNode { node_index } => {
				let closed_custom_protocols = self.swarm.drop_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				if let Some(addr) = self.nodes_addresses.remove(&node_index) {
					self.topology.report_disconnected(&addr, DisconnectReason::Useless);
				}
				Some(ServiceEvent::NodeClosed {
					node_index,
					closed_custom_protocols,
				})
			},
			SwarmEvent::UselessNode { node_index } => {
				let peer_id = self.swarm.peer_id_of_node(node_index)
					.expect("the swarm always produces events containing valid node indices")
					.clone();
				let closed_custom_protocols = self.swarm.drop_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				self.topology.report_useless(&peer_id);
				if let Some(addr) = self.nodes_addresses.remove(&node_index) {
					self.topology.report_disconnected(&addr, DisconnectReason::Useless);
				}
				Some(ServiceEvent::NodeClosed {
					node_index,
					closed_custom_protocols,
				})
			},
			SwarmEvent::PingDuration(node_index, ping) =>
				Some(ServiceEvent::PingDuration(node_index, ping)),
			SwarmEvent::NodeInfos { node_index, client_version, listen_addrs } => {
				let peer_id = self.swarm.peer_id_of_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				self.topology.add_self_reported_listen_addrs(
					peer_id,
					listen_addrs.into_iter()
				);
				Some(ServiceEvent::NodeInfos {
					node_index,
					client_version,
				})
			},
			SwarmEvent::KadFindNode { searched, responder, .. } => {
				let response = self.build_kademlia_response(&searched);
				responder.respond(response);
				None
			},
			SwarmEvent::KadOpen { node_index, controller } => {
				let peer_id = self.swarm.peer_id_of_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				trace!(target: "sub-libp2p", "Opened Kademlia substream with {:?}", peer_id);
				if let Some(list) = self.kad_pending_ctrls.lock().remove(&peer_id) {
					for tx in list {
						let _ = tx.send(controller.clone());
					}
				}
				None
			},
			SwarmEvent::KadClosed { .. } => {
				None
			},
			SwarmEvent::OpenedCustomProtocol { node_index, protocol, version } => {
				let peer_id = self.swarm.peer_id_of_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				self.kad_system.update_kbuckets(peer_id.clone());
				Some(ServiceEvent::OpenedCustomProtocol {
					node_index,
					protocol,
					version,
				})
			},
			SwarmEvent::ClosedCustomProtocol { node_index, protocol } =>
				Some(ServiceEvent::ClosedCustomProtocol {
					node_index,
					protocol,
				}),
			SwarmEvent::CustomMessage { node_index, protocol_id, packet_id, data } => {
				let peer_id = self.swarm.peer_id_of_node(node_index)
					.expect("the swarm always produces events containing valid node indices");
				self.kad_system.update_kbuckets(peer_id.clone());
				Some(ServiceEvent::CustomMessage {
					node_index,
					protocol_id,
					packet_id,
					data,
				})
			},
		}
	}

	/// Handles a request, coming from a Kademlia query, for a Kademlia controller to the
	/// given peer.
	fn handle_kad_ctrl_request(&mut self, peer_id: PeerId) {
		if let Some(node_index) = self.swarm.latest_node_by_peer_id(&peer_id) {
			if let Err(_) = self.swarm.open_kademlia(node_index) {
				self.kad_pending_ctrls.lock().remove(&peer_id);
			}
		} else {
			let addrs = self.topology.addrs_of_peer(&peer_id);
			let mut one_worked = false;
			for (addr, _) in addrs {
				if let Ok(_) = self.swarm.ensure_connection(peer_id.clone(), addr.clone()) {
					one_worked = true;
				}
			}
			if !one_worked {
				debug!(target: "sub-libp2p", "Couldn't open Kad substream with {:?} \
					because no address is known", peer_id);
				// Closing the senders in order to generate errors on the Kad query.
				self.kad_pending_ctrls.lock().remove(&peer_id);
			}
		}
	}

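The `kad_pending_ctrls` mechanism used above parks one-shot senders under the peer id until a `KadOpen` event delivers the controller; dropping the senders (as on the error paths) makes the parked queries resolve to an error. A standalone sketch of that pattern, assuming futures 0.1 and with placeholder types standing in for `KadConnecController` and the service's map:

extern crate futures;

use futures::sync::oneshot;
use std::collections::HashMap;

type Controller = String; // stands in for KadConnecController
type PendingCtrls = HashMap<String, Vec<oneshot::Sender<Controller>>>;

/// Parks a request; the receiver resolves once the substream opens.
fn request_ctrl(pending: &mut PendingCtrls, peer: String) -> oneshot::Receiver<Controller> {
	let (tx, rx) = oneshot::channel();
	pending.entry(peer).or_insert_with(Vec::new).push(tx);
	rx
}

/// Called when a Kademlia substream opens; wakes every parked query.
fn on_kad_open(pending: &mut PendingCtrls, peer: &str, ctrl: Controller) {
	if let Some(list) = pending.remove(peer) {
		for tx in list {
			let _ = tx.send(ctrl.clone());
		}
	}
}
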
	/// Polls for what happened on the main network side.
	fn poll_swarm(&mut self) -> Poll<Option<ServiceEvent>, IoError> {
		loop {
			match self.swarm.poll() {
				Ok(Async::Ready(Some(event))) =>
					if let Some(event) = self.process_network_event(event) {
						return Ok(Async::Ready(Some(event)));
					}
				Ok(Async::NotReady) => return Ok(Async::NotReady),
				Ok(Async::Ready(None)) => unreachable!("The Swarm stream never ends"),
				// TODO: this `Err` contains a `Void`; remove the variant when Rust allows that
				Err(_) => unreachable!("The Swarm stream never errors"),
			}
		}
	}

	/// Polls the Kademlia system.
	fn poll_kademlia(&mut self) -> Poll<Option<ServiceEvent>, IoError> {
		// Polls the active Kademlia queries.
		// We remove each element from `kad_queries` one by one and add them back if not ready.
		for n in (0 .. self.kad_queries.len()).rev() {
			let mut query = self.kad_queries.swap_remove(n);
			loop {
				match query.poll() {
					Ok(Async::Ready(Some(KadQueryEvent::PeersReported(list)))) =>
						self.add_discovered_peers(list),
					// We don't actually use the final results, but warn if the query
					// came back empty.
					Ok(Async::Ready(Some(KadQueryEvent::Finished(out)))) => {
						if out.is_empty() {
							warn!(target: "sub-libp2p", "Random Kademlia request has yielded \
								empty results");
						}
						break
					},
					Ok(Async::Ready(None)) => break,
					Ok(Async::NotReady) => {
						self.kad_queries.push(query);
						break;
					},
					Err(err) => {
						warn!(target: "sub-libp2p", "Kademlia query failed: {:?}", err);
						break;
					},
				}
			}
		}

		// Poll the future that fires when we need to reply to a Kademlia query.
		loop {
			match self.kad_new_ctrl_req_rx.poll() {
				Ok(Async::NotReady) => break,
				Ok(Async::Ready(Some(peer_id))) => self.handle_kad_ctrl_request(peer_id),
				Ok(Async::Ready(None)) => unreachable!("The tx is in self"),
				Err(()) => unreachable!("An UnboundedReceiver never errors"),
			}
		}

		// Poll the future that fires when we need to perform a random Kademlia query.
		loop {
			match self.next_kad_random_query.poll() {
				Ok(Async::NotReady) => break,
				Ok(Async::Ready(Some(_))) => self.perform_kad_random_query(),
				Ok(Async::Ready(None)) => {
					warn!(target: "sub-libp2p", "Kad query timer closed unexpectedly");
					return Ok(Async::Ready(None));
				}
				Err(err) => {
					warn!(target: "sub-libp2p", "Kad query timer errored: {:?}", err);
					return Err(IoError::new(IoErrorKind::Other, err));
				}
			}
		}

		Ok(Async::NotReady)
	}

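The first loop above uses a `swap_remove`/push pattern so that finished queries are dropped without shifting the rest of the vector. Reduced to its essentials (futures 0.1, illustrative only):

extern crate futures;

use futures::{Async, Future};

/// Polls every future in `queries`, keeping only the ones that are not ready.
/// Iterating in reverse keeps the indices of unvisited elements stable.
fn poll_all<F: Future>(queries: &mut Vec<F>) {
	for n in (0..queries.len()).rev() {
		let mut query = queries.swap_remove(n);
		match query.poll() {
			Ok(Async::NotReady) => queries.push(query), // try again next poll
			Ok(Async::Ready(_)) | Err(_) => {}          // completed; drop it
		}
	}
}
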
	// Polls the future that fires when we need to refresh our connections.
	fn poll_next_connect_refresh(&mut self) -> Poll<Option<ServiceEvent>, IoError> {
		loop {
			match self.next_connect_to_nodes.poll() {
				Ok(Async::Ready(())) => self.connect_to_nodes(),
				Ok(Async::NotReady) => return Ok(Async::NotReady),
				Err(err) => {
					warn!(target: "sub-libp2p", "Connect to nodes timer errored: {:?}", err);
					return Err(IoError::new(IoErrorKind::Other, err));
				}
			}
		}
	}

	/// Polls the stream that fires when we need to clean up and flush the topology.
	fn poll_cleanup(&mut self) -> Poll<Option<ServiceEvent>, IoError> {
		loop {
			match self.cleanup.poll() {
				Ok(Async::NotReady) => return Ok(Async::NotReady),
				Ok(Async::Ready(Some(_))) => {
					debug!(target: "sub-libp2p", "Cleaning and flushing topology");
					self.topology.cleanup();
					if let Err(err) = self.topology.flush_to_disk() {
						warn!(target: "sub-libp2p", "Failed to flush topology: {:?}", err);
					}
					// Drop the bans that have expired.
					let now = Instant::now();
					self.disabled_peers.retain(move |_, v| *v >= now);
					debug!(target: "sub-libp2p", "Topology now contains {} nodes",
						self.topology.num_peers());
				},
				Ok(Async::Ready(None)) => {
					warn!(target: "sub-libp2p", "Topology flush stream ended unexpectedly");
					return Ok(Async::Ready(None));
				}
				Err(err) => {
					warn!(target: "sub-libp2p", "Topology flush stream errored: {:?}", err);
					return Err(IoError::new(IoErrorKind::Other, err));
				}
			}
		}
	}
}

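The cleanup pass relies on `HashMap::retain` to expire entries in place. A self-contained illustration of that idiom (not the crate's code; the key type is arbitrary):

use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Drops every ban whose deadline has passed; entries map a peer to the
/// instant until which it stays disabled.
fn purge_expired(disabled: &mut HashMap<u64, Instant>) {
	let now = Instant::now();
	disabled.retain(|_, until| *until >= now);
}

fn main() {
	let mut disabled = HashMap::new();
	disabled.insert(1u64, Instant::now() + Duration::from_secs(300));
	purge_expired(&mut disabled);
	assert_eq!(disabled.len(), 1); // ban still active, entry kept
}
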
impl Drop for Service {
	fn drop(&mut self) {
		if let Err(err) = self.topology.flush_to_disk() {
			warn!(target: "sub-libp2p", "Failed to flush topology: {:?}", err);
		}
	}
}

impl Stream for Service {
	type Item = ServiceEvent;
	type Error = IoError;

	fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
		if !self.injected_events.is_empty() {
			return Ok(Async::Ready(Some(self.injected_events.remove(0))));
		}

		match self.poll_swarm()? {
			Async::Ready(value) => return Ok(Async::Ready(value)),
			Async::NotReady => (),
		}

		match self.poll_kademlia()? {
			Async::Ready(value) => return Ok(Async::Ready(value)),
			Async::NotReady => (),
		}

		match self.poll_next_connect_refresh()? {
			Async::Ready(value) => return Ok(Async::Ready(value)),
			Async::NotReady => (),
		}

		match self.poll_cleanup()? {
			Async::Ready(value) => return Ok(Async::Ready(value)),
			Async::NotReady => (),
		}

		// The only way we reach this is if we went through all the `NotReady` paths above,
		// ensuring the current task is registered everywhere.
		self.to_notify = Some(task::current());
		Ok(Async::NotReady)
	}
}

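Since `Service` is a plain futures 0.1 `Stream`, driving the networking amounts to spawning a task that polls it to completion. A minimal sketch, assuming an already-constructed `service` and that `Service` satisfies tokio's `Send + 'static` bounds (otherwise `tokio::runtime::current_thread` would be the fallback):

extern crate futures;
extern crate tokio;

use futures::{Future, Stream};

fn run(service: Service) {
	let driver = service
		.for_each(|_event| {
			// Dispatch each ServiceEvent to the upper layers here.
			Ok(())
		})
		.map_err(|err| eprintln!("network error: {:?}", err));
	tokio::run(driver);
}
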
@@ -0,0 +1,726 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use bytes::Bytes;
use custom_proto::RegisteredProtocols;
use fnv::FnvHashMap;
use futures::{prelude::*, Stream};
use libp2p::{Multiaddr, multiaddr::Protocol, PeerId};
use libp2p::core::{muxing, Endpoint, PublicKey};
use libp2p::core::nodes::node::Substream;
use libp2p::core::nodes::swarm::{ConnectedPoint, Swarm as Libp2pSwarm, HandlerFactory};
use libp2p::core::nodes::swarm::{SwarmEvent as Libp2pSwarmEvent, Peer as SwarmPeer};
use libp2p::core::transport::boxed::Boxed;
use libp2p::kad::{KadConnecController, KadFindNodeRespond};
use libp2p::secio;
use node_handler::{SubstrateOutEvent, SubstrateNodeHandler, SubstrateInEvent, IdentificationRequest};
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::{mem, sync::Arc, time::Duration};
use transport;
use {Error, NodeIndex, PacketId, ProtocolId};

/// Starts a swarm.
///
/// Returns a stream that must be polled regularly in order for the networking to function.
pub fn start_swarm<TUserData>(
	registered_custom: Arc<RegisteredProtocols<TUserData>>,
	local_private_key: secio::SecioKeyPair,
) -> Result<Swarm<TUserData>, Error>
	where TUserData: Send + Sync + Clone + 'static {
	// Private and public keys.
	let local_public_key = local_private_key.to_public_key();
	let local_peer_id = local_public_key.clone().into_peer_id();

	// Build the transport layer. This is what allows us to listen or to reach nodes.
	let transport = transport::build_transport(local_private_key);

	// Build the underlying libp2p swarm.
	let swarm = Libp2pSwarm::with_handler_builder(transport, HandlerBuilder(registered_custom));

	Ok(Swarm {
		swarm,
		local_public_key,
		local_peer_id,
		listening_addrs: Vec::new(),
		node_by_peer: Default::default(),
		nodes_info: Default::default(),
		next_node_index: 0,
	})
}

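A hypothetical call site for `start_swarm`: generate an ephemeral identity, register no custom protocols, and start listening. `ed25519_generated` and the multiaddr syntax follow the libp2p APIs of this era; error handling is deliberately coarse and the address is illustrative.

extern crate libp2p;

use libp2p::secio;
use std::sync::Arc;

fn example() {
	let key = secio::SecioKeyPair::ed25519_generated()
		.expect("key generation only fails if the RNG is broken");
	let registered = Arc::new(RegisteredProtocols::<()>::default());
	let mut swarm = start_swarm(registered, key)
		.expect("start_swarm failed");
	let addr = "/ip4/0.0.0.0/tcp/30333".parse()
		.expect("hard-coded multiaddr is valid");
	match swarm.listen_on(addr) {
		Ok(actual) => println!("listening on {}", actual),
		Err(addr) => println!("cannot listen on {}", addr),
	}
}
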
/// Dummy structure that exists because we need to be able to express the type. Otherwise we
/// would use a closure.
#[derive(Clone)]
struct HandlerBuilder<TUserData>(Arc<RegisteredProtocols<TUserData>>);
impl<TUserData> HandlerFactory for HandlerBuilder<TUserData>
	where TUserData: Clone + Send + Sync + 'static
{
	type Handler = SubstrateNodeHandler<Substream<Muxer>, TUserData>;

	#[inline]
	fn new_handler(&self, addr: ConnectedPoint) -> Self::Handler {
		SubstrateNodeHandler::new(self.0.clone(), addr)
	}
}

/// Event produced by the swarm.
pub enum SwarmEvent {
	/// We have successfully connected to a node.
	///
	/// The node is in pending state, and should be accepted by calling `accept_node(node_index)`
	/// or denied by calling `drop_node(node_index)`.
	NodePending {
		/// Index of the node.
		node_index: NodeIndex,
		/// Public key of the node as a peer id.
		peer_id: PeerId,
		/// Whether we dialed the node or if it came to us.
		endpoint: ConnectedPoint,
	},

	/// The connection to a peer has changed.
	Reconnected {
		/// Index of the node.
		node_index: NodeIndex,
		/// The new endpoint.
		endpoint: ConnectedPoint,
		/// List of custom protocols that were closed in the process.
		closed_custom_protocols: Vec<ProtocolId>,
	},

	/// Closed connection to a node, either gracefully or because of an error.
	///
	/// It is guaranteed that this node has been opened with a `NewNode` event beforehand.
	/// However, not all `ClosedCustomProtocol` events may have been dispatched.
	NodeClosed {
		/// Index of the node.
		node_index: NodeIndex,
		/// Peer id we were connected to.
		peer_id: PeerId,
		/// List of custom protocols that were still open.
		closed_custom_protocols: Vec<ProtocolId>,
	},

	/// Failed to dial an address.
	DialFail {
		/// Address that failed.
		address: Multiaddr,
		/// Reason why we failed.
		error: IoError,
	},

	/// Report the duration of the ping for the given node.
	PingDuration(NodeIndex, Duration),

	/// Report information about the node.
	NodeInfos {
		/// Index of the node.
		node_index: NodeIndex,
		/// The client version. Note that it can be anything and should not be trusted.
		client_version: String,
		/// Multiaddresses the node is listening on.
		listen_addrs: Vec<Multiaddr>,
	},

	/// A custom protocol substream has been opened with a node.
	OpenedCustomProtocol {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol that has been opened.
		protocol: ProtocolId,
		/// Version of the protocol that was opened.
		version: u8,
	},

	/// A custom protocol substream has been closed.
	ClosedCustomProtocol {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol that has been closed.
		protocol: ProtocolId,
	},

	/// Receives a message on a custom protocol stream.
	CustomMessage {
		/// Index of the node.
		node_index: NodeIndex,
		/// Protocol which generated the message.
		protocol_id: ProtocolId,
		/// Identifier of the packet.
		packet_id: u8,
		/// Data that has been received.
		data: Bytes,
	},

	/// The node has been determined to be unresponsive.
	UnresponsiveNode {
		/// Index of the node.
		node_index: NodeIndex,
	},

	/// The node works but we can't do anything useful with it.
	UselessNode {
		/// Index of the node.
		node_index: NodeIndex,
	},

	/// Opened a Kademlia substream with the node.
	// TODO: the controller API is bad, but we need to make changes in libp2p to improve that
	KadOpen {
		/// Index of the node.
		node_index: NodeIndex,
		/// The Kademlia controller. Allows making queries.
		controller: KadConnecController,
	},

	/// The remote wants us to answer a Kademlia `FIND_NODE` request.
	///
	/// The `responder` should be used to answer that query.
	// TODO: this API with the "responder" is bad, but changing it requires modifications in libp2p
	KadFindNode {
		/// Index of the node that wants an answer.
		node_index: NodeIndex,
		/// The value being searched.
		searched: PeerId,
		/// Object to use to respond to the request.
		responder: KadFindNodeRespond,
	},

	/// A Kademlia substream has been closed.
	KadClosed {
		/// Index of the node.
		node_index: NodeIndex,
		/// Reason why it has been closed. `Ok` means that it's been closed gracefully.
		result: Result<(), IoError>,
	},
}

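A consumer of this enum typically matches on the variants it cares about and ignores the rest. An illustrative sketch (only a few arms shown; assumes `NodeIndex` is a small copyable index type, as elsewhere in the crate):

fn log_event(event: &SwarmEvent) {
	match *event {
		SwarmEvent::NodePending { node_index, ref peer_id, .. } =>
			println!("node #{} pending ({:?})", node_index, peer_id),
		SwarmEvent::PingDuration(node_index, ping) =>
			println!("node #{} ping: {:?}", node_index, ping),
		SwarmEvent::NodeClosed { node_index, .. } =>
			println!("node #{} closed", node_index),
		_ => {} // remaining variants ignored here
	}
}
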
/// Network swarm. Must be polled regularly in order for the networking to work.
pub struct Swarm<TUserData> {
	/// Stream of events of the swarm.
	swarm: Libp2pSwarm<
		Boxed<(PeerId, Muxer)>,
		SubstrateInEvent,
		SubstrateOutEvent<Substream<Muxer>>,
		HandlerBuilder<TUserData>
	>,

	/// Public key of the local node.
	local_public_key: PublicKey,

	/// Peer id of the local node.
	local_peer_id: PeerId,

	/// Addresses we know we're listening on. Only includes NAT traversed addresses.
	listening_addrs: Vec<Multiaddr>,

	/// For each peer id, the corresponding node index.
	node_by_peer: FnvHashMap<PeerId, NodeIndex>,

	/// Information about all the nodes. Must be maintained consistent with `node_by_peer`.
	nodes_info: FnvHashMap<NodeIndex, NodeInfo>,

	/// Next key to use when we insert a new entry in `nodes_info`.
	next_node_index: NodeIndex,
}

/// Local information about a peer.
struct NodeInfo {
	/// The peer id. Must be maintained consistent with the rest of the state.
	peer_id: PeerId,

	/// Whether we opened the connection or the remote opened it.
	endpoint: Endpoint,

	/// List of custom protocol substreams that are open.
	open_protocols: Vec<ProtocolId>,
}

/// The muxer used by the transport.
type Muxer = muxing::StreamMuxerBox;

impl<TUserData> Swarm<TUserData>
	where TUserData: Clone + Send + Sync + 'static {
	/// Start listening on a multiaddr.
	#[inline]
	pub fn listen_on(&mut self, addr: Multiaddr) -> Result<Multiaddr, Multiaddr> {
		match self.swarm.listen_on(addr) {
			Ok(mut addr) => {
				addr.append(Protocol::P2p(self.local_peer_id.clone().into()));
				info!(target: "sub-libp2p", "Local node address is: {}", addr);
				Ok(addr)
			},
			Err(addr) => Err(addr)
		}
	}

	/// Returns an iterator that produces the list of addresses we're listening on.
	#[inline]
	pub fn listeners(&self) -> impl Iterator<Item = &Multiaddr> {
		self.swarm.listeners()
	}

	/// Adds an external address. Sent to other nodes when they query it.
	#[inline]
	pub fn add_external_address(&mut self, addr: Multiaddr) {
		self.listening_addrs.push(addr);
	}

	/// Returns an iterator to our known external addresses.
	#[inline]
	pub fn external_addresses(&self) -> impl Iterator<Item = &Multiaddr> {
		self.listening_addrs.iter()
	}

	/// Returns all the nodes that are currently active.
	#[inline]
	pub fn nodes<'a>(&'a self) -> impl Iterator<Item = NodeIndex> + 'a {
		self.nodes_info.keys().cloned()
	}

	/// Returns the latest node connected to this peer ID.
	#[inline]
	pub fn latest_node_by_peer_id(&self, peer_id: &PeerId) -> Option<NodeIndex> {
		self.node_by_peer.get(peer_id).map(|&i| i)
	}

	/// Endpoint of the node.
	///
	/// Returns `None` if the index is invalid.
	#[inline]
	pub fn node_endpoint(&self, node_index: NodeIndex) -> Option<Endpoint> {
		self.nodes_info.get(&node_index).map(|i| i.endpoint)
	}

	/// Sends a message to a peer using the custom protocol.
	// TODO: report invalid node index or protocol?
	pub fn send_custom_message(
		&mut self,
		node_index: NodeIndex,
		protocol: ProtocolId,
		packet_id: PacketId,
		data: Vec<u8>
	) {
		if let Some(info) = self.nodes_info.get_mut(&node_index) {
			if let Some(mut connected) = self.swarm.peer(info.peer_id.clone()).as_connected() {
				connected.send_event(SubstrateInEvent::SendCustomMessage { protocol, packet_id, data });
			} else {
				error!(target: "sub-libp2p", "Tried to send message to {:?}, but we're not \
					connected to it", info.peer_id);
			}
		} else {
			error!(target: "sub-libp2p", "Tried to send message to invalid node index {:?}",
				node_index);
		}
	}

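A hypothetical call site for `send_custom_message`. The protocol id, packet id and payload are made up; the sketch assumes `ProtocolId` is the short fixed-size byte identifier used elsewhere in the crate:

// Illustrative only; `*b"dot"` assumes a 3-byte ProtocolId alias.
fn send_status<T: Clone + Send + Sync + 'static>(swarm: &mut Swarm<T>, node: NodeIndex) {
	let proto: ProtocolId = *b"dot";
	swarm.send_custom_message(node, proto, 0, b"status".to_vec());
}
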
	/// Returns the peer id of a node we're connected to.
	#[inline]
	pub fn peer_id_of_node(&self, node_index: NodeIndex) -> Option<&PeerId> {
		self.nodes_info.get(&node_index).map(|i| &i.peer_id)
	}

	/// If we're not already dialing the given peer, starts dialing it and returns false.
	/// If we're already dialing, adds the address to the queue of addresses to try (if not
	/// already there) and returns false.
	/// If we're already connected, does nothing and returns true.
	///
	/// Returns an error if the address is not supported.
	pub fn ensure_connection(&mut self, peer_id: PeerId, addr: Multiaddr) -> Result<bool, ()> {
		match self.swarm.peer(peer_id.clone()) {
			SwarmPeer::Connected(_) => Ok(true),
			SwarmPeer::PendingConnect(mut peer) => {
				peer.append_multiaddr_attempt(addr);
				Ok(false)
			},
			SwarmPeer::NotConnected(peer) => {
				trace!(target: "sub-libp2p", "Starting to connect to {:?} through {}",
					peer_id, addr);
				match peer.connect(addr) {
					Ok(_) => Ok(false),
					Err(_) => Err(()),
				}
			},
		}
	}

	/// Starts dialing an address, not knowing which peer ID to expect.
	#[inline]
	pub fn dial(&mut self, addr: Multiaddr) -> Result<(), Multiaddr> {
		self.swarm.dial(addr)
	}

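Taken together, the return values of `ensure_connection` let a caller walk a list of candidate addresses until one of them is being tried, as in this sketch (names and the early-exit policy are illustrative):

/// Queues every known address of `peer` for dialing; stops early if the
/// swarm reports an existing connection.
fn connect_any<T: Clone + Send + Sync + 'static>(
	swarm: &mut Swarm<T>,
	peer: PeerId,
	candidates: Vec<Multiaddr>,
) -> bool {
	for addr in candidates {
		match swarm.ensure_connection(peer.clone(), addr) {
			Ok(true) => return true, // already connected
			Ok(false) => {}          // dial in progress; queue the next address too
			Err(()) => {}            // unsupported multiaddr; skip it
		}
	}
	false
}
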
	/// After receiving a `NodePending` event, you should call either `accept_node` or `drop_node`
	/// with the specified index.
	///
	/// Returns an error if the node index is invalid, or if it was already accepted.
	pub fn accept_node(&mut self, node_index: NodeIndex) -> Result<(), ()> {
		// TODO: detect if already accepted?
		let peer_id = match self.nodes_info.get(&node_index) {
			Some(info) => &info.peer_id,
			None => return Err(())
		};

		match self.swarm.peer(peer_id.clone()) {
			SwarmPeer::Connected(mut peer) => {
				peer.send_event(SubstrateInEvent::Accept);
				Ok(())
			},
			SwarmPeer::PendingConnect(_) | SwarmPeer::NotConnected(_) => {
				error!(target: "sub-libp2p", "State inconsistency detected in accept_node; \
					nodes_info is not in sync with the underlying swarm");
				Err(())
			},
		}
	}

	/// Disconnects a peer.
	///
	/// If the peer is connected, this disconnects it.
	/// If the peer hasn't been accepted yet, this immediately drops it.
	///
	/// Returns the list of custom protocol substreams that were opened.
	#[inline]
	pub fn drop_node(&mut self, node_index: NodeIndex) -> Result<Vec<ProtocolId>, ()> {
		let info = match self.nodes_info.remove(&node_index) {
			Some(i) => i,
			None => {
				error!(target: "sub-libp2p", "Trying to close non-existing node #{}", node_index);
				return Err(());
			},
		};

		let idx_in_hashmap = self.node_by_peer.remove(&info.peer_id);
		debug_assert_eq!(idx_in_hashmap, Some(node_index));

		if let Some(connected) = self.swarm.peer(info.peer_id.clone()).as_connected() {
			connected.close();
		} else {
			error!(target: "sub-libp2p", "State inconsistency: node_by_peer and nodes_info are \
				not in sync with the underlying swarm");
		}

		Ok(info.open_protocols)
	}

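The `NodePending` contract spelled out above boils down to exactly one of `accept_node` or `drop_node` per pending index. A sketch of the decision point; `keep` would come from slot accounting or a reserved-peer check in real code:

/// Illustrative handler for a `NodePending` event.
fn on_pending<T: Clone + Send + Sync + 'static>(
	swarm: &mut Swarm<T>,
	node_index: NodeIndex,
	keep: bool,
) {
	if keep {
		if swarm.accept_node(node_index).is_err() {
			// invalid index, or the node was already accepted
		}
	} else {
		// Dropping returns whichever custom protocols were already open.
		let _closed = swarm.drop_node(node_index).unwrap_or_default();
	}
}
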
	/// Opens a Kademlia substream with the given node. A `KadOpen` event will later be produced
	/// for the given node.
	///
	/// If a Kademlia substream is already open, also produces a `KadOpen` event.
	///
	/// Returns an error if the node index is invalid.
	pub fn open_kademlia(&mut self, node_index: NodeIndex) -> Result<(), ()> {
		if let Some(info) = self.nodes_info.get_mut(&node_index) {
			if let Some(mut connected) = self.swarm.peer(info.peer_id.clone()).as_connected() {
				connected.send_event(SubstrateInEvent::OpenKademlia);
				Ok(())
			} else {
				error!(target: "sub-libp2p", "Tried to open Kademlia with {:?}, but we're not \
					connected to it", info.peer_id);
				Err(())
			}
		} else {
			error!(target: "sub-libp2p", "Tried to open Kademlia with invalid node index {:?}",
				node_index);
			Err(())
		}
	}

	/// Adds an address the given peer observes us as.
	fn add_observed_addr(&mut self, peer_id: &PeerId, observed_addr: &Multiaddr) {
		for mut addr in self.swarm.nat_traversal(observed_addr) {
			// Ignore addresses we already know about.
			if self.listening_addrs.iter().any(|a| a == &addr) {
				continue;
			}

			debug!(target: "sub-libp2p",
				"NAT traversal: {:?} observes us as {}; registering {} as one of our own addresses",
				peer_id,
				observed_addr,
				addr
			);

			self.listening_addrs.push(addr.clone());
			addr.append(Protocol::P2p(self.local_peer_id.clone().into()));
			info!(target: "sub-libp2p", "New external node address: {}", addr);
		}
	}

	/// Responds to a request from the remote to send back identification information.
	fn respond_to_identify_request(
		&mut self,
		requester: &PeerId,
		responder: IdentificationRequest<Substream<Muxer>>
	) {
		let peer = match self.swarm.peer(requester.clone()).as_connected() {
			Some(p) => p,
			None => {
				debug!(target: "sub-libp2p", "Ignoring identify request from {:?} because we are \
					disconnected", requester);
				return;
			}
		};

		let observed_addr = match peer.endpoint() {
			&ConnectedPoint::Dialer { ref address } => address,
			&ConnectedPoint::Listener { ref send_back_addr, .. } => send_back_addr,
		};

		trace!(target: "sub-libp2p", "Responding to identify request from {:?}", requester);
		responder.respond(
			self.local_public_key.clone(),
			self.listening_addrs.clone(),
			&observed_addr,
		);
	}

	/// Processes an event received by the swarm.
	///
	/// Optionally returns an event to report back to the outside.
	///
	/// > **Note**: Must be called from inside `poll()`, otherwise it will panic. This method
	/// > shouldn't be made public because of this requirement.
	fn process_network_event(
		&mut self,
		event: Libp2pSwarmEvent<Boxed<(PeerId, Muxer)>, SubstrateOutEvent<Substream<Muxer>>>
	) -> Option<SwarmEvent> {
		match event {
			Libp2pSwarmEvent::Connected { peer_id, endpoint } => {
				let node_index = self.next_node_index;
				self.next_node_index += 1;
				self.node_by_peer.insert(peer_id.clone(), node_index);
				self.nodes_info.insert(node_index, NodeInfo {
					peer_id: peer_id.clone(),
					endpoint: match endpoint {
						ConnectedPoint::Listener { .. } => Endpoint::Listener,
						ConnectedPoint::Dialer { .. } => Endpoint::Dialer,
					},
					open_protocols: Vec::new(),
				});

				return Some(SwarmEvent::NodePending {
					node_index,
					peer_id,
					endpoint
				});
			}
			Libp2pSwarmEvent::Replaced { peer_id, endpoint, .. } => {
				let node_index = *self.node_by_peer.get(&peer_id)
					.expect("node_by_peer is always kept in sync with the inner swarm");
				let infos = self.nodes_info.get_mut(&node_index)
					.expect("nodes_info is always kept in sync with the swarm");
				debug_assert_eq!(infos.peer_id, peer_id);
				infos.endpoint = match endpoint {
					ConnectedPoint::Listener { .. } => Endpoint::Listener,
					ConnectedPoint::Dialer { .. } => Endpoint::Dialer,
				};
				let closed_custom_protocols = mem::replace(&mut infos.open_protocols, Vec::new());

				return Some(SwarmEvent::Reconnected {
					node_index,
					endpoint,
					closed_custom_protocols,
				});
			},
			Libp2pSwarmEvent::NodeClosed { peer_id, .. } => {
				debug!(target: "sub-libp2p", "Connection to {:?} closed gracefully", peer_id);
				let node_index = self.node_by_peer.remove(&peer_id)
					.expect("node_by_peer is always kept in sync with the inner swarm");
				let infos = self.nodes_info.remove(&node_index)
					.expect("nodes_info is always kept in sync with the inner swarm");
				debug_assert_eq!(infos.peer_id, peer_id);
				return Some(SwarmEvent::NodeClosed {
					node_index,
					peer_id,
					closed_custom_protocols: infos.open_protocols,
				});
			},
			Libp2pSwarmEvent::NodeError { peer_id, error, .. } => {
				debug!(target: "sub-libp2p", "Closing {:?} because of error: {:?}", peer_id, error);
				let node_index = self.node_by_peer.remove(&peer_id)
					.expect("node_by_peer is always kept in sync with the inner swarm");
				let infos = self.nodes_info.remove(&node_index)
					.expect("nodes_info is always kept in sync with the inner swarm");
				debug_assert_eq!(infos.peer_id, peer_id);
				return Some(SwarmEvent::NodeClosed {
					node_index,
					peer_id,
					closed_custom_protocols: infos.open_protocols,
				});
			},
			Libp2pSwarmEvent::DialError { multiaddr, error, .. } =>
				return Some(SwarmEvent::DialFail {
					address: multiaddr,
					error,
				}),
			Libp2pSwarmEvent::UnknownPeerDialError { multiaddr, error } =>
				return Some(SwarmEvent::DialFail {
					address: multiaddr,
					error,
				}),
			Libp2pSwarmEvent::PublicKeyMismatch {
				actual_peer_id,
				multiaddr,
				expected_peer_id,
				..
			} => {
				debug!(target: "sub-libp2p", "When dialing {:?} through {}, public key mismatch, \
					actual = {:?}", expected_peer_id, multiaddr, actual_peer_id);
				return Some(SwarmEvent::DialFail {
					address: multiaddr,
					error: IoError::new(IoErrorKind::Other, "Public key mismatch"),
				});
			},
			Libp2pSwarmEvent::ListenerClosed { listen_addr, result, .. } => {
				warn!(target: "sub-libp2p", "Listener closed for {}: {:?}", listen_addr, result);
				if self.swarm.listeners().count() == 0 {
					warn!(target: "sub-libp2p", "No listener left");
				}
			},
			Libp2pSwarmEvent::NodeEvent { peer_id, event } =>
				if let Some(event) = self.handle_node_event(peer_id, event) {
					return Some(event);
				},
			Libp2pSwarmEvent::IncomingConnection { listen_addr, send_back_addr } =>
				trace!(target: "sub-libp2p", "Incoming connection with {} on listener {}",
					send_back_addr, listen_addr),
			Libp2pSwarmEvent::IncomingConnectionError { listen_addr, send_back_addr, error } =>
				trace!(target: "sub-libp2p", "Incoming connection with {} on listener {} \
					errored: {:?}", send_back_addr, listen_addr, error),
		}

		None
	}

	/// Processes an event obtained by a node in the swarm.
	///
	/// Optionally returns an event that the service must emit.
	///
	/// > **Note**: The event **must** have been produced by the swarm, otherwise state
	/// > inconsistencies will likely happen.
	fn handle_node_event(
		&mut self,
		peer_id: PeerId,
		event: SubstrateOutEvent<Substream<Muxer>>
	) -> Option<SwarmEvent> {
		// Obtain the index of the node that produced the event; the swarm only emits
		// events for nodes it knows about.
		let node_index = *self.node_by_peer.get(&peer_id)
			.expect("node_by_peer is always kept in sync with the underlying swarm");

		match event {
			SubstrateOutEvent::Unresponsive => {
				debug!(target: "sub-libp2p", "Node {:?} is unresponsive", peer_id);
				Some(SwarmEvent::UnresponsiveNode { node_index })
			},
			SubstrateOutEvent::Useless => {
				debug!(target: "sub-libp2p", "Node {:?} is useless", peer_id);
				Some(SwarmEvent::UselessNode { node_index })
			},
			SubstrateOutEvent::PingStart => {
				trace!(target: "sub-libp2p", "Pinging {:?}", peer_id);
				None
			},
			SubstrateOutEvent::PingSuccess(ping) => {
				trace!(target: "sub-libp2p", "Pong from {:?} in {:?}", peer_id, ping);
				Some(SwarmEvent::PingDuration(node_index, ping))
			},
			SubstrateOutEvent::Identified { info, observed_addr } => {
				self.add_observed_addr(&peer_id, &observed_addr);
				trace!(target: "sub-libp2p", "Client version of {:?}: {:?}", peer_id, info.agent_version);
				if !info.agent_version.contains("substrate") {
					info!(target: "sub-libp2p", "Connected to non-substrate node {:?}: {}",
						peer_id, info.agent_version);
				}

				Some(SwarmEvent::NodeInfos {
					node_index,
					client_version: info.agent_version,
					listen_addrs: info.listen_addrs,
				})
			},
			SubstrateOutEvent::IdentificationRequest(request) => {
				self.respond_to_identify_request(&peer_id, request);
				None
			},
			SubstrateOutEvent::KadFindNode { searched, responder } => {
				Some(SwarmEvent::KadFindNode { node_index, searched, responder })
			},
			SubstrateOutEvent::KadOpen(ctrl) => {
				trace!(target: "sub-libp2p", "Opened Kademlia substream with {:?}", peer_id);
				Some(SwarmEvent::KadOpen { node_index, controller: ctrl })
			},
			SubstrateOutEvent::KadClosed(result) => {
				trace!(target: "sub-libp2p", "Closed Kademlia substream with {:?}: {:?}", peer_id, result);
				Some(SwarmEvent::KadClosed { node_index, result })
			},
			SubstrateOutEvent::CustomProtocolOpen { protocol_id, version } => {
				trace!(target: "sub-libp2p", "Opened custom protocol with {:?}", peer_id);
				self.nodes_info.get_mut(&node_index)
					.expect("nodes_info is kept in sync with the underlying swarm")
					.open_protocols.push(protocol_id);
				Some(SwarmEvent::OpenedCustomProtocol {
					node_index,
					protocol: protocol_id,
					version,
				})
			},
			SubstrateOutEvent::CustomProtocolClosed { protocol_id, result } => {
				trace!(target: "sub-libp2p", "Closed custom protocol with {:?}: {:?}", peer_id, result);
				self.nodes_info.get_mut(&node_index)
					.expect("nodes_info is kept in sync with the underlying swarm")
					.open_protocols.retain(|p| p != &protocol_id);
				Some(SwarmEvent::ClosedCustomProtocol {
					node_index,
					protocol: protocol_id,
				})
			},
			SubstrateOutEvent::CustomMessage { protocol_id, packet_id, data } => {
				Some(SwarmEvent::CustomMessage {
					node_index,
					protocol_id,
					packet_id,
					data,
				})
			},
			SubstrateOutEvent::SubstreamUpgradeFail(err) => {
				debug!(target: "sub-libp2p", "Error while negotiating final protocol \
					with {:?}: {:?}", peer_id, err);
				None
			},
		}
	}
}

impl<TUserData> Stream for Swarm<TUserData>
	where TUserData: Clone + Send + Sync + 'static {
	type Item = SwarmEvent;
	type Error = IoError;

	fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
		loop {
			match self.swarm.poll() {
				Async::Ready(Some(event)) =>
					if let Some(event) = self.process_network_event(event) {
						return Ok(Async::Ready(Some(event)));
					}
				Async::NotReady => return Ok(Async::NotReady),
				Async::Ready(None) => unreachable!("The Swarm stream never ends"),
			}
		}
	}
}

@@ -25,15 +25,18 @@ use std::time::{Duration, Instant, SystemTime};

/// For each address we're connected to, a period of this duration increases the score by 1.
const CONNEC_DURATION_PER_SCORE: Duration = Duration::from_secs(10);
/// Maximum number of addresses for a given peer. If there are more than this number of addresses,
/// the ones with a lower score will be removed.
const MAX_ADDRESSES_PER_PEER: usize = 10;
/// Maximum value for the score.
const MAX_SCORE: u32 = 100;
/// When we successfully connect to a node, raises its score to the given minimum value.
const CONNECTED_MINIMUM_SCORE: u32 = 20;
/// Initial score that a node discovered through Kademlia receives, where we have a hint that the
/// node is reachable.
const KADEMLIA_DISCOVERY_INITIAL_SCORE_CONNECTABLE: u32 = 15;
const DISCOVERY_INITIAL_SCORE_CONNECTABLE: u32 = 15;
/// Initial score that a node discovered through Kademlia receives, without any hint.
const KADEMLIA_DISCOVERY_INITIAL_SCORE: u32 = 10;
const DISCOVERY_INITIAL_SCORE: u32 = 10;
/// Score adjustment when we fail to connect to an address.
const SCORE_DIFF_ON_FAILED_TO_CONNECT: i32 = -1;
/// Default time-to-live for addresses discovered through Kademlia.
@@ -43,6 +46,8 @@ const KADEMLIA_DISCOVERY_EXPIRATION: Duration = Duration::from_secs(2 * 3600);
const EXPIRATION_PUSH_BACK_CONNEC: Duration = Duration::from_secs(2 * 3600);
/// Initial score that a bootstrap node receives when registered.
const BOOTSTRAP_NODE_SCORE: u32 = 100;
/// Score modifier to apply on a peer that has been determined to be useless.
const USELESS_PEER_SCORE_CHANGE: i32 = -9;
/// Time to live of a bootstrap node. This only applies if you start the node later *without*
/// that bootstrap node configured anymore.
const BOOTSTRAP_NODE_EXPIRATION: Duration = Duration::from_secs(24 * 3600);
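Read together, these constants imply a simple scoring rule: every `CONNEC_DURATION_PER_SCORE` of uninterrupted connection adds one point, saturating at `MAX_SCORE`. A standalone sketch of that rule, not the crate's actual implementation:

use std::time::Instant;

/// Score of an address connected since `since`, starting from `base`
/// (e.g. CONNECTED_MINIMUM_SCORE): one point per 10 s of connection
/// (CONNEC_DURATION_PER_SCORE), capped at 100 (MAX_SCORE).
fn connection_score(since: Instant, base: u32) -> u32 {
	let bonus = (since.elapsed().as_secs() / 10) as u32;
	base.saturating_add(bonus).min(100)
}
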
@@ -110,6 +115,12 @@ impl NetTopology {
		serialize(BufWriter::with_capacity(1024 * 1024, file), &self.store)
	}

	/// Returns the number of peers in the topology.
	#[inline]
	pub fn num_peers(&self) -> usize {
		self.store.len()
	}

	/// Perform a cleanup pass, removing all obsolete addresses and peers.
	///
	/// This should be done from time to time.
@@ -123,10 +134,10 @@ impl NetTopology {
		});
	}

	/// Returns the known potential addresses of a peer, ordered by score.
	/// Returns the known potential addresses of a peer, ordered by score. Excludes backed-off
	/// addresses.
	///
	/// The boolean associated to each address indicates whether we're connected to it.
	// TODO: filter out backed off ones?
	pub fn addrs_of_peer(&self, peer: &PeerId) -> impl Iterator<Item = (&Multiaddr, bool)> {
		let peer = if let Some(peer) = self.store.get(peer) {
			peer
@@ -135,10 +146,12 @@ impl NetTopology {
			return Vec::new().into_iter();
		};

		let now = SystemTime::now();
		let now_st = SystemTime::now();
		let now_is = Instant::now();

		let mut list = peer.addrs.iter().filter_map(move |addr| {
			let (score, connected) = addr.score_and_is_connected();
			if (addr.expires >= now && score > 0) || connected {
			if (addr.expires >= now_st && score > 0 && addr.back_off_until < now_is) || connected {
				Some((score, connected, &addr.addr))
			} else {
				None
@@ -164,18 +177,29 @@ impl NetTopology {
		let mut instant = now + Duration::from_secs(3600);
		let mut addrs_out = Vec::new();

		for (peer, info) in &self.store {
		let mut peer_addrs = Vec::new();

		'peer_loop: for (peer, info) in &self.store {
			peer_addrs.clear();

			for addr in &info.addrs {
				let (score, is_connected) = addr.score_and_is_connected();
				if is_connected {
					continue 'peer_loop;
				}
				if score == 0 || addr.expires < now_systime {
					continue;
				}
				if !is_connected && addr.back_off_until > now {
				if addr.back_off_until > now {
					instant = cmp::min(instant, addr.back_off_until);
					continue;
				}

				addrs_out.push(((peer, &addr.addr), score));
				peer_addrs.push(((peer, &addr.addr), score));
			}

			for val in peer_addrs.drain(..) {
				addrs_out.push(val);
			}
		}

@@ -218,49 +242,81 @@ impl NetTopology {
		}
	}

	/// Adds an address discovered through the Kademlia DHT.
	/// Adds addresses that a node says it is listening on.
	///
	/// This address is not necessarily valid and should expire after a TTL.
	///
	/// If `connectable` is true, that means we have some sort of hint that this node can
	/// be reached.
	pub fn add_kademlia_discovered_addr(
	/// The addresses are most likely to be valid.
	#[inline]
	pub fn add_self_reported_listen_addrs<I>(
		&mut self,
		peer_id: &PeerId,
		addr: Multiaddr,
		connectable: bool
	) {
		addrs: I,
	) where I: Iterator<Item = Multiaddr> {
		self.add_discovered_addrs(peer_id, addrs.map(|a| (a, true)))
	}

	/// Adds addresses discovered through the Kademlia DHT.
	///
	/// The addresses are not necessarily valid and should expire after a TTL.
	///
	/// For each address, incorporates a boolean. If true, that means we have some sort of hint
	/// that this address can be reached.
	#[inline]
	pub fn add_kademlia_discovered_addrs<I>(
		&mut self,
		peer_id: &PeerId,
		addrs: I,
	) where I: Iterator<Item = (Multiaddr, bool)> {
		self.add_discovered_addrs(peer_id, addrs)
	}

	/// Inner implementation of the `add_*_discovered_addrs` methods.
	fn add_discovered_addrs<I>(
		&mut self,
		peer_id: &PeerId,
		addrs: I,
	) where I: Iterator<Item = (Multiaddr, bool)> {
		let mut addrs: Vec<_> = addrs.collect();
		let now_systime = SystemTime::now();
		let now = Instant::now();

		let peer = peer_access(&mut self.store, peer_id);

		let mut found = false;
		peer.addrs.retain(|a| {
			if a.expires < now_systime && !a.is_connected() {
				return false;
			}
			if a.addr == addr {
				found = true;
			if let Some(pos) = addrs.iter().position(|&(ref addr, _)| addr == &a.addr) {
				addrs.remove(pos);
			}
			true
		});

		if !found {
		if !addrs.is_empty() {
			trace!(
				target: "sub-libp2p",
				"Peer store: adding address {} for {:?} (connectable hint: {:?})",
				addr,
				"Peer store: adding addresses {:?} for {:?}",
				addrs,
				peer_id,
				connectable
			);
		}

		'addrs_inserter: for (addr, connectable) in addrs {
			let initial_score = if connectable {
				KADEMLIA_DISCOVERY_INITIAL_SCORE_CONNECTABLE
				DISCOVERY_INITIAL_SCORE_CONNECTABLE
			} else {
				KADEMLIA_DISCOVERY_INITIAL_SCORE
				DISCOVERY_INITIAL_SCORE
			};

			// Enforce `MAX_ADDRESSES_PER_PEER` before inserting, or skip this entry.
			while peer.addrs.len() >= MAX_ADDRESSES_PER_PEER {
				let pos = peer.addrs.iter().position(|addr| addr.score() <= initial_score);
				if let Some(pos) = pos {
					let _ = peer.addrs.remove(pos);
				} else {
					continue 'addrs_inserter;
				}
			}

			peer.addrs.push(Addr {
				addr,
				expires: now_systime + KADEMLIA_DISCOVERY_EXPIRATION,
@@ -325,7 +381,11 @@ impl NetTopology {
	/// If we were indeed connected to this addr, then we can find out which peer ID it is.
	pub fn report_disconnected(&mut self, addr: &Multiaddr, reason: DisconnectReason) {
		let score_diff = match reason {
			DisconnectReason::ClosedGracefully => -1,
			DisconnectReason::NoSlot => -1,
			DisconnectReason::FoundBetterAddr => -5,
			DisconnectReason::RemoteClosed => -5,
			DisconnectReason::Useless => -5,
			DisconnectReason::Banned => -5,
		};

		for info in self.store.values_mut() {
@@ -354,18 +414,42 @@ impl NetTopology {
			for a in info.addrs.iter_mut() {
				if &a.addr == addr {
					a.adjust_score(SCORE_DIFF_ON_FAILED_TO_CONNECT);
					trace!(target: "sub-libp2p", "Back off for {} = {:?}", addr, a.next_back_off);
					a.back_off_until = Instant::now() + a.next_back_off;
					a.next_back_off = cmp::min(a.next_back_off * FAIL_BACKOFF_MULTIPLIER, MAX_BACKOFF);
				}
			}
		}
	}

	/// Indicates to the peer store that the given peer is useless.
	///
	/// This decreases the scores of the addresses of that peer.
	pub fn report_useless(&mut self, peer: &PeerId) {
		for (peer_in_store, info_in_store) in self.store.iter_mut() {
			if peer == peer_in_store {
				for addr in info_in_store.addrs.iter_mut() {
					addr.adjust_score(USELESS_PEER_SCORE_CHANGE);
				}
			}
		}
	}
}

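The failure path above delays the next dial attempt and multiplies the delay by `FAIL_BACKOFF_MULTIPLIER` up to `MAX_BACKOFF`. Isolated into a standalone sketch; the multiplier and cap values below are assumptions, not the crate's constants:

use std::cmp;
use std::time::{Duration, Instant};

struct BackOff {
	back_off_until: Instant,
	next_back_off: Duration,
}

impl BackOff {
	/// Registers a dial failure: back off now, and grow the next delay
	/// up to a ceiling.
	fn on_failure(&mut self) {
		const FAIL_BACKOFF_MULTIPLIER: u32 = 2;           // assumed value
		let max_backoff = Duration::from_secs(300);       // assumed value
		self.back_off_until = Instant::now() + self.next_back_off;
		self.next_back_off = cmp::min(self.next_back_off * FAIL_BACKOFF_MULTIPLIER, max_backoff);
	}
}
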
/// Reason why we disconnected from a peer.
#[derive(Debug)]
pub enum DisconnectReason {
	/// The disconnection was graceful.
	ClosedGracefully,
	/// No slot available locally anymore for this peer.
	NoSlot,
	/// A better way to connect to this peer has been found, therefore we disconnect from
	/// the old one.
	FoundBetterAddr,
	/// The remote closed the connection.
	RemoteClosed,
	/// This node is considered useless for our needs. This includes timeouts.
	Useless,
	/// The peer has been banned.
	Banned,
}

fn peer_access<'a>(store: &'a mut FnvHashMap<PeerId, PeerInfo>, peer: &PeerId) -> &'a mut PeerInfo {

@@ -21,7 +21,7 @@ use std::net::Ipv4Addr;
use std::str;
use std::time::Duration;
use TimerToken;
use libp2p::{multiaddr::AddrComponent, Multiaddr};
use libp2p::{multiaddr::Protocol, Multiaddr};
use error::Error;
use ethereum_types::H512;

@@ -139,18 +139,18 @@ impl NetworkConfiguration {
			config_path: None,
			net_config_path: None,
			listen_addresses: vec![
				iter::once(AddrComponent::IP4(Ipv4Addr::new(0, 0, 0, 0)))
					.chain(iter::once(AddrComponent::TCP(30333)))
				iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0)))
					.chain(iter::once(Protocol::Tcp(30333)))
					.collect()
			],
			public_addresses: Vec::new(),
			boot_nodes: Vec::new(),
			use_secret: None,
			min_peers: 25,
			max_peers: 50,
			max_peers: 100,
			reserved_nodes: Vec::new(),
			non_reserved_mode: NonReservedPeerMode::Accept,
			client_version: "Parity-network".into(),
			client_version: "Parity-network".into(), // TODO: meh
		}
	}

@@ -158,8 +158,8 @@ impl NetworkConfiguration {
	pub fn new_local() -> NetworkConfiguration {
		let mut config = NetworkConfiguration::new();
		config.listen_addresses = vec![
			iter::once(AddrComponent::IP4(Ipv4Addr::new(127, 0, 0, 1)))
				.chain(iter::once(AddrComponent::TCP(0)))
			iter::once(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
				.chain(iter::once(Protocol::Tcp(0)))
				.collect()
		];
		config

@@ -15,16 +15,15 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use libp2p::{self, PeerId, Transport, mplex, secio, yamux};
use libp2p::core::{either, upgrade, transport::BoxedMuxed};
use libp2p::core::{either, upgrade, transport::boxed::Boxed, muxing::StreamMuxerBox};
use libp2p::transport_timeout::TransportTimeout;
use std::time::Duration;
use std::usize;
use tokio_io::{AsyncRead, AsyncWrite};

/// Builds the transport that serves as a common ground for all connections.
pub fn build_transport(
	local_private_key: secio::SecioKeyPair
) -> BoxedMuxed<(PeerId, impl AsyncRead + AsyncWrite)> {
) -> Boxed<(PeerId, StreamMuxerBox)> {
	let mut mplex_config = mplex::MplexConfig::new();
	mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block);
	mplex_config.max_buffer_len(usize::MAX);
@@ -33,16 +32,15 @@ pub fn build_transport(
		.with_upgrade(secio::SecioConfig::new(local_private_key))
		.and_then(move |out, endpoint, client_addr| {
			let upgrade = upgrade::or(
				upgrade::map(mplex_config, either::EitherOutput::First),
				upgrade::map(yamux::Config::default(), either::EitherOutput::Second),
				upgrade::map(yamux::Config::default(), either::EitherOutput::First),
				upgrade::map(mplex_config, either::EitherOutput::Second),
			);
			let key = out.remote_key;
			let upgrade = upgrade::map(upgrade, move |muxer| (key, muxer));
			let peer_id = out.remote_key.into_peer_id();
			let upgrade = upgrade::map(upgrade, move |muxer| (peer_id, muxer));
			upgrade::apply(out.stream, upgrade, endpoint, client_addr)
		})
		.into_connection_reuse()
		.map(|(key, substream), _| (key.into_peer_id(), substream));
		.map(|(id, muxer), _| (id, StreamMuxerBox::new(muxer)));

	TransportTimeout::new(base, Duration::from_secs(20))
		.boxed_muxed()
		.boxed()
}