Update networking code to libp2p 0.14 (#4383)

* Entirely update substrate-telemetry to futures 0.3

* Add a Closed error

* Update to libp2p 0.14

* More work

* More work

* More work

* More work

* Fix warnings

* Remove unwrap()

* Work on tests fixing

* Fix network tests

* Fix external network tests

* Update libp2p and restore Yamux in discovery test

* Ignore DNS if initialization fails

* Restore variables ordering

* Forgot browser-utils

* Fix downfall after merge

* Fix tests
This commit is contained in:
Pierre Krieger
2020-01-09 19:01:23 +01:00
committed by Gavin Wood
parent 6e572a9477
commit ca997cf1e4
29 changed files with 842 additions and 812 deletions
@@ -20,7 +20,6 @@ use crate::protocol::legacy_proto::upgrade::RegisteredProtocol;
use bytes::BytesMut;
use fnv::FnvHashMap;
use futures::prelude::*;
use futures03::{compat::Compat, TryFutureExt as _, StreamExt as _, TryStreamExt as _};
use libp2p::core::{ConnectedPoint, Multiaddr, PeerId};
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use log::{debug, error, trace, warn};
@@ -28,7 +27,7 @@ use rand::distributions::{Distribution as _, Uniform};
use smallvec::SmallVec;
use std::{borrow::Cow, collections::hash_map::Entry, cmp, error, marker::PhantomData, mem, pin::Pin};
use std::time::{Duration, Instant};
use tokio_io::{AsyncRead, AsyncWrite};
use std::task::{Context, Poll};
/// Network behaviour that handles opening substreams for custom protocols with other nodes.
///
@@ -103,7 +102,7 @@ enum PeerState {
/// The peerset requested that we connect to this peer. We are not connected to this node.
PendingRequest {
/// When to actually start dialing.
timer: Compat<futures_timer::Delay>,
timer: futures_timer::Delay,
/// When the `timer` will trigger.
timer_deadline: Instant,
},
@@ -135,7 +134,7 @@ enum PeerState {
/// state mismatch.
open: bool,
/// When to enable this remote.
timer: Compat<futures_timer::Delay>,
timer: futures_timer::Delay,
/// When the `timer` will trigger.
timer_deadline: Instant,
},
@@ -388,7 +387,7 @@ impl<TSubstream> LegacyProto<TSubstream> {
debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \
until {:?}", occ_entry.key(), until);
*occ_entry.into_mut() = PeerState::PendingRequest {
timer: futures_timer::Delay::new_at(until.clone()).compat(),
timer: futures_timer::Delay::new_at(until.clone()),
timer_deadline: until.clone(),
};
},
@@ -407,7 +406,7 @@ impl<TSubstream> LegacyProto<TSubstream> {
*occ_entry.into_mut() = PeerState::DisabledPendingEnable {
connected_point: connected_point.clone(),
open,
timer: futures_timer::Delay::new_at(banned.clone()).compat(),
timer: futures_timer::Delay::new_at(banned.clone()),
timer_deadline: banned.clone(),
};
},
@@ -616,7 +615,7 @@ impl<TSubstream> DiscoveryNetBehaviour for LegacyProto<TSubstream> {
impl<TSubstream> NetworkBehaviour for LegacyProto<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
TSubstream: AsyncRead + AsyncWrite + Unpin,
{
type ProtocolsHandler = CustomProtoHandlerProto<TSubstream>;
type OutEvent = LegacyProtoOut;
@@ -951,8 +950,9 @@ where
fn poll(
&mut self,
cx: &mut Context,
_params: &mut impl PollParameters,
) -> Async<
) -> Poll<
NetworkBehaviourAction<
CustomProtoHandlerIn,
Self::OutEvent,
@@ -961,38 +961,31 @@ where
// Poll for instructions from the peerset.
// Note that the peerset is a *best effort* crate, and we have to use defensive programming.
loop {
let mut peerset01 = futures03::stream::poll_fn(|cx|
futures03::Stream::poll_next(Pin::new(&mut self.peerset), cx)
).map(|v| Ok::<_, ()>(v)).compat();
match peerset01.poll() {
Ok(Async::Ready(Some(sc_peerset::Message::Accept(index)))) => {
match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) {
Poll::Ready(Some(sc_peerset::Message::Accept(index))) => {
self.peerset_report_accept(index);
}
Ok(Async::Ready(Some(sc_peerset::Message::Reject(index)))) => {
Poll::Ready(Some(sc_peerset::Message::Reject(index))) => {
self.peerset_report_reject(index);
}
Ok(Async::Ready(Some(sc_peerset::Message::Connect(id)))) => {
Poll::Ready(Some(sc_peerset::Message::Connect(id))) => {
self.peerset_report_connect(id);
}
Ok(Async::Ready(Some(sc_peerset::Message::Drop(id)))) => {
Poll::Ready(Some(sc_peerset::Message::Drop(id))) => {
self.peerset_report_disconnect(id);
}
Ok(Async::Ready(None)) => {
Poll::Ready(None) => {
error!(target: "sub-libp2p", "Peerset receiver stream has returned None");
break;
}
Ok(Async::NotReady) => break,
Err(err) => {
error!(target: "sub-libp2p", "Peerset receiver stream has errored: {:?}", err);
break
}
Poll::Pending => break,
}
}
for (peer_id, peer_state) in self.peers.iter_mut() {
match mem::replace(peer_state, PeerState::Poisoned) {
PeerState::PendingRequest { mut timer, timer_deadline } => {
if let Ok(Async::NotReady) = timer.poll() {
if let Poll::Pending = Pin::new(&mut timer).poll(cx) {
*peer_state = PeerState::PendingRequest { timer, timer_deadline };
continue;
}
@@ -1003,7 +996,7 @@ where
}
PeerState::DisabledPendingEnable { mut timer, connected_point, open, timer_deadline } => {
if let Ok(Async::NotReady) = timer.poll() {
if let Poll::Pending = Pin::new(&mut timer).poll(cx) {
*peer_state = PeerState::DisabledPendingEnable {
timer,
connected_point,
@@ -1026,9 +1019,9 @@ where
}
if !self.events.is_empty() {
return Async::Ready(self.events.remove(0))
return Poll::Ready(self.events.remove(0))
}
Async::NotReady
Poll::Pending
}
}
@@ -17,7 +17,6 @@
use super::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream};
use bytes::BytesMut;
use futures::prelude::*;
use futures03::{compat::Compat, TryFutureExt as _};
use futures_timer::Delay;
use libp2p::core::{ConnectedPoint, PeerId, Endpoint};
use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade};
@@ -31,7 +30,7 @@ use libp2p::swarm::{
use log::{debug, error};
use smallvec::{smallvec, SmallVec};
use std::{borrow::Cow, error, fmt, io, marker::PhantomData, mem, time::Duration};
use tokio_io::{AsyncRead, AsyncWrite};
use std::{pin::Pin, task::{Context, Poll}};
/// Implements the `IntoProtocolsHandler` trait of libp2p.
///
@@ -97,7 +96,7 @@ pub struct CustomProtoHandlerProto<TSubstream> {
impl<TSubstream> CustomProtoHandlerProto<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
TSubstream: AsyncRead + AsyncWrite + Unpin,
{
/// Builds a new `CustomProtoHandlerProto`.
pub fn new(protocol: RegisteredProtocol) -> Self {
@@ -110,7 +109,7 @@ where
impl<TSubstream> IntoProtocolsHandler for CustomProtoHandlerProto<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
TSubstream: AsyncRead + AsyncWrite + Unpin,
{
type Handler = CustomProtoHandler<TSubstream>;
@@ -125,7 +124,7 @@ where
remote_peer_id: remote_peer_id.clone(),
state: ProtocolState::Init {
substreams: SmallVec::new(),
init_deadline: Delay::new(Duration::from_secs(5)).compat()
init_deadline: Delay::new(Duration::from_secs(5))
},
events_queue: SmallVec::new(),
}
@@ -152,7 +151,7 @@ pub struct CustomProtoHandler<TSubstream> {
///
/// This queue must only ever be modified to insert elements at the back, or remove the first
/// element.
events_queue: SmallVec<[ProtocolsHandlerEvent<RegisteredProtocol, (), CustomProtoHandlerOut>; 16]>,
events_queue: SmallVec<[ProtocolsHandlerEvent<RegisteredProtocol, (), CustomProtoHandlerOut, ConnectionKillError>; 16]>,
}
/// State of the handler.
@@ -162,14 +161,14 @@ enum ProtocolState<TSubstream> {
/// List of substreams opened by the remote but that haven't been processed yet.
substreams: SmallVec<[RegisteredProtocolSubstream<TSubstream>; 6]>,
/// Deadline after which the initialization is abnormally long.
init_deadline: Compat<Delay>,
init_deadline: Delay,
},
/// Handler is opening a substream in order to activate itself.
/// If we are in this state, we haven't sent any `CustomProtocolOpen` yet.
Opening {
/// Deadline after which the opening is abnormally long.
deadline: Compat<Delay>,
deadline: Delay,
},
/// Normal operating mode. Contains the substreams that are open.
@@ -260,7 +259,7 @@ pub enum CustomProtoHandlerOut {
impl<TSubstream> CustomProtoHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
TSubstream: AsyncRead + AsyncWrite + Unpin,
{
/// Enables the handler.
fn enable(&mut self) {
@@ -280,7 +279,7 @@ where
});
}
ProtocolState::Opening {
deadline: Delay::new(Duration::from_secs(60)).compat()
deadline: Delay::new(Duration::from_secs(60))
}
} else {
@@ -337,8 +336,8 @@ where
/// Polls the state for events. Optionally returns an event to produce.
#[must_use]
fn poll_state(&mut self)
-> Option<ProtocolsHandlerEvent<RegisteredProtocol, (), CustomProtoHandlerOut>> {
fn poll_state(&mut self, cx: &mut Context)
-> Option<ProtocolsHandlerEvent<RegisteredProtocol, (), CustomProtoHandlerOut, ConnectionKillError>> {
match mem::replace(&mut self.state, ProtocolState::Poisoned) {
ProtocolState::Poisoned => {
error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state",
@@ -348,14 +347,14 @@ where
}
ProtocolState::Init { substreams, mut init_deadline } => {
match init_deadline.poll() {
Ok(Async::Ready(())) => {
init_deadline = Delay::new(Duration::from_secs(60)).compat();
match Pin::new(&mut init_deadline).poll(cx) {
Poll::Ready(Ok(())) => {
init_deadline = Delay::new(Duration::from_secs(60));
error!(target: "sub-libp2p", "Handler initialization process is too long \
with {:?}", self.remote_peer_id)
},
Ok(Async::NotReady) => {}
Err(_) => error!(target: "sub-libp2p", "Tokio timer has errored")
Poll::Pending => {}
Poll::Ready(Err(_)) => error!(target: "sub-libp2p", "Tokio timer has errored")
}
self.state = ProtocolState::Init { substreams, init_deadline };
@@ -363,9 +362,9 @@ where
}
ProtocolState::Opening { mut deadline } => {
match deadline.poll() {
Ok(Async::Ready(())) => {
deadline = Delay::new(Duration::from_secs(60)).compat();
match Pin::new(&mut deadline).poll(cx) {
Poll::Ready(Ok(())) => {
deadline = Delay::new(Duration::from_secs(60));
let event = CustomProtoHandlerOut::ProtocolError {
is_severe: true,
error: "Timeout when opening protocol".to_string().into(),
@@ -373,13 +372,13 @@ where
self.state = ProtocolState::Opening { deadline };
Some(ProtocolsHandlerEvent::Custom(event))
},
Ok(Async::NotReady) => {
Poll::Pending => {
self.state = ProtocolState::Opening { deadline };
None
},
Err(_) => {
Poll::Ready(Err(_)) => {
error!(target: "sub-libp2p", "Tokio timer has errored");
deadline = Delay::new(Duration::from_secs(60)).compat();
deadline = Delay::new(Duration::from_secs(60));
self.state = ProtocolState::Opening { deadline };
None
},
@@ -389,9 +388,9 @@ where
ProtocolState::Normal { mut substreams, mut shutdown } => {
for n in (0..substreams.len()).rev() {
let mut substream = substreams.swap_remove(n);
match substream.poll() {
Ok(Async::NotReady) => substreams.push(substream),
Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(message)))) => {
match Pin::new(&mut substream).poll_next(cx) {
Poll::Pending => substreams.push(substream),
Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => {
let event = CustomProtoHandlerOut::CustomMessage {
message
};
@@ -399,7 +398,7 @@ where
self.state = ProtocolState::Normal { substreams, shutdown };
return Some(ProtocolsHandlerEvent::Custom(event));
},
Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged { messages }))) => {
Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { messages }))) => {
let event = CustomProtoHandlerOut::Clogged {
messages,
};
@@ -407,7 +406,7 @@ where
self.state = ProtocolState::Normal { substreams, shutdown };
return Some(ProtocolsHandlerEvent::Custom(event));
}
Ok(Async::Ready(None)) => {
Poll::Ready(None) => {
shutdown.push(substream);
if substreams.is_empty() {
let event = CustomProtoHandlerOut::CustomProtocolClosed {
@@ -420,7 +419,7 @@ where
return Some(ProtocolsHandlerEvent::Custom(event));
}
}
Err(err) => {
Poll::Ready(Some(Err(err))) => {
if substreams.is_empty() {
let event = CustomProtoHandlerOut::CustomProtocolClosed {
reason: format!("Error on the last substream: {:?}", err).into(),
@@ -443,12 +442,12 @@ where
}
ProtocolState::Disabled { mut shutdown, reenable } => {
shutdown_list(&mut shutdown);
shutdown_list(&mut shutdown, cx);
// If `reenable` is `true`, that means we should open the substreams system again
// after all the substreams are closed.
if reenable && shutdown.is_empty() {
self.state = ProtocolState::Opening {
deadline: Delay::new(Duration::from_secs(60)).compat()
deadline: Delay::new(Duration::from_secs(60))
};
Some(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(self.protocol.clone()),
@@ -524,7 +523,7 @@ where
}
impl<TSubstream> ProtocolsHandler for CustomProtoHandler<TSubstream>
where TSubstream: AsyncRead + AsyncWrite {
where TSubstream: AsyncRead + AsyncWrite + Unpin {
type InEvent = CustomProtoHandlerIn;
type OutEvent = CustomProtoHandlerOut;
type Substream = TSubstream;
@@ -585,33 +584,33 @@ where TSubstream: AsyncRead + AsyncWrite {
fn poll(
&mut self,
cx: &mut Context,
) -> Poll<
ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent>,
Self::Error,
ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
> {
// Flush the events queue if necessary.
if !self.events_queue.is_empty() {
let event = self.events_queue.remove(0);
return Ok(Async::Ready(event))
return Poll::Ready(event)
}
// Kill the connection if needed.
if let ProtocolState::KillAsap = self.state {
return Err(ConnectionKillError);
return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError));
}
// Process all the substreams.
if let Some(event) = self.poll_state() {
return Ok(Async::Ready(event))
if let Some(event) = self.poll_state(cx) {
return Poll::Ready(event)
}
Ok(Async::NotReady)
Poll::Pending
}
}
impl<TSubstream> fmt::Debug for CustomProtoHandler<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite,
TSubstream: AsyncRead + AsyncWrite + Unpin,
{
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("CustomProtoHandler")
@@ -622,15 +621,16 @@ where
/// Given a list of substreams, tries to shut them down. The substreams that have been successfully
/// shut down are removed from the list.
fn shutdown_list<TSubstream>
(list: &mut SmallVec<impl smallvec::Array<Item = RegisteredProtocolSubstream<TSubstream>>>)
where TSubstream: AsyncRead + AsyncWrite {
(list: &mut SmallVec<impl smallvec::Array<Item = RegisteredProtocolSubstream<TSubstream>>>,
cx: &mut Context)
where TSubstream: AsyncRead + AsyncWrite + Unpin {
'outer: for n in (0..list.len()).rev() {
let mut substream = list.swap_remove(n);
loop {
match substream.poll() {
Ok(Async::Ready(Some(_))) => {}
Ok(Async::NotReady) => break,
Err(_) | Ok(Async::Ready(None)) => continue 'outer,
match substream.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(_))) => {}
Poll::Pending => break,
Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer,
}
}
list.push(substream);
@@ -16,7 +16,7 @@
#![cfg(test)]
use futures::{future, prelude::*, try_ready};
use futures::{prelude::*, ready};
use codec::{Encode, Decode};
use libp2p::core::nodes::Substream;
use libp2p::core::{ConnectedPoint, transport::boxed::Boxed, muxing::StreamMuxerBox};
@@ -24,7 +24,7 @@ use libp2p::swarm::{Swarm, ProtocolsHandler, IntoProtocolsHandler};
use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction};
use libp2p::{PeerId, Multiaddr, Transport};
use rand::seq::SliceRandom;
use std::{io, time::Duration, time::Instant};
use std::{io, task::Context, task::Poll, time::Duration};
use crate::message::Message;
use crate::protocol::legacy_proto::{LegacyProto, LegacyProtoOut};
use sp_test_primitives::Block;
@@ -62,7 +62,7 @@ fn build_nodes()
endpoint,
libp2p::core::upgrade::Version::V1
)
.map(|muxer| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(muxer)))
.map_ok(|muxer| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(muxer)))
})
.timeout(Duration::from_secs(20))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
@@ -170,14 +170,15 @@ impl NetworkBehaviour for CustomProtoWithAddr {
fn poll(
&mut self,
cx: &mut Context,
params: &mut impl PollParameters
) -> Async<
) -> Poll<
NetworkBehaviourAction<
<<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent,
Self::OutEvent
>
> {
self.inner.poll(params)
self.inner.poll(cx, params)
}
fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) {
@@ -216,9 +217,9 @@ fn two_nodes_transfer_lots_of_packets() {
let (mut service1, mut service2) = build_nodes();
let fut1 = future::poll_fn(move || -> io::Result<_> {
let fut1 = future::poll_fn(move |cx| -> Poll<()> {
loop {
match try_ready!(service1.poll()) {
match ready!(service1.poll_next_unpin(cx)) {
Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => {
for n in 0 .. NUM_PACKETS {
service1.send_packet(
@@ -233,9 +234,9 @@ fn two_nodes_transfer_lots_of_packets() {
});
let mut packet_counter = 0u32;
let fut2 = future::poll_fn(move || -> io::Result<_> {
let fut2 = future::poll_fn(move |cx| {
loop {
match try_ready!(service2.poll()) {
match ready!(service2.poll_next_unpin(cx)) {
Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {},
Some(LegacyProtoOut::CustomMessage { message, .. }) => {
match Message::<Block>::decode(&mut &message[..]).unwrap() {
@@ -243,7 +244,7 @@ fn two_nodes_transfer_lots_of_packets() {
assert_eq!(message.len(), 1);
packet_counter += 1;
if packet_counter == NUM_PACKETS {
return Ok(Async::Ready(()))
return Poll::Ready(())
}
},
_ => panic!(),
@@ -254,8 +255,9 @@ fn two_nodes_transfer_lots_of_packets() {
}
});
let combined = fut1.select(fut2).map_err(|(err, _)| err);
let _ = tokio::runtime::Runtime::new().unwrap().block_on(combined).unwrap();
futures::executor::block_on(async move {
future::select(fut1, fut2).await;
});
}
#[test]
@@ -277,9 +279,9 @@ fn basic_two_nodes_requests_in_parallel() {
let mut to_receive = to_send.clone();
to_send.shuffle(&mut rand::thread_rng());
let fut1 = future::poll_fn(move || -> io::Result<_> {
let fut1 = future::poll_fn(move |cx| -> Poll<()> {
loop {
match try_ready!(service1.poll()) {
match ready!(service1.poll_next_unpin(cx)) {
Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => {
for msg in to_send.drain(..) {
service1.send_packet(&peer_id, msg.encode());
@@ -290,15 +292,15 @@ fn basic_two_nodes_requests_in_parallel() {
}
});
let fut2 = future::poll_fn(move || -> io::Result<_> {
let fut2 = future::poll_fn(move |cx| {
loop {
match try_ready!(service2.poll()) {
match ready!(service2.poll_next_unpin(cx)) {
Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {},
Some(LegacyProtoOut::CustomMessage { message, .. }) => {
let pos = to_receive.iter().position(|m| m.encode() == message).unwrap();
to_receive.remove(pos);
if to_receive.is_empty() {
return Ok(Async::Ready(()))
return Poll::Ready(())
}
}
_ => panic!(),
@@ -306,8 +308,9 @@ fn basic_two_nodes_requests_in_parallel() {
}
});
let combined = fut1.select(fut2).map_err(|(err, _)| err);
let _ = tokio::runtime::Runtime::new().unwrap().block_on_all(combined).unwrap();
futures::executor::block_on(async move {
future::select(fut1, fut2).await;
});
}
#[test]
@@ -317,9 +320,6 @@ fn reconnect_after_disconnect() {
let (mut service1, mut service2) = build_nodes();
// We use the `current_thread` runtime because it doesn't require us to have `'static` futures.
let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap();
// For this test, the services can be in the following states.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ServiceState { NotConnected, FirstConnec, Disconnected, ConnectedAgain }
@@ -327,12 +327,12 @@ fn reconnect_after_disconnect() {
let mut service2_state = ServiceState::NotConnected;
// Run the events loops.
runtime.block_on(future::poll_fn(|| -> Result<_, io::Error> {
futures::executor::block_on(future::poll_fn(|cx| -> Poll<Result<_, io::Error>> {
loop {
let mut service1_not_ready = false;
match service1.poll().unwrap() {
Async::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => {
match service1.poll_next_unpin(cx) {
Poll::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => {
match service1_state {
ServiceState::NotConnected => {
service1_state = ServiceState::FirstConnec;
@@ -344,19 +344,19 @@ fn reconnect_after_disconnect() {
ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(),
}
},
Async::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => {
Poll::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => {
match service1_state {
ServiceState::FirstConnec => service1_state = ServiceState::Disconnected,
ServiceState::ConnectedAgain| ServiceState::NotConnected |
ServiceState::Disconnected => panic!(),
}
},
Async::NotReady => service1_not_ready = true,
Poll::Pending => service1_not_ready = true,
_ => panic!()
}
match service2.poll().unwrap() {
Async::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => {
match service2.poll_next_unpin(cx) {
Poll::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => {
match service2_state {
ServiceState::NotConnected => {
service2_state = ServiceState::FirstConnec;
@@ -368,43 +368,43 @@ fn reconnect_after_disconnect() {
ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(),
}
},
Async::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => {
Poll::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => {
match service2_state {
ServiceState::FirstConnec => service2_state = ServiceState::Disconnected,
ServiceState::ConnectedAgain| ServiceState::NotConnected |
ServiceState::Disconnected => panic!(),
}
},
Async::NotReady if service1_not_ready => break,
Async::NotReady => {}
Poll::Pending if service1_not_ready => break,
Poll::Pending => {}
_ => panic!()
}
}
if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain {
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
} else {
Ok(Async::NotReady)
Poll::Pending
}
})).unwrap();
// Do a second 3-seconds run to make sure we don't get disconnected immediately again.
let mut delay = tokio::timer::Delay::new(Instant::now() + Duration::from_secs(3));
runtime.block_on(future::poll_fn(|| -> Result<_, io::Error> {
match service1.poll().unwrap() {
Async::NotReady => {},
let mut delay = futures_timer::Delay::new(Duration::from_secs(3));
futures::executor::block_on(future::poll_fn(|cx| -> Poll<Result<_, io::Error>> {
match service1.poll_next_unpin(cx) {
Poll::Pending => {},
_ => panic!()
}
match service2.poll().unwrap() {
Async::NotReady => {},
match service2.poll_next_unpin(cx) {
Poll::Pending => {},
_ => panic!()
}
if let Async::Ready(()) = delay.poll().unwrap() {
Ok(Async::Ready(()))
if let Poll::Ready(Ok(_)) = delay.poll_unpin(cx) {
Poll::Ready(Ok(()))
} else {
Ok(Async::NotReady)
Poll::Pending
}
})).unwrap();
}
@@ -15,12 +15,12 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use crate::config::ProtocolId;
use bytes::{Bytes, BytesMut};
use bytes::BytesMut;
use futures::prelude::*;
use futures_codec::Framed;
use libp2p::core::{Negotiated, Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName};
use libp2p::tokio_codec::Framed;
use std::{collections::VecDeque, io, vec::IntoIter as VecIntoIter};
use futures::{prelude::*, future, stream};
use tokio_io::{AsyncRead, AsyncWrite};
use std::{collections::VecDeque, io, pin::Pin, vec::IntoIter as VecIntoIter};
use std::task::{Context, Poll};
use unsigned_varint::codec::UviBytes;
/// Connection upgrade for a single protocol.
@@ -32,7 +32,7 @@ pub struct RegisteredProtocol {
id: ProtocolId,
/// Base name of the protocol as advertised on the network.
/// Ends with `/` so that we can append a version number behind.
base_name: Bytes,
base_name: Vec<u8>,
/// List of protocol versions that we support.
/// Ordered in descending order so that the best comes first.
supported_versions: Vec<u8>,
@@ -44,7 +44,7 @@ impl RegisteredProtocol {
pub fn new(protocol: impl Into<ProtocolId>, versions: &[u8])
-> Self {
let protocol = protocol.into();
let mut base_name = Bytes::from_static(b"/substrate/");
let mut base_name = b"/substrate/".to_vec();
base_name.extend_from_slice(protocol.as_bytes());
base_name.extend_from_slice(b"/");
@@ -78,11 +78,11 @@ pub struct RegisteredProtocolSubstream<TSubstream> {
/// the remote (listener).
endpoint: Endpoint,
/// Buffer of packets to send.
send_queue: VecDeque<Vec<u8>>,
send_queue: VecDeque<BytesMut>,
/// If true, we should call `poll_complete` on the inner sink.
requires_poll_complete: bool,
requires_poll_flush: bool,
/// The underlying substream.
inner: stream::Fuse<Framed<Negotiated<TSubstream>, UviBytes<Vec<u8>>>>,
inner: stream::Fuse<Framed<Negotiated<TSubstream>, UviBytes<BytesMut>>>,
/// Version of the protocol that was negotiated.
protocol_version: u8,
/// If true, we have sent a "remote is clogged" event recently and shouldn't send another one
@@ -119,7 +119,7 @@ impl<TSubstream> RegisteredProtocolSubstream<TSubstream> {
return
}
self.send_queue.push_back(data);
self.send_queue.push_back(From::from(&data[..]));
}
}
@@ -138,25 +138,31 @@ pub enum RegisteredProtocolEvent {
}
impl<TSubstream> Stream for RegisteredProtocolSubstream<TSubstream>
where TSubstream: AsyncRead + AsyncWrite {
type Item = RegisteredProtocolEvent;
type Error = io::Error;
where TSubstream: AsyncRead + AsyncWrite + Unpin {
type Item = Result<RegisteredProtocolEvent, io::Error>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
// Flushing the local queue.
while let Some(packet) = self.send_queue.pop_front() {
match self.inner.start_send(packet)? {
AsyncSink::NotReady(packet) => {
self.send_queue.push_front(packet);
break
},
AsyncSink::Ready => self.requires_poll_complete = true,
while !self.send_queue.is_empty() {
match Pin::new(&mut self.inner).poll_ready(cx) {
Poll::Ready(Ok(())) => {},
Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err))),
Poll::Pending => break,
}
if let Some(packet) = self.send_queue.pop_front() {
Pin::new(&mut self.inner).start_send(packet)?;
self.requires_poll_flush = true;
}
}
// If we are closing, close as soon as the Sink is closed.
if self.is_closing {
return Ok(self.inner.close()?.map(|()| None))
return match Pin::new(&mut self.inner).poll_close(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(Ok(_)) => Poll::Ready(None),
Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
}
}
// Indicating that the remote is clogged if that's the case.
@@ -166,9 +172,9 @@ where TSubstream: AsyncRead + AsyncWrite {
// if you remove the fuse, then we will always return early from this function and
// thus never read any message from the network.
self.clogged_fuse = true;
return Ok(Async::Ready(Some(RegisteredProtocolEvent::Clogged {
return Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged {
messages: self.send_queue.iter()
.map(|m| m.clone())
.map(|m| m.clone().to_vec())
.collect(),
})))
}
@@ -177,25 +183,25 @@ where TSubstream: AsyncRead + AsyncWrite {
}
// Flushing if necessary.
if self.requires_poll_complete {
if let Async::Ready(()) = self.inner.poll_complete()? {
self.requires_poll_complete = false;
if self.requires_poll_flush {
if let Poll::Ready(()) = Pin::new(&mut self.inner).poll_flush(cx)? {
self.requires_poll_flush = false;
}
}
// Receiving incoming packets.
// Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever.
match self.inner.poll()? {
Async::Ready(Some(data)) => {
Ok(Async::Ready(Some(RegisteredProtocolEvent::Message(data))))
match Pin::new(&mut self.inner).poll_next(cx)? {
Poll::Ready(Some(data)) => {
Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(data))))
}
Async::Ready(None) =>
if !self.requires_poll_complete && self.send_queue.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None) =>
if !self.requires_poll_flush && self.send_queue.is_empty() {
Poll::Ready(None)
} else {
Ok(Async::NotReady)
Poll::Pending
}
Async::NotReady => Ok(Async::NotReady),
Poll::Pending => Poll::Pending,
}
}
}
@@ -224,7 +230,7 @@ impl UpgradeInfo for RegisteredProtocol {
#[derive(Debug, Clone)]
pub struct RegisteredProtocolName {
/// Protocol name, as advertised on the wire.
name: Bytes,
name: Vec<u8>,
/// Version number. Stored in string form in `name`, but duplicated here for easier retrieval.
version: u8,
}
@@ -236,10 +242,10 @@ impl ProtocolName for RegisteredProtocolName {
}
impl<TSubstream> InboundUpgrade<TSubstream> for RegisteredProtocol
where TSubstream: AsyncRead + AsyncWrite,
where TSubstream: AsyncRead + AsyncWrite + Unpin,
{
type Output = RegisteredProtocolSubstream<TSubstream>;
type Future = future::FutureResult<Self::Output, io::Error>;
type Future = future::Ready<Result<Self::Output, io::Error>>;
type Error = io::Error;
fn upgrade_inbound(
@@ -257,7 +263,7 @@ where TSubstream: AsyncRead + AsyncWrite,
is_closing: false,
endpoint: Endpoint::Listener,
send_queue: VecDeque::new(),
requires_poll_complete: false,
requires_poll_flush: false,
inner: framed.fuse(),
protocol_version: info.version,
clogged_fuse: false,
@@ -266,7 +272,7 @@ where TSubstream: AsyncRead + AsyncWrite,
}
impl<TSubstream> OutboundUpgrade<TSubstream> for RegisteredProtocol
where TSubstream: AsyncRead + AsyncWrite,
where TSubstream: AsyncRead + AsyncWrite + Unpin,
{
type Output = <Self as InboundUpgrade<TSubstream>>::Output;
type Future = <Self as InboundUpgrade<TSubstream>>::Future;
@@ -283,7 +289,7 @@ where TSubstream: AsyncRead + AsyncWrite,
is_closing: false,
endpoint: Endpoint::Dialer,
send_queue: VecDeque::new(),
requires_poll_complete: false,
requires_poll_flush: false,
inner: framed.fuse(),
protocol_version: info.version,
clogged_fuse: false,
@@ -23,7 +23,7 @@ use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Instant, Duration};
use log::{trace, info};
use futures::sync::oneshot::{Sender as OneShotSender};
use futures::channel::oneshot::{Sender as OneShotSender};
use linked_hash_map::{Entry, LinkedHashMap};
use sp_blockchain::Error as ClientError;
use sc_client_api::{FetchChecker, RemoteHeaderRequest,
@@ -680,7 +680,7 @@ pub mod tests {
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Instant;
use futures::{Future, sync::oneshot};
use futures::channel::oneshot;
use sp_core::storage::ChildInfo;
use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
@@ -999,7 +999,7 @@ pub mod tests {
}, tx));
receive_call_response(&mut network_interface, &mut light_dispatch, peer0.clone(), 0);
assert_eq!(response.wait().unwrap().unwrap(), vec![42]);
assert_eq!(futures::executor::block_on(response).unwrap().unwrap(), vec![42]);
}
#[test]
@@ -1021,7 +1021,10 @@ pub mod tests {
id: 0,
proof: StorageProof::empty(),
});
assert_eq!(response.wait().unwrap().unwrap().remove(b":key".as_ref()).unwrap(), Some(vec![42]));
assert_eq!(
futures::executor::block_on(response).unwrap().unwrap().remove(b":key".as_ref()).unwrap(),
Some(vec![42])
);
}
#[test]
@@ -1049,7 +1052,7 @@ pub mod tests {
id: 0,
proof: StorageProof::empty(),
});
assert_eq!(response.wait().unwrap().unwrap().remove(b":key".as_ref()).unwrap(), Some(vec![42]));
assert_eq!(futures::executor::block_on(response).unwrap().unwrap().remove(b":key".as_ref()).unwrap(), Some(vec![42]));
}
#[test]
@@ -1078,7 +1081,7 @@ pub mod tests {
proof: StorageProof::empty(),
});
assert_eq!(
response.wait().unwrap().unwrap().hash(),
futures::executor::block_on(response).unwrap().unwrap().hash(),
"6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3".parse().unwrap(),
);
}
@@ -1109,7 +1112,7 @@ pub mod tests {
roots: vec![],
roots_proof: StorageProof::empty(),
});
assert_eq!(response.wait().unwrap().unwrap(), vec![(100, 2)]);
assert_eq!(futures::executor::block_on(response).unwrap().unwrap(), vec![(100, 2)]);
}
#[test]