Fix timer panics in the wasm light client (#4561)

* Make WASM browser thing compile

* Fix

* updated exit-future (github repo)

* Switch to broadcast crate

* Migrate client/cli

* Switch exit-future to modernize branch

* Small changes

* Switch to cargo version and fix fg tests

* fix basic-authorship

* Fix crash on grafana macro

* Fix grafana macro

* Switch node python version

* Disable record_metrics_slice in grafana macro on wasm

* Update client/grafana-data-source/src/lib.rs

* Revert "Update client/grafana-data-source/src/lib.rs"

This reverts commit 888009a8e0b7051bd4bfbbfdb0448bcf2e2aae93.

* Add wasm support for state machine

* Switch to my own libp2p version

* Revert "Switch to my own libp2p version"

This reverts commit ce613871b59264b3165b45c37943e6560240daa7.

* Revert "Add wasm support for state machine"

This reverts commit de7eaa0694d9534fc3b164621737968e9a6a7c5f.

* Add sc-browser

* Squash

* remove sc-browser

* Fix keystore on wasm

* Stubs for removed functions to make the env compatible with old runtimes

* Add test (that doesn't work)

* Fix build scripts

* Revert basic-authorship due to no panics

* Revert cli/informant

* Revert consensus

* revert offchain

* Update utils/browser/Cargo.toml

Co-Authored-By: Benjamin Kampmann <ben@gnunicorn.org>

* export console functions

* Add new chainspec

* Fix ws in chain spec

* revert chainspec

* Fix chainspec

* Use an Option<PathBuf> in keystore instead of cfg flags

* Remove crud

* Only use wasm-timer for instant and systemtime

* Remove telemetry changes

* Assuming this is ok

* Add a KeystoreConfig

* Add stubs back in

* Update libp2p

* Revert "Add stubs back in"

This reverts commit 4690cf1882aa0f99f7f00a58c4080c8aa9b77c36.

* Remove commented js again

* Bump kvdb-web version

* Fix cli

* Switch branch on futures-timer

* Fix tests

* Remove sc-client test build in check-web-wasm because there isn't a good way to build futures-timer with wasm-bindgen support in the build

* Remove more things ^^

* Switch branch on futures-timer back

* Put DB io stats behind a cfg flag

* Fix things

* Don't timeout transports on wasm

* Update branch of futures-timer and fix bad merge

* Spawn informant

* Fix network test

* Fix delay resets

* Changes

* Fix tests

* use wasm_timer for transaction pool

* Fixes

* Switch futures-timer to crates

* Only diagnose futures on native

* Fix sc-network-test tests

* Select log level in js

* Fix syncing ;^)

* Allow disabling colours in the informant

* Use OutputFormat enum for informant

* MallocSizeOf impl on transaction pool broke stuff because wasm_timer::Instant doesn't impl it so just revert the transaction pool to master

* Update futures-diagnose

* Revert "MallocSizeOf impl on transaction pool broke stuff because wasm_timer::Instant doesn't impl it so just revert the transaction pool to master"

This reverts commit baa4ffc94fd968b6660a2c17ba8113e06af15548.

* Pass whole chain spec in start_client

* Get Instant::now to work in transaction pool again

* Informant dep reordering

Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Svyatoslav Nikolsky <svyatonik@gmail.com>
Co-authored-by: Benjamin Kampmann <ben.kampmann@googlemail.com>
Co-authored-by: Demi Obenour <48690212+DemiMarie-parity@users.noreply.github.com>
This commit is contained in:
Ashley
2020-02-10 12:23:55 +01:00
committed by GitHub
parent 34bf0caa05
commit ead6815ae4
54 changed files with 299 additions and 155 deletions
+2 -1
View File
@@ -28,7 +28,8 @@ use std::error;
use std::collections::hash_map::Entry;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use std::time::Duration;
use wasm_timer::Instant;
use crate::utils::interval;
/// Time after we disconnect from a node before we purge its information from the cache.
+7 -6
View File
@@ -52,6 +52,7 @@ use crate::chain::{Client, FinalityProofProvider};
use sc_client_api::{FetchChecker, ChangesProof, StorageProof};
use crate::error;
use util::LruHashSet;
use wasm_timer::Instant;
mod legacy_proto;
mod util;
@@ -158,7 +159,7 @@ struct PacketStats {
/// A peer that we are connected to
/// and from whom we have not yet received a Status message.
struct HandshakingPeer {
timestamp: time::Instant,
timestamp: Instant,
}
/// Peer information
@@ -166,9 +167,9 @@ struct HandshakingPeer {
struct Peer<B: BlockT, H: ExHashT> {
info: PeerInfo<B>,
/// Current block request, if any.
block_request: Option<(time::Instant, message::BlockRequest<B>)>,
block_request: Option<(Instant, message::BlockRequest<B>)>,
/// Requests we are no longer insterested in.
obsolete_requests: HashMap<message::RequestId, time::Instant>,
obsolete_requests: HashMap<message::RequestId, Instant>,
/// Holds a set of transactions known to this peer.
known_extrinsics: LruHashSet<H>,
/// Holds a set of blocks known to this peer.
@@ -701,7 +702,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
/// Called when a new peer is connected
pub fn on_peer_connected(&mut self, who: PeerId) {
trace!(target: "sync", "Connecting {}", who);
self.handshaking_peers.insert(who.clone(), HandshakingPeer { timestamp: time::Instant::now() });
self.handshaking_peers.insert(who.clone(), HandshakingPeer { timestamp: Instant::now() });
self.send_status(who);
}
@@ -890,7 +891,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
}
fn maintain_peers(&mut self) {
let tick = time::Instant::now();
let tick = Instant::now();
let mut aborting = Vec::new();
{
for (who, peer) in self.context_data.peers.iter() {
@@ -1833,7 +1834,7 @@ fn send_request<B: BlockT, H: ExHashT>(
trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who);
peer.obsolete_requests.insert(request.id, timestamp);
}
peer.block_request = Some((time::Instant::now(), r.clone()));
peer.block_request = Some((Instant::now(), r.clone()));
}
}
send_message::<B>(behaviour, stats, who, message)
@@ -26,7 +26,8 @@ use log::{debug, error, trace, warn};
use rand::distributions::{Distribution as _, Uniform};
use smallvec::SmallVec;
use std::{borrow::Cow, collections::hash_map::Entry, cmp, error, marker::PhantomData, mem, pin::Pin};
use std::time::{Duration, Instant};
use std::time::Duration;
use wasm_timer::Instant;
use std::task::{Context, Poll};
/// Network behaviour that handles opening substreams for custom protocols with other nodes.
@@ -387,7 +388,7 @@ impl<TSubstream> LegacyProto<TSubstream> {
debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \
until {:?}", occ_entry.key(), until);
*occ_entry.into_mut() = PeerState::PendingRequest {
timer: futures_timer::Delay::new_at(until.clone()),
timer: futures_timer::Delay::new(until.clone() - Instant::now()),
timer_deadline: until.clone(),
};
},
@@ -406,7 +407,7 @@ impl<TSubstream> LegacyProto<TSubstream> {
*occ_entry.into_mut() = PeerState::DisabledPendingEnable {
connected_point: connected_point.clone(),
open,
timer: futures_timer::Delay::new_at(banned.clone()),
timer: futures_timer::Delay::new(banned.clone() - Instant::now()),
timer_deadline: banned.clone(),
};
},
@@ -348,13 +348,12 @@ where
ProtocolState::Init { substreams, mut init_deadline } => {
match Pin::new(&mut init_deadline).poll(cx) {
Poll::Ready(Ok(())) => {
Poll::Ready(()) => {
init_deadline = Delay::new(Duration::from_secs(60));
error!(target: "sub-libp2p", "Handler initialization process is too long \
with {:?}", self.remote_peer_id)
},
Poll::Pending => {}
Poll::Ready(Err(_)) => error!(target: "sub-libp2p", "Tokio timer has errored")
}
self.state = ProtocolState::Init { substreams, init_deadline };
@@ -363,7 +362,7 @@ where
ProtocolState::Opening { mut deadline } => {
match Pin::new(&mut deadline).poll(cx) {
Poll::Ready(Ok(())) => {
Poll::Ready(()) => {
deadline = Delay::new(Duration::from_secs(60));
let event = CustomProtoHandlerOut::ProtocolError {
is_severe: true,
@@ -376,12 +375,6 @@ where
self.state = ProtocolState::Opening { deadline };
None
},
Poll::Ready(Err(_)) => {
error!(target: "sub-libp2p", "Tokio timer has errored");
deadline = Delay::new(Duration::from_secs(60));
self.state = ProtocolState::Opening { deadline };
None
},
}
}
@@ -409,7 +409,7 @@ fn reconnect_after_disconnect() {
_ => panic!()
}
if let Poll::Ready(Ok(_)) = delay.poll_unpin(cx) {
if let Poll::Ready(()) = delay.poll_unpin(cx) {
Poll::Ready(Ok(()))
} else {
Poll::Pending
@@ -21,7 +21,8 @@
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::{Instant, Duration};
use std::time::Duration;
use wasm_timer::Instant;
use log::{trace, info};
use futures::channel::oneshot::{Sender as OneShotSender};
use linked_hash_map::{Entry, LinkedHashMap};
@@ -21,7 +21,8 @@ use libp2p::PeerId;
use log::{debug, trace, warn};
use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
use std::collections::{HashMap, HashSet, VecDeque};
use std::time::{Duration, Instant};
use std::time::Duration;
use wasm_timer::Instant;
// Time to wait before trying to get the same extra data from the same peer.
const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10);
+11 -4
View File
@@ -118,11 +118,18 @@ pub fn build_transport(
core::upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1)
.map_ok(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer)))
})
});
.timeout(Duration::from_secs(20))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.boxed();
let transport = if cfg!(not(target_os = "unknown")) {
transport
.timeout(Duration::from_secs(20))
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.boxed()
} else {
transport
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.boxed()
};
(transport, sinks)
}