Rewrite network protocol/service to use channels (#1340)

* rewrite network protocol/service to use channels

* remove use of unwrap

* re-introduce with_spec

* remove unnecessary mut

* remove unused param

* improve with_spec, add with_gossip

* rename job to task

* style: re-add comma

* remove extra string allocs

* rename use of channel

* turn TODO into FIXME

* remove mut in match

* remove Self in new

* pass headers by value to network service

* remove network sender from service

* remove TODO

* better expect

* rationalize use of network sender in ondemand
This commit is contained in:
Gregory Terzian
2019-02-06 19:54:02 +08:00
committed by Bastian Köcher
parent 8aae19e2db
commit a2d2ed69ab
19 changed files with 1314 additions and 903 deletions
+1
View File
@@ -3740,6 +3740,7 @@ name = "substrate-network"
version = "0.1.0"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-channel 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
+2 -4
View File
@@ -780,9 +780,7 @@ mod tests {
let environ = Arc::new(DummyFactory(client.clone()));
import_notifications.push(
client.import_notification_stream()
.take_while(|n| {
Ok(!(n.origin != BlockOrigin::Own && n.header.number() < &5))
})
.take_while(|n| Ok(!(n.origin != BlockOrigin::Own && n.header.number() < &5)))
.for_each(move |_| Ok(()))
);
@@ -816,7 +814,7 @@ mod tests {
let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL)
.for_each(move |_| {
net.lock().send_import_notifications();
net.lock().sync();
net.lock().route_fast();
Ok(())
})
.map(|_| ())
+21 -11
View File
@@ -84,7 +84,7 @@ extern crate env_logger;
extern crate parity_codec_derive;
use futures::prelude::*;
use futures::sync::mpsc;
use futures::sync::{self, mpsc};
use client::{
BlockchainEvents, CallExecutor, Client, backend::Backend,
error::Error as ClientError,
@@ -249,18 +249,18 @@ pub trait Network<Block: BlockT>: Clone {
}
/// Bridge between NetworkService, gossiping consensus messages and Grandpa
pub struct NetworkBridge<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT> {
service: Arc<NetworkService<B, S, H>>
pub struct NetworkBridge<B: BlockT, S: network::specialization::NetworkSpecialization<B>> {
service: Arc<NetworkService<B, S>>
}
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT> NetworkBridge<B, S, H> {
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>> NetworkBridge<B, S> {
/// Create a new NetworkBridge to the given NetworkService
pub fn new(service: Arc<NetworkService<B, S, H>>) -> Self {
pub fn new(service: Arc<NetworkService<B, S>>) -> Self {
NetworkBridge { service }
}
}
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT> Clone for NetworkBridge<B, S, H> {
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>,> Clone for NetworkBridge<B, S> {
fn clone(&self) -> Self {
NetworkBridge {
service: Arc::clone(&self.service)
@@ -276,10 +276,15 @@ fn commit_topic<B: BlockT>(set_id: u64) -> B::Hash {
<<B::Header as HeaderT>::Hashing as HashT>::hash(format!("{}-COMMITS", set_id).as_bytes())
}
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT> Network<B> for NetworkBridge<B, S, H> {
impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>,> Network<B> for NetworkBridge<B, S> {
type In = mpsc::UnboundedReceiver<ConsensusMessage>;
fn messages_for(&self, round: u64, set_id: u64) -> Self::In {
self.service.consensus_gossip().write().messages_for(message_topic::<B>(round, set_id))
let (tx, rx) = sync::oneshot::channel();
self.service.with_gossip(move |gossip, _| {
let inner_rx = gossip.messages_for(message_topic::<B>(round, set_id));
let _ = tx.send(inner_rx);
});
rx.wait().ok().expect("1. Network is running, 2. it should handle the above closure successfully")
}
fn send_message(&self, round: u64, set_id: u64, message: Vec<u8>) {
@@ -289,16 +294,21 @@ impl<B: BlockT, S: network::specialization::NetworkSpecialization<B>, H: ExHashT
fn drop_round_messages(&self, round: u64, set_id: u64) {
let topic = message_topic::<B>(round, set_id);
self.service.consensus_gossip().write().collect_garbage_for_topic(topic);
self.service.with_gossip(move |gossip, _| gossip.collect_garbage(|t| t == &topic));
}
fn drop_set_messages(&self, set_id: u64) {
let topic = commit_topic::<B>(set_id);
self.service.consensus_gossip().write().collect_garbage_for_topic(topic);
self.service.with_gossip(move |gossip, _| gossip.collect_garbage(|t| t == &topic));
}
fn commit_messages(&self, set_id: u64) -> Self::In {
self.service.consensus_gossip().write().messages_for(commit_topic::<B>(set_id))
let (tx, rx) = sync::oneshot::channel();
self.service.with_gossip(move |gossip, _| {
let inner_rx = gossip.messages_for(commit_topic::<B>(set_id));
let _ = tx.send(inner_rx);
});
rx.wait().ok().expect("1. Network is running, 2. it should handle the above closure successfully")
}
fn send_commit(&self, _round: u64, set_id: u64, message: Vec<u8>) {
+9 -19
View File
@@ -151,10 +151,7 @@ impl MessageRouting {
fn drop_messages(&self, topic: Hash) {
let inner = self.inner.lock();
let peer = inner.peer(self.peer_id);
let mut gossip = peer.consensus_gossip().write();
peer.with_spec(move |_, _| {
gossip.collect_garbage_for_topic(topic);
});
peer.consensus_gossip_collect_garbage_for(topic);
}
}
@@ -192,10 +189,7 @@ impl Network<Block> for MessageRouting {
fn messages_for(&self, round: u64, set_id: u64) -> Self::In {
let inner = self.inner.lock();
let peer = inner.peer(self.peer_id);
let mut gossip = peer.consensus_gossip().write();
let messages = peer.with_spec(move |_, _| {
gossip.messages_for(make_topic(round, set_id))
});
let messages = peer.consensus_gossip_messages_for(make_topic(round, set_id));
let messages = messages.map_err(
move |_| panic!("Messages for round {} dropped too early", round)
@@ -205,9 +199,8 @@ impl Network<Block> for MessageRouting {
}
fn send_message(&self, round: u64, set_id: u64, message: Vec<u8>) {
let mut inner = self.inner.lock();
let inner = self.inner.lock();
inner.peer(self.peer_id).gossip_message(make_topic(round, set_id), message, false);
inner.route_until_complete();
}
fn drop_round_messages(&self, round: u64, set_id: u64) {
@@ -223,10 +216,7 @@ impl Network<Block> for MessageRouting {
fn commit_messages(&self, set_id: u64) -> Self::In {
let inner = self.inner.lock();
let peer = inner.peer(self.peer_id);
let mut gossip = peer.consensus_gossip().write();
let messages = peer.with_spec(move |_, _| {
gossip.messages_for(make_commit_topic(set_id))
});
let messages = peer.consensus_gossip_messages_for(make_commit_topic(set_id));
let messages = messages.map_err(
move |_| panic!("Commit messages for set {} dropped too early", set_id)
@@ -236,9 +226,8 @@ impl Network<Block> for MessageRouting {
}
fn send_commit(&self, _round: u64, set_id: u64, message: Vec<u8>) {
let mut inner = self.inner.lock();
let inner = self.inner.lock();
inner.peer(self.peer_id).gossip_message(make_commit_topic(set_id), message, false);
inner.route_until_complete();
}
fn announce(&self, _round: u64, _set_id: u64, _block: H256) {
@@ -420,7 +409,7 @@ fn run_to_completion(blocks: u64, net: Arc<Mutex<GrandpaTestNet>>, peers: &[Keyr
.map_err(|_| ());
let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL)
.for_each(move |_| { net.lock().route_until_complete(); Ok(()) })
.for_each(move |_| { net.lock().route_fast(); Ok(()) })
.map(|_| ())
.map_err(|_| ());
@@ -506,7 +495,7 @@ fn finalize_3_voters_1_observer() {
.map_err(|_| ());
let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL)
.for_each(move |_| { net.lock().route_until_complete(); Ok(()) })
.for_each(move |_| { net.lock().route_fast(); Ok(()) })
.map(|_| ())
.map_err(|_| ());
@@ -667,6 +656,7 @@ fn transition_3_voters_twice_1_observer() {
.for_each(move |_| {
net.lock().send_import_notifications();
net.lock().send_finality_notifications();
net.lock().route_fast();
Ok(())
})
.map(|_| ())
@@ -776,7 +766,7 @@ fn sync_justifications_on_change_blocks() {
// the last peer should get the justification by syncing from other peers
assert!(net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none());
while net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() {
net.lock().sync_steps(100);
net.lock().route_fast();
}
}
@@ -154,6 +154,8 @@ pub enum ServiceEvent {
protocol: ProtocolId,
/// Version of the protocol that was opened.
version: u8,
/// Node debug info
debug_info: String,
},
/// A custom protocol substream has been closed.
@@ -162,6 +164,8 @@ pub enum ServiceEvent {
node_index: NodeIndex,
/// Protocol that has been closed.
protocol: ProtocolId,
/// Node debug info
debug_info: String,
},
/// Custom protocol substreams have been closed.
@@ -172,6 +176,8 @@ pub enum ServiceEvent {
node_index: NodeIndex,
/// Protocols that have been closed.
protocols: Vec<ProtocolId>,
/// Node debug info
debug_info: String,
},
/// Receives a message on a custom protocol stream.
@@ -348,6 +354,15 @@ impl Service {
}
}
/// Get debug info for a given peer.
pub fn peer_debug_info(&self, who: NodeIndex) -> String {
if let (Some(peer_id), Some(addr)) = (self.peer_id_of_node(who), self.node_endpoint(who)) {
format!("{:?} through {:?}", peer_id, addr)
} else {
"unknown".to_string()
}
}
/// Returns the `NodeIndex` of a peer, or assigns one if none exists.
fn index_of_peer_or_assign(&mut self, peer: PeerId, endpoint: ConnectedPoint) -> NodeIndex {
match self.index_by_id.entry(peer) {
@@ -385,6 +400,7 @@ impl Service {
node_index,
protocol: protocol_id,
version,
debug_info: self.peer_debug_info(node_index),
})))
}
Ok(Async::Ready(Some(BehaviourOut::CustomProtocolClosed { protocol_id, peer_id, result }))) => {
@@ -393,6 +409,7 @@ impl Service {
break Ok(Async::Ready(Some(ServiceEvent::ClosedCustomProtocol {
node_index,
protocol: protocol_id,
debug_info: self.peer_debug_info(node_index),
})))
}
Ok(Async::Ready(Some(BehaviourOut::CustomMessage { protocol_id, peer_id, data }))) => {
+6 -6
View File
@@ -96,21 +96,21 @@ impl NetworkConfiguration {
}
/// The severity of misbehaviour of a peer that is reported.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Severity<'a> {
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Severity {
/// Peer is timing out. Could be bad connectivity or overload of work on either of our sides.
Timeout,
/// Peer has been notably useless. E.g. unable to answer a request that we might reasonably consider
/// it could answer.
Useless(&'a str),
Useless(String),
/// Peer has behaved in an invalid manner. This doesn't necessarily need to be Byzantine, but peer
/// must have taken concrete action in order to behave in such a way which is wantonly invalid.
Bad(&'a str),
Bad(String),
}
impl<'a> fmt::Display for Severity<'a> {
impl fmt::Display for Severity {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
match self {
Severity::Timeout => write!(fmt, "Timeout"),
Severity::Useless(r) => write!(fmt, "Useless ({})", r),
Severity::Bad(r) => write!(fmt, "Bad ({})", r),
+1
View File
@@ -8,6 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[lib]
[dependencies]
crossbeam-channel = "0.3.6"
log = "0.4"
parking_lot = "0.7.1"
error-chain = "0.12"
-79
View File
@@ -1,79 +0,0 @@
// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use parking_lot::Mutex;
use network_libp2p::{Service, Severity, NodeIndex, PeerId, ProtocolId};
use std::sync::Arc;
/// IO interface for the syncing handler.
/// Provides peer connection management and an interface to the blockchain client.
pub trait SyncIo {
/// Report a peer for misbehaviour.
fn report_peer(&mut self, who: NodeIndex, reason: Severity);
/// Send a packet to a peer.
fn send(&mut self, who: NodeIndex, data: Vec<u8>);
/// Returns peer identifier string
fn peer_debug_info(&self, who: NodeIndex) -> String {
who.to_string()
}
/// Returns information on p2p session
fn peer_id(&self, who: NodeIndex) -> Option<PeerId>;
}
/// Wraps the network service.
pub struct NetSyncIo<'s> {
network: &'s Arc<Mutex<Service>>,
protocol: ProtocolId,
}
impl<'s> NetSyncIo<'s> {
/// Creates a new instance.
pub fn new(network: &'s Arc<Mutex<Service>>, protocol: ProtocolId) -> NetSyncIo<'s> {
NetSyncIo {
network,
protocol,
}
}
}
impl<'s> SyncIo for NetSyncIo<'s> {
fn report_peer(&mut self, who: NodeIndex, reason: Severity) {
info!("Purposefully dropping {} ; reason: {:?}", who, reason);
match reason {
Severity::Bad(_) => self.network.lock().ban_node(who),
Severity::Useless(_) => self.network.lock().drop_node(who),
Severity::Timeout => self.network.lock().drop_node(who),
}
}
fn send(&mut self, who: NodeIndex, data: Vec<u8>) {
self.network.lock().send_custom_message(who, self.protocol, data)
}
fn peer_id(&self, who: NodeIndex) -> Option<PeerId> {
let net = self.network.lock();
net.peer_id_of_node(who).cloned()
}
fn peer_debug_info(&self, who: NodeIndex) -> String {
let net = self.network.lock();
if let (Some(peer_id), Some(addr)) = (net.peer_id_of_node(who), net.node_endpoint(who)) {
format!("{:?} through {:?}", peer_id, addr)
} else {
"unknown".to_string()
}
}
}
+3 -2
View File
@@ -20,6 +20,8 @@
//! Substrate-specific P2P networking: synchronizing blocks, propagating BFT messages.
//! Allows attachment of an optional subprotocol for chain-specific requests.
#[macro_use]
extern crate crossbeam_channel;
extern crate linked_hash_map;
extern crate lru_cache;
extern crate parking_lot;
@@ -51,7 +53,6 @@ mod service;
mod sync;
#[macro_use]
mod protocol;
mod io;
mod chain;
mod blocks;
mod on_demand;
@@ -65,7 +66,7 @@ pub mod specialization;
pub mod test;
pub use chain::Client as ClientHandle;
pub use service::{Service, FetchFuture, TransactionPool, ManageNetwork, SyncProvider, ExHashT};
pub use service::{Service, FetchFuture, TransactionPool, ManageNetwork, NetworkMsg, SyncProvider, ExHashT};
pub use protocol::{ProtocolStatus, PeerInfo, Context};
pub use sync::{Status as SyncStatus, SyncState};
pub use network_libp2p::{
+118 -112
View File
@@ -16,22 +16,22 @@
//! On-demand requests service.
use codec::Encode;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Weak};
use std::sync::Arc;
use std::time::{Instant, Duration};
use futures::{Async, Future, Poll};
use futures::sync::oneshot::{channel, Receiver, Sender};
use futures::sync::oneshot::{channel, Receiver, Sender as OneShotSender};
use linked_hash_map::LinkedHashMap;
use linked_hash_map::Entry;
use parking_lot::Mutex;
use client::{error::{Error as ClientError, ErrorKind as ClientErrorKind}};
use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest,
RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof};
use io::SyncIo;
use message;
use network_libp2p::{Severity, NodeIndex};
use config::Roles;
use service;
use service::{NetworkChan, NetworkMsg};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
/// Remote request timeout.
@@ -51,35 +51,34 @@ pub trait OnDemandService<Block: BlockT>: Send + Sync {
fn on_disconnect(&self, peer: NodeIndex);
/// Maintain peers requests.
fn maintain_peers(&self, io: &mut SyncIo);
fn maintain_peers(&self);
/// When header response is received from remote node.
fn on_remote_header_response(
&self,
io: &mut SyncIo,
peer: NodeIndex,
response: message::RemoteHeaderResponse<Block::Header>
);
/// When read response is received from remote node.
fn on_remote_read_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteReadResponse);
fn on_remote_read_response(&self, peer: NodeIndex, response: message::RemoteReadResponse);
/// When call response is received from remote node.
fn on_remote_call_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteCallResponse);
fn on_remote_call_response(&self, peer: NodeIndex, response: message::RemoteCallResponse);
/// When changes response is received from remote node.
fn on_remote_changes_response(
&self,
io: &mut SyncIo,
peer: NodeIndex,
response: message::RemoteChangesResponse<NumberFor<Block>, Block::Hash>
);
}
/// On-demand requests service. Dispatches requests to appropriate peers.
pub struct OnDemand<B: BlockT, E: service::ExecuteInContext<B>> {
core: Mutex<OnDemandCore<B, E>>,
pub struct OnDemand<B: BlockT> {
core: Mutex<OnDemandCore<B>>,
checker: Arc<FetchChecker<B>>,
network_sender: Mutex<Option<NetworkChan>>,
}
/// On-demand remote call response.
@@ -88,8 +87,7 @@ pub struct RemoteResponse<T> {
}
#[derive(Default)]
struct OnDemandCore<B: BlockT, E: service::ExecuteInContext<B>> {
service: Weak<E>,
struct OnDemandCore<B: BlockT> {
next_request_id: u64,
pending_requests: VecDeque<Request<B>>,
active_peers: LinkedHashMap<NodeIndex, Request<B>>,
@@ -105,10 +103,10 @@ struct Request<Block: BlockT> {
}
enum RequestData<Block: BlockT> {
RemoteHeader(RemoteHeaderRequest<Block::Header>, Sender<Result<Block::Header, ClientError>>),
RemoteRead(RemoteReadRequest<Block::Header>, Sender<Result<Option<Vec<u8>>, ClientError>>),
RemoteCall(RemoteCallRequest<Block::Header>, Sender<Result<Vec<u8>, ClientError>>),
RemoteChanges(RemoteChangesRequest<Block::Header>, Sender<Result<Vec<(NumberFor<Block>, u32)>, ClientError>>),
RemoteHeader(RemoteHeaderRequest<Block::Header>, OneShotSender<Result<Block::Header, ClientError>>),
RemoteRead(RemoteReadRequest<Block::Header>, OneShotSender<Result<Option<Vec<u8>>, ClientError>>),
RemoteCall(RemoteCallRequest<Block::Header>, OneShotSender<Result<Vec<u8>, ClientError>>),
RemoteChanges(RemoteChangesRequest<Block::Header>, OneShotSender<Result<Vec<(NumberFor<Block>, u32)>, ClientError>>),
}
enum Accept<Block: BlockT> {
@@ -132,16 +130,15 @@ impl<T> Future for RemoteResponse<T> {
}
}
impl<B: BlockT, E> OnDemand<B, E> where
E: service::ExecuteInContext<B>,
impl<B: BlockT> OnDemand<B> where
B::Header: HeaderT,
{
/// Creates new on-demand service.
pub fn new(checker: Arc<FetchChecker<B>>) -> Self {
OnDemand {
checker,
network_sender: Mutex::new(None),
core: Mutex::new(OnDemandCore {
service: Weak::new(),
next_request_id: 0,
pending_requests: VecDeque::new(),
active_peers: LinkedHashMap::new(),
@@ -152,25 +149,34 @@ impl<B: BlockT, E> OnDemand<B, E> where
}
/// Sets weak reference to network service.
pub fn set_service_link(&self, service: Weak<E>) {
self.core.lock().service = service;
pub fn set_network_sender(&self, network_sender: NetworkChan) {
self.network_sender.lock().replace(network_sender);
}
fn send(&self, msg: NetworkMsg) {
let _ = self.network_sender
.lock()
.as_ref()
.expect("1. OnDemand is passed a network sender upon initialization of the service, 2. it should bet set by now")
.send(msg);
}
/// Schedule && dispatch all scheduled requests.
fn schedule_request<R>(&self, retry_count: Option<usize>, data: RequestData<B>, result: R) -> R {
let mut core = self.core.lock();
core.insert(retry_count.unwrap_or(RETRY_COUNT), data);
core.dispatch();
core.dispatch(self);
result
}
/// Try to accept response from given peer.
fn accept_response<F: FnOnce(Request<B>) -> Accept<B>>(&self, rtype: &str, io: &mut SyncIo, peer: NodeIndex, request_id: u64, try_accept: F) {
fn accept_response<F: FnOnce(Request<B>) -> Accept<B>>(&self, rtype: &str, peer: NodeIndex, request_id: u64, try_accept: F) {
let mut core = self.core.lock();
let request = match core.remove(peer, request_id) {
Some(request) => request,
None => {
io.report_peer(peer, Severity::Bad(&format!("Invalid remote {} response from peer", rtype)));
let reason = format!("Invalid remote {} response from peer", rtype);
self.send(NetworkMsg::ReportPeer(peer, Severity::Bad(reason)));
core.remove_peer(peer);
return;
},
@@ -180,7 +186,8 @@ impl<B: BlockT, E> OnDemand<B, E> where
let (retry_count, retry_request_data) = match try_accept(request) {
Accept::Ok => (retry_count, None),
Accept::CheckFailed(error, retry_request_data) => {
io.report_peer(peer, Severity::Bad(&format!("Failed to check remote {} response from peer: {}", rtype, error)));
let reason = format!("Failed to check remote {} response from peer: {}", rtype, error);
self.send(NetworkMsg::ReportPeer(peer, Severity::Bad(reason)));
core.remove_peer(peer);
if retry_count > 0 {
@@ -192,7 +199,8 @@ impl<B: BlockT, E> OnDemand<B, E> where
}
},
Accept::Unexpected(retry_request_data) => {
io.report_peer(peer, Severity::Bad(&format!("Unexpected response to remote {} from peer", rtype)));
let reason = format!("Unexpected response to remote {} from peer", rtype);
self.send(NetworkMsg::ReportPeer(peer, Severity::Bad(reason)));
core.remove_peer(peer);
(retry_count, Some(retry_request_data))
@@ -203,13 +211,12 @@ impl<B: BlockT, E> OnDemand<B, E> where
core.insert(retry_count, request_data);
}
core.dispatch();
core.dispatch(self);
}
}
impl<B, E> OnDemandService<B> for OnDemand<B, E> where
impl<B> OnDemandService<B> for OnDemand<B> where
B: BlockT,
E: service::ExecuteInContext<B>,
B::Header: HeaderT,
{
fn on_connect(&self, peer: NodeIndex, role: Roles, best_number: NumberFor<B>) {
@@ -219,31 +226,31 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
let mut core = self.core.lock();
core.add_peer(peer, best_number);
core.dispatch();
core.dispatch(self);
}
fn on_block_announce(&self, peer: NodeIndex, best_number: NumberFor<B>) {
let mut core = self.core.lock();
core.update_peer(peer, best_number);
core.dispatch();
core.dispatch(self);
}
fn on_disconnect(&self, peer: NodeIndex) {
let mut core = self.core.lock();
core.remove_peer(peer);
core.dispatch();
core.dispatch(self);
}
fn maintain_peers(&self, io: &mut SyncIo) {
fn maintain_peers(&self) {
let mut core = self.core.lock();
for bad_peer in core.maintain_peers() {
io.report_peer(bad_peer, Severity::Timeout);
self.send(NetworkMsg::ReportPeer(bad_peer, Severity::Timeout));
}
core.dispatch();
core.dispatch(self);
}
fn on_remote_header_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteHeaderResponse<B::Header>) {
self.accept_response("header", io, peer, response.id, |request| match request.data {
fn on_remote_header_response(&self, peer: NodeIndex, response: message::RemoteHeaderResponse<B::Header>) {
self.accept_response("header", peer, response.id, |request| match request.data {
RequestData::RemoteHeader(request, sender) => match self.checker.check_header_proof(&request, response.header, response.proof) {
Ok(response) => {
// we do not bother if receiver has been dropped already
@@ -256,8 +263,8 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
})
}
fn on_remote_read_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteReadResponse) {
self.accept_response("read", io, peer, response.id, |request| match request.data {
fn on_remote_read_response(&self, peer: NodeIndex, response: message::RemoteReadResponse) {
self.accept_response("read", peer, response.id, |request| match request.data {
RequestData::RemoteRead(request, sender) => match self.checker.check_read_proof(&request, response.proof) {
Ok(response) => {
// we do not bother if receiver has been dropped already
@@ -270,8 +277,8 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
})
}
fn on_remote_call_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteCallResponse) {
self.accept_response("call", io, peer, response.id, |request| match request.data {
fn on_remote_call_response(&self, peer: NodeIndex, response: message::RemoteCallResponse) {
self.accept_response("call", peer, response.id, |request| match request.data {
RequestData::RemoteCall(request, sender) => match self.checker.check_execution_proof(&request, response.proof) {
Ok(response) => {
// we do not bother if receiver has been dropped already
@@ -284,8 +291,8 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
})
}
fn on_remote_changes_response(&self, io: &mut SyncIo, peer: NodeIndex, response: message::RemoteChangesResponse<NumberFor<B>, B::Hash>) {
self.accept_response("changes", io, peer, response.id, |request| match request.data {
fn on_remote_changes_response(&self, peer: NodeIndex, response: message::RemoteChangesResponse<NumberFor<B>, B::Hash>) {
self.accept_response("changes", peer, response.id, |request| match request.data {
RequestData::RemoteChanges(request, sender) => match self.checker.check_changes_proof(
&request, ChangesProof {
max_block: response.max,
@@ -305,9 +312,8 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
}
}
impl<B, E> Fetcher<B> for OnDemand<B, E> where
impl<B> Fetcher<B> for OnDemand<B> where
B: BlockT,
E: service::ExecuteInContext<B>,
B::Header: HeaderT,
{
type RemoteHeaderResult = RemoteResponse<B::Header>;
@@ -340,9 +346,8 @@ impl<B, E> Fetcher<B> for OnDemand<B, E> where
}
}
impl<B, E> OnDemandCore<B, E> where
impl<B> OnDemandCore<B> where
B: BlockT,
E: service::ExecuteInContext<B>,
B::Header: HeaderT,
{
pub fn add_peer(&mut self, peer: NodeIndex, best_number: NumberFor<B>) {
@@ -407,11 +412,7 @@ impl<B, E> OnDemandCore<B, E> where
}
}
pub fn dispatch(&mut self) {
let service = match self.service.upgrade() {
Some(service) => service,
None => return,
};
pub fn dispatch(&mut self, on_demand: &OnDemand<B>) {
let mut last_peer = self.idle_peers.back().cloned();
let mut unhandled_requests = VecDeque::new();
@@ -457,8 +458,7 @@ impl<B, E> OnDemandCore<B, E> where
let mut request = self.pending_requests.pop_front().expect("checked in loop condition; qed");
request.timestamp = Instant::now();
trace!(target: "sync", "Dispatching remote request {} to peer {}", request.id, peer);
service.execute_in_context(|ctx| ctx.send_message(peer, request.message()));
on_demand.send(NetworkMsg::Outgoing(peer, request.message().encode()));
self.active_peers.insert(peer, request);
}
@@ -523,30 +523,23 @@ impl<Block: BlockT> RequestData<Block> {
#[cfg(test)]
pub mod tests {
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::Instant;
use futures::Future;
use parking_lot::RwLock;
use runtime_primitives::traits::NumberFor;
use client::{error::{ErrorKind as ClientErrorKind, Result as ClientResult}};
use client::light::fetcher::{Fetcher, FetchChecker, RemoteHeaderRequest,
RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof};
use config::Roles;
use message;
use network_libp2p::NodeIndex;
use service::ExecuteInContext;
use test::TestIo;
use network_libp2p::{NodeIndex, ProtocolId, Severity};
use service::{network_channel, NetworkPort, NetworkMsg};
use super::{REQUEST_TIMEOUT, OnDemand, OnDemandService};
use test_client::runtime::{changes_trie_config, Block, Header};
pub struct DummyExecutor;
struct DummyFetchChecker { ok: bool }
impl ExecuteInContext<Block> for DummyExecutor {
fn execute_in_context<F: Fn(&mut ::protocol::Context<Block>)>(&self, _closure: F) {}
}
impl FetchChecker<Block> for DummyFetchChecker {
fn check_header_proof(
&self,
@@ -582,20 +575,19 @@ pub mod tests {
}
}
fn dummy(ok: bool) -> (Arc<DummyExecutor>, Arc<OnDemand<Block, DummyExecutor>>) {
fn dummy(ok: bool) -> (Arc<DummyExecutor>, Arc<OnDemand<Block>>) {
let executor = Arc::new(DummyExecutor);
let service = Arc::new(OnDemand::new(Arc::new(DummyFetchChecker { ok })));
service.set_service_link(Arc::downgrade(&executor));
(executor, service)
}
fn total_peers(on_demand: &OnDemand<Block, DummyExecutor>) -> usize {
fn total_peers(on_demand: &OnDemand<Block>) -> usize {
let core = on_demand.core.lock();
core.idle_peers.len() + core.active_peers.len()
}
fn receive_call_response(on_demand: &OnDemand<Block, DummyExecutor>, network: &mut TestIo, peer: NodeIndex, id: message::RequestId) {
on_demand.on_remote_call_response(network, peer, message::RemoteCallResponse {
fn receive_call_response(on_demand: &OnDemand<Block>, peer: NodeIndex, id: message::RequestId) {
on_demand.on_remote_call_response(peer, message::RemoteCallResponse {
id: id,
proof: vec![vec![2]],
});
@@ -611,6 +603,21 @@ pub mod tests {
}
}
fn assert_disconnected_peer(network_port: NetworkPort, expected_severity: Severity) {
let mut disconnect_count = 0;
while let Ok(msg) = network_port.receiver().try_recv() {
match msg {
NetworkMsg::ReportPeer(_, severity) => {
if severity == expected_severity {
disconnect_count = disconnect_count + 1;
}
},
_ => {},
}
}
assert_eq!(disconnect_count, 1);
}
#[test]
fn knows_about_peers_roles() {
let (_, on_demand) = dummy(true);
@@ -637,9 +644,8 @@ pub mod tests {
#[test]
fn disconnects_from_timeouted_peer() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
on_demand.on_connect(1, Roles::FULL, 1000);
assert_eq!(vec![0, 1], on_demand.core.lock().idle_peers.iter().cloned().collect::<Vec<_>>());
@@ -656,17 +662,17 @@ pub mod tests {
assert_eq!(vec![0], on_demand.core.lock().active_peers.keys().cloned().collect::<Vec<_>>());
on_demand.core.lock().active_peers[&0].timestamp = Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT;
on_demand.maintain_peers(&mut network);
on_demand.maintain_peers();
assert!(on_demand.core.lock().idle_peers.is_empty());
assert_eq!(vec![1], on_demand.core.lock().active_peers.keys().cloned().collect::<Vec<_>>());
assert!(network.to_disconnect.contains(&0));
assert_disconnected_peer(network_port, Severity::Timeout);
}
#[test]
fn disconnects_from_peer_on_response_with_wrong_id() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
on_demand.remote_call(RemoteCallRequest {
@@ -676,16 +682,16 @@ pub mod tests {
call_data: vec![],
retry_count: None,
});
receive_call_response(&*on_demand, &mut network, 0, 1);
assert!(network.to_disconnect.contains(&0));
receive_call_response(&*on_demand, 0, 1);
assert_disconnected_peer(network_port, Severity::Bad("Invalid remote call response from peer".to_string()));
assert_eq!(on_demand.core.lock().pending_requests.len(), 1);
}
#[test]
fn disconnects_from_peer_on_incorrect_response() {
let (_x, on_demand) = dummy(false);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.remote_call(RemoteCallRequest {
block: Default::default(),
header: dummy_header(),
@@ -695,27 +701,27 @@ pub mod tests {
});
on_demand.on_connect(0, Roles::FULL, 1000);
receive_call_response(&*on_demand, &mut network, 0, 0);
assert!(network.to_disconnect.contains(&0));
receive_call_response(&*on_demand, 0, 0);
assert_disconnected_peer(network_port, Severity::Bad("Failed to check remote call response from peer: Backend error: Test error".to_string()));
assert_eq!(on_demand.core.lock().pending_requests.len(), 1);
}
#[test]
fn disconnects_from_peer_on_unexpected_response() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
receive_call_response(&*on_demand, &mut network, 0, 0);
assert!(network.to_disconnect.contains(&0));
receive_call_response(&*on_demand, 0, 0);
assert_disconnected_peer(network_port, Severity::Bad("Invalid remote call response from peer".to_string()));
}
#[test]
fn disconnects_from_peer_on_wrong_response_type() {
let (_x, on_demand) = dummy(false);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
on_demand.remote_call(RemoteCallRequest {
@@ -726,11 +732,11 @@ pub mod tests {
retry_count: Some(1),
});
on_demand.on_remote_read_response(&mut network, 0, message::RemoteReadResponse {
on_demand.on_remote_read_response(0, message::RemoteReadResponse {
id: 0,
proof: vec![vec![2]],
});
assert!(network.to_disconnect.contains(&0));
assert_disconnected_peer(network_port, Severity::Bad("Unexpected response to remote read from peer".to_string()));
assert_eq!(on_demand.core.lock().pending_requests.len(), 1);
}
@@ -740,8 +746,8 @@ pub mod tests {
let retry_count = 2;
let (_x, on_demand) = dummy(false);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
for i in 0..retry_count+1 {
on_demand.on_connect(i, Roles::FULL, 1000);
}
@@ -767,7 +773,7 @@ pub mod tests {
for i in 0..retry_count+1 {
let mut current = current.lock();
*current = *current + 1;
receive_call_response(&*on_demand, &mut network, i, i as u64);
receive_call_response(&*on_demand, i, i as u64);
}
let mut finished_at = finished_at.lock();
@@ -780,8 +786,8 @@ pub mod tests {
#[test]
fn receives_remote_call_response() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
let response = on_demand.remote_call(RemoteCallRequest {
@@ -796,15 +802,15 @@ pub mod tests {
assert_eq!(result, vec![42]);
});
receive_call_response(&*on_demand, &mut network, 0, 0);
receive_call_response(&*on_demand, 0, 0);
thread.join().unwrap();
}
#[test]
fn receives_remote_read_response() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
let response = on_demand.remote_read(RemoteReadRequest {
@@ -818,7 +824,7 @@ pub mod tests {
assert_eq!(result, Some(vec![42]));
});
on_demand.on_remote_read_response(&mut network, 0, message::RemoteReadResponse {
on_demand.on_remote_read_response(0, message::RemoteReadResponse {
id: 0,
proof: vec![vec![2]],
});
@@ -828,8 +834,8 @@ pub mod tests {
#[test]
fn receives_remote_header_response() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
let response = on_demand.remote_header(RemoteHeaderRequest {
@@ -846,7 +852,7 @@ pub mod tests {
);
});
on_demand.on_remote_header_response(&mut network, 0, message::RemoteHeaderResponse {
on_demand.on_remote_header_response(0, message::RemoteHeaderResponse {
id: 0,
header: Some(Header {
parent_hash: Default::default(),
@@ -863,8 +869,8 @@ pub mod tests {
#[test]
fn receives_remote_changes_response() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(0, Roles::FULL, 1000);
let response = on_demand.remote_changes(RemoteChangesRequest {
@@ -881,7 +887,7 @@ pub mod tests {
assert_eq!(result, vec![(100, 2)]);
});
on_demand.on_remote_changes_response(&mut network, 0, message::RemoteChangesResponse {
on_demand.on_remote_changes_response(0, message::RemoteChangesResponse {
id: 0,
max: 1000,
proof: vec![vec![2]],
@@ -894,8 +900,8 @@ pub mod tests {
#[test]
fn does_not_sends_request_to_peer_who_has_no_required_block() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let mut network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.on_connect(1, Roles::FULL, 100);
@@ -930,7 +936,7 @@ pub mod tests {
assert!(!on_demand.core.lock().idle_peers.iter().any(|_| true));
assert_eq!(on_demand.core.lock().pending_requests.len(), 1);
on_demand.on_remote_header_response(&mut network, 1, message::RemoteHeaderResponse {
on_demand.on_remote_header_response(1, message::RemoteHeaderResponse {
id: 0,
header: Some(dummy_header()),
proof: vec![],
@@ -946,8 +952,8 @@ pub mod tests {
// loop forever after dispatching a request to the last peer, since the
// last peer was not updated
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let _network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.remote_header(RemoteHeaderRequest {
cht_root: Default::default(),
@@ -971,8 +977,8 @@ pub mod tests {
#[test]
fn tries_to_send_all_pending_requests() {
let (_x, on_demand) = dummy(true);
let queue = RwLock::new(VecDeque::new());
let _network = TestIo::new(&queue, None);
let (network_sender, _network_port) = network_channel(ProtocolId::default());
on_demand.set_network_sender(network_sender.clone());
on_demand.remote_header(RemoteHeaderRequest {
cht_root: Default::default(),
File diff suppressed because it is too large Load Diff
+290 -159
View File
@@ -17,30 +17,27 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::{io, thread};
use std::time::Duration;
use futures::{self, Future, Stream, stream, sync::oneshot};
use parking_lot::{Mutex, RwLock};
use futures::{Async, Future, Stream, stream, sync::oneshot};
use parking_lot::Mutex;
use network_libp2p::{ProtocolId, PeerId, NetworkConfiguration, NodeIndex, ErrorKind, Severity};
use network_libp2p::{start_service, Service as NetworkService, ServiceEvent as NetworkServiceEvent};
use network_libp2p::{RegisteredProtocol, parse_str_addr, Protocol as Libp2pProtocol};
use io::NetSyncIo;
use network_libp2p::{start_service, parse_str_addr, Service as NetworkService, ServiceEvent as NetworkServiceEvent};
use network_libp2p::{Protocol as Libp2pProtocol, RegisteredProtocol};
use consensus::import_queue::{ImportQueue, Link};
use consensus_gossip::ConsensusGossip;
use protocol::{self, Protocol, ProtocolContext, Context, ProtocolStatus, PeerInfo};
use protocol::{self, Context, Protocol, ProtocolMsg, ProtocolStatus, PeerInfo};
use codec::Decode;
use config::Params;
use crossbeam_channel::{self as channel, Receiver, Sender, TryRecvError};
use error::Error;
use specialization::NetworkSpecialization;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use sync::ChainSync;
use std::sync::Weak;
use tokio::{runtime::Runtime, timer::Interval};
use specialization::NetworkSpecialization;
use tokio::prelude::task::AtomicTask;
use tokio::runtime::Runtime;
/// Type that represents fetch completion future.
pub type FetchFuture = oneshot::Receiver<Vec<u8>>;
const TICK_TIMEOUT: Duration = Duration::from_millis(1000);
const PROPAGATE_TIMEOUT: Duration = Duration::from_millis(5000);
/// Sync status
pub trait SyncProvider<B: BlockT>: Send + Sync {
/// Get sync status
@@ -50,8 +47,14 @@ pub trait SyncProvider<B: BlockT>: Send + Sync {
}
/// Minimum Requirements for a Hash within Networking
pub trait ExHashT: ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static {}
impl<T> ExHashT for T where T: ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static {}
pub trait ExHashT:
::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
{
}
impl<T> ExHashT for T where
T: ::std::hash::Hash + Eq + ::std::fmt::Debug + Clone + Send + Sync + 'static
{
}
/// Transaction pool interface
pub trait TransactionPool<H: ExHashT, B: BlockT>: Send + Sync {
@@ -63,114 +66,98 @@ pub trait TransactionPool<H: ExHashT, B: BlockT>: Send + Sync {
fn on_broadcasted(&self, propagations: HashMap<H, Vec<String>>);
}
/// Service able to execute closure in the network context.
pub trait ExecuteInContext<B: BlockT>: Send + Sync {
/// Execute closure in network context.
fn execute_in_context<F: Fn(&mut Context<B>)>(&self, closure: F);
}
/// A link implementation that connects to the network.
pub struct NetworkLink<B: BlockT, E: ExecuteInContext<B>> {
/// The chain-sync handle
pub(crate) sync: Weak<RwLock<ChainSync<B>>>,
/// Network context.
pub(crate) context: Weak<E>,
pub struct NetworkLink<B: BlockT, S: NetworkSpecialization<B>> {
/// The protocol sender
pub(crate) protocol_sender: Sender<ProtocolMsg<B, S>>,
/// The network sender
pub(crate) network_sender: NetworkChan,
}
impl<B: BlockT, E: ExecuteInContext<B>> NetworkLink<B, E> {
/// Execute closure with locked ChainSync.
fn with_sync<F: Fn(&mut ChainSync<B>, &mut Context<B>)>(&self, closure: F) {
if let (Some(sync), Some(service)) = (self.sync.upgrade(), self.context.upgrade()) {
service.execute_in_context(move |protocol| {
let mut sync = sync.write();
closure(&mut *sync, protocol)
});
}
}
}
impl<B: BlockT, E: ExecuteInContext<B>> Link<B> for NetworkLink<B, E> {
impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
fn block_imported(&self, hash: &B::Hash, number: NumberFor<B>) {
self.with_sync(|sync, _| sync.block_imported(&hash, number))
let _ = self.protocol_sender.send(ProtocolMsg::BlockImportedSync(hash.clone(), number));
}
fn request_justification(&self, hash: &B::Hash, number: NumberFor<B>) {
self.with_sync(|sync, protocol| sync.request_justification(hash, number, protocol))
let _ = self.protocol_sender.send(ProtocolMsg::RequestJustification(hash.clone(), number));
}
fn maintain_sync(&self) {
self.with_sync(|sync, protocol| sync.maintain_sync(protocol))
let _ = self.protocol_sender.send(ProtocolMsg::MaintainSync);
}
fn useless_peer(&self, who: NodeIndex, reason: &str) {
trace!(target:"sync", "Useless peer {}, {}", who, reason);
self.with_sync(|_, protocol| protocol.report_peer(who, Severity::Useless(reason)))
self.network_sender.send(NetworkMsg::ReportPeer(who, Severity::Useless(reason.to_string())));
}
fn note_useless_and_restart_sync(&self, who: NodeIndex, reason: &str) {
trace!(target:"sync", "Bad peer {}, {}", who, reason);
self.with_sync(|sync, protocol| {
protocol.report_peer(who, Severity::Useless(reason)); // is this actually malign or just useless?
sync.restart(protocol);
})
// is this actually malign or just useless?
self.network_sender.send(NetworkMsg::ReportPeer(who, Severity::Useless(reason.to_string())));
let _ = self.protocol_sender.send(ProtocolMsg::RestartSync);
}
fn restart(&self) {
self.with_sync(|sync, protocol| sync.restart(protocol))
let _ = self.protocol_sender.send(ProtocolMsg::RestartSync);
}
}
/// Substrate network service. Handles network IO and manages connectivity.
pub struct Service<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> {
pub struct Service<B: BlockT + 'static, S: NetworkSpecialization<B>> {
/// Network service
network: Arc<Mutex<NetworkService>>,
/// Protocol handler
handler: Arc<Protocol<B, S, H>>,
/// Protocol ID.
protocol_id: ProtocolId,
/// Protocol sender
protocol_sender: Sender<ProtocolMsg<B, S>>,
/// Sender for messages to the background service task, and handle for the background thread.
/// Dropping the sender should close the task and the thread.
/// This is an `Option` because we need to extract it in the destructor.
bg_thread: Option<(oneshot::Sender<()>, thread::JoinHandle<()>)>,
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> Service<B, S, H> {
impl<B: BlockT + 'static, S: NetworkSpecialization<B>> Service<B, S> {
/// Creates and register protocol with the network service
pub fn new<I: 'static + ImportQueue<B>>(
pub fn new<I: 'static + ImportQueue<B>, H: ExHashT>(
params: Params<B, S, H>,
protocol_id: ProtocolId,
import_queue: Arc<I>,
) -> Result<Arc<Service<B, S, H>>, Error>
where I: ImportQueue<B>
{
let handler = Arc::new(Protocol::new(
) -> Result<(Arc<Service<B, S>>, NetworkChan), Error> {
let (network_chan, network_port) = network_channel(protocol_id);
let protocol_sender = Protocol::new(
network_chan.clone(),
params.config,
params.chain,
import_queue.clone(),
params.on_demand,
params.transaction_pool,
params.specialization,
)?);
)?;
let versions = [(protocol::CURRENT_VERSION as u8)];
let registered = RegisteredProtocol::new(protocol_id, &versions[..]);
let (thread, network) = start_thread(params.network_config, handler.clone(), registered)?;
let (thread, network) = start_thread(
protocol_sender.clone(),
network_port,
network_chan.clone(),
params.network_config,
registered,
)?;
let service = Arc::new(Service {
network,
protocol_id,
handler,
bg_thread: Some(thread)
protocol_sender: protocol_sender.clone(),
bg_thread: Some(thread),
});
// connect the import-queue to the network service.
let link = NetworkLink {
sync: Arc::downgrade(service.handler.sync()),
context: Arc::downgrade(&service),
protocol_sender,
network_sender: network_chan.clone(),
};
import_queue.start(link)?;
Ok(service)
Ok((service, network_chan))
}
/// Returns the downloaded bytes per second averaged over the past few seconds.
@@ -186,18 +173,22 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> Service<B, S,
}
/// Called when a new block is imported by the client.
pub fn on_block_imported(&self, hash: B::Hash, header: &B::Header) {
self.handler.on_block_imported(&mut NetSyncIo::new(&self.network, self.protocol_id), hash, header)
pub fn on_block_imported(&self, hash: B::Hash, header: B::Header) {
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockImported(hash, header));
}
/// Called when a new block is finalized by the client.
pub fn on_block_finalized(&self, hash: B::Hash, header: &B::Header) {
self.handler.on_block_finalized(&mut NetSyncIo::new(&self.network, self.protocol_id), hash, header)
pub fn on_block_finalized(&self, hash: B::Hash, header: B::Header) {
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockFinalized(hash, header));
}
/// Called when new transactons are imported by the client.
pub fn trigger_repropagate(&self) {
self.handler.propagate_extrinsics(&mut NetSyncIo::new(&self.network, self.protocol_id));
let _ = self.protocol_sender.send(ProtocolMsg::PropagateExtrinsics);
}
/// Make sure an important block is propagated to peers.
@@ -205,43 +196,60 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> Service<B, S,
/// In chain-based consensus, we often need to make sure non-best forks are
/// at least temporarily synced.
pub fn announce_block(&self, hash: B::Hash) {
self.handler.announce_block(&mut NetSyncIo::new(&self.network, self.protocol_id), hash);
let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(hash));
}
/// Send a consensus message through the gossip
pub fn gossip_consensus_message(&self, topic: B::Hash, message: Vec<u8>, broadcast: bool) {
self.handler.gossip_consensus_message(
&mut NetSyncIo::new(&self.network, self.protocol_id),
topic,
message,
broadcast,
)
}
/// Execute a closure with the chain-specific network specialization.
pub fn with_spec<F, U>(&self, f: F) -> U
where F: FnOnce(&mut S, &mut Context<B>) -> U
{
self.handler.with_spec(&mut NetSyncIo::new(&self.network, self.protocol_id), f)
let _ = self
.protocol_sender
.send(ProtocolMsg::GossipConsensusMessage(
topic, message, broadcast,
));
}
/// access the underlying consensus gossip handler
pub fn consensus_gossip<'a>(&'a self) -> &'a RwLock<ConsensusGossip<B>> {
self.handler.consensus_gossip()
/// Execute a closure with the chain-specific network specialization.
pub fn with_spec<F>(&self, f: F)
where F: FnOnce(&mut S, &mut Context<B>) + Send + 'static
{
let _ = self
.protocol_sender
.send(ProtocolMsg::ExecuteWithSpec(Box::new(f)));
}
/// Execute a closure with the consensus gossip.
pub fn with_gossip<F>(&self, f: F)
where F: FnOnce(&mut ConsensusGossip<B>, &mut Context<B>) + Send + 'static
{
let _ = self
.protocol_sender
.send(ProtocolMsg::ExecuteWithGossip(Box::new(f)));
}
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> ::consensus::SyncOracle for Service<B, S, H> {
impl<B: BlockT + 'static, S: NetworkSpecialization<B>> ::consensus::SyncOracle for Service<B, S> {
fn is_major_syncing(&self) -> bool {
self.handler.sync().read().status().is_major_syncing()
let (sender, port) = channel::unbounded();
let _ = self
.protocol_sender
.send(ProtocolMsg::IsMajorSyncing(sender));
port.recv().expect("1. Protocol keeps handling messages until all senders are dropped,
or the ProtocolMsg::Stop message is received,
2 Service keeps a sender to protocol, and the ProtocolMsg::Stop is never sent.")
}
fn is_offline(&self) -> bool {
self.handler.sync().read().status().is_offline()
let (sender, port) = channel::unbounded();
let _ = self
.protocol_sender
.send(ProtocolMsg::IsOffline(sender));
port.recv().expect("1. Protocol keeps handling messages until all senders are dropped,
or the ProtocolMsg::Stop message is received,
2 Service keeps a sender to protocol, and the ProtocolMsg::Stop is never sent.")
}
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H:ExHashT> Drop for Service<B, S, H> {
impl<B: BlockT + 'static, S: NetworkSpecialization<B>> Drop for Service<B, S> {
fn drop(&mut self) {
self.handler.stop();
if let Some((sender, join)) = self.bg_thread.take() {
let _ = sender.send(());
if let Err(e) = join.join() {
@@ -251,20 +259,22 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H:ExHashT> Drop for Servi
}
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> ExecuteInContext<B> for Service<B, S, H> {
fn execute_in_context<F: Fn(&mut ::protocol::Context<B>)>(&self, closure: F) {
closure(&mut ProtocolContext::new(self.handler.context_data(), &mut NetSyncIo::new(&self.network, self.protocol_id)))
}
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> SyncProvider<B> for Service<B, S, H> {
impl<B: BlockT + 'static, S: NetworkSpecialization<B>> SyncProvider<B> for Service<B, S> {
/// Get sync status
fn status(&self) -> ProtocolStatus<B> {
self.handler.status()
let (sender, port) = channel::unbounded();
let _ = self.protocol_sender.send(ProtocolMsg::Status(sender));
port.recv().expect("1. Protocol keeps handling messages until all senders are dropped,
or the ProtocolMsg::Stop message is received,
2 Service keeps a sender to protocol, and the ProtocolMsg::Stop is never sent.")
}
fn peers(&self) -> Vec<(NodeIndex, Option<PeerId>, PeerInfo<B>)> {
let peers = self.handler.peers();
let (sender, port) = channel::unbounded();
let _ = self.protocol_sender.send(ProtocolMsg::Peers(sender));
let peers = port.recv().expect("1. Protocol keeps handling messages until all senders are dropped,
or the ProtocolMsg::Stop message is received,
2 Service keeps a sender to protocol, and the ProtocolMsg::Stop is never sent.");
let network = self.network.lock();
peers.into_iter().map(|(idx, info)| {
(idx, network.peer_id_of_node(idx).map(|p| p.clone()), info)
@@ -273,7 +283,7 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> SyncProvider<
}
/// Trait for managing network
pub trait ManageNetwork: Send + Sync {
pub trait ManageNetwork {
/// Set to allow unreserved peers to connect
fn accept_unreserved_peers(&self);
/// Set to deny unreserved peers to connect
@@ -286,7 +296,7 @@ pub trait ManageNetwork: Send + Sync {
fn node_id(&self) -> Option<String>;
}
impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> ManageNetwork for Service<B, S, H> {
impl<B: BlockT + 'static, S: NetworkSpecialization<B>> ManageNetwork for Service<B, S> {
fn accept_unreserved_peers(&self) {
self.network.lock().accept_unreserved_peers();
}
@@ -319,10 +329,102 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT> ManageNetwork
}
}
/// Create a connected `NetworkChan`/`NetworkPort` pair.
///
/// The chan half enqueues `NetworkMsg`s; the port half dequeues them,
/// tagging each message with `protocol_id`. Both halves share a single
/// `AtomicTask`, so the tokio task polling the port is woken whenever a
/// message is sent (or a sender is dropped).
pub fn network_channel(protocol_id: ProtocolId) -> (NetworkChan, NetworkPort) {
	let (tx, rx) = channel::unbounded();
	let notify = Arc::new(AtomicTask::new());
	(
		NetworkChan::new(tx, notify.clone()),
		NetworkPort::new(rx, protocol_id, notify),
	)
}
/// The sending half of a network channel (see `network_channel`).
///
/// Forwards `NetworkMsg`s to the background network task and wakes that
/// task via the shared `AtomicTask` on every send. Cloneable; the `Drop`
/// impl also notifies the task, so once the last sender is gone the
/// receiving stream can observe the disconnect and finish.
#[derive(Clone)]
pub struct NetworkChan {
	// Underlying unbounded crossbeam sender.
	sender: Sender<NetworkMsg>,
	// Task handle registered by the receiving side; notified on each send/drop.
	task_notify: Arc<AtomicTask>,
}
impl NetworkChan {
	/// Build the sending half from a raw crossbeam sender and the task
	/// handle shared with the matching `NetworkPort`.
	pub fn new(sender: Sender<NetworkMsg>, task_notify: Arc<AtomicTask>) -> Self {
		NetworkChan { sender, task_notify }
	}

	/// Enqueue a message for the network task, then wake that task so it
	/// polls its stream. Sending on a disconnected channel is a no-op.
	pub fn send(&self, msg: NetworkMsg) {
		self.sender.send(msg).ok();
		self.task_notify.notify();
	}
}
impl Drop for NetworkChan {
	/// Wake the receiving task whenever a sender is dropped: when the last
	/// sender goes away the channel reads as disconnected, letting the
	/// stream built on the port terminate instead of waiting forever.
	fn drop(&mut self) {
		self.task_notify.notify();
	}
}
/// The receiving half of a network channel (see `network_channel`).
///
/// Yields `NetworkMsg`s paired with the protocol id the channel was
/// created for, and registers the polling tokio task on the shared
/// `AtomicTask` so senders can wake it.
pub struct NetworkPort {
	// Underlying unbounded crossbeam receiver.
	receiver: Receiver<NetworkMsg>,
	// Protocol id attached to every message handed out by `take_one_message`.
	protocol_id: ProtocolId,
	// Task handle registered on each poll; notified by the sending side.
	task_notify: Arc<AtomicTask>,
}
impl NetworkPort {
/// Create a new network port for a given protocol-id.
pub fn new(receiver: Receiver<NetworkMsg>, protocol_id: ProtocolId, task_notify: Arc<AtomicTask>) -> Self {
Self {
receiver,
protocol_id,
task_notify,
}
}
/// Receive a message, if any is currently-enqueued.
/// Register the current tokio task for notification when a new message is available.
pub fn take_one_message(&self) -> Result<Option<(ProtocolId, NetworkMsg)>, ()> {
self.task_notify.register();
match self.receiver.try_recv() {
Ok(msg) => Ok(Some((self.protocol_id.clone(), msg))),
Err(TryRecvError::Empty) => Ok(None),
Err(TryRecvError::Disconnected) => Err(()),
}
}
/// Get a reference to the underlying crossbeam receiver.
#[cfg(any(test, feature = "test-helpers"))]
pub fn receiver(&self) -> &Receiver<NetworkMsg> {
&self.receiver
}
}
/// Messages handled by the background network task on behalf of the
/// protocol side (see `run_thread` for the handler).
#[derive(Debug)]
pub enum NetworkMsg {
	/// Resolve each node index to its optional `PeerId`; the resulting list
	/// is sent back over the provided channel.
	PeerIds(Vec<NodeIndex>, Sender<Vec<(NodeIndex, Option<PeerId>)>>),
	/// Send an already-encoded custom protocol message to the given node.
	Outgoing(NodeIndex, Vec<u8>),
	/// Report peer misbehaviour; `Severity::Bad` bans the node, while
	/// `Useless`/`Timeout` merely drop it.
	ReportPeer(NodeIndex, Severity),
	/// Fetch the base58 peer-id string of a node, replied over the channel.
	GetPeerId(NodeIndex, Sender<Option<String>>),
}
/// Starts the background thread that handles the networking.
fn start_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
fn start_thread<B: BlockT + 'static, S: NetworkSpecialization<B>>(
protocol_sender: Sender<ProtocolMsg<B, S>>,
network_port: NetworkPort,
network_sender: NetworkChan,
config: NetworkConfiguration,
protocol: Arc<Protocol<B, S, H>>,
registered: RegisteredProtocol,
) -> Result<((oneshot::Sender<()>, thread::JoinHandle<()>), Arc<Mutex<NetworkService>>), Error> {
let protocol_id = registered.id();
@@ -344,7 +446,7 @@ fn start_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
let service_clone = service.clone();
let mut runtime = Runtime::new()?;
let thread = thread::Builder::new().name("network".to_string()).spawn(move || {
let fut = run_thread(service_clone, protocol, protocol_id)
let fut = run_thread(protocol_sender, service_clone, network_sender, network_port, protocol_id)
.select(close_rx.then(|_| Ok(())))
.map(|(val, _)| val)
.map_err(|(err,_ )| err);
@@ -361,82 +463,111 @@ fn start_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
}
/// Runs the background thread that handles the networking.
fn run_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
fn run_thread<B: BlockT + 'static, S: NetworkSpecialization<B>>(
protocol_sender: Sender<ProtocolMsg<B, S>>,
network_service: Arc<Mutex<NetworkService>>,
protocol: Arc<Protocol<B, S, H>>,
network_sender: NetworkChan,
network_port: NetworkPort,
protocol_id: ProtocolId,
) -> impl Future<Item = (), Error = io::Error> {
// Interval for performing maintenance on the protocol handler.
let tick = Interval::new_interval(TICK_TIMEOUT)
.for_each({
let protocol = protocol.clone();
let network_service = network_service.clone();
move |_| {
protocol.tick(&mut NetSyncIo::new(&network_service, protocol_id));
Ok(())
}
})
.then(|res| {
match res {
Ok(()) => (),
Err(err) => error!("Error in the propagation timer: {:?}", err),
};
Ok(())
});
// Interval at which we gossip extrinsics over the network.
let propagate = Interval::new_interval(PROPAGATE_TIMEOUT)
.for_each({
let protocol = protocol.clone();
let network_service = network_service.clone();
move |_| {
protocol.propagate_extrinsics(&mut NetSyncIo::new(&network_service, protocol_id));
Ok(())
let network_service_2 = network_service.clone();
// Protocol produces a stream of messages about what happens in sync.
let protocol = stream::poll_fn(move || {
match network_port.take_one_message() {
Ok(Some(message)) => Ok(Async::Ready(Some(message))),
Ok(None) => Ok(Async::NotReady),
Err(_) => Err(())
}
}).for_each(move |(protocol_id, msg)| {
// Handle message from Protocol.
match msg {
NetworkMsg::PeerIds(node_idxs, sender) => {
let reply = node_idxs.into_iter().map(|idx| {
(idx, network_service_2.lock().peer_id_of_node(idx).map(|p| p.clone()))
}).collect::<Vec<_>>();
let _ = sender.send(reply);
}
})
.then(|res| {
match res {
Ok(()) => (),
Err(err) => error!("Error in the propagation timer: {:?}", err),
};
Ok(())
});
NetworkMsg::Outgoing(who, outgoing_message) => {
network_service_2
.lock()
.send_custom_message(who, protocol_id, outgoing_message);
},
NetworkMsg::ReportPeer(who, severity) => {
match severity {
Severity::Bad(_) => network_service_2.lock().ban_node(who),
Severity::Useless(_) => network_service_2.lock().drop_node(who),
Severity::Timeout => network_service_2.lock().drop_node(who),
}
},
NetworkMsg::GetPeerId(who, sender) => {
let node_id = network_service_2
.lock()
.peer_id_of_node(who)
.cloned()
.map(|id| id.to_base58());
let _ = sender.send(node_id);
},
}
Ok(())
})
.then(|res| {
match res {
Ok(()) => (),
Err(_) => error!("Protocol disconnected"),
};
Ok(())
});
// The network service produces events about what happens on the network. Let's process them.
let network_service2 = network_service.clone();
let network = stream::poll_fn(move || network_service2.lock().poll()).for_each(move |event| {
let mut net_sync = NetSyncIo::new(&network_service, protocol_id);
let network = stream::poll_fn(move || network_service.lock().poll()).for_each(move |event| {
match event {
NetworkServiceEvent::ClosedCustomProtocols { node_index, protocols } => {
NetworkServiceEvent::ClosedCustomProtocols { node_index, protocols, debug_info } => {
if !protocols.is_empty() {
debug_assert_eq!(protocols, &[protocol_id]);
protocol.on_peer_disconnected(&mut net_sync, node_index);
let _ = protocol_sender.send(
ProtocolMsg::PeerDisconnected(node_index, debug_info));
}
}
NetworkServiceEvent::OpenedCustomProtocol { node_index, version, .. } => {
NetworkServiceEvent::OpenedCustomProtocol { node_index, version, debug_info, .. } => {
debug_assert_eq!(version, protocol::CURRENT_VERSION as u8);
protocol.on_peer_connected(&mut net_sync, node_index);
let _ = protocol_sender.send(ProtocolMsg::PeerConnected(node_index, debug_info));
}
NetworkServiceEvent::ClosedCustomProtocol { node_index, .. } => {
protocol.on_peer_disconnected(&mut net_sync, node_index);
NetworkServiceEvent::ClosedCustomProtocol { node_index, debug_info, .. } => {
let _ = protocol_sender.send(ProtocolMsg::PeerDisconnected(node_index, debug_info));
}
NetworkServiceEvent::CustomMessage { node_index, data, .. } => {
protocol.handle_packet(&mut net_sync, node_index, &data);
if let Some(m) = Decode::decode(&mut (&data as &[u8])) {
let _ = protocol_sender.send(ProtocolMsg::CustomMessage(node_index, m));
return Ok(())
}
let _ = network_sender.send(
NetworkMsg::ReportPeer(
node_index,
Severity::Bad("Peer sent us a packet with invalid format".to_string())
)
);
}
NetworkServiceEvent::Clogged { node_index, messages, .. } => {
protocol.on_clogged_peer(&mut net_sync, node_index,
messages.iter().map(|d| d.as_ref()));
debug!(target: "sync", "{} clogging messages:", messages.len());
for msg_bytes in messages.iter().take(5) {
if let Some(msg) = Decode::decode(&mut (&msg_bytes as &[u8])) {
debug!(target: "sync", "{:?}", msg);
let _ = protocol_sender.send(ProtocolMsg::PeerClogged(node_index, Some(msg)));
} else {
debug!(target: "sync", "{:?}", msg_bytes);
let _ = protocol_sender.send(ProtocolMsg::PeerClogged(node_index, None));
}
}
}
};
Ok(())
});
// Merge all futures into one.
let futures: Vec<Box<Future<Item = (), Error = io::Error> + Send>> = vec![
Box::new(tick) as Box<_>,
Box::new(propagate) as Box<_>,
Box::new(protocol) as Box<_>,
Box::new(network) as Box<_>
];
+15 -11
View File
@@ -25,7 +25,7 @@ use consensus::import_queue::{ImportQueue, IncomingBlock};
use client::error::Error as ClientError;
use blocks::BlockCollection;
use runtime_primitives::Justification;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor};
use runtime_primitives::generic::BlockId;
use message::{self, generic::Message as GenericMessage};
use config::Roles;
@@ -211,7 +211,7 @@ impl<B: BlockT> PendingJustifications<B> {
} else {
protocol.report_peer(
who,
Severity::Bad(&format!("Invalid justification provided for #{}", request.0)),
Severity::Bad(format!("Invalid justification provided for #{}", request.0)),
);
}
} else {
@@ -332,13 +332,16 @@ impl<B: BlockT> ChainSync<B> {
match (block_status(&*protocol.client(), &*self.import_queue, info.best_hash), info.best_number) {
(Err(e), _) => {
debug!(target:"sync", "Error reading blockchain: {:?}", e);
protocol.report_peer(who, Severity::Useless(&format!("Error legimimately reading blockchain status: {:?}", e)));
let reason = format!("Error legimimately reading blockchain status: {:?}", e);
protocol.report_peer(who, Severity::Useless(reason));
},
(Ok(BlockStatus::KnownBad), _) => {
protocol.report_peer(who, Severity::Bad(&format!("New peer with known bad best block {} ({}).", info.best_hash, info.best_number)));
let reason = format!("New peer with known bad best block {} ({}).", info.best_hash, info.best_number);
protocol.report_peer(who, Severity::Bad(reason));
},
(Ok(BlockStatus::Unknown), b) if b.is_zero() => {
protocol.report_peer(who, Severity::Bad(&format!("New peer with unknown genesis hash {} ({}).", info.best_hash, info.best_number)));
(Ok(BlockStatus::Unknown), b) if b == As::sa(0) => {
let reason = format!("New peer with unknown genesis hash {} ({}).", info.best_hash, info.best_number);
protocol.report_peer(who, Severity::Bad(reason));
},
(Ok(BlockStatus::Unknown), _) if self.import_queue.status().importing_count > MAJOR_SYNC_BLOCKS => {
// when actively syncing the common point moves too fast.
@@ -457,18 +460,19 @@ impl<B: BlockT> ChainSync<B> {
},
Ok(_) => { // genesis mismatch
trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who);
protocol.report_peer(who, Severity::Bad("Ancestry search: genesis mismatch for peer"));
protocol.report_peer(who, Severity::Bad("Ancestry search: genesis mismatch for peer".to_string()));
return None;
},
Err(e) => {
protocol.report_peer(who, Severity::Useless(&format!("Error answering legitimate blockchain query: {:?}", e)));
let reason = format!("Error answering legitimate blockchain query: {:?}", e);
protocol.report_peer(who, Severity::Useless(reason));
return None;
}
}
},
None => {
trace!(target:"sync", "Invalid response when searching for ancestor from {}", who);
protocol.report_peer(who, Severity::Bad("Invalid response when searching for ancestor"));
protocol.report_peer(who, Severity::Bad("Invalid response when searching for ancestor".to_string()));
return None;
}
}
@@ -517,7 +521,7 @@ impl<B: BlockT> ChainSync<B> {
response.hash,
);
protocol.report_peer(who, Severity::Bad(&msg));
protocol.report_peer(who, Severity::Bad(msg));
return;
}
@@ -534,7 +538,7 @@ impl<B: BlockT> ChainSync<B> {
hash,
);
protocol.report_peer(who, Severity::Useless(&msg));
protocol.report_peer(who, Severity::Useless(msg));
return;
},
}
+219 -170
View File
@@ -16,37 +16,41 @@
#![allow(missing_docs)]
#[cfg(test)]
mod sync;
#[cfg(test)]
mod block_import;
#[cfg(test)]
mod sync;
use std::collections::{VecDeque, HashSet, HashMap};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use parking_lot::RwLock;
use client;
use client::block_builder::BlockBuilder;
use primitives::{H256, Ed25519AuthorityId};
use runtime_primitives::Justification;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor, Zero};
use io::SyncIo;
use protocol::{Context, Protocol, ProtocolContext};
use codec::{Decode, Encode};
use config::ProtocolConfig;
use service::{NetworkLink, TransactionPool};
use network_libp2p::{NodeIndex, PeerId, Severity};
use keyring::Keyring;
use codec::Encode;
use consensus::{BlockOrigin, ImportBlock, JustificationImport, ForkChoiceStrategy, Error as ConsensusError, ErrorKind as ConsensusErrorKind};
use consensus::import_queue::{import_many_blocks, ImportQueue, ImportQueueStatus, IncomingBlock};
use consensus::import_queue::{Link, SharedBlockImport, SharedJustificationImport, Verifier};
use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind};
use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport};
use consensus_gossip::{ConsensusGossip, ConsensusMessage};
use crossbeam_channel::{self as channel, Sender};
use futures::Future;
use futures::sync::{mpsc, oneshot};
use keyring::Keyring;
use network_libp2p::{NodeIndex, ProtocolId, Severity};
use parking_lot::Mutex;
use primitives::{H256, Ed25519AuthorityId};
use protocol::{Context, Protocol, ProtocolMsg, ProtocolStatus};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, Zero, NumberFor};
use runtime_primitives::Justification;
use service::{network_channel, NetworkChan, NetworkLink, NetworkMsg, NetworkPort, TransactionPool};
use specialization::NetworkSpecialization;
use consensus_gossip::ConsensusGossip;
use service::ExecuteInContext;
use test_client;
pub use test_client::runtime::{Block, Hash, Transfer, Extrinsic};
pub use test_client::runtime::{Block, Extrinsic, Hash, Transfer};
pub use test_client::TestClient;
#[cfg(any(test, feature = "test-helpers"))]
@@ -61,7 +65,7 @@ impl<B: BlockT> ImportCB<B> {
ImportCB(RefCell::new(None))
}
fn set<F>(&self, cb: Box<F>)
where F: 'static + Fn(BlockOrigin, Vec<IncomingBlock<B>>) -> bool
where F: 'static + Fn(BlockOrigin, Vec<IncomingBlock<B>>) -> bool,
{
*self.0.borrow_mut() = Some(cb);
}
@@ -168,7 +172,7 @@ impl<B: 'static + BlockT, V: 'static + Verifier<B>> ImportQueue<B> for SyncImpor
&link,
None,
(origin, new_blocks),
verifier,
verifier
)
}));
Ok(())
@@ -204,23 +208,13 @@ impl<B: 'static + BlockT, V: 'static + Verifier<B>> ImportQueue<B> for SyncImpor
}
}
struct DummyContextExecutor(Arc<Protocol<Block, DummySpecialization, Hash>>, Arc<RwLock<VecDeque<TestPacket>>>);
unsafe impl Send for DummyContextExecutor {}
unsafe impl Sync for DummyContextExecutor {}
impl ExecuteInContext<Block> for DummyContextExecutor {
fn execute_in_context<F: Fn(&mut Context<Block>)>(&self, closure: F) {
let mut io = TestIo::new(&self.1, None);
let mut context = ProtocolContext::new(&self.0.context_data(), &mut io);
closure(&mut context);
}
}
/// The test specialization.
pub struct DummySpecialization { }
impl NetworkSpecialization<Block> for DummySpecialization {
fn status(&self) -> Vec<u8> { vec![] }
fn status(&self) -> Vec<u8> {
vec![]
}
fn on_connect(&mut self, _ctx: &mut Context<Block>, _peer_id: NodeIndex, _status: ::message::Status<Block>) {
}
@@ -232,184 +226,198 @@ impl NetworkSpecialization<Block> for DummySpecialization {
&mut self,
_ctx: &mut Context<Block>,
_peer_id: NodeIndex,
_message: &mut Option<::message::Message<Block>>
_message: &mut Option<::message::Message<Block>>,
) {
}
}
pub struct TestIo<'p> {
queue: &'p RwLock<VecDeque<TestPacket>>,
pub to_disconnect: HashSet<NodeIndex>,
packets: Vec<TestPacket>,
_sender: Option<NodeIndex>,
}
impl<'p> TestIo<'p> where {
pub fn new(queue: &'p RwLock<VecDeque<TestPacket>>, sender: Option<NodeIndex>) -> TestIo<'p> {
TestIo {
queue: queue,
_sender: sender,
to_disconnect: HashSet::new(),
packets: Vec::new(),
}
}
}
impl<'p> Drop for TestIo<'p> {
fn drop(&mut self) {
self.queue.write().extend(self.packets.drain(..));
}
}
impl<'p> SyncIo for TestIo<'p> {
fn report_peer(&mut self, who: NodeIndex, _reason: Severity) {
self.to_disconnect.insert(who);
}
fn send(&mut self, who: NodeIndex, data: Vec<u8>) {
self.packets.push(TestPacket {
data: data,
recipient: who,
});
}
fn peer_debug_info(&self, _who: NodeIndex) -> String {
"unknown".to_string()
}
fn peer_id(&self, _peer_id: NodeIndex) -> Option<PeerId> {
None
}
}
/// Mocked subprotocol packet
pub struct TestPacket {
data: Vec<u8>,
recipient: NodeIndex,
}
pub type PeersClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
pub struct Peer<V: Verifier<Block>, D> {
pub struct Peer<V: 'static + Verifier<Block>, D> {
client: Arc<PeersClient>,
pub sync: Arc<Protocol<Block, DummySpecialization, Hash>>,
pub queue: Arc<RwLock<VecDeque<TestPacket>>>,
pub protocol_sender: Sender<ProtocolMsg<Block, DummySpecialization>>,
network_port: Mutex<NetworkPort>,
import_queue: Arc<SyncImportQueue<Block, V>>,
executor: Arc<DummyContextExecutor>,
/// Some custom data set up at initialization time.
network_sender: NetworkChan,
pub data: D,
}
impl<V: 'static + Verifier<Block>, D> Peer<V, D> {
fn new(
client: Arc<PeersClient>,
sync: Arc<Protocol<Block, DummySpecialization, Hash>>,
queue: Arc<RwLock<VecDeque<TestPacket>>>,
import_queue: Arc<SyncImportQueue<Block, V>>,
protocol_sender: Sender<ProtocolMsg<Block, DummySpecialization>>,
network_sender: NetworkChan,
network_port: NetworkPort,
data: D,
) -> Self {
let executor = Arc::new(DummyContextExecutor(sync.clone(), queue.clone()));
Peer { client, sync, queue, import_queue, executor, data }
let network_port = Mutex::new(network_port);
Peer {
client,
protocol_sender,
import_queue,
network_sender,
network_port,
data,
}
}
/// Called after blockchain has been populated to updated current state.
fn start(&self) {
// Update the sync state to the latest chain state.
let info = self.client.info().expect("In-mem client does not fail");
let header = self.client.header(&BlockId::Hash(info.chain.best_hash)).unwrap().unwrap();
let header = self
.client
.header(&BlockId::Hash(info.chain.best_hash))
.unwrap()
.unwrap();
let network_link = NetworkLink {
sync: Arc::downgrade(self.sync.sync()),
context: Arc::downgrade(&self.executor),
protocol_sender: self.protocol_sender.clone(),
network_sender: self.network_sender.clone(),
};
self.import_queue.start(network_link).expect("Test ImportQueue always starts");
self.sync.on_block_imported(&mut TestIo::new(&self.queue, None), info.chain.best_hash, &header);
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockImported(info.chain.best_hash, header));
}
pub fn on_block_imported(
&self,
hash: <Block as BlockT>::Hash,
header: &<Block as BlockT>::Header,
) {
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockImported(hash, header.clone()));
}
/// Called on connection to other indicated peer.
fn on_connect(&self, other: NodeIndex) {
self.sync.on_peer_connected(&mut TestIo::new(&self.queue, Some(other)), other);
}
pub fn consensus_gossip(&self) -> &RwLock<ConsensusGossip<Block>> {
self.sync.consensus_gossip()
let _ = self.protocol_sender.send(ProtocolMsg::PeerConnected(other, String::new()));
}
/// Called on disconnect from other indicated peer.
fn on_disconnect(&self, other: NodeIndex) {
let mut io = TestIo::new(&self.queue, Some(other));
self.sync.on_peer_disconnected(&mut io, other);
let _ = self
.protocol_sender
.send(ProtocolMsg::PeerDisconnected(other, String::new()));
}
/// Receive a message from another peer. Return a set of peers to disconnect.
fn receive_message(&self, from: NodeIndex, msg: TestPacket) -> HashSet<NodeIndex> {
let mut io = TestIo::new(&self.queue, Some(from));
self.sync.handle_packet(&mut io, from, &msg.data);
self.flush();
io.to_disconnect.clone()
}
#[cfg(test)]
fn with_io<'a, F, U>(&'a self, f: F) -> U where F: FnOnce(&mut TestIo<'a>) -> U {
let mut io = TestIo::new(&self.queue, None);
f(&mut io)
fn receive_message(&self, from: NodeIndex, msg: Vec<u8>) {
match Decode::decode(&mut (&msg as &[u8])) {
Some(m) => {
let _ = self
.protocol_sender
.send(ProtocolMsg::CustomMessage(from, m));
}
None => {
let _ = self.network_sender.send(NetworkMsg::ReportPeer(
from,
Severity::Bad("Peer sent us a packet with invalid format".to_string()),
));
}
}
}
/// Produce the next pending message to send to another peer.
fn pending_message(&self) -> Option<TestPacket> {
self.flush();
self.queue.write().pop_front()
fn pending_message(&self) -> Option<NetworkMsg> {
select! {
recv(self.network_port.lock().receiver()) -> msg => return msg.ok(),
// If there are no messages ready, give protocol a change to send one.
recv(channel::after(Duration::from_millis(100))) -> _ => return None,
}
}
/// Produce the next pending message to send to another peer, without waiting.
fn pending_message_fast(&self) -> Option<NetworkMsg> {
self.network_port.lock().receiver().try_recv().ok()
}
/// Whether this peer is done syncing (has no messages to send).
fn is_done(&self) -> bool {
self.queue.read().is_empty()
self.network_port.lock().receiver().is_empty()
}
/// Execute a "sync step". This is called for each peer after it sends a packet.
fn sync_step(&self) {
self.flush();
self.sync.tick(&mut TestIo::new(&self.queue, None));
let _ = self.protocol_sender.send(ProtocolMsg::Tick);
}
/// Send block import notifications.
fn send_import_notifications(&self) {
let info = self.client.info().expect("In-mem client does not fail");
let header = self.client.header(&BlockId::Hash(info.chain.best_hash)).unwrap().unwrap();
self.sync.on_block_imported(&mut TestIo::new(&self.queue, None), info.chain.best_hash, &header);
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockImported(info.chain.best_hash, header));
}
/// Send block finalization notifications.
pub fn send_finality_notifications(&self) {
let info = self.client.info().expect("In-mem client does not fail");
let header = self.client.header(&BlockId::Hash(info.chain.finalized_hash)).unwrap().unwrap();
self.sync.on_block_finalized(&mut TestIo::new(&self.queue, None), info.chain.finalized_hash, &header);
let _ = self
.protocol_sender
.send(ProtocolMsg::BlockFinalized(info.chain.finalized_hash, header.clone()));
}
/// Restart sync for a peer.
fn restart_sync(&self) {
self.sync.abort();
let _ = self.protocol_sender.send(ProtocolMsg::Abort);
}
fn flush(&self) {
pub fn status(&self) -> ProtocolStatus<Block> {
let (sender, port) = channel::unbounded();
let _ = self.protocol_sender.send(ProtocolMsg::Status(sender));
port.recv().unwrap()
}
/// Push a message into the gossip network and relay to peers.
/// `TestNet::sync_step` needs to be called to ensure it's propagated.
pub fn gossip_message(&self, topic: Hash, data: Vec<u8>, broadcast: bool) {
self.sync.gossip_consensus_message(&mut TestIo::new(&self.queue, None), topic, data, broadcast);
pub fn gossip_message(&self, topic: <Block as BlockT>::Hash, data: Vec<u8>, broadcast: bool) {
let _ = self
.protocol_sender
.send(ProtocolMsg::GossipConsensusMessage(topic, data, broadcast));
}
pub fn consensus_gossip_collect_garbage_for(&self, topic: <Block as BlockT>::Hash) {
self.with_gossip(move |gossip, _| gossip.collect_garbage(|t| t == &topic))
}
/// access the underlying consensus gossip handler
pub fn consensus_gossip_messages_for(
&self,
topic: <Block as BlockT>::Hash,
) -> mpsc::UnboundedReceiver<ConsensusMessage> {
let (tx, rx) = oneshot::channel();
self.with_gossip(move |gossip, _| {
let inner_rx = gossip.messages_for(topic);
let _ = tx.send(inner_rx);
});
rx.wait().ok().expect("1. Network is running, 2. it should handle the above closure successfully")
}
/// Execute a closure with the consensus gossip.
pub fn with_gossip<F>(&self, f: F)
where F: FnOnce(&mut ConsensusGossip<Block>, &mut Context<Block>) + Send + 'static
{
let _ = self
.protocol_sender
.send(ProtocolMsg::ExecuteWithGossip(Box::new(f)));
}
/// Announce a block to peers.
pub fn announce_block(&self, block: Hash) {
self.sync.announce_block(&mut TestIo::new(&self.queue, None), block);
let _ = self.protocol_sender.send(ProtocolMsg::AnnounceBlock(block));
}
/// Request a justification for the given block.
#[cfg(test)]
fn request_justification(&self, hash: &::primitives::H256, number: NumberFor<Block>) {
self.executor.execute_in_context(|context| {
self.sync.sync().write().request_justification(hash, number, context);
})
let _ = self
.protocol_sender
.send(ProtocolMsg::RequestJustification(hash.clone(), number));
}
/// Add blocks to the peer -- edit the block before adding
@@ -429,23 +437,28 @@ impl<V: 'static + Verifier<Block>, D> Peer<V, D> {
let builder = self.client.new_block_at(&at).unwrap();
let block = edit_block(builder);
let hash = block.header.hash();
trace!("Generating {}, (#{}, parent={})", hash, block.header.number, block.header.parent_hash);
trace!(
"Generating {}, (#{}, parent={})",
hash,
block.header.number,
block.header.parent_hash
);
let header = block.header.clone();
at = BlockId::Hash(hash);
// NOTE: if we use a non-synchronous queue in the test-net in the future,
// this may not work.
self.import_queue.import_blocks(origin, vec![
IncomingBlock {
origin: None,
self.import_queue.import_blocks(
origin,
vec![IncomingBlock {
origin: None,
hash,
header: Some(header),
body: Some(block.extrinsics),
justification: None,
},
}
]);
}
}
/// Push blocks to the peer (simplified: with or without a TX)
@@ -483,13 +496,6 @@ impl<V: 'static + Verifier<Block>, D> Peer<V, D> {
});
}
/// Execute a function with specialization for this peer.
pub fn with_spec<F, U>(&self, f: F) -> U
where F: FnOnce(&mut DummySpecialization, &mut Context<Block>) -> U
{
self.sync.with_spec(&mut TestIo::new(&self.queue, None), f)
}
/// Get a reference to the client.
pub fn client(&self) -> &Arc<PeersClient> {
&self.client
@@ -554,23 +560,26 @@ pub trait TestNetFactory: Sized {
let tx_pool = Arc::new(EmptyTransactionPool);
let verifier = self.make_verifier(client.clone(), config);
let (block_import, justification_import, data) = self.make_block_import(client.clone());
let (network_sender, network_port) = network_channel(ProtocolId::default());
let import_queue = Arc::new(SyncImportQueue::new(verifier, block_import, justification_import));
let specialization = DummySpecialization { };
let sync = Protocol::new(
let protocol_sender = Protocol::new(
network_sender.clone(),
config.clone(),
client.clone(),
import_queue.clone(),
None,
tx_pool,
specialization
specialization,
).unwrap();
let peer = Arc::new(Peer::new(
client,
Arc::new(sync),
Arc::new(RwLock::new(VecDeque::new())),
import_queue,
protocol_sender,
network_sender,
network_port,
data,
));
@@ -594,44 +603,58 @@ pub trait TestNetFactory: Sized {
}
}
});
self.route(None);
self.set_started(true);
}
/// Do one step of routing.
fn route(&mut self) {
fn route(&mut self, disconnected: Option<HashSet<NodeIndex>>) {
self.mut_peers(move |peers| {
let mut to_disconnect = HashSet::new();
for peer in 0..peers.len() {
let packet = peers[peer].pending_message();
if let Some(packet) = packet {
let disconnecting = {
let recipient = packet.recipient;
trace!(target: "sync", "--- {} -> {} ---", peer, recipient);
let to_disconnect = peers[recipient].receive_message(peer as NodeIndex, packet);
for d in &to_disconnect {
// notify this that disconnecting peers are disconnecting
peers[recipient].on_disconnect(*d as NodeIndex);
match packet {
None => continue,
Some(NetworkMsg::Outgoing(recipient, packet)) => {
if let Some(disconnected) = disconnected.as_ref() {
let mut current = HashSet::new();
current.insert(peer);
current.insert(recipient);
// Not routing message between "disconnected" nodes.
if disconnected.is_subset(&current) {
continue;
}
}
to_disconnect
};
for d in &disconnecting {
// notify other peers that this peer is disconnecting
peers[*d].on_disconnect(peer as NodeIndex);
peers[recipient].receive_message(peer as NodeIndex, packet)
}
Some(NetworkMsg::ReportPeer(who, _)) => {
to_disconnect.insert(who);
}
Some(_msg) => continue,
}
}
for d in to_disconnect {
for peer in 0..peers.len() {
peers[peer].on_disconnect(d);
}
}
});
}
/// Route messages between peers until all queues are empty.
fn route_until_complete(&mut self) {
while !self.done() {
self.route()
}
/// Route all pending outgoing messages, without waiting or disconnecting.
fn route_fast(&mut self) {
self.mut_peers(move |peers| {
for peer in 0..peers.len() {
while let Some(NetworkMsg::Outgoing(recipient, packet)) = peers[peer].pending_message_fast() {
peers[recipient].receive_message(peer as NodeIndex, packet)
}
}
});
}
/// Do a step of synchronization.
fn sync_step(&mut self) {
self.route();
self.route(None);
self.mut_peers(|peers| {
for peer in peers {
@@ -667,10 +690,26 @@ pub trait TestNetFactory: Sized {
fn sync(&mut self) -> u32 {
self.start();
let mut total_steps = 0;
self.sync_step();
self.route(None);
while !self.done() {
total_steps += 1;
self.route(None);
}
total_steps
}
/// Perform synchronization until complete,
/// excluding sync between certain nodes.
fn sync_with_disconnected(&mut self, disconnected: HashSet<NodeIndex>) -> u32 {
self.start();
let mut total_steps = 0;
self.sync_step();
self.route(Some(disconnected.clone()));
while !self.done() {
self.sync_step();
total_steps += 1;
self.route();
self.route(Some(disconnected.clone()));
}
total_steps
}
@@ -685,6 +724,16 @@ pub trait TestNetFactory: Sized {
/// Whether all peers have synced.
fn done(&self) -> bool {
for _ in 0..10 {
if self.peers().iter().all(|p| p.is_done()) {
// If all peers are done, wait a little bit
// in case one is still about to send a message.
thread::sleep(Duration::from_millis(1000));
continue;
}
// Do another round of routing.
return false
}
self.peers().iter().all(|p| p.is_done())
}
}
+15 -8
View File
@@ -18,7 +18,10 @@ use client::backend::Backend;
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
use config::Roles;
use consensus::BlockOrigin;
use network_libp2p::NodeIndex;
use sync::SyncState;
use std::{thread, time};
use std::collections::HashSet;
use super::*;
#[test]
@@ -29,7 +32,7 @@ fn sync_from_two_peers_works() {
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain()));
let status = net.peer(0).sync.status();
let status = net.peer(0).status();
assert_eq!(status.sync.state, SyncState::Idle);
}
@@ -49,9 +52,12 @@ fn sync_from_two_peers_with_ancestry_search_works() {
fn sync_long_chain_works() {
let mut net = TestNet::new(2);
net.peer(1).push_blocks(500, false);
net.sync_steps(3);
assert_eq!(net.peer(0).sync.status().sync.state, SyncState::Downloading);
net.sync();
// Wait for peer 0 to import blocks received over the network.
thread::sleep(time::Duration::from_millis(1000));
net.sync();
// Wait for peers to get up to speed.
thread::sleep(time::Duration::from_millis(1000));
assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain()));
}
@@ -137,7 +143,7 @@ fn own_blocks_are_announced() {
net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.bake().unwrap());
let header = net.peer(0).client().header(&BlockId::Number(1)).unwrap().unwrap();
net.peer(0).with_io(|io| net.peer(0).sync.on_block_imported(io, header.hash(), &header));
net.peer(0).on_block_imported(header.hash(), &header);
net.sync();
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
@@ -166,10 +172,11 @@ fn blocks_are_not_announced_by_light_nodes() {
net.peer(0).on_connect(1);
net.peer(1).on_connect(2);
// generate block at peer0 && run sync
while !net.done() {
net.sync_step();
}
// Only sync between 0 -> 1, and 1 -> 2
let mut disconnected = HashSet::new();
disconnected.insert(0 as NodeIndex);
disconnected.insert(2 as NodeIndex);
net.sync_with_disconnected(disconnected);
// peer 0 has the best chain
// peer 1 has the best chain
+9 -13
View File
@@ -38,11 +38,7 @@ use parking_lot::Mutex;
// Type aliases.
// These exist mainly to avoid typing `<F as Factory>::Foo` all over the code.
/// Network service type for a factory.
pub type NetworkService<F> = network::Service<
<F as ServiceFactory>::Block,
<F as ServiceFactory>::NetworkProtocol,
<<F as ServiceFactory>::Block as BlockT>::Hash,
>;
pub type NetworkService<F> = network::Service<<F as ServiceFactory>::Block, <F as ServiceFactory>::NetworkProtocol>;
/// Code executor type for a factory.
pub type CodeExecutor<F> = NativeExecutor<<F as ServiceFactory>::RuntimeDispatch>;
@@ -59,7 +55,7 @@ pub type FullExecutor<F> = client::LocalCallExecutor<
/// Light client backend type for a factory.
pub type LightBackend<F> = client::light::backend::Backend<
client_db::light::LightStorage<<F as ServiceFactory>::Block>,
network::OnDemand<<F as ServiceFactory>::Block, NetworkService<F>>,
network::OnDemand<<F as ServiceFactory>::Block>,
Blake2Hasher,
>;
@@ -68,20 +64,20 @@ pub type LightExecutor<F> = client::light::call_executor::RemoteOrLocalCallExecu
<F as ServiceFactory>::Block,
client::light::backend::Backend<
client_db::light::LightStorage<<F as ServiceFactory>::Block>,
network::OnDemand<<F as ServiceFactory>::Block, NetworkService<F>>,
network::OnDemand<<F as ServiceFactory>::Block>,
Blake2Hasher
>,
client::light::call_executor::RemoteCallExecutor<
client::light::blockchain::Blockchain<
client_db::light::LightStorage<<F as ServiceFactory>::Block>,
network::OnDemand<<F as ServiceFactory>::Block, NetworkService<F>>
network::OnDemand<<F as ServiceFactory>::Block>
>,
network::OnDemand<<F as ServiceFactory>::Block, NetworkService<F>>
network::OnDemand<<F as ServiceFactory>::Block>
>,
client::LocalCallExecutor<
client::light::backend::Backend<
client_db::light::LightStorage<<F as ServiceFactory>::Block>,
network::OnDemand<<F as ServiceFactory>::Block, NetworkService<F>>,
network::OnDemand<<F as ServiceFactory>::Block>,
Blake2Hasher
>,
CodeExecutor<F>
@@ -363,7 +359,7 @@ pub trait Components: Sized + 'static {
) -> Result<
(
Arc<ComponentClient<Self>>,
Option<Arc<OnDemand<FactoryBlock<Self::Factory>, NetworkService<Self::Factory>>>>
Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>>
),
error::Error
>;
@@ -429,7 +425,7 @@ impl<Factory: ServiceFactory> Components for FullComponents<Factory> {
)
-> Result<(
Arc<ComponentClient<Self>>,
Option<Arc<OnDemand<FactoryBlock<Self::Factory>, NetworkService<Self::Factory>>>>
Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>>
), error::Error>
{
let db_settings = client_db::DatabaseSettings {
@@ -505,7 +501,7 @@ impl<Factory: ServiceFactory> Components for LightComponents<Factory> {
-> Result<
(
Arc<ComponentClient<Self>>,
Option<Arc<OnDemand<FactoryBlock<Self::Factory>, NetworkService<Self::Factory>>>>
Option<Arc<OnDemand<FactoryBlock<Self::Factory>>>>
), error::Error>
{
let db_settings = client_db::DatabaseSettings {
+4 -4
View File
@@ -192,12 +192,12 @@ impl<Components: components::Components> Service<Components> {
};
let has_bootnodes = !network_params.network_config.boot_nodes.is_empty();
let network = network::Service::new(
let (network, network_chan) = network::Service::new(
network_params,
protocol_id,
import_queue
)?;
on_demand.map(|on_demand| on_demand.set_service_link(Arc::downgrade(&network)));
on_demand.map(|on_demand| on_demand.set_network_sender(network_chan));
{
// block notifications
@@ -208,7 +208,7 @@ impl<Components: components::Components> Service<Components> {
let events = client.import_notification_stream()
.for_each(move |notification| {
if let Some(network) = network.upgrade() {
network.on_block_imported(notification.hash, &notification.header);
network.on_block_imported(notification.hash, notification.header);
}
if let (Some(txpool), Some(client)) = (txpool.upgrade(), wclient.upgrade()) {
Components::TransactionPool::on_block_imported(
@@ -260,7 +260,7 @@ impl<Components: components::Components> Service<Components> {
let events = MostRecentNotification(client.finality_notification_stream().fuse())
.for_each(move |notification| {
if let Some(network) = network.upgrade() {
network.on_block_finalized(notification.hash, &notification.header);
network.on_block_finalized(notification.hash, notification.header);
}
Ok(())
})