// Copyright 2017-2018 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . use std::collections::{HashMap, HashSet, BTreeMap}; use std::cmp; use std::io::Cursor; use std::sync::Arc; use std::time; use parking_lot::RwLock; use rustc_hex::ToHex; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As, Zero}; use runtime_primitives::generic::BlockId; use primitives::storage::StorageKey; use network_libp2p::{NodeIndex, Severity}; use codec::{Encode, Decode}; use consensus::import_queue::ImportQueue; use message::{self, Message}; use message::generic::Message as GenericMessage; use consensus_gossip::ConsensusGossip; use specialization::NetworkSpecialization; use sync::{ChainSync, Status as SyncStatus, SyncState}; use service::{TransactionPool, ExHashT}; use config::{ProtocolConfig, Roles}; use chain::Client; use client::light::fetcher::ChangesProof; use on_demand::OnDemandService; use io::SyncIo; use error; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Current protocol version. pub (crate) const CURRENT_VERSION: u32 = 1; // Maximum allowed entries in `BlockResponse` const MAX_BLOCK_DATA_RESPONSE: u32 = 128; /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it unuseful /// and disconnect to free connection slot. 
const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; // Lock must always be taken in order declared here. pub struct Protocol, H: ExHashT> { config: ProtocolConfig, on_demand: Option>>, genesis_hash: B::Hash, sync: Arc>>, specialization: RwLock, consensus_gossip: RwLock>, context_data: ContextData, // Connected peers pending Status message. handshaking_peers: RwLock>, transaction_pool: Arc>, } /// Syncing status and statistics #[derive(Clone)] pub struct ProtocolStatus { /// Sync status. pub sync: SyncStatus, /// Total number of connected peers pub num_peers: usize, /// Total number of active peers. pub num_active_peers: usize, } /// Peer information struct Peer { /// Protocol version protocol_version: u32, /// Roles roles: Roles, /// Peer best block hash best_hash: B::Hash, /// Peer best block number best_number: ::Number, /// Pending block request if any block_request: Option>, /// Pending block request timestamp block_request_timestamp: Option, /// Pending block justification request if any justification_request: Option>, /// Pending block justification request timestamp justification_request_timestamp: Option, /// Holds a set of transactions known to this peer. known_extrinsics: HashSet, /// Holds a set of blocks known to this peer. known_blocks: HashSet, /// Request counter, next_request_id: message::RequestId, } impl Peer { fn min_request_timestamp(&self) -> Option<&time::Instant> { match (self.block_request_timestamp, self.justification_request_timestamp) { (Some(t1), Some(t2)) if t1 < t2 => self.block_request_timestamp.as_ref(), (Some(_), Some(_)) => self.justification_request_timestamp.as_ref(), (Some(_), None) => self.block_request_timestamp.as_ref(), (None, Some(_)) => self.justification_request_timestamp.as_ref(), _ => None, } } } /// Info about a peer's known state. 
#[derive(Debug)]
pub struct PeerInfo<B: BlockT> {
	/// Roles
	pub roles: Roles,
	/// Protocol version
	pub protocol_version: u32,
	/// Peer best block hash
	pub best_hash: B::Hash,
	/// Peer best block number
	pub best_number: <B::Header as HeaderT>::Number,
}

/// Context for a network-specific handler.
pub trait Context<B: BlockT> {
	/// Get a reference to the client.
	fn client(&self) -> &::chain::Client<B>;

	/// Point out that a peer has been malign or irresponsible or appeared lazy.
	fn report_peer(&mut self, who: NodeIndex, reason: Severity);

	/// Get peer info.
	fn peer_info(&self, peer: NodeIndex) -> Option<PeerInfo<B>>;

	/// Send a message to a peer.
	fn send_message(&mut self, who: NodeIndex, data: ::message::Message<B>);
}

/// Protocol context.
pub(crate) struct ProtocolContext<'a, B: 'a + BlockT, H: 'a + ExHashT> {
	io: &'a mut SyncIo,
	context_data: &'a ContextData<B, H>,
}

impl<'a, B: BlockT + 'a, H: 'a + ExHashT> ProtocolContext<'a, B, H> {
	pub(crate) fn new(context_data: &'a ContextData<B, H>, io: &'a mut SyncIo) -> Self {
		ProtocolContext {
			io,
			context_data,
		}
	}

	/// Send a message to a peer.
	pub fn send_message(&mut self, who: NodeIndex, message: Message<B>) {
		send_message(&self.context_data.peers, self.io, who, message)
	}

	/// Point out that a peer has been malign or irresponsible or appeared lazy.
	pub fn report_peer(&mut self, who: NodeIndex, reason: Severity) {
		self.io.report_peer(who, reason);
	}

	/// Get peer info.
pub fn peer_info(&self, peer: NodeIndex) -> Option> { self.context_data.peers.read().get(&peer).map(|p| { PeerInfo { roles: p.roles, protocol_version: p.protocol_version, best_hash: p.best_hash, best_number: p.best_number, } }) } } impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, H> { fn send_message(&mut self, who: NodeIndex, message: Message) { ProtocolContext::send_message(self, who, message); } fn report_peer(&mut self, who: NodeIndex, reason: Severity) { ProtocolContext::report_peer(self, who, reason); } fn peer_info(&self, who: NodeIndex) -> Option> { ProtocolContext::peer_info(self, who) } fn client(&self) -> &Client { &*self.context_data.chain } } /// Data necessary to create a context. pub(crate) struct ContextData { // All connected peers peers: RwLock>>, pub chain: Arc>, } impl, H: ExHashT> Protocol { /// Create a new instance. pub fn new>( config: ProtocolConfig, chain: Arc>, import_queue: Arc, on_demand: Option>>, transaction_pool: Arc>, specialization: S, ) -> error::Result where I: ImportQueue { let info = chain.info()?; let sync = ChainSync::new(config.roles, &info, import_queue); let protocol = Protocol { config: config, context_data: ContextData { peers: RwLock::new(HashMap::new()), chain, }, on_demand, genesis_hash: info.chain.genesis_hash, sync: Arc::new(RwLock::new(sync)), specialization: RwLock::new(specialization), consensus_gossip: RwLock::new(ConsensusGossip::new()), handshaking_peers: RwLock::new(HashMap::new()), transaction_pool: transaction_pool, }; Ok(protocol) } pub(crate) fn context_data(&self) -> &ContextData { &self.context_data } pub(crate) fn sync(&self) -> &Arc>> { &self.sync } pub(crate) fn consensus_gossip<'a>(&'a self) -> &'a RwLock> { &self.consensus_gossip } /// Returns protocol status pub fn status(&self) -> ProtocolStatus { let sync = self.sync.read(); let peers = self.context_data.peers.read(); ProtocolStatus { sync: sync.status(), num_peers: peers.values().count(), num_active_peers: 
peers.values().filter(|p| p.block_request.is_some()).count(), } } pub fn peers(&self) -> Vec<(NodeIndex, PeerInfo)> { self.context_data.peers.read().iter().map(|(idx, p)| { ( *idx, PeerInfo { roles: p.roles, protocol_version: p.protocol_version, best_hash: p.best_hash, best_number: p.best_number, } ) }).collect() } fn handle_response(&self, io: &mut SyncIo, who: NodeIndex, response: &message::BlockResponse) -> Option> { let mut peers = self.context_data.peers.write(); let request = if let Some(ref mut peer) = peers.get_mut(&who) { match (peer.block_request.take(), peer.justification_request.take()) { (Some(block_request), Some(justification_request)) => { if block_request.id == response.id { peer.block_request_timestamp = None; peer.justification_request = Some(justification_request); block_request } else if justification_request.id == response.id { peer.justification_request_timestamp = None; peer.block_request = Some(block_request); justification_request } else { peer.justification_request_timestamp = None; peer.block_request_timestamp = None; trace!(target: "sync", "Ignoring mismatched response packet from {} (expected {} or {} got {})", who, block_request.id, justification_request.id, response.id, ); return None; } }, (Some(block_request), None) => { if block_request.id == response.id { peer.block_request_timestamp = None; block_request } else { peer.block_request_timestamp = None; trace!(target: "sync", "Ignoring mismatched response packet from {} (expected {} got {})", who, block_request.id, response.id, ); return None; } }, (None, Some(justification_request)) => { if justification_request.id == response.id { peer.justification_request_timestamp = None; justification_request } else { peer.justification_request_timestamp = None; trace!(target: "sync", "Ignoring mismatched response packet from {} (expected {} got {})", who, justification_request.id, response.id, ); return None; } }, (None, None) => { io.report_peer(who, Severity::Bad("Unexpected response packet 
received from peer")); return None; }, } } else { io.report_peer(who, Severity::Bad("Unexpected packet received from peer")); return None; }; Some(request) } pub fn handle_packet(&self, io: &mut SyncIo, who: NodeIndex, mut data: &[u8]) { let message: Message = match Decode::decode(&mut data) { Some(m) => m, None => { trace!(target: "sync", "Invalid packet from {}", who); io.report_peer(who, Severity::Bad("Peer sent us a packet with invalid format")); return; } }; match message { GenericMessage::Status(s) => self.on_status_message(io, who, s), GenericMessage::BlockRequest(r) => self.on_block_request(io, who, r), GenericMessage::BlockResponse(r) => { if let Some(request) = self.handle_response(io, who, &r) { self.on_block_response(io, who, request, r); } }, GenericMessage::BlockAnnounce(announce) => self.on_block_announce(io, who, announce), GenericMessage::Transactions(m) => self.on_extrinsics(io, who, m), GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(io, who, request), GenericMessage::RemoteCallResponse(response) => self.on_remote_call_response(io, who, response), GenericMessage::RemoteReadRequest(request) => self.on_remote_read_request(io, who, request), GenericMessage::RemoteReadResponse(response) => self.on_remote_read_response(io, who, response), GenericMessage::RemoteHeaderRequest(request) => self.on_remote_header_request(io, who, request), GenericMessage::RemoteHeaderResponse(response) => self.on_remote_header_response(io, who, response), GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(io, who, request), GenericMessage::RemoteChangesResponse(response) => self.on_remote_changes_response(io, who, response), GenericMessage::Consensus(topic, msg, broadcast) => { self.consensus_gossip.write().on_incoming(&mut ProtocolContext::new(&self.context_data, io), who, topic, msg, broadcast); }, other => self.specialization.write().on_message(&mut ProtocolContext::new(&self.context_data, io), who, &mut 
Some(other)), } } pub fn send_message(&self, io: &mut SyncIo, who: NodeIndex, message: Message) { send_message::(&self.context_data.peers, io, who, message) } pub fn gossip_consensus_message(&self, io: &mut SyncIo, topic: B::Hash, message: Vec, broadcast: bool) { let gossip = self.consensus_gossip(); self.with_spec(io, move |_s, context|{ gossip.write().multicast(context, topic, message, broadcast); }); } /// Called when a new peer is connected pub fn on_peer_connected(&self, io: &mut SyncIo, who: NodeIndex) { trace!(target: "sync", "Connected {}: {}", who, io.peer_debug_info(who)); self.handshaking_peers.write().insert(who, time::Instant::now()); self.send_status(io, who); } /// Called by peer when it is disconnecting pub fn on_peer_disconnected(&self, io: &mut SyncIo, peer: NodeIndex) { trace!(target: "sync", "Disconnecting {}: {}", peer, io.peer_debug_info(peer)); // lock all the the peer lists so that add/remove peer events are in order let mut sync = self.sync.write(); let mut spec = self.specialization.write(); let removed = { let mut peers = self.context_data.peers.write(); let mut handshaking_peers = self.handshaking_peers.write(); handshaking_peers.remove(&peer); peers.remove(&peer).is_some() }; if removed { let mut context = ProtocolContext::new(&self.context_data, io); self.consensus_gossip.write().peer_disconnected(&mut context, peer); sync.peer_disconnected(&mut context, peer); spec.on_disconnect(&mut context, peer); self.on_demand.as_ref().map(|s| s.on_disconnect(peer)); } } /// Called as a back-pressure mechanism if the networking detects that the peer cannot process /// our messaging rate fast enough. pub fn on_clogged_peer<'a>( &self, _io: &mut SyncIo, who: NodeIndex, clogging_messages: impl ExactSizeIterator ) { // We don't do anything but print some diagnostics for now. 
if let Some(peer) = self.context_data.peers.read().get(&who) { debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ known_extrinsics: {:?}; known_blocks: {:?}; best_hash: {:?}; best_number: {:?})", who, peer.protocol_version, peer.roles, peer.known_extrinsics, peer.known_blocks, peer.best_hash, peer.best_number); } else { debug!(target: "sync", "Peer clogged before being properly connected"); } debug!(target: "sync", "{} clogging messages:", clogging_messages.len()); for msg_bytes in clogging_messages { if let Some(msg) = as Decode>::decode(&mut Cursor::new(msg_bytes)) { debug!(target: "sync", "{:?}", msg); } else { debug!(target: "sync", "{:?}", msg_bytes) } } } fn on_block_request(&self, io: &mut SyncIo, peer: NodeIndex, request: message::BlockRequest) { trace!(target: "sync", "BlockRequest {} from {} with fields {:?}: from {:?} to {:?} max {:?}", request.id, peer, request.fields, request.from, request.to, request.max, ); let mut blocks = Vec::new(); let mut id = match request.from { message::FromBlock::Hash(h) => BlockId::Hash(h), message::FromBlock::Number(n) => BlockId::Number(n), }; let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; let get_header = request.fields.contains(message::BlockAttributes::HEADER); let get_body = request.fields.contains(message::BlockAttributes::BODY); let get_justification = request.fields.contains(message::BlockAttributes::JUSTIFICATION); while let Some(header) = self.context_data.chain.header(&id).unwrap_or(None) { if blocks.len() >= max { break; } let number = header.number().clone(); let hash = header.hash(); let parent_hash = header.parent_hash().clone(); let justification = if get_justification { self.context_data.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) } else { None }; let block_data = message::generic::BlockData { hash: hash, header: if get_header { Some(header) } else { None }, body: if get_body { 
self.context_data.chain.body(&BlockId::Hash(hash)).unwrap_or(None) } else { None }, receipt: None, message_queue: None, justification, }; blocks.push(block_data); match request.direction { message::Direction::Ascending => id = BlockId::Number(number + As::sa(1)), message::Direction::Descending => { if number == As::sa(0) { break; } id = BlockId::Hash(parent_hash) } } } let response = message::generic::BlockResponse { id: request.id, blocks: blocks, }; trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); self.send_message(io, peer, GenericMessage::BlockResponse(response)) } fn on_block_response(&self, io: &mut SyncIo, peer: NodeIndex, request: message::BlockRequest, response: message::BlockResponse) { let blocks_range = match ( response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), ) { (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), (Some(first), Some(_)) => format!(" ({})", first), _ => Default::default(), }; trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", response.id, peer, response.blocks.len(), blocks_range); // TODO [andre]: move this logic to the import queue so that // justifications are imported asynchronously (#1482) if request.fields == message::BlockAttributes::JUSTIFICATION { let mut sync = self.sync.write(); sync.on_block_justification_data( &mut ProtocolContext::new(&self.context_data, io), peer, request, response, ); } else { // import_queue.import_blocks also acquires sync.write(); // Break the cycle by doing these separately from the outside; let new_blocks = { let mut sync = self.sync.write(); sync.on_block_data(&mut ProtocolContext::new(&self.context_data, io), peer, request, response) }; if let Some((origin, new_blocks)) = new_blocks { let import_queue = self.sync.read().import_queue(); import_queue.import_blocks(origin, new_blocks); } } } /// Perform time based 
maintenance. pub fn tick(&self, io: &mut SyncIo) { self.consensus_gossip.write().collect_garbage(|_| true); self.maintain_peers(io); self.sync.write().tick(&mut ProtocolContext::new(&self.context_data, io)); self.on_demand.as_ref().map(|s| s.maintain_peers(io)); } fn maintain_peers(&self, io: &mut SyncIo) { let tick = time::Instant::now(); let mut aborting = Vec::new(); { let peers = self.context_data.peers.read(); let handshaking_peers = self.handshaking_peers.read(); for (who, timestamp) in peers.iter() .filter_map(|(id, peer)| peer.min_request_timestamp().map(|r| (id, r))) .chain(handshaking_peers.iter()) { if (tick - *timestamp).as_secs() > REQUEST_TIMEOUT_SEC { trace!(target: "sync", "Timeout {}", who); aborting.push(*who); } } } self.specialization.write().maintain_peers(&mut ProtocolContext::new(&self.context_data, io)); for p in aborting { io.report_peer(p, Severity::Timeout); } } #[allow(dead_code)] pub fn peer_info(&self, peer: NodeIndex) -> Option> { self.context_data.peers.read().get(&peer).map(|p| { PeerInfo { roles: p.roles, protocol_version: p.protocol_version, best_hash: p.best_hash, best_number: p.best_number, } }) } /// Called by peer to report status fn on_status_message(&self, io: &mut SyncIo, who: NodeIndex, status: message::Status) { trace!(target: "sync", "New peer {} {:?}", who, status); { let mut peers = self.context_data.peers.write(); let mut handshaking_peers = self.handshaking_peers.write(); if peers.contains_key(&who) { debug!(target: "sync", "Unexpected status packet from {}:{}", who, io.peer_debug_info(who)); return; } if status.genesis_hash != self.genesis_hash { io.report_peer(who, Severity::Bad(&format!("Peer is on different chain (our genesis: {} theirs: {})", self.genesis_hash, status.genesis_hash))); return; } if status.version != CURRENT_VERSION { io.report_peer(who, Severity::Bad(&format!("Peer using unsupported protocol version {}", status.version))); return; } if self.config.roles & Roles::LIGHT == Roles::LIGHT { let 
self_best_block = self.context_data.chain.info().ok() .and_then(|info| info.best_queued_number) .unwrap_or_else(|| Zero::zero()); let blocks_difference = self_best_block.as_().checked_sub(status.best_number.as_()).unwrap_or(0); if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { io.report_peer(who, Severity::Useless("Peer is far behind us and will unable to serve light requests")); return; } } let peer = Peer { protocol_version: status.version, roles: status.roles, best_hash: status.best_hash, best_number: status.best_number, block_request: None, block_request_timestamp: None, justification_request: None, justification_request_timestamp: None, known_extrinsics: HashSet::new(), known_blocks: HashSet::new(), next_request_id: 0, }; peers.insert(who.clone(), peer); handshaking_peers.remove(&who); debug!(target: "sync", "Connected {} {}", who, io.peer_debug_info(who)); } let mut context = ProtocolContext::new(&self.context_data, io); self.on_demand.as_ref().map(|s| s.on_connect(who, status.roles, status.best_number)); self.sync.write().new_peer(&mut context, who); self.consensus_gossip.write().new_peer(&mut context, who, status.roles); self.specialization.write().on_connect(&mut context, who, status); } /// Called when peer sends us new extrinsics fn on_extrinsics(&self, _io: &mut SyncIo, who: NodeIndex, extrinsics: message::Transactions) { // Accept extrinsics only when fully synced if self.sync.read().status().state != SyncState::Idle { trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); return; } trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); let mut peers = self.context_data.peers.write(); if let Some(ref mut peer) = peers.get_mut(&who) { for t in extrinsics { if let Some(hash) = self.transaction_pool.import(&t) { peer.known_extrinsics.insert(hash); } else { trace!(target: "sync", "Extrinsic rejected"); } } } } /// Called when we propagate ready extrinsics to peers. 
pub fn propagate_extrinsics(&self, io: &mut SyncIo) { debug!(target: "sync", "Propagating extrinsics"); // Accept transactions only when fully synced if self.sync.read().status().state != SyncState::Idle { return; } let extrinsics = self.transaction_pool.transactions(); let mut propagated_to = HashMap::new(); let mut peers = self.context_data.peers.write(); for (who, ref mut peer) in peers.iter_mut() { let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics .iter() .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) .cloned() .unzip(); if !to_send.is_empty() { let node_id = io.peer_id(*who).map(|id| id.to_base58()); if let Some(id) = node_id { for hash in hashes { propagated_to.entry(hash).or_insert_with(Vec::new).push(id.clone()); } } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); self.send_message(io, *who, GenericMessage::Transactions(to_send)); } } self.transaction_pool.on_broadcasted(propagated_to); } /// Make sure an important block is propagated to peers. /// /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. 
pub fn announce_block(&self, io: &mut SyncIo, hash: B::Hash) { let header = match self.context_data.chain.header(&BlockId::Hash(hash)) { Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); return; } Err(e) => { warn!("Error reading block header {}: {:?}", hash, e); return; } }; let mut peers = self.context_data.peers.write(); let hash = header.hash(); for (who, ref mut peer) in peers.iter_mut() { trace!(target: "sync", "Reannouncing block {:?} to {}", hash, who); peer.known_blocks.insert(hash); self.send_message(io, *who, GenericMessage::BlockAnnounce(message::BlockAnnounce { header: header.clone() })); } } /// Send Status message fn send_status(&self, io: &mut SyncIo, who: NodeIndex) { if let Ok(info) = self.context_data.chain.info() { let status = message::generic::Status { version: CURRENT_VERSION, genesis_hash: info.chain.genesis_hash, roles: self.config.roles.into(), best_number: info.chain.best_number, best_hash: info.chain.best_hash, chain_status: self.specialization.read().status(), }; self.send_message(io, who, GenericMessage::Status(status)) } } pub fn abort(&self) { let mut sync = self.sync.write(); let mut spec = self.specialization.write(); let mut peers = self.context_data.peers.write(); let mut handshaking_peers = self.handshaking_peers.write(); let mut consensus_gossip = self.consensus_gossip.write(); sync.clear(); spec.on_abort(); peers.clear(); handshaking_peers.clear(); consensus_gossip.abort(); } pub fn stop(&self) { // stop processing import requests first (without holding a sync lock) let import_queue = self.sync.read().import_queue(); import_queue.stop(); // and then clear all the sync data self.abort(); } pub fn on_block_announce(&self, io: &mut SyncIo, who: NodeIndex, announce: message::BlockAnnounce) { let header = announce.header; let hash = header.hash(); { let mut peers = self.context_data.peers.write(); if let Some(ref mut peer) = peers.get_mut(&who) { peer.known_blocks.insert(hash.clone()); } 
} self.on_demand.as_ref().map(|s| s.on_block_announce(who, *header.number())); self.sync.write().on_block_announce(&mut ProtocolContext::new(&self.context_data, io), who, hash, &header); } pub fn on_block_imported(&self, io: &mut SyncIo, hash: B::Hash, header: &B::Header) { self.sync.write().update_chain_info(&header); self.specialization.write().on_block_imported( &mut ProtocolContext::new(&self.context_data, io), hash.clone(), header ); // blocks are not announced by light clients if self.config.roles & Roles::LIGHT == Roles::LIGHT { return; } // send out block announcements let mut peers = self.context_data.peers.write(); for (who, ref mut peer) in peers.iter_mut() { if peer.known_blocks.insert(hash.clone()) { trace!(target: "sync", "Announcing block {:?} to {}", hash, who); self.send_message(io, *who, GenericMessage::BlockAnnounce(message::BlockAnnounce { header: header.clone() })); } } } pub fn on_block_finalized(&self, _io: &mut SyncIo, hash: B::Hash, header: &B::Header) { self.sync.write().block_finalized(&hash, *header.number()); } fn on_remote_call_request(&self, io: &mut SyncIo, who: NodeIndex, request: message::RemoteCallRequest) { trace!(target: "sync", "Remote call request {} from {} ({} at {})", request.id, who, request.method, request.block); let proof = match self.context_data.chain.execution_proof(&request.block, &request.method, &request.data) { Ok((_, proof)) => proof, Err(error) => { trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", request.id, who, request.method, request.block, error); Default::default() }, }; self.send_message(io, who, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, proof, })); } fn on_remote_call_response(&self, io: &mut SyncIo, who: NodeIndex, response: message::RemoteCallResponse) { trace!(target: "sync", "Remote call response {} from {}", response.id, who); self.on_demand.as_ref().map(|s| s.on_remote_call_response(io, who, response)); } fn 
on_remote_read_request(&self, io: &mut SyncIo, who: NodeIndex, request: message::RemoteReadRequest) { trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, request.key.to_hex::(), request.block); let proof = match self.context_data.chain.read_proof(&request.block, &request.key) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", request.id, who, request.key.to_hex::(), request.block, error); Default::default() }, }; self.send_message(io, who, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, proof, })); } fn on_remote_read_response(&self, io: &mut SyncIo, who: NodeIndex, response: message::RemoteReadResponse) { trace!(target: "sync", "Remote read response {} from {}", response.id, who); self.on_demand.as_ref().map(|s| s.on_remote_read_response(io, who, response)); } fn on_remote_header_request(&self, io: &mut SyncIo, who: NodeIndex, request: message::RemoteHeaderRequest>) { trace!(target: "sync", "Remote header proof request {} from {} ({})", request.id, who, request.block); let (header, proof) = match self.context_data.chain.header_proof(request.block) { Ok((header, proof)) => (Some(header), proof), Err(error) => { trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", request.id, who, request.block, error); (Default::default(), Default::default()) }, }; self.send_message(io, who, GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, proof, })); } fn on_remote_header_response(&self, io: &mut SyncIo, who: NodeIndex, response: message::RemoteHeaderResponse) { trace!(target: "sync", "Remote header proof response {} from {}", response.id, who); self.on_demand.as_ref().map(|s| s.on_remote_header_response(io, who, response)); } fn on_remote_changes_request(&self, io: &mut SyncIo, who: NodeIndex, request: message::RemoteChangesRequest) { trace!(target: "sync", "Remote 
changes proof request {} from {} for key {} ({}..{})", request.id, who, request.key.to_hex::(), request.first, request.last); let key = StorageKey(request.key); let proof = match self.context_data.chain.key_changes_proof(request.first, request.last, request.min, request.max, &key) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", request.id, who, key.0.to_hex::(), request.first, request.last, error); ChangesProof:: { max_block: Zero::zero(), proof: vec![], roots: BTreeMap::new(), roots_proof: vec![], } }, }; self.send_message(io, who, GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { id: request.id, max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), roots_proof: proof.roots_proof, })); } fn on_remote_changes_response(&self, io: &mut SyncIo, who: NodeIndex, response: message::RemoteChangesResponse, B::Hash>) { trace!(target: "sync", "Remote changes proof response {} from {} (max={})", response.id, who, response.max); self.on_demand.as_ref().map(|s| s.on_remote_changes_response(io, who, response)); } /// Execute a closure with access to a network context and specialization. 
pub fn with_spec(&self, io: &mut SyncIo, f: F) -> U where F: FnOnce(&mut S, &mut Context) -> U { f(&mut* self.specialization.write(), &mut ProtocolContext::new(&self.context_data, io)) } } fn send_message(peers: &RwLock>>, io: &mut SyncIo, who: NodeIndex, mut message: Message) { match message { GenericMessage::BlockRequest(ref mut r) => { let mut peers = peers.write(); if let Some(ref mut peer) = peers.get_mut(&who) { r.id = peer.next_request_id; peer.next_request_id = peer.next_request_id + 1; if r.fields == message::BlockAttributes::JUSTIFICATION { peer.justification_request = Some(r.clone()); peer.justification_request_timestamp = Some(time::Instant::now()); } else { peer.block_request = Some(r.clone()); peer.block_request_timestamp = Some(time::Instant::now()); } } }, _ => (), } io.send(who, message.encode()); } /// Construct a simple protocol that is composed of several sub protocols. /// Each "sub protocol" needs to implement `Specialization` and needs to provide a `new()` function. /// For more fine grained implementations, this macro is not usable. /// /// # Example /// /// ```nocompile /// construct_simple_protocol! { /// pub struct MyProtocol where Block = MyBlock { /// consensus_gossip: ConsensusGossip, /// other_protocol: MyCoolStuff, /// } /// } /// ``` /// /// You can also provide an optional parameter after `where Block = MyBlock`, so it looks like /// `where Block = MyBlock, Status = consensus_gossip`. This will instruct the implementation to /// use the `status()` function from the `ConsensusGossip` protocol. By default, `status()` returns /// an empty vector. #[macro_export] macro_rules! 
construct_simple_protocol { ( $( #[ $attr:meta ] )* pub struct $protocol:ident where Block = $block:ident $( , Status = $status_protocol_name:ident )* { $( $sub_protocol_name:ident : $sub_protocol:ident $( <$protocol_block:ty> )*, )* } ) => { $( #[$attr] )* pub struct $protocol { $( $sub_protocol_name: $sub_protocol $( <$protocol_block> )*, )* } impl $protocol { /// Instantiate a node protocol handler. pub fn new() -> Self { Self { $( $sub_protocol_name: $sub_protocol::new(), )* } } } impl $crate::specialization::NetworkSpecialization<$block> for $protocol { fn status(&self) -> Vec { $( let status = self.$status_protocol_name.status(); if !status.is_empty() { return status; } )* Vec::new() } fn on_connect( &mut self, _ctx: &mut $crate::Context<$block>, _who: $crate::NodeIndex, _status: $crate::StatusMessage<$block> ) { $( self.$sub_protocol_name.on_connect(_ctx, _who, _status); )* } fn on_disconnect(&mut self, _ctx: &mut $crate::Context<$block>, _who: $crate::NodeIndex) { $( self.$sub_protocol_name.on_disconnect(_ctx, _who); )* } fn on_message( &mut self, _ctx: &mut $crate::Context<$block>, _who: $crate::NodeIndex, _message: &mut Option<$crate::message::Message<$block>> ) { $( self.$sub_protocol_name.on_message(_ctx, _who, _message); )* } fn on_abort(&mut self) { $( self.$sub_protocol_name.on_abort(); )* } fn maintain_peers(&mut self, _ctx: &mut $crate::Context<$block>) { $( self.$sub_protocol_name.maintain_peers(_ctx); )* } fn on_block_imported( &mut self, _ctx: &mut $crate::Context<$block>, _hash: <$block as $crate::BlockT>::Hash, _header: &<$block as $crate::BlockT>::Header ) { $( self.$sub_protocol_name.on_block_imported(_ctx, _hash, _header); )* } } } }