Light GRANDPA import handler (#1669)

* GrandpaLightBlockImport

* extract authorities in AuraVerifier

* post-merge fix

* restore authorities cache

* license

* new finality proof draft

* generalized PendingJustifications

* finality proof messages

* fixed compilation

* pass verifier to import_finality_proof

* do not fetch remote proof from light import directly

* FinalityProofProvider

* fixed authorities cache test

* restored finality proof tests

* finality_proof docs

* use DB backend in test client

* justification_is_fetched_by_light_client_when_consensus_data_changes

* restore justification_is_fetched_by_light_client_when_consensus_data_changes

* some more tests

* added authorities-related TODO

* removed unneeded clear_finality_proof_requests field

* truncated some long lines

* more granular light import tests

* only provide finality proof if it is generated by the requested set

* post-merge fix

* finality_proof_is_none_if_first_justification_is_generated_by_unknown_set

* make light+grandpa test rely on finality proofs (instead of simple justifications)

* empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different

* missing trait method impl

* fixed proof-of-finality docs

* one more doc fix

* fix docs

* initialize authorities cache (post-merge fix)

* fixed cache initialization (post-merge fix)

* post-merge fix: fix light + GRANDPA tests (bad way)

* proper fix of empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different

* fixed easy grumbles

* import finality proofs in BlockImportWorker thread

* allow import of finality proofs for non-requested blocks

* limit number of fragments in finality proof

* GRANDPA post-merge fix

* BABE: post-merge fix
Author: Svyatoslav Nikolsky, 2019-05-13 12:36:52 +03:00
Committed by: Gavin Wood
Parent: 258f0835e4
Commit: 22586113ea
36 changed files with 3320 additions and 803 deletions
@@ -68,6 +68,12 @@ pub trait Client<Block: BlockT>: Send + Sync {
fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error>;
}
/// Finality proof provider.
pub trait FinalityProofProvider<Block: BlockT>: Send + Sync {
/// Prove finality of the block.
fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result<Option<Vec<u8>>, Error>;
}
impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
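For illustration, a minimal implementation of the new trait might look like the sketch below. Everything except the trait itself is an assumption: the type name is invented, and `Error` is taken to be the same error type used by the `Client` trait above. A real provider would decode `request`, look up a justification generated by an authority set the requester knows about, and return it encoded.

struct NoopFinalityProofProvider; // hypothetical

impl<Block: BlockT> FinalityProofProvider<Block> for NoopFinalityProofProvider {
	fn prove_finality(
		&self,
		_for_block: Block::Hash,
		_request: &[u8],
	) -> Result<Option<Vec<u8>>, Error> {
		// Ok(None) is a legitimate answer: the responder sends back an empty
		// proof, which the light client treats as "no proof available" rather
		// than as peer misbehaviour.
		Ok(None)
	}
}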
@@ -19,7 +19,7 @@
pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeKeyConfig, Secret};
use bitflags::bitflags;
use crate::chain::Client;
use crate::chain::{Client, FinalityProofProvider};
use parity_codec;
use crate::on_demand::OnDemandService;
use runtime_primitives::traits::{Block as BlockT};
@@ -34,6 +34,8 @@ pub struct Params<B: BlockT, S, H: ExHashT> {
pub network_config: NetworkConfiguration,
/// Substrate relay chain access point.
pub chain: Arc<Client<B>>,
/// Finality proof provider.
pub finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
/// On-demand service reference.
pub on_demand: Option<Arc<OnDemandService<B>>>,
/// Transaction pool.
@@ -0,0 +1,470 @@
// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{HashMap, HashSet, VecDeque};
use std::time::{Duration, Instant};
use log::{trace, warn};
use client::error::Error as ClientError;
use consensus::import_queue::SharedFinalityProofRequestBuilder;
use fork_tree::ForkTree;
use network_libp2p::PeerId;
use runtime_primitives::Justification;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use crate::message;
use crate::protocol::Context;
use crate::sync::{PeerSync, PeerSyncState};
// Time to wait before trying to get the same extra data from the same peer.
const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10);
/// Pending extra data request for the given block (hash and number).
type ExtraRequest<B> = (<B as BlockT>::Hash, NumberFor<B>);
/// Extra requests processor.
pub(crate) trait ExtraRequestsEssence<B: BlockT> {
type Response;
/// Name of request type to display in logs.
fn type_name(&self) -> &'static str;
/// Send network message corresponding to the request.
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>);
/// Create peer state for peer that is downloading extra data.
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B>;
}
/// Manages all extra data requests required for sync.
pub(crate) struct ExtraRequestsAggregator<B: BlockT> {
/// Manages justifications requests.
justifications: ExtraRequests<B, JustificationsRequestsEssence>,
/// Manages finality proof requests.
finality_proofs: ExtraRequests<B, FinalityProofRequestsEssence<B>>,
}
impl<B: BlockT> ExtraRequestsAggregator<B> {
pub(crate) fn new() -> Self {
ExtraRequestsAggregator {
justifications: ExtraRequests::new(JustificationsRequestsEssence),
finality_proofs: ExtraRequests::new(FinalityProofRequestsEssence(None)),
}
}
pub(crate) fn justifications(&mut self) -> &mut ExtraRequests<B, JustificationsRequestsEssence> {
&mut self.justifications
}
pub(crate) fn finality_proofs(&mut self) -> &mut ExtraRequests<B, FinalityProofRequestsEssence<B>> {
&mut self.finality_proofs
}
/// Dispatches all possible pending requests to the given peers.
pub(crate) fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
self.justifications.dispatch(peers, protocol);
self.finality_proofs.dispatch(peers, protocol);
}
/// Removes any pending extra requests for blocks lower than the
/// given best finalized block.
pub(crate) fn on_block_finalized<F>(
&mut self,
best_finalized_hash: &B::Hash,
best_finalized_number: NumberFor<B>,
is_descendent_of: &F,
) -> Result<(), fork_tree::Error<ClientError>>
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
{
self.justifications.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?;
self.finality_proofs.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?;
Ok(())
}
/// Retry any pending request if a peer disconnected.
pub(crate) fn peer_disconnected(&mut self, who: PeerId) {
self.justifications.peer_disconnected(&who);
self.finality_proofs.peer_disconnected(&who);
}
}
/// Manages pending block extra data (e.g. justification) requests.
/// Multiple extras may be requested for competing forks, or for the same branch
/// at different (increasing) heights. This structure will guarantee that extras
/// are fetched in-order, and that obsolete changes are pruned (when finalizing a
/// competing fork).
pub(crate) struct ExtraRequests<B: BlockT, Essence> {
tree: ForkTree<B::Hash, NumberFor<B>, ()>,
pending_requests: VecDeque<ExtraRequest<B>>,
peer_requests: HashMap<PeerId, ExtraRequest<B>>,
previous_requests: HashMap<ExtraRequest<B>, Vec<(PeerId, Instant)>>,
importing_requests: HashSet<ExtraRequest<B>>,
essence: Essence,
}
impl<B: BlockT, Essence: ExtraRequestsEssence<B>> ExtraRequests<B, Essence> {
fn new(essence: Essence) -> Self {
ExtraRequests {
tree: ForkTree::new(),
pending_requests: VecDeque::new(),
peer_requests: HashMap::new(),
previous_requests: HashMap::new(),
importing_requests: HashSet::new(),
essence,
}
}
/// Get mutable reference to the requests essence.
pub(crate) fn essence(&mut self) -> &mut Essence {
&mut self.essence
}
/// Dispatches all possible pending requests to the given peers. Peers are
/// filtered according to the current known best block (i.e. we won't send an
/// extra request for block #10 to a peer at block #2), and we also
/// throttle requests to the same peer if a previous request of the same kind
/// yielded no results.
pub(crate) fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
if self.pending_requests.is_empty() {
return;
}
let initial_pending_requests = self.pending_requests.len();
// clean up previous failed requests so we can retry again
for (_, requests) in self.previous_requests.iter_mut() {
requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT);
}
let mut available_peers = peers.iter().filter_map(|(peer, sync)| {
// don't send requests to peers that already have pending requests or are unavailable
if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) {
None
} else {
Some((peer.clone(), sync.best_number))
}
}).collect::<VecDeque<_>>();
let mut last_peer = available_peers.back().map(|p| p.0.clone());
let mut unhandled_requests = VecDeque::new();
loop {
let (peer, peer_best_number) = match available_peers.pop_front() {
Some(p) => p,
_ => break,
};
// only ask peers that have synced past the block number that we're
// asking the extra for and to whom we haven't already made
// the same request recently
let peer_eligible = {
let request = match self.pending_requests.front() {
Some(r) => r.clone(),
_ => break,
};
peer_best_number >= request.1 &&
!self.previous_requests
.get(&request)
.map(|requests| requests.iter().any(|i| i.0 == peer))
.unwrap_or(false)
};
if !peer_eligible {
available_peers.push_back((peer.clone(), peer_best_number));
// we tried all peers and none can answer this request
if Some(peer) == last_peer {
last_peer = available_peers.back().map(|p| p.0.clone());
let request = self.pending_requests.pop_front()
.expect("verified to be Some in the beginning of the loop; qed");
unhandled_requests.push_back(request);
}
continue;
}
last_peer = available_peers.back().map(|p| p.0.clone());
let request = self.pending_requests.pop_front()
.expect("verified to be Some in the beginning of the loop; qed");
self.peer_requests.insert(peer.clone(), request);
peers.get_mut(&peer)
.expect("peer was is taken from available_peers; available_peers is a subset of peers; qed")
.state = self.essence.peer_downloading_state(request.0.clone());
trace!(target: "sync", "Requesting {} for block #{} from {}", self.essence.type_name(), request.0, peer);
self.essence.send_network_request(protocol, peer, request);
}
self.pending_requests.append(&mut unhandled_requests);
trace!(target: "sync", "Dispatched {} {} requests ({} pending)",
initial_pending_requests - self.pending_requests.len(),
self.essence.type_name(),
self.pending_requests.len(),
);
}
/// Queue an extra data request (without dispatching it).
pub(crate) fn queue_request<F>(&mut self, request: ExtraRequest<B>, is_descendent_of: F)
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
{
match self.tree.import(request.0.clone(), request.1.clone(), (), &is_descendent_of) {
Ok(true) => {
// this is a new root so we add it to the current `pending_requests`
self.pending_requests.push_back((request.0, request.1));
},
Err(err) => {
warn!(target: "sync", "Failed to insert requested {} {:?} {:?} into tree: {:?}",
self.essence.type_name(),
request.0,
request.1,
err,
);
return;
},
_ => {},
}
}
/// Retry any pending request if a peer disconnected.
fn peer_disconnected(&mut self, who: &PeerId) {
if let Some(request) = self.peer_requests.remove(who) {
self.pending_requests.push_front(request);
}
}
/// Process the import result of an extra-data request.
/// Queues a retry in case the import failed.
/// Returns true if the result corresponds to a request that was being imported.
pub(crate) fn on_import_result(
&mut self,
request: (B::Hash, NumberFor<B>),
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
) -> bool {
self.try_finalize_root(request, finalization_result, true)
}
/// Processes the response for the request previously sent to the given
/// peer. Queues a retry in case the given response was `None`.
pub(crate) fn on_response(
&mut self,
who: PeerId,
response: Option<Essence::Response>,
) -> Option<(PeerId, B::Hash, NumberFor<B>, Essence::Response)> {
// we assume that the request maps to the given response, this is
// currently enforced by the outer network protocol before passing on
// messages to chain sync.
if let Some(request) = self.peer_requests.remove(&who) {
if let Some(response) = response {
self.importing_requests.insert(request);
return Some((who, request.0, request.1, response));
}
self.previous_requests
.entry(request)
.or_insert(Vec::new())
.push((who, Instant::now()));
self.pending_requests.push_front(request);
}
None
}
/// Removes any pending extra requests for blocks lower than the
/// given best finalized block.
fn on_block_finalized<F>(
&mut self,
best_finalized_hash: &B::Hash,
best_finalized_number: NumberFor<B>,
is_descendent_of: F,
) -> Result<(), fork_tree::Error<ClientError>>
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
{
let is_scheduled_root = self.try_finalize_root(
(*best_finalized_hash, best_finalized_number),
Ok((*best_finalized_hash, best_finalized_number)),
false,
);
if is_scheduled_root {
return Ok(());
}
use std::collections::HashSet;
self.tree.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?;
let roots = self.tree.roots().collect::<HashSet<_>>();
self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
Ok(())
}
/// Clear all data.
pub(crate) fn clear(&mut self) {
self.tree = ForkTree::new();
self.pending_requests.clear();
self.peer_requests.clear();
self.previous_requests.clear();
}
/// Try to finalize pending root.
/// Returns true if import of this request has been scheduled.
fn try_finalize_root(
&mut self,
request: (B::Hash, NumberFor<B>),
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
reschedule_on_failure: bool,
) -> bool {
if !self.importing_requests.remove(&request) {
return false;
}
let (finalized_hash, finalized_number) = match finalization_result {
Ok((finalized_hash, finalized_number)) => (finalized_hash, finalized_number),
Err(_) => {
if reschedule_on_failure {
self.pending_requests.push_front(request);
}
return true;
},
};
if self.tree.finalize_root(&finalized_hash).is_none() {
warn!(target: "sync", "Imported {} for {:?} {:?} which isn't a root in the tree: {:?}",
self.essence.type_name(),
finalized_hash,
finalized_number,
self.tree.roots().collect::<Vec<_>>(),
);
return true;
};
self.previous_requests.clear();
self.peer_requests.clear();
self.pending_requests =
self.tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect();
true
}
}
pub(crate) struct JustificationsRequestsEssence;
impl<B: BlockT> ExtraRequestsEssence<B> for JustificationsRequestsEssence {
type Response = Justification;
fn type_name(&self) -> &'static str {
"justification"
}
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>) {
protocol.send_block_request(peer, message::generic::BlockRequest {
id: 0,
fields: message::BlockAttributes::JUSTIFICATION,
from: message::FromBlock::Hash(request.0),
to: None,
direction: message::Direction::Ascending,
max: Some(1),
})
}
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B> {
PeerSyncState::DownloadingJustification(block)
}
}
pub(crate) struct FinalityProofRequestsEssence<B: BlockT>(pub Option<SharedFinalityProofRequestBuilder<B>>);
impl<B: BlockT> ExtraRequestsEssence<B> for FinalityProofRequestsEssence<B> {
type Response = Vec<u8>;
fn type_name(&self) -> &'static str {
"finality proof"
}
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>) {
protocol.send_finality_proof_request(peer, message::generic::FinalityProofRequest {
id: 0,
block: request.0,
request: self.0.as_ref()
.map(|builder| builder.build_request_data(&request.0))
.unwrap_or_default(),
})
}
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B> {
PeerSyncState::DownloadingFinalityProof(block)
}
}
#[cfg(test)]
mod tests {
use client::error::Error as ClientError;
use test_client::runtime::{Block, Hash};
use super::ExtraRequestsAggregator;
#[test]
fn request_is_rescheduled_when_earlier_block_is_finalized() {
let _ = ::env_logger::try_init();
let mut extra_requests = ExtraRequestsAggregator::<Block>::new();
let hash4 = [4; 32].into();
let hash5 = [5; 32].into();
let hash6 = [6; 32].into();
let hash7 = [7; 32].into();
fn is_descendent_of(base: &Hash, target: &Hash) -> Result<bool, ClientError> {
Ok(target[0] >= base[0])
}
// make #4 last finalized block
extra_requests.finality_proofs().tree.import(hash4, 4, (), &is_descendent_of).unwrap();
extra_requests.finality_proofs().tree.finalize_root(&hash4);
// schedule request for #6
extra_requests.finality_proofs().queue_request((hash6, 6), is_descendent_of);
// receive finality proof for #5
extra_requests.finality_proofs().importing_requests.insert((hash6, 6));
extra_requests.finality_proofs().on_block_finalized(&hash5, 5, is_descendent_of).unwrap();
extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash5, 5)));
// ensure that request for #6 is still pending
assert_eq!(
extra_requests.finality_proofs().pending_requests.iter().collect::<Vec<_>>(),
vec![&(hash6, 6)],
);
// receive finality proof for #7
extra_requests.finality_proofs().importing_requests.insert((hash6, 6));
extra_requests.finality_proofs().on_block_finalized(&hash6, 6, is_descendent_of).unwrap();
extra_requests.finality_proofs().on_block_finalized(&hash7, 7, is_descendent_of).unwrap();
extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash7, 7)));
// ensure that there's no request for #6
assert_eq!(
extra_requests.finality_proofs().pending_requests.iter().collect::<Vec<_>>(),
Vec::<&(Hash, u64)>::new(),
);
}
}
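The `Essence` parameter is the extension point here: the commit ships justification and finality-proof essences, but any request type that has a network message and a peer-sync state can reuse the same scheduling, retry and fork-tree pruning machinery. A hedged sketch of a third essence (the type, its log name, and the reused `DownloadingFinalityProof` state are all hypothetical, not part of the commit):

pub(crate) struct HeaderProofRequestsEssence; // hypothetical request kind

impl<B: BlockT> ExtraRequestsEssence<B> for HeaderProofRequestsEssence {
	type Response = Vec<u8>;

	fn type_name(&self) -> &'static str {
		"header proof" // appears in the trace!/warn! lines above
	}

	fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>) {
		// A real essence would send its own protocol message here, the way the
		// justification essence sends a BlockRequest and the finality-proof
		// essence sends a FinalityProofRequest.
		let _ = (protocol, peer, request);
		unimplemented!("hypothetical message type")
	}

	fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B> {
		// Reuses an existing state for the sketch; a real essence would add
		// its own PeerSyncState variant.
		PeerSyncState::DownloadingFinalityProof(block)
	}
}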
@@ -30,6 +30,7 @@ mod protocol;
mod chain;
mod blocks;
mod on_demand;
mod extra_requests;
mod util;
pub mod config;
pub mod consensus_gossip;
@@ -40,7 +41,7 @@ pub mod specialization;
#[cfg(any(test, feature = "test-helpers"))]
pub mod test;
pub use chain::Client as ClientHandle;
pub use chain::{Client as ClientHandle, FinalityProofProvider};
pub use service::{
Service, FetchFuture, TransactionPool, ManageNetwork, NetworkMsg,
SyncProvider, ExHashT, ReportHandle,
@@ -23,6 +23,7 @@ pub use self::generic::{
BlockAnnounce, RemoteCallRequest, RemoteReadRequest,
RemoteHeaderRequest, RemoteHeaderResponse,
RemoteChangesRequest, RemoteChangesResponse,
FinalityProofRequest, FinalityProofResponse,
FromBlock, RemoteReadChildRequest,
};
@@ -200,6 +201,10 @@ pub mod generic {
RemoteChangesResponse(RemoteChangesResponse<Number, Hash>),
/// Remote child storage read request.
RemoteReadChildRequest(RemoteReadChildRequest<Hash>),
/// Finality proof request.
FinalityProofRequest(FinalityProofRequest<Hash>),
/// Finality proof response.
FinalityProofResponse(FinalityProofResponse<Hash>),
/// Chain-specific message
#[codec(index = "255")]
ChainSpecific(Vec<u8>),
@@ -359,4 +364,26 @@ pub mod generic {
/// Missing changes tries roots proof.
pub roots_proof: Vec<Vec<u8>>,
}
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
/// Finality proof request.
pub struct FinalityProofRequest<H> {
/// Unique request id.
pub id: RequestId,
/// Hash of the block to request proof for.
pub block: H,
/// Additional data blob (that both requester and provider understand) required for proving finality.
pub request: Vec<u8>,
}
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
/// Finality proof response.
pub struct FinalityProofResponse<H> {
/// Id of a request this response was made for.
pub id: RequestId,
/// Hash of the block (the same as in the FinalityProofRequest).
pub block: H,
/// Finality proof (if available).
pub proof: Option<Vec<u8>>,
}
}
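Both messages are plain SCALE-encoded structs, so the wire format can be exercised in isolation. A self-contained sketch, with the request struct mirrored locally rather than imported, and assuming `RequestId = u64` (matching the crate's alias) and parity-codec's derive support:

use parity_codec::{Decode, Encode};

type RequestId = u64; // assumption: mirrors the crate's RequestId alias

#[derive(Debug, PartialEq, Encode, Decode)]
struct FinalityProofRequest<H> {
	id: RequestId,
	block: H,
	request: Vec<u8>,
}

fn main() {
	let req = FinalityProofRequest::<[u8; 32]> {
		id: 0, // the protocol above always sends id 0
		block: [5; 32],
		request: vec![1, 2, 3], // opaque blob built by the request builder
	};
	// Round-trip through SCALE; in parity-codec, Decode returns None on a
	// malformed message.
	let encoded = req.encode();
	let decoded = FinalityProofRequest::<[u8; 32]>::decode(&mut &encoded[..]);
	assert_eq!(decoded, Some(req));
}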
@@ -155,6 +155,11 @@ impl<B: BlockT> OnDemand<B> where
}
}
/// Get checker reference.
pub fn checker(&self) -> &Arc<FetchChecker<B>> {
&self.checker
}
/// Sets weak reference to network service.
pub fn set_network_sender(&self, network_sender: NetworkChan<B>) {
self.network_sender.lock().replace(network_sender);
@@ -20,7 +20,11 @@ use primitives::storage::StorageKey;
use consensus::{import_queue::IncomingBlock, import_queue::Origin, BlockOrigin};
use runtime_primitives::{generic::BlockId, ConsensusEngineId, Justification};
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, Zero};
use crate::message::{self, BlockRequest as BlockRequestMessage, Message};
use consensus::import_queue::SharedFinalityProofRequestBuilder;
use crate::message::{
self, BlockRequest as BlockRequestMessage,
FinalityProofRequest as FinalityProofRequestMessage, Message,
};
use crate::message::generic::{Message as GenericMessage, ConsensusMessage};
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient};
use crate::on_demand::OnDemandService;
@@ -34,7 +38,7 @@ use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::{cmp, num::NonZeroUsize, time};
use log::{trace, debug, warn, error};
use crate::chain::Client;
use crate::chain::{Client, FinalityProofProvider};
use client::light::fetcher::ChangesProof;
use crate::{error, util::LruHashSet};
@@ -163,6 +167,9 @@ pub trait Context<B: BlockT> {
/// Request a block from a peer.
fn send_block_request(&mut self, who: PeerId, request: BlockRequestMessage<B>);
/// Request a finality proof from a peer.
fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage<B::Hash>);
/// Send a consensus message to a peer.
fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage);
@@ -205,6 +212,12 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context<B> for ProtocolContext<'a, B,
)
}
fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage<B::Hash>) {
send_message(&mut self.context_data.peers, &self.network_chan, who,
GenericMessage::FinalityProofRequest(request)
)
}
fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage) {
send_message(&mut self.context_data.peers, &self.network_chan, who,
GenericMessage::Consensus(consensus)
@@ -223,6 +236,7 @@ struct ContextData<B: BlockT, H: ExHashT> {
// All connected peers
peers: HashMap<PeerId, Peer<B, H>>,
pub chain: Arc<Client<B>>,
pub finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
}
/// A task, consisting of a user-provided closure, to be executed on the Protocol thread.
@@ -263,6 +277,12 @@ pub enum ProtocolMsg<B: BlockT, S: NetworkSpecialization<B>> {
RequestJustification(B::Hash, NumberFor<B>),
/// Inform protocol whether a justification was successfully imported.
JustificationImportResult(B::Hash, NumberFor<B>, bool),
/// Set finality proof request builder.
SetFinalityProofRequestBuilder(SharedFinalityProofRequestBuilder<B>),
/// Tell protocol to request finality proof for a block.
RequestFinalityProof(B::Hash, NumberFor<B>),
/// Inform protocol whether a finality proof was successfully imported.
FinalityProofImportResult((B::Hash, NumberFor<B>), Result<(B::Hash, NumberFor<B>), ()>),
/// Propagate a block to peers.
AnnounceBlock(B::Hash),
/// A block has been imported (sent by the client).
@@ -290,6 +310,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
network_chan: NetworkChan<B>,
config: ProtocolConfig,
chain: Arc<Client<B>>,
finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
on_demand: Option<Arc<OnDemandService<B>>>,
transaction_pool: Arc<TransactionPool<H, B>>,
specialization: S,
@@ -306,6 +327,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
context_data: ContextData {
peers: HashMap::new(),
chain,
finality_proof_provider,
},
on_demand,
genesis_hash: info.chain.genesis_hash,
@@ -408,6 +430,16 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
self.sync.request_justification(&hash, number, &mut context);
},
ProtocolMsg::JustificationImportResult(hash, number, success) => self.sync.justification_import_result(hash, number, success),
ProtocolMsg::SetFinalityProofRequestBuilder(builder) => self.sync.set_finality_proof_request_builder(builder),
ProtocolMsg::RequestFinalityProof(hash, number) => {
let mut context =
ProtocolContext::new(&mut self.context_data, &self.network_chan);
self.sync.request_finality_proof(&hash, number, &mut context);
},
ProtocolMsg::FinalityProofImportResult(
requested_block,
finalization_result,
) => self.sync.finality_proof_import_result(requested_block, finalization_result),
ProtocolMsg::PropagateExtrinsics => self.propagate_extrinsics(),
#[cfg(any(test, feature = "test-helpers"))]
ProtocolMsg::Tick => self.tick(),
@@ -476,6 +508,8 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
GenericMessage::RemoteHeaderResponse(response) => self.on_remote_header_response(who, response),
GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(who, request),
GenericMessage::RemoteChangesResponse(response) => self.on_remote_changes_response(who, response),
GenericMessage::FinalityProofRequest(request) => self.on_finality_proof_request(who, request),
GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, response),
GenericMessage::Consensus(msg) => {
if self.context_data.peers.get(&who).map_or(false, |peer| peer.info.protocol_version > 2) {
self.consensus_gossip.on_incoming(
@@ -1099,6 +1133,53 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
.as_ref()
.map(|s| s.on_remote_changes_response(who, response));
}
fn on_finality_proof_request(
&mut self,
who: PeerId,
request: message::FinalityProofRequest<B::Hash>,
) {
trace!(target: "sync", "Finality proof request from {} for {}", who, request.block);
let finality_proof = self.context_data.finality_proof_provider.as_ref()
.ok_or_else(|| String::from("Finality provider is not configured"))
.and_then(|provider| provider.prove_finality(request.block, &request.request)
.map_err(|e| e.to_string()));
let finality_proof = match finality_proof {
Ok(finality_proof) => finality_proof,
Err(error) => {
trace!(target: "sync", "Finality proof request from {} for {} failed with: {}",
who, request.block, error);
None
},
};
self.send_message(
who,
GenericMessage::FinalityProofResponse(message::FinalityProofResponse {
id: 0,
block: request.block,
proof: finality_proof,
}),
);
}
fn on_finality_proof_response(
&mut self,
who: PeerId,
response: message::FinalityProofResponse<B::Hash>,
) -> CustomMessageOutcome<B> {
trace!(target: "sync", "Finality proof response from {} for {}", who, response.block);
let outcome = self.sync.on_block_finality_proof_data(
&mut ProtocolContext::new(&mut self.context_data, &self.network_chan),
who,
response,
);
if let Some((origin, hash, nb, proof)) = outcome {
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof)
} else {
CustomMessageOutcome::None
}
}
}
/// Outcome of an incoming custom message.
@@ -1106,6 +1187,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
pub enum CustomMessageOutcome<B: BlockT> {
BlockImport(BlockOrigin, Vec<IncomingBlock<B>>),
JustificationImport(Origin, B::Hash, NumberFor<B>, Justification),
FinalityProofImport(Origin, B::Hash, NumberFor<B>, Vec<u8>),
None,
}
@@ -26,7 +26,7 @@ use network_libp2p::{ProtocolId, NetworkConfiguration};
use network_libp2p::{start_service, parse_str_addr, Service as NetworkService, ServiceEvent as NetworkServiceEvent};
use network_libp2p::{RegisteredProtocol, NetworkState};
use peerset::PeersetHandle;
use consensus::import_queue::{ImportQueue, Link};
use consensus::import_queue::{ImportQueue, Link, SharedFinalityProofRequestBuilder};
use runtime_primitives::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId};
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient};
@@ -115,6 +115,31 @@ impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestJustification(hash.clone(), number));
}
fn request_finality_proof(&self, hash: &B::Hash, number: NumberFor<B>) {
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestFinalityProof(
hash.clone(),
number,
));
}
fn finality_proof_imported(
&self,
who: PeerId,
request_block: (B::Hash, NumberFor<B>),
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
) {
let success = finalization_result.is_ok();
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::FinalityProofImportResult(
request_block,
finalization_result,
));
if !success {
info!("Invalid finality proof provided by {} for #{}", who, request_block.0);
let _ = self.network_sender.send(NetworkMsg::ReportPeer(who.clone(), i32::min_value()));
let _ = self.network_sender.send(NetworkMsg::DisconnectPeer(who.clone()));
}
}
fn report_peer(&self, who: PeerId, reputation_change: i32) {
self.network_sender.send(NetworkMsg::ReportPeer(who, reputation_change));
}
@@ -122,6 +147,10 @@ impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
fn restart(&self) {
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RestartSync);
}
fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder<B>) {
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::SetFinalityProofRequestBuilder(request_builder));
}
}
/// A cloneable handle for reporting cost/benefits of peers.
@@ -179,6 +208,7 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>> Service<B, S> {
network_chan.clone(),
params.config,
params.chain,
params.finality_proof_provider,
params.on_demand,
params.transaction_pool,
params.specialization,
@@ -593,6 +623,8 @@ fn run_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
import_queue.import_blocks(origin, blocks),
CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) =>
import_queue.import_justification(origin, hash, nb, justification),
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) =>
import_queue.import_finality_proof(origin, hash, nb, proof),
CustomMessageOutcome::None => {}
}
}
@@ -32,18 +32,16 @@
use std::cmp::max;
use std::collections::{HashMap, VecDeque};
use std::time::{Duration, Instant};
use log::{debug, trace, info, warn};
use log::{debug, trace, warn, info};
use crate::protocol::Context;
use fork_tree::ForkTree;
use network_libp2p::PeerId;
use client::{BlockStatus, ClientInfo};
use consensus::{BlockOrigin, import_queue::IncomingBlock};
use consensus::{BlockOrigin, import_queue::{IncomingBlock, SharedFinalityProofRequestBuilder}};
use client::error::Error as ClientError;
use crate::blocks::BlockCollection;
use runtime_primitives::Justification;
use crate::extra_requests::ExtraRequestsAggregator;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, CheckedSub};
use runtime_primitives::generic::BlockId;
use runtime_primitives::{Justification, generic::BlockId};
use crate::message;
use crate::config::Roles;
use std::collections::HashSet;
@@ -54,8 +52,6 @@ const MAX_BLOCKS_TO_REQUEST: usize = 128;
const MAX_IMPORTING_BLOCKS: usize = 2048;
// Number of blocks in the queue that prevents ancestry search.
const MAJOR_SYNC_BLOCKS: usize = 5;
// Time to wait before trying to get a justification from the same peer.
const JUSTIFICATION_RETRY_WAIT: Duration = Duration::from_secs(10);
// Number of recently announced blocks to track for each peer.
const ANNOUNCE_HISTORY_SIZE: usize = 64;
// Max number of blocks to download for unknown forks.
@@ -68,7 +64,7 @@ const ANCESTRY_BLOCK_ERROR_REPUTATION_CHANGE: i32 = -(1 << 9);
const GENESIS_MISMATCH_REPUTATION_CHANGE: i32 = i32::min_value() + 1;
#[derive(Debug)]
struct PeerSync<B: BlockT> {
pub(crate) struct PeerSync<B: BlockT> {
pub common_number: NumberFor<B>,
pub best_hash: B::Hash,
pub best_number: NumberFor<B>,
@@ -86,7 +82,7 @@ pub(crate) struct PeerInfo<B: BlockT> {
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum AncestorSearchState<B: BlockT> {
pub(crate) enum AncestorSearchState<B: BlockT> {
/// Use exponential backoff to find an ancestor, then switch to binary search.
/// We keep track of the exponent.
ExponentialBackoff(NumberFor<B>),
@@ -96,270 +92,13 @@ enum AncestorSearchState<B: BlockT> {
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum PeerSyncState<B: BlockT> {
pub(crate) enum PeerSyncState<B: BlockT> {
AncestorSearch(NumberFor<B>, AncestorSearchState<B>),
Available,
DownloadingNew(NumberFor<B>),
DownloadingStale(B::Hash),
DownloadingJustification(B::Hash),
}
/// Pending justification request for the given block (hash and number).
type PendingJustification<B> = (<B as BlockT>::Hash, NumberFor<B>);
/// Manages pending block justification requests. Multiple justifications may be
/// requested for competing forks, or for the same branch at different
/// (increasing) heights. This structure will guarantee that justifications are
/// fetched in-order, and that obsolete changes are pruned (when finalizing a
/// competing fork).
struct PendingJustifications<B: BlockT> {
justifications: ForkTree<B::Hash, NumberFor<B>, ()>,
pending_requests: VecDeque<PendingJustification<B>>,
peer_requests: HashMap<PeerId, PendingJustification<B>>,
previous_requests: HashMap<PendingJustification<B>, Vec<(PeerId, Instant)>>,
importing_requests: HashSet<PendingJustification<B>>,
}
impl<B: BlockT> PendingJustifications<B> {
fn new() -> PendingJustifications<B> {
PendingJustifications {
justifications: ForkTree::new(),
pending_requests: VecDeque::new(),
peer_requests: HashMap::new(),
previous_requests: HashMap::new(),
importing_requests: HashSet::new(),
}
}
/// Dispatches all possible pending requests to the given peers. Peers are
/// filtered according to the current known best block (i.e. we won't send a
/// justification request for block #10 to a peer at block #2), and we also
/// throttle requests to the same peer if a previous justification request
/// yielded no results.
fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
if self.pending_requests.is_empty() {
return;
}
let initial_pending_requests = self.pending_requests.len();
// clean up previous failed requests so we can retry again
for (_, requests) in self.previous_requests.iter_mut() {
requests.retain(|(_, instant)| instant.elapsed() < JUSTIFICATION_RETRY_WAIT);
}
let mut available_peers = peers.iter().filter_map(|(peer, sync)| {
// don't request to any peers that already have pending requests or are unavailable
if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) {
None
} else {
Some((peer.clone(), sync.best_number))
}
}).collect::<VecDeque<_>>();
let mut last_peer = available_peers.back().map(|p| p.0.clone());
let mut unhandled_requests = VecDeque::new();
loop {
let (peer, peer_best_number) = match available_peers.pop_front() {
Some(p) => p,
_ => break,
};
// only ask peers that have synced past the block number that we're
// asking the justification for and to whom we haven't already made
// the same request recently
let peer_eligible = {
let request = match self.pending_requests.front() {
Some(r) => r.clone(),
_ => break,
};
peer_best_number >= request.1 &&
!self.previous_requests
.get(&request)
.map(|requests| requests.iter().any(|i| i.0 == peer))
.unwrap_or(false)
};
if !peer_eligible {
available_peers.push_back((peer.clone(), peer_best_number));
// we tried all peers and none can answer this request
if Some(peer) == last_peer {
last_peer = available_peers.back().map(|p| p.0.clone());
let request = self.pending_requests.pop_front()
.expect("verified to be Some in the beginning of the loop; qed");
unhandled_requests.push_back(request);
}
continue;
}
last_peer = available_peers.back().map(|p| p.0.clone());
let request = self.pending_requests.pop_front()
.expect("verified to be Some in the beginning of the loop; qed");
self.peer_requests.insert(peer.clone(), request);
peers.get_mut(&peer)
.expect("peer was is taken from available_peers; available_peers is a subset of peers; qed")
.state = PeerSyncState::DownloadingJustification(request.0);
trace!(target: "sync", "Requesting justification for block #{} from {}", request.0, peer);
let request = message::generic::BlockRequest {
id: 0,
fields: message::BlockAttributes::JUSTIFICATION,
from: message::FromBlock::Hash(request.0),
to: None,
direction: message::Direction::Ascending,
max: Some(1),
};
protocol.send_block_request(peer, request);
}
self.pending_requests.append(&mut unhandled_requests);
trace!(target: "sync", "Dispatched {} justification requests ({} pending)",
initial_pending_requests - self.pending_requests.len(),
self.pending_requests.len(),
);
}
/// Queue a justification request (without dispatching it).
fn queue_request<F>(
&mut self,
justification: &PendingJustification<B>,
is_descendent_of: F,
) where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError> {
match self.justifications.import(justification.0.clone(), justification.1.clone(), (), &is_descendent_of) {
Ok(true) => {
// this is a new root so we add it to the current `pending_requests`
self.pending_requests.push_back((justification.0, justification.1));
},
Err(err) => {
warn!(target: "sync", "Failed to insert requested justification {:?} {:?} into tree: {:?}",
justification.0,
justification.1,
err,
);
return;
},
_ => {},
};
}
/// Retry any pending request if a peer disconnected.
fn peer_disconnected(&mut self, who: PeerId) {
if let Some(request) = self.peer_requests.remove(&who) {
self.pending_requests.push_front(request);
}
}
/// Process the import of a justification.
/// Queues a retry in case the import failed.
fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
let request = (hash, number);
if !self.importing_requests.remove(&request) {
debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.",
request.0,
request.1,
);
return;
};
if success {
if self.justifications.finalize_root(&request.0).is_none() {
warn!(target: "sync", "Imported justification for {:?} {:?} which isn't a root in the tree: {:?}",
request.0,
request.1,
self.justifications.roots().collect::<Vec<_>>(),
);
return;
};
self.previous_requests.clear();
self.peer_requests.clear();
self.pending_requests =
self.justifications.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect();
return;
}
self.pending_requests.push_front(request);
}
/// Processes the response for the request previously sent to the given
/// peer. Queues a retry in case the given justification
/// was `None`.
///
/// Returns `Some` if this produces a justification that must be imported in the import queue.
#[must_use]
fn on_response(
&mut self,
who: PeerId,
justification: Option<Justification>,
) -> Option<(PeerId, B::Hash, NumberFor<B>, Justification)> {
// we assume that the request maps to the given response, this is
// currently enforced by the outer network protocol before passing on
// messages to chain sync.
if let Some(request) = self.peer_requests.remove(&who) {
if let Some(justification) = justification {
self.importing_requests.insert(request);
return Some((who, request.0, request.1, justification))
}
self.previous_requests
.entry(request)
.or_insert(Vec::new())
.push((who, Instant::now()));
self.pending_requests.push_front(request);
}
None
}
/// Removes any pending justification requests for blocks lower than the
/// given best finalized.
fn on_block_finalized<F>(
&mut self,
best_finalized_hash: &B::Hash,
best_finalized_number: NumberFor<B>,
is_descendent_of: F,
) -> Result<(), fork_tree::Error<ClientError>>
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
{
if self.importing_requests.contains(&(*best_finalized_hash, best_finalized_number)) {
// we imported this justification ourselves, so we should get back a response
// from the import queue through `justification_import_result`
return Ok(());
}
self.justifications.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?;
let roots = self.justifications.roots().collect::<HashSet<_>>();
self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
Ok(())
}
/// Clear all data.
fn clear(&mut self) {
self.justifications = ForkTree::new();
self.pending_requests.clear();
self.peer_requests.clear();
self.previous_requests.clear();
}
DownloadingFinalityProof(B::Hash),
}
/// Relay chain sync strategy.
@@ -370,7 +109,7 @@ pub struct ChainSync<B: BlockT> {
best_queued_number: NumberFor<B>,
best_queued_hash: B::Hash,
required_block_attributes: message::BlockAttributes,
justifications: PendingJustifications<B>,
extra_requests: ExtraRequestsAggregator<B>,
queue_blocks: HashSet<B::Hash>,
best_importing_number: NumberFor<B>,
}
@@ -428,7 +167,7 @@ impl<B: BlockT> ChainSync<B> {
blocks: BlockCollection::new(),
best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash),
best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number),
justifications: PendingJustifications::new(),
extra_requests: ExtraRequestsAggregator::new(),
required_block_attributes,
queue_blocks: Default::default(),
best_importing_number: Zero::zero(),
@@ -664,7 +403,7 @@ impl<B: BlockT> ChainSync<B> {
vec![]
}
},
PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) => Vec::new(),
PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingFinalityProof(..) => Vec::new(),
}
} else {
Vec::new()
@@ -722,7 +461,7 @@ impl<B: BlockT> ChainSync<B> {
return None;
}
return self.justifications.on_response(
return self.extra_requests.justifications().on_response(
who,
response.justification,
);
@@ -744,6 +483,42 @@ impl<B: BlockT> ChainSync<B> {
None
}
/// Handle new finality proof data.
pub(crate) fn on_block_finality_proof_data(
&mut self,
protocol: &mut Context<B>,
who: PeerId,
response: message::FinalityProofResponse<B::Hash>,
) -> Option<(PeerId, B::Hash, NumberFor<B>, Vec<u8>)> {
if let Some(ref mut peer) = self.peers.get_mut(&who) {
if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state {
peer.state = PeerSyncState::Available;
// we only request one finality proof at a time
if hash != response.block {
info!(
"Invalid block finality proof provided: requested: {:?} got: {:?}",
hash,
response.block,
);
protocol.report_peer(who.clone(), i32::min_value());
protocol.disconnect_peer(who);
return None;
}
return self.extra_requests.finality_proofs().on_response(
who,
response.proof,
);
}
}
self.maintain_sync(protocol);
None
}
/// A batch of blocks has been processed, with or without errors.
/// Call this when a batch of blocks has been processed by the import queue, with or without
/// errors.
pub fn blocks_processed(&mut self, processed_blocks: Vec<B::Hash>, has_error: bool) {
@@ -761,13 +536,13 @@ impl<B: BlockT> ChainSync<B> {
for peer in peers {
self.download_new(protocol, peer);
}
self.justifications.dispatch(&mut self.peers, protocol);
self.extra_requests.dispatch(&mut self.peers, protocol);
}
/// Called periodically to perform any time-based actions. Must be called at a regular
/// interval.
pub fn tick(&mut self, protocol: &mut Context<B>) {
self.justifications.dispatch(&mut self.peers, protocol);
self.extra_requests.dispatch(&mut self.peers, protocol);
}
/// Request a justification for the given block.
@@ -775,23 +550,53 @@ impl<B: BlockT> ChainSync<B> {
/// Uses `protocol` to queue a new justification request and tries to dispatch all pending
/// requests.
pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
self.justifications.queue_request(
&(*hash, number),
self.extra_requests.justifications().queue_request(
(*hash, number),
|base, block| protocol.client().is_descendent_of(base, block),
);
self.justifications.dispatch(&mut self.peers, protocol);
self.extra_requests.justifications().dispatch(&mut self.peers, protocol);
}
/// Clears all pending justification requests.
pub fn clear_justification_requests(&mut self) {
self.justifications.clear();
self.extra_requests.justifications().clear();
}
/// Call this when a justification has been processed by the import queue, with or without
/// errors.
pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
self.justifications.justification_import_result(hash, number, success);
let finalization_result = if success { Ok((hash, number)) } else { Err(()) };
if !self.extra_requests.justifications().on_import_result((hash, number), finalization_result) {
debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.",
hash,
number,
);
}
}
/// Request a finality proof for the given block.
///
/// Queues a new finality proof request and tries to dispatch all pending requests.
pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
self.extra_requests.finality_proofs().queue_request(
(*hash, number),
|base, block| protocol.client().is_descendent_of(base, block),
);
self.extra_requests.finality_proofs().dispatch(&mut self.peers, protocol);
}
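/// Call this when a finality proof has been processed by the import queue, with or without errors.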
pub fn finality_proof_import_result(
&mut self,
request_block: (B::Hash, NumberFor<B>),
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
) {
self.extra_requests.finality_proofs().on_import_result(request_block, finalization_result);
}
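/// Sets the builder used to construct the request data sent along with finality proof requests.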
pub fn set_finality_proof_request_builder(&mut self, request_builder: SharedFinalityProofRequestBuilder<B>) {
self.extra_requests.finality_proofs().essence().0 = Some(request_builder);
}
/// Notify about successful import of the given block.
@@ -801,12 +606,12 @@ impl<B: BlockT> ChainSync<B> {
/// Notify about finalization of the given block.
pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
if let Err(err) = self.justifications.on_block_finalized(
if let Err(err) = self.extra_requests.on_block_finalized(
hash,
number,
|base, block| protocol.client().is_descendent_of(base, block),
&|base, block| protocol.client().is_descendent_of(base, block),
) {
warn!(target: "sync", "Error cleaning up pending justification requests: {:?}", err);
warn!(target: "sync", "Error cleaning up pending extra data requests: {:?}", err);
};
}
@@ -916,7 +721,7 @@ impl<B: BlockT> ChainSync<B> {
pub(crate) fn peer_disconnected(&mut self, protocol: &mut Context<B>, who: PeerId) {
self.blocks.clear_peer_download(&who);
self.peers.remove(&who);
self.justifications.peer_disconnected(who);
self.extra_requests.peer_disconnected(who);
self.maintain_sync(protocol);
}
@@ -77,7 +77,7 @@ fn async_import_queue_drops() {
// Perform this test multiple times since it exhibits non-deterministic behavior.
for _ in 0..100 {
let verifier = Arc::new(PassThroughVerifier(true));
let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None);
let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None, None, None);
queue.start(Box::new(TestLink{})).unwrap();
drop(queue);
}
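The three trailing `None`s are the new optional handles on `BasicQueue::new`. A sketch of the same call with the arguments named; the ordering is inferred from `make_block_import`'s return tuple later in this commit, so treat it as an assumption rather than a reference:

let verifier = Arc::new(PassThroughVerifier(true));
let block_import = Arc::new(test_client::new()); // the full client acts as block import
let justification_import: Option<SharedJustificationImport<Block>> = None;
let finality_proof_import: Option<SharedFinalityProofImport<Block>> = None;
let finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<Block>> = None;
let queue = BasicQueue::new(
	verifier,
	block_import,
	justification_import,
	finality_proof_import,
	finality_proof_request_builder,
);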
@@ -26,11 +26,16 @@ use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use log::trace;
use client;
use crate::chain::FinalityProofProvider;
use client::{self, ClientInfo, BlockchainEvents, FinalityNotifications, in_mem::Backend as InMemoryBackend, error::Result as ClientResult};
use client::block_builder::BlockBuilder;
use crate::config::ProtocolConfig;
use client::backend::AuxStore;
use crate::config::{ProtocolConfig, Roles};
use consensus::import_queue::{BasicQueue, ImportQueue, IncomingBlock};
use consensus::import_queue::{Link, SharedBlockImport, SharedJustificationImport, Verifier};
use consensus::import_queue::{
Link, SharedBlockImport, SharedJustificationImport, Verifier, SharedFinalityProofImport,
SharedFinalityProofRequestBuilder,
};
use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind};
use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport};
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient, TopicNotification};
@@ -39,7 +44,7 @@ use futures::{prelude::*, sync::{mpsc, oneshot}};
use crate::message::Message;
use network_libp2p::PeerId;
use parking_lot::{Mutex, RwLock};
use primitives::{H256, sr25519::Public as AuthorityId};
use primitives::{H256, sr25519::Public as AuthorityId, Blake2Hasher};
use crate::protocol::{ConnectedPeer, Context, Protocol, ProtocolMsg, CustomMessageOutcome};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor};
@@ -111,7 +116,79 @@ impl NetworkSpecialization<Block> for DummySpecialization {
}
}
pub type PeersClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
pub type PeersFullClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
pub type PeersLightClient = client::Client<test_client::LightBackend, test_client::LightExecutor, Block, test_client::runtime::RuntimeApi>;
#[derive(Clone)]
pub enum PeersClient {
Full(Arc<PeersFullClient>),
Light(Arc<PeersLightClient>),
}
impl PeersClient {
pub fn as_full(&self) -> Option<Arc<PeersFullClient>> {
match *self {
PeersClient::Full(ref client) => Some(client.clone()),
_ => None,
}
}
pub fn as_block_import(&self) -> SharedBlockImport<Block> {
match *self {
PeersClient::Full(ref client) => client.clone() as _,
PeersClient::Light(ref client) => client.clone() as _,
}
}
pub fn as_in_memory_backend(&self) -> InMemoryBackend<Block, Blake2Hasher> {
match *self {
PeersClient::Full(ref client) => client.backend().as_in_memory(),
PeersClient::Light(_) => unimplemented!("TODO"),
}
}
pub fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
match *self {
PeersClient::Full(ref client) => client.backend().get_aux(key),
PeersClient::Light(ref client) => client.backend().get_aux(key),
}
}
pub fn info(&self) -> ClientResult<ClientInfo<Block>> {
match *self {
PeersClient::Full(ref client) => client.info(),
PeersClient::Light(ref client) => client.info(),
}
}
pub fn header(&self, block: &BlockId<Block>) -> ClientResult<Option<<Block as BlockT>::Header>> {
match *self {
PeersClient::Full(ref client) => client.header(block),
PeersClient::Light(ref client) => client.header(block),
}
}
pub fn justification(&self, block: &BlockId<Block>) -> ClientResult<Option<Justification>> {
match *self {
PeersClient::Full(ref client) => client.justification(block),
PeersClient::Light(ref client) => client.justification(block),
}
}
pub fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
match *self {
PeersClient::Full(ref client) => client.finality_notification_stream(),
PeersClient::Light(ref client) => client.finality_notification_stream(),
}
}
pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> ClientResult<()> {
match *self {
PeersClient::Full(ref client) => client.finalize_block(id, justification, notify),
PeersClient::Light(ref client) => client.finalize_block(id, justification, notify),
}
}
}
/// A Link that can wait for a block to have been imported.
pub struct TestLink<S: NetworkSpecialization<Block>> {
@@ -155,6 +232,23 @@ impl<S: NetworkSpecialization<Block>> Link<Block> for TestLink<S> {
self.link.request_justification(hash, number);
}
fn finality_proof_imported(
&self,
who: PeerId,
request_block: (Hash, NumberFor<Block>),
finalization_result: Result<(Hash, NumberFor<Block>), ()>,
) {
self.link.finality_proof_imported(who, request_block, finalization_result);
}
fn request_finality_proof(&self, hash: &Hash, number: NumberFor<Block>) {
self.link.request_finality_proof(hash, number);
}
fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder<Block>) {
self.link.set_finality_proof_request_builder(request_builder);
}
fn report_peer(&self, who: PeerId, reputation_change: i32) {
self.link.report_peer(who, reputation_change);
}
@@ -178,7 +272,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
pub is_major_syncing: Arc<AtomicBool>,
pub peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
pub peer_id: PeerId,
client: Arc<PeersClient>,
client: PeersClient,
net_proto_channel: ProtocolChannel<S>,
pub import_queue: Box<BasicQueue<Block>>,
pub data: D,
@@ -188,7 +282,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
type MessageFilter = Fn(&NetworkMsg<Block>) -> bool;
enum FromNetworkMsg<B: BlockT> {
pub enum FromNetworkMsg<B: BlockT> {
/// A peer connected, with debug info.
PeerConnected(PeerId, String),
/// A peer disconnected, with debug info.
@@ -294,7 +388,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
is_offline: Arc<AtomicBool>,
is_major_syncing: Arc<AtomicBool>,
peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
client: Arc<PeersClient>,
client: PeersClient,
import_queue: Box<BasicQueue<Block>>,
network_to_protocol_sender: mpsc::UnboundedSender<FromNetworkMsg<Block>>,
protocol_sender: mpsc::UnboundedSender<ProtocolMsg<Block, S>>,
@@ -327,7 +421,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
}
}
/// Called after the blockchain has been populated, to update the current state.
fn start(&self) {
pub fn start(&self) {
// Update the sync state to the latest chain state.
let info = self.client.info().expect("In-mem client does not fail");
let header = self
@@ -484,7 +578,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
/// Add blocks to the peer -- edit the block before adding
pub fn generate_blocks<F>(&self, count: usize, origin: BlockOrigin, edit_block: F) -> H256
where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
{
let best_hash = self.client.info().unwrap().chain.best_hash;
self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block)
@@ -493,11 +587,12 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
/// Add blocks to the peer -- edit the block before adding. The chain will
/// start at the given block ID.
pub fn generate_blocks_at<F>(&self, at: BlockId<Block>, count: usize, origin: BlockOrigin, mut edit_block: F) -> H256
where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
{
let mut at = self.client.header(&at).unwrap().unwrap().hash();
let full_client = self.client.as_full().expect("blocks could only be generated by full clients");
let mut at = full_client.header(&at).unwrap().unwrap().hash();
for _ in 0..count {
let builder = self.client.new_block_at(&BlockId::Hash(at)).unwrap();
let builder = full_client.new_block_at(&BlockId::Hash(at)).unwrap();
let block = edit_block(builder);
let hash = block.header.hash();
trace!(
@@ -562,7 +657,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
}
/// Get a reference to the client.
pub fn client(&self) -> &Arc<PeersClient> {
pub fn client(&self) -> &PeersClient {
&self.client
}
}
@@ -598,7 +693,7 @@ pub trait TestNetFactory: Sized {
/// These two need to be implemented!
fn from_config(config: &ProtocolConfig) -> Self;
fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig) -> Arc<Self::Verifier>;
fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig) -> Arc<Self::Verifier>;
/// Get reference to peer.
fn peer(&self, i: usize) -> &Peer<Self::PeerData, Self::Specialization>;
@@ -609,10 +704,21 @@ pub trait TestNetFactory: Sized {
fn set_started(&mut self, now: bool);
/// Get custom block import handle for fresh client, along with peer data.
fn make_block_import(&self, client: Arc<PeersClient>)
-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
fn make_block_import(&self, client: PeersClient)
-> (
SharedBlockImport<Block>,
Option<SharedJustificationImport<Block>>,
Option<SharedFinalityProofImport<Block>>,
Option<SharedFinalityProofRequestBuilder<Block>>,
Self::PeerData,
)
{
(client, None, Default::default())
(client.as_block_import(), None, None, None, Default::default())
}
/// Get finality proof provider (if supported).
fn make_finality_proof_provider(&self, _client: PeersClient) -> Option<Arc<FinalityProofProvider<Block>>> {
None
}
fn default_config() -> ProtocolConfig {
@@ -627,41 +733,21 @@ pub trait TestNetFactory: Sized {
for i in 0..n {
trace!(target: "test_network", "Adding peer {}", i);
net.add_peer(&config);
net.add_full_peer(&config);
}
net
}
/// Add a peer.
fn add_peer(&mut self, config: &ProtocolConfig) {
let client = Arc::new(test_client::new());
let tx_pool = Arc::new(EmptyTransactionPool);
let verifier = self.make_verifier(client.clone(), config);
let (block_import, justification_import, data) = self.make_block_import(client.clone());
let (network_sender, network_port) = network_channel();
let import_queue = Box::new(BasicQueue::new(verifier, block_import, justification_import));
let is_offline = Arc::new(AtomicBool::new(true));
let is_major_syncing = Arc::new(AtomicBool::new(false));
let specialization = self::SpecializationFactory::create();
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
let (network_to_protocol_sender, mut network_to_protocol_rx) = mpsc::unbounded();
let (mut protocol, protocol_sender) = Protocol::new(
peers.clone(),
network_sender.clone(),
config.clone(),
client.clone(),
None,
tx_pool,
specialization,
).unwrap();
let is_offline2 = is_offline.clone();
let is_major_syncing2 = is_major_syncing.clone();
let import_queue2 = import_queue.clone();
/// Add an already-created peer.
fn add_peer(
&mut self,
is_offline: Arc<AtomicBool>,
is_major_syncing: Arc<AtomicBool>,
import_queue: Box<BasicQueue<Block>>,
mut protocol: Protocol<Block, Self::Specialization, Hash>,
mut network_to_protocol_rx: mpsc::UnboundedReceiver<FromNetworkMsg<Block>>,
peer: Arc<Peer<Self::PeerData, Self::Specialization>>,
) {
std::thread::spawn(move || {
tokio::runtime::current_thread::run(futures::future::poll_fn(move || {
while let Async::Ready(msg) = network_to_protocol_rx.poll().unwrap() {
@@ -680,14 +766,16 @@ pub trait TestNetFactory: Sized {
protocol.synchronize();
CustomMessageOutcome::None
},
None => return Ok(Async::Ready(()))
None => return Ok(Async::Ready(())),
};
match outcome {
CustomMessageOutcome::BlockImport(origin, blocks) =>
import_queue2.import_blocks(origin, blocks),
import_queue.import_blocks(origin, blocks),
CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) =>
import_queue2.import_justification(origin, hash, nb, justification),
import_queue.import_justification(origin, hash, nb, justification),
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) =>
import_queue.import_finality_proof(origin, hash, nb, proof),
CustomMessageOutcome::None => {}
}
}
@@ -696,31 +784,140 @@ pub trait TestNetFactory: Sized {
return Ok(Async::Ready(()))
}
is_offline2.store(protocol.is_offline(), Ordering::Relaxed);
is_major_syncing2.store(protocol.is_major_syncing(), Ordering::Relaxed);
is_offline.store(protocol.is_offline(), Ordering::Relaxed);
is_major_syncing.store(protocol.is_major_syncing(), Ordering::Relaxed);
Ok(Async::NotReady)
}));
});
let peer = Arc::new(Peer::new(
is_offline,
is_major_syncing,
peers,
client,
import_queue,
network_to_protocol_sender,
protocol_sender,
network_sender,
network_port,
data,
));
if self.started() {
peer.start();
self.peers().iter().for_each(|other| {
other.on_connect(&*peer);
peer.on_connect(other);
});
}
self.mut_peers(|peers| {
peers.push(peer)
});
}
/// Add a full peer.
fn add_full_peer(&mut self, config: &ProtocolConfig) {
let client = Arc::new(test_client::new());
let tx_pool = Arc::new(EmptyTransactionPool);
let verifier = self.make_verifier(PeersClient::Full(client.clone()), config);
let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
= self.make_block_import(PeersClient::Full(client.clone()));
let (network_sender, network_port) = network_channel();
let import_queue = Box::new(BasicQueue::new(
verifier,
block_import,
justification_import,
finality_proof_import,
finality_proof_request_builder,
));
let is_offline = Arc::new(AtomicBool::new(true));
let is_major_syncing = Arc::new(AtomicBool::new(false));
let specialization = self::SpecializationFactory::create();
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
let (protocol, protocol_sender) = Protocol::new(
peers.clone(),
network_sender.clone(),
config.clone(),
client.clone(),
self.make_finality_proof_provider(PeersClient::Full(client.clone())),
None,
tx_pool,
specialization,
).unwrap();
self.add_peer(
is_offline.clone(),
is_major_syncing.clone(),
import_queue.clone(),
protocol,
network_to_protocol_rx,
Arc::new(Peer::new(
is_offline,
is_major_syncing,
peers,
PeersClient::Full(client),
import_queue,
network_to_protocol_sender,
protocol_sender,
network_sender,
network_port,
data,
)),
);
}
/// Add a light peer.
fn add_light_peer(&mut self, config: &ProtocolConfig) {
let mut config = config.clone();
config.roles = Roles::LIGHT;
let client = Arc::new(test_client::new_light());
let tx_pool = Arc::new(EmptyTransactionPool);
let verifier = self.make_verifier(PeersClient::Light(client.clone()), &config);
let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
= self.make_block_import(PeersClient::Light(client.clone()));
let (network_sender, network_port) = network_channel();
let import_queue = Box::new(BasicQueue::new(
verifier,
block_import,
justification_import,
finality_proof_import,
finality_proof_request_builder,
));
let is_offline = Arc::new(AtomicBool::new(true));
let is_major_syncing = Arc::new(AtomicBool::new(false));
let specialization = self::SpecializationFactory::create();
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
let (protocol, protocol_sender) = Protocol::new(
peers.clone(),
network_sender.clone(),
config,
client.clone(),
self.make_finality_proof_provider(PeersClient::Light(client.clone())),
None,
tx_pool,
specialization,
).unwrap();
self.add_peer(
is_offline.clone(),
is_major_syncing.clone(),
import_queue.clone(),
protocol,
network_to_protocol_rx,
Arc::new(Peer::new(
is_offline,
is_major_syncing,
peers,
PeersClient::Light(client),
import_queue,
network_to_protocol_sender,
protocol_sender,
network_sender,
network_port,
data,
)),
);
}
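With both helpers in place, a test exercising the light-client path reads roughly as follows (a sketch reusing helpers from this file; peer indices follow insertion order):
let mut net = TestNet::new(2);                  // peers 0 and 1 are full
net.add_light_peer(&TestNet::default_config()); // peer 2 is light
net.peer(0).push_blocks(1, false);              // author a block on a full peer
net.sync();                                     // the light peer syncs the new header
assert_eq!(net.peer(2).client.info().unwrap().chain.best_number, 1);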
/// Start network.
fn start(&mut self) {
if self.started() {
@@ -832,6 +1029,11 @@ pub trait TestNetFactory: Sized {
self.route_single(true, None, &|_| true);
}
/// Maintain sync for a peer.
fn tick_peer(&mut self, i: usize) {
self.peers()[i].sync_step();
}
/// Deliver pending messages until there are no more.
fn sync(&mut self) {
self.sync_with(true, None)
@@ -866,7 +1068,7 @@ impl TestNetFactory for TestNet {
}
}
fn make_verifier(&self, _client: Arc<PeersClient>, _config: &ProtocolConfig)
fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig)
-> Arc<Self::Verifier>
{
Arc::new(PassThroughVerifier(false))
@@ -893,7 +1095,7 @@ impl TestNetFactory for TestNet {
}
}
pub struct ForceFinalized(Arc<PeersClient>);
pub struct ForceFinalized(PeersClient);
impl JustificationImport<Block> for ForceFinalized {
type Error = ConsensusError;
@@ -920,7 +1122,7 @@ impl TestNetFactory for JustificationTestNet {
JustificationTestNet(TestNet::from_config(config))
}
fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig)
fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig)
-> Arc<Self::Verifier>
{
self.0.make_verifier(client, config)
@@ -946,9 +1148,15 @@ impl TestNetFactory for JustificationTestNet {
self.0.set_started(new)
}
fn make_block_import(&self, client: Arc<PeersClient>)
-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
fn make_block_import(&self, client: PeersClient)
-> (
SharedBlockImport<Block>,
Option<SharedJustificationImport<Block>>,
Option<SharedFinalityProofImport<Block>>,
Option<SharedFinalityProofRequestBuilder<Block>>,
Self::PeerData,
)
{
(client.clone(), Some(Arc::new(ForceFinalized(client))), Default::default())
(client.as_block_import(), Some(Arc::new(ForceFinalized(client))), None, None, Default::default())
}
}
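A test net driving finality proofs rather than plain justifications would fill the two new slots instead; a hedged sketch, where the proof import and request builder stand for values produced by the consensus code under test (e.g. a GRANDPA light block import), not concrete APIs from this commit:
fn make_block_import(&self, client: PeersClient)
	-> (
		SharedBlockImport<Block>,
		Option<SharedJustificationImport<Block>>,
		Option<SharedFinalityProofImport<Block>>,
		Option<SharedFinalityProofRequestBuilder<Block>>,
		Self::PeerData,
	)
{
	// Hypothetical helper: whatever the consensus crate exposes for tests.
	let (proof_import, request_builder) = self.make_proof_import(&client);
	(client.as_block_import(), None, Some(proof_import), Some(request_builder), Default::default())
}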
+32 -33
@@ -14,8 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use client::backend::Backend;
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
use client::{backend::Backend, blockchain::HeaderBackend};
use crate::config::Roles;
use consensus::BlockOrigin;
use std::collections::HashSet;
@@ -34,8 +33,8 @@ fn test_ancestor_search_when_common_is(n: usize) {
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -130,8 +129,8 @@ fn sync_from_two_peers_works() {
net.peer(1).push_blocks(100, false);
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
assert!(!net.peer(0).is_major_syncing());
}
@@ -143,8 +142,8 @@ fn sync_from_two_peers_with_ancestry_search_works() {
net.peer(1).push_blocks(100, false);
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -157,8 +156,8 @@ fn ancestry_search_works_when_backoff_is_one() {
net.peer(2).push_blocks(2, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -171,8 +170,8 @@ fn ancestry_search_works_when_ancestor_is_genesis() {
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -195,8 +194,8 @@ fn sync_long_chain_works() {
let mut net = TestNet::new(2);
net.peer(1).push_blocks(500, false);
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -206,8 +205,8 @@ fn sync_no_common_longer_chain_fails() {
net.peer(0).push_blocks(20, true);
net.peer(1).push_blocks(20, false);
net.sync();
assert!(!net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
assert!(!net.peer(0).client.as_in_memory_backend().blockchain()
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
}
#[test]
@@ -285,11 +284,11 @@ fn sync_after_fork_works() {
net.peer(2).push_blocks(1, false);
// peer 1 has the best chain
let peer1_chain = net.peer(1).client.backend().as_in_memory().blockchain().clone();
let peer1_chain = net.peer(1).client.as_in_memory_backend().blockchain().clone();
net.sync();
assert!(net.peer(0).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(0).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
}
#[test]
@@ -305,8 +304,8 @@ fn syncs_all_forks() {
net.sync();
// Check that all peers have all of the blocks.
assert_eq!(9, net.peer(0).client.backend().as_in_memory().blockchain().blocks_count());
assert_eq!(9, net.peer(1).client.backend().as_in_memory().blockchain().blocks_count());
assert_eq!(9, net.peer(0).client.as_in_memory_backend().blockchain().blocks_count());
assert_eq!(9, net.peer(1).client.as_in_memory_backend().blockchain().blocks_count());
}
#[test]
@@ -320,11 +319,11 @@ fn own_blocks_are_announced() {
net.peer(0).on_block_imported(header.hash(), &header);
net.sync();
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
let peer0_chain = net.peer(0).client.backend().as_in_memory().blockchain().clone();
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
assert_eq!(net.peer(0).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(1).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
let peer0_chain = net.peer(0).client.as_in_memory_backend().blockchain().clone();
assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
}
#[test]
@@ -336,9 +335,9 @@ fn blocks_are_not_announced_by_light_nodes() {
// light peer1 is connected to full peer2
let mut light_config = ProtocolConfig::default();
light_config.roles = Roles::LIGHT;
net.add_peer(&ProtocolConfig::default());
net.add_peer(&light_config);
net.add_peer(&ProtocolConfig::default());
net.add_full_peer(&ProtocolConfig::default());
net.add_full_peer(&light_config);
net.add_full_peer(&ProtocolConfig::default());
net.peer(0).push_blocks(1, false);
net.peer(0).start();
@@ -356,9 +355,9 @@ fn blocks_are_not_announced_by_light_nodes() {
// peer 0 has the best chain
// peer 1 has the best chain
// peer 2 has genesis-chain only
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(2).client.backend().blockchain().info().unwrap().best_number, 0);
assert_eq!(net.peer(0).client.info().unwrap().chain.best_number, 1);
assert_eq!(net.peer(1).client.info().unwrap().chain.best_number, 1);
assert_eq!(net.peer(2).client.info().unwrap().chain.best_number, 0);
}
#[test]