mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 11:07:56 +00:00
Extract warp sync strategy from ChainSync (#2467)
Extract `WarpSync` (and `StateSync` as part of warp sync) from `ChainSync` as independent syncing strategy called by `SyncingEngine`. Introduce `SyncingStrategy` enum as a proxy between `SyncingEngine` and specific syncing strategies. ## Limitations Gap sync is kept in `ChainSync` for now because it shares the same set of peers as block syncing implementation in `ChainSync`. Extraction of a common context responsible for peer management in syncing strategies able to run in parallel is planned for a follow-up PR. ## Further improvements A possibility of conversion of `SyncingStartegy` into a trait should be evaluated. The main stopper for this is that different strategies need to communicate different actions to `SyncingEngine` and respond to different events / provide different APIs (e.g., requesting justifications is only possible via `ChainSync` and not through `WarpSync`; `SendWarpProofRequest` action is only relevant to `WarpSync`, etc.) --------- Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com>
This commit is contained in:
@@ -19,7 +19,7 @@
|
||||
//! [`BlockAnnounceValidator`] is responsible for async validation of block announcements.
|
||||
//! [`Stream`] implemented by [`BlockAnnounceValidator`] never terminates.
|
||||
|
||||
use crate::futures_stream::FuturesStream;
|
||||
use crate::{futures_stream::FuturesStream, LOG_TARGET};
|
||||
use futures::{stream::FusedStream, Future, FutureExt, Stream, StreamExt};
|
||||
use libp2p::PeerId;
|
||||
use log::{debug, error, trace, warn};
|
||||
@@ -33,9 +33,6 @@ use std::{
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
/// Log target for this file.
|
||||
const LOG_TARGET: &str = "sync";
|
||||
|
||||
/// Maximum number of concurrent block announce validations.
|
||||
///
|
||||
/// If the queue reaches the maximum, we drop any new block
|
||||
|
||||
@@ -24,6 +24,7 @@ use crate::{
|
||||
BlockResponse as BlockResponseSchema, BlockResponse, Direction,
|
||||
},
|
||||
service::network::NetworkServiceHandle,
|
||||
LOG_TARGET,
|
||||
};
|
||||
|
||||
use codec::{Decode, DecodeAll, Encode};
|
||||
@@ -56,7 +57,6 @@ use std::{
|
||||
/// Maximum blocks per response.
|
||||
pub(crate) const MAX_BLOCKS_IN_RESPONSE: usize = 128;
|
||||
|
||||
const LOG_TARGET: &str = "sync";
|
||||
const MAX_BODY_BYTES: usize = 8 * 1024 * 1024;
|
||||
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::LOG_TARGET;
|
||||
use libp2p::PeerId;
|
||||
use log::trace;
|
||||
use sc_network_common::sync::message;
|
||||
@@ -87,10 +88,10 @@ impl<B: BlockT> BlockCollection<B> {
|
||||
|
||||
match self.blocks.get(&start) {
|
||||
Some(&BlockRangeState::Downloading { .. }) => {
|
||||
trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start);
|
||||
trace!(target: LOG_TARGET, "Inserting block data still marked as being downloaded: {}", start);
|
||||
},
|
||||
Some(BlockRangeState::Complete(existing)) if existing.len() >= blocks.len() => {
|
||||
trace!(target: "sync", "Ignored block data already downloaded: {}", start);
|
||||
trace!(target: LOG_TARGET, "Ignored block data already downloaded: {}", start);
|
||||
return
|
||||
},
|
||||
_ => (),
|
||||
@@ -162,7 +163,7 @@ impl<B: BlockT> BlockCollection<B> {
|
||||
};
|
||||
// crop to peers best
|
||||
if range.start > peer_best {
|
||||
trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best);
|
||||
trace!(target: LOG_TARGET, "Out of range for peer {} ({} vs {})", who, range.start, peer_best);
|
||||
return None
|
||||
}
|
||||
range.end = cmp::min(peer_best + One::one(), range.end);
|
||||
@@ -173,7 +174,7 @@ impl<B: BlockT> BlockCollection<B> {
|
||||
.next()
|
||||
.map_or(false, |(n, _)| range.start > *n + max_ahead.into())
|
||||
{
|
||||
trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start);
|
||||
trace!(target: LOG_TARGET, "Too far ahead for peer {} ({})", who, range.start);
|
||||
return None
|
||||
}
|
||||
|
||||
@@ -224,7 +225,7 @@ impl<B: BlockT> BlockCollection<B> {
|
||||
};
|
||||
*range_data = BlockRangeState::Queued { len };
|
||||
}
|
||||
trace!(target: "sync", "{} blocks ready for import", ready.len());
|
||||
trace!(target: LOG_TARGET, "{} blocks ready for import", ready.len());
|
||||
ready
|
||||
}
|
||||
|
||||
@@ -235,7 +236,7 @@ impl<B: BlockT> BlockCollection<B> {
|
||||
self.blocks.remove(&block_num);
|
||||
block_num += One::one();
|
||||
}
|
||||
trace!(target: "sync", "Cleared blocks from {:?} to {:?}", from, to);
|
||||
trace!(target: LOG_TARGET, "Cleared blocks from {:?} to {:?}", from, to);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -25,17 +25,20 @@ use crate::{
|
||||
},
|
||||
block_relay_protocol::{BlockDownloader, BlockResponseError},
|
||||
block_request_handler::MAX_BLOCKS_IN_RESPONSE,
|
||||
chain_sync::{ChainSync, ChainSyncAction},
|
||||
pending_responses::{PendingResponses, ResponseEvent},
|
||||
schema::v1::{StateRequest, StateResponse},
|
||||
service::{
|
||||
self,
|
||||
syncing_service::{SyncingService, ToServiceCommand},
|
||||
},
|
||||
strategy::{
|
||||
warp::{EncodedProof, WarpProofRequest, WarpSyncParams},
|
||||
SyncingAction, SyncingConfig, SyncingStrategy,
|
||||
},
|
||||
types::{
|
||||
BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent,
|
||||
},
|
||||
warp::{EncodedProof, WarpProofRequest, WarpSyncParams},
|
||||
LOG_TARGET,
|
||||
};
|
||||
|
||||
use codec::{Decode, DecodeAll, Encode};
|
||||
@@ -45,10 +48,9 @@ use futures::{
|
||||
FutureExt, StreamExt,
|
||||
};
|
||||
use libp2p::{request_response::OutboundFailure, PeerId};
|
||||
use log::{debug, trace};
|
||||
use log::{debug, error, trace};
|
||||
use prometheus_endpoint::{
|
||||
register, Counter, Gauge, GaugeVec, MetricSource, Opts, PrometheusError, Registry,
|
||||
SourcedGauge, U64,
|
||||
register, Counter, Gauge, MetricSource, Opts, PrometheusError, Registry, SourcedGauge, U64,
|
||||
};
|
||||
use prost::Message;
|
||||
use schnellru::{ByLength, LruMap};
|
||||
@@ -97,9 +99,6 @@ const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100)
|
||||
/// Maximum number of known block hashes to keep for a peer.
|
||||
const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead
|
||||
|
||||
/// Logging target for the file.
|
||||
const LOG_TARGET: &str = "sync";
|
||||
|
||||
/// If the block announces stream to peer has been inactive for 30 seconds meaning local node
|
||||
/// has not sent or received block announcements to/from the peer, report the node for inactivity,
|
||||
/// disconnect it and attempt to establish connection to some other peer.
|
||||
@@ -140,9 +139,6 @@ mod rep {
|
||||
|
||||
struct Metrics {
|
||||
peers: Gauge<U64>,
|
||||
queued_blocks: Gauge<U64>,
|
||||
fork_targets: Gauge<U64>,
|
||||
justifications: GaugeVec<U64>,
|
||||
import_queue_blocks_submitted: Counter<U64>,
|
||||
import_queue_justifications_submitted: Counter<U64>,
|
||||
}
|
||||
@@ -155,25 +151,6 @@ impl Metrics {
|
||||
let g = Gauge::new("substrate_sync_peers", "Number of peers we sync with")?;
|
||||
register(g, r)?
|
||||
},
|
||||
queued_blocks: {
|
||||
let g =
|
||||
Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?;
|
||||
register(g, r)?
|
||||
},
|
||||
fork_targets: {
|
||||
let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?;
|
||||
register(g, r)?
|
||||
},
|
||||
justifications: {
|
||||
let g = GaugeVec::new(
|
||||
Opts::new(
|
||||
"substrate_sync_extra_justifications",
|
||||
"Number of extra justifications requests",
|
||||
),
|
||||
&["status"],
|
||||
)?;
|
||||
register(g, r)?
|
||||
},
|
||||
import_queue_blocks_submitted: {
|
||||
let c = Counter::new(
|
||||
"substrate_sync_import_queue_blocks_submitted",
|
||||
@@ -234,9 +211,11 @@ pub struct Peer<B: BlockT> {
|
||||
}
|
||||
|
||||
pub struct SyncingEngine<B: BlockT, Client> {
|
||||
/// State machine that handles the list of in-progress requests. Only full node peers are
|
||||
/// registered.
|
||||
chain_sync: ChainSync<B, Client>,
|
||||
/// Syncing strategy.
|
||||
strategy: SyncingStrategy<B, Client>,
|
||||
|
||||
/// Syncing configuration for startegies.
|
||||
syncing_config: SyncingConfig,
|
||||
|
||||
/// Blockchain client.
|
||||
client: Arc<Client>,
|
||||
@@ -381,6 +360,12 @@ where
|
||||
} else {
|
||||
net_config.network_config.max_blocks_per_request
|
||||
};
|
||||
let syncing_config = SyncingConfig {
|
||||
mode,
|
||||
max_parallel_downloads,
|
||||
max_blocks_per_request,
|
||||
metrics_registry: metrics_registry.cloned(),
|
||||
};
|
||||
let cache_capacity = (net_config.network_config.default_peers_set.in_peers +
|
||||
net_config.network_config.default_peers_set.out_peers)
|
||||
.max(1);
|
||||
@@ -429,19 +414,6 @@ where
|
||||
total.saturating_sub(net_config.network_config.default_peers_set_num_full) as usize
|
||||
};
|
||||
|
||||
// Split warp sync params into warp sync config and a channel to retreive target block
|
||||
// header.
|
||||
let (warp_sync_config, warp_sync_target_block_header_rx) =
|
||||
warp_sync_params.map_or((None, None), |params| {
|
||||
let (config, target_block_rx) = params.split();
|
||||
(Some(config), target_block_rx)
|
||||
});
|
||||
|
||||
// Make sure polling of the target block channel is a no-op if there is no block to
|
||||
// retrieve.
|
||||
let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx
|
||||
.map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse());
|
||||
|
||||
let (block_announce_config, notification_service) = Self::get_block_announce_proto_config(
|
||||
protocol_id,
|
||||
fork_id,
|
||||
@@ -455,13 +427,22 @@ where
|
||||
.expect("Genesis block exists; qed"),
|
||||
);
|
||||
|
||||
let chain_sync = ChainSync::new(
|
||||
mode,
|
||||
client.clone(),
|
||||
max_parallel_downloads,
|
||||
max_blocks_per_request,
|
||||
warp_sync_config,
|
||||
)?;
|
||||
// Split warp sync params into warp sync config and a channel to retreive target block
|
||||
// header.
|
||||
let (warp_sync_config, warp_sync_target_block_header_rx) =
|
||||
warp_sync_params.map_or((None, None), |params| {
|
||||
let (config, target_block_rx) = params.split();
|
||||
(Some(config), target_block_rx)
|
||||
});
|
||||
|
||||
// Make sure polling of the target block channel is a no-op if there is no block to
|
||||
// retrieve.
|
||||
let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx
|
||||
.map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse());
|
||||
|
||||
// Initialize syncing strategy.
|
||||
let strategy =
|
||||
SyncingStrategy::new(syncing_config.clone(), client.clone(), warp_sync_config)?;
|
||||
|
||||
let block_announce_protocol_name = block_announce_config.protocol_name().clone();
|
||||
let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000);
|
||||
@@ -489,7 +470,8 @@ where
|
||||
Self {
|
||||
roles,
|
||||
client,
|
||||
chain_sync,
|
||||
strategy,
|
||||
syncing_config,
|
||||
network_service,
|
||||
peers: HashMap::new(),
|
||||
block_announce_data_cache: LruMap::new(ByLength::new(cache_capacity)),
|
||||
@@ -543,37 +525,19 @@ where
|
||||
if let Some(metrics) = &self.metrics {
|
||||
let n = u64::try_from(self.peers.len()).unwrap_or(std::u64::MAX);
|
||||
metrics.peers.set(n);
|
||||
|
||||
let m = self.chain_sync.metrics();
|
||||
|
||||
metrics.fork_targets.set(m.fork_targets.into());
|
||||
metrics.queued_blocks.set(m.queued_blocks.into());
|
||||
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["pending"])
|
||||
.set(m.justifications.pending_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["active"])
|
||||
.set(m.justifications.active_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["failed"])
|
||||
.set(m.justifications.failed_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["importing"])
|
||||
.set(m.justifications.importing_requests.into());
|
||||
}
|
||||
self.strategy.report_metrics();
|
||||
}
|
||||
|
||||
fn update_peer_info(&mut self, peer_id: &PeerId) {
|
||||
if let Some(info) = self.chain_sync.peer_info(peer_id) {
|
||||
if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
|
||||
peer.info.best_hash = info.best_hash;
|
||||
peer.info.best_number = info.best_number;
|
||||
}
|
||||
fn update_peer_info(
|
||||
&mut self,
|
||||
peer_id: &PeerId,
|
||||
best_hash: B::Hash,
|
||||
best_number: NumberFor<B>,
|
||||
) {
|
||||
if let Some(ref mut peer) = self.peers.get_mut(peer_id) {
|
||||
peer.info.best_hash = best_hash;
|
||||
peer.info.best_number = best_number;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -585,9 +549,11 @@ where
|
||||
match validation_result {
|
||||
BlockAnnounceValidationResult::Skip { peer_id: _ } => {},
|
||||
BlockAnnounceValidationResult::Process { is_new_best, peer_id, announce } => {
|
||||
self.chain_sync.on_validated_block_announce(is_new_best, peer_id, &announce);
|
||||
|
||||
self.update_peer_info(&peer_id);
|
||||
if let Some((best_hash, best_number)) =
|
||||
self.strategy.on_validated_block_announce(is_new_best, peer_id, &announce)
|
||||
{
|
||||
self.update_peer_info(&peer_id, best_hash, best_number);
|
||||
}
|
||||
|
||||
if let Some(data) = announce.data {
|
||||
if !data.is_empty() {
|
||||
@@ -705,83 +671,106 @@ where
|
||||
|
||||
// Update atomic variables
|
||||
self.num_connected.store(self.peers.len(), Ordering::Relaxed);
|
||||
self.is_major_syncing
|
||||
.store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed);
|
||||
self.is_major_syncing.store(self.strategy.is_major_syncing(), Ordering::Relaxed);
|
||||
|
||||
// Process actions requested by `ChainSync`.
|
||||
self.process_chain_sync_actions();
|
||||
// Process actions requested by a syncing strategy.
|
||||
if let Err(e) = self.process_strategy_actions() {
|
||||
error!("Terminating `SyncingEngine` due to fatal error: {e:?}");
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_chain_sync_actions(&mut self) {
|
||||
self.chain_sync.actions().for_each(|action| match action {
|
||||
ChainSyncAction::SendBlockRequest { peer_id, request } => {
|
||||
// Sending block request implies dropping obsolete pending response as we are not
|
||||
// interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]).
|
||||
// Furthermore, only one request at a time is allowed to any peer.
|
||||
let removed = self.pending_responses.remove(&peer_id);
|
||||
self.send_block_request(peer_id, request.clone());
|
||||
fn process_strategy_actions(&mut self) -> Result<(), ClientError> {
|
||||
for action in self.strategy.actions() {
|
||||
match action {
|
||||
SyncingAction::SendBlockRequest { peer_id, request } => {
|
||||
// Sending block request implies dropping obsolete pending response as we are
|
||||
// not interested in it anymore (see [`SyncingAction::SendBlockRequest`]).
|
||||
// Furthermore, only one request at a time is allowed to any peer.
|
||||
let removed = self.pending_responses.remove(&peer_id);
|
||||
self.send_block_request(peer_id, request.clone());
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.",
|
||||
peer_id,
|
||||
request,
|
||||
removed,
|
||||
)
|
||||
},
|
||||
ChainSyncAction::CancelBlockRequest { peer_id } => {
|
||||
let removed = self.pending_responses.remove(&peer_id);
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.",
|
||||
peer_id,
|
||||
request,
|
||||
removed,
|
||||
)
|
||||
},
|
||||
SyncingAction::CancelBlockRequest { peer_id } => {
|
||||
let removed = self.pending_responses.remove(&peer_id);
|
||||
|
||||
trace!(target: LOG_TARGET, "Processed {action:?}, response removed: {removed}.");
|
||||
},
|
||||
ChainSyncAction::SendStateRequest { peer_id, request } => {
|
||||
self.send_state_request(peer_id, request);
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed {action:?}, response removed: {removed}.",
|
||||
);
|
||||
},
|
||||
SyncingAction::SendStateRequest { peer_id, request } => {
|
||||
self.send_state_request(peer_id, request);
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendBlockRequest` to {peer_id}.",
|
||||
);
|
||||
},
|
||||
ChainSyncAction::SendWarpProofRequest { peer_id, request } => {
|
||||
self.send_warp_proof_request(peer_id, request.clone());
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendBlockRequest` to {peer_id}.",
|
||||
);
|
||||
},
|
||||
SyncingAction::SendWarpProofRequest { peer_id, request } => {
|
||||
self.send_warp_proof_request(peer_id, request.clone());
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.",
|
||||
peer_id,
|
||||
request,
|
||||
);
|
||||
},
|
||||
ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => {
|
||||
self.pending_responses.remove(&peer_id);
|
||||
self.network_service
|
||||
.disconnect_peer(peer_id, self.block_announce_protocol_name.clone());
|
||||
self.network_service.report_peer(peer_id, rep);
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.",
|
||||
peer_id,
|
||||
request,
|
||||
);
|
||||
},
|
||||
SyncingAction::DropPeer(BadPeer(peer_id, rep)) => {
|
||||
self.pending_responses.remove(&peer_id);
|
||||
self.network_service
|
||||
.disconnect_peer(peer_id, self.block_announce_protocol_name.clone());
|
||||
self.network_service.report_peer(peer_id, rep);
|
||||
|
||||
trace!(target: LOG_TARGET, "Processed {action:?}.");
|
||||
},
|
||||
ChainSyncAction::ImportBlocks { origin, blocks } => {
|
||||
let count = blocks.len();
|
||||
self.import_blocks(origin, blocks);
|
||||
trace!(target: LOG_TARGET, "{peer_id:?} dropped: {rep:?}.");
|
||||
},
|
||||
SyncingAction::ImportBlocks { origin, blocks } => {
|
||||
let count = blocks.len();
|
||||
self.import_blocks(origin, blocks);
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::ImportBlocks` with {count} blocks.",
|
||||
);
|
||||
},
|
||||
ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => {
|
||||
self.import_justifications(peer_id, hash, number, justifications);
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::ImportBlocks` with {count} blocks.",
|
||||
);
|
||||
},
|
||||
SyncingAction::ImportJustifications { peer_id, hash, number, justifications } => {
|
||||
self.import_justifications(peer_id, hash, number, justifications);
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).",
|
||||
peer_id,
|
||||
hash,
|
||||
number,
|
||||
)
|
||||
},
|
||||
});
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).",
|
||||
peer_id,
|
||||
hash,
|
||||
number,
|
||||
)
|
||||
},
|
||||
SyncingAction::Finished => {
|
||||
let connected_peers = self.peers.iter().filter_map(|(peer_id, peer)| {
|
||||
peer.info.roles.is_full().then_some((
|
||||
*peer_id,
|
||||
peer.info.best_hash,
|
||||
peer.info.best_number,
|
||||
))
|
||||
});
|
||||
self.strategy.switch_to_next(
|
||||
self.syncing_config.clone(),
|
||||
self.client.clone(),
|
||||
connected_peers,
|
||||
)?;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn perform_periodic_actions(&mut self) {
|
||||
@@ -824,18 +813,18 @@ where
|
||||
fn process_service_command(&mut self, command: ToServiceCommand<B>) {
|
||||
match command {
|
||||
ToServiceCommand::SetSyncForkRequest(peers, hash, number) => {
|
||||
self.chain_sync.set_sync_fork_request(peers, &hash, number);
|
||||
self.strategy.set_sync_fork_request(peers, &hash, number);
|
||||
},
|
||||
ToServiceCommand::EventStream(tx) => self.event_streams.push(tx),
|
||||
ToServiceCommand::RequestJustification(hash, number) =>
|
||||
self.chain_sync.request_justification(&hash, number),
|
||||
self.strategy.request_justification(&hash, number),
|
||||
ToServiceCommand::ClearJustificationRequests =>
|
||||
self.chain_sync.clear_justification_requests(),
|
||||
self.strategy.clear_justification_requests(),
|
||||
ToServiceCommand::BlocksProcessed(imported, count, results) => {
|
||||
self.chain_sync.on_blocks_processed(imported, count, results);
|
||||
self.strategy.on_blocks_processed(imported, count, results);
|
||||
},
|
||||
ToServiceCommand::JustificationImported(peer_id, hash, number, success) => {
|
||||
self.chain_sync.on_justification_import(hash, number, success);
|
||||
self.strategy.on_justification_import(hash, number, success);
|
||||
if !success {
|
||||
log::info!(
|
||||
target: LOG_TARGET,
|
||||
@@ -849,9 +838,9 @@ where
|
||||
},
|
||||
ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data),
|
||||
ToServiceCommand::NewBestBlockImported(hash, number) => {
|
||||
log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number);
|
||||
log::debug!(target: LOG_TARGET, "New best block imported {:?}/#{}", hash, number);
|
||||
|
||||
self.chain_sync.update_chain_info(&hash, number);
|
||||
self.strategy.update_chain_info(&hash, number);
|
||||
let _ = self.notification_service.try_set_handshake(
|
||||
BlockAnnouncesHandshake::<B>::build(
|
||||
self.roles,
|
||||
@@ -863,7 +852,7 @@ where
|
||||
);
|
||||
},
|
||||
ToServiceCommand::Status(tx) => {
|
||||
let mut status = self.chain_sync.status();
|
||||
let mut status = self.strategy.status();
|
||||
status.num_connected_peers = self.peers.len() as u32;
|
||||
let _ = tx.send(status);
|
||||
},
|
||||
@@ -871,22 +860,22 @@ where
|
||||
let _ = tx.send(self.num_active_peers());
|
||||
},
|
||||
ToServiceCommand::SyncState(tx) => {
|
||||
let _ = tx.send(self.chain_sync.status());
|
||||
let _ = tx.send(self.strategy.status());
|
||||
},
|
||||
ToServiceCommand::BestSeenBlock(tx) => {
|
||||
let _ = tx.send(self.chain_sync.status().best_seen_block);
|
||||
let _ = tx.send(self.strategy.status().best_seen_block);
|
||||
},
|
||||
ToServiceCommand::NumSyncPeers(tx) => {
|
||||
let _ = tx.send(self.chain_sync.status().num_peers);
|
||||
let _ = tx.send(self.strategy.status().num_peers);
|
||||
},
|
||||
ToServiceCommand::NumQueuedBlocks(tx) => {
|
||||
let _ = tx.send(self.chain_sync.status().queued_blocks);
|
||||
let _ = tx.send(self.strategy.status().queued_blocks);
|
||||
},
|
||||
ToServiceCommand::NumDownloadedBlocks(tx) => {
|
||||
let _ = tx.send(self.chain_sync.num_downloaded_blocks());
|
||||
let _ = tx.send(self.strategy.num_downloaded_blocks());
|
||||
},
|
||||
ToServiceCommand::NumSyncRequests(tx) => {
|
||||
let _ = tx.send(self.chain_sync.num_sync_requests());
|
||||
let _ = tx.send(self.strategy.num_sync_requests());
|
||||
},
|
||||
ToServiceCommand::PeersInfo(tx) => {
|
||||
let peers_info = self
|
||||
@@ -897,7 +886,7 @@ where
|
||||
let _ = tx.send(peers_info);
|
||||
},
|
||||
ToServiceCommand::OnBlockFinalized(hash, header) =>
|
||||
self.chain_sync.on_block_finalized(&hash, *header.number()),
|
||||
self.strategy.on_block_finalized(&hash, *header.number()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -961,11 +950,18 @@ where
|
||||
|
||||
fn pass_warp_sync_target_block_header(&mut self, header: Result<B::Header, oneshot::Canceled>) {
|
||||
match header {
|
||||
Ok(header) => {
|
||||
self.chain_sync.set_warp_sync_target_block(header);
|
||||
},
|
||||
Ok(header) =>
|
||||
if let SyncingStrategy::WarpSyncStrategy(warp_sync) = &mut self.strategy {
|
||||
warp_sync.set_target_block(header);
|
||||
} else {
|
||||
error!(
|
||||
target: LOG_TARGET,
|
||||
"Cannot set warp sync target block: no warp sync strategy is active."
|
||||
);
|
||||
debug_assert!(false);
|
||||
},
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
error!(
|
||||
target: LOG_TARGET,
|
||||
"Failed to get target block for warp sync. Error: {err:?}",
|
||||
);
|
||||
@@ -1005,7 +1001,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
self.chain_sync.peer_disconnected(&peer_id);
|
||||
self.strategy.remove_peer(&peer_id);
|
||||
self.pending_responses.remove(&peer_id);
|
||||
self.event_streams
|
||||
.retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok());
|
||||
@@ -1091,7 +1087,7 @@ where
|
||||
let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
|
||||
|
||||
if handshake.roles.is_full() &&
|
||||
self.chain_sync.num_peers() >=
|
||||
self.strategy.num_peers() >=
|
||||
self.default_peers_set_num_full +
|
||||
self.default_peers_set_no_slot_connected_peers.len() +
|
||||
this_peer_reserved_slot
|
||||
@@ -1115,7 +1111,7 @@ where
|
||||
// `ChainSync` only accepts full peers whereas `SyncingEngine` accepts both full and light
|
||||
// peers. Verify that there is a slot in `SyncingEngine` for the inbound light peer
|
||||
if handshake.roles.is_light() &&
|
||||
(self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light
|
||||
(self.peers.len() - self.strategy.num_peers()) >= self.default_peers_set_num_light
|
||||
{
|
||||
log::debug!(target: LOG_TARGET, "Too many light nodes, rejecting {peer_id}");
|
||||
return Err(false)
|
||||
@@ -1149,7 +1145,10 @@ where
|
||||
inbound: direction.is_inbound(),
|
||||
};
|
||||
|
||||
self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number);
|
||||
// Only forward full peers to syncing strategy.
|
||||
if status.roles.is_full() {
|
||||
self.strategy.add_peer(peer_id, peer.info.best_hash, peer.info.best_number);
|
||||
}
|
||||
|
||||
log::debug!(target: LOG_TARGET, "Connected {peer_id}");
|
||||
|
||||
@@ -1267,7 +1266,7 @@ where
|
||||
PeerRequest::Block(req) => {
|
||||
match self.block_downloader.block_response_into_blocks(&req, resp) {
|
||||
Ok(blocks) => {
|
||||
self.chain_sync.on_block_response(peer_id, req, blocks);
|
||||
self.strategy.on_block_response(peer_id, req, blocks);
|
||||
},
|
||||
Err(BlockResponseError::DecodeFailed(e)) => {
|
||||
debug!(
|
||||
@@ -1312,10 +1311,10 @@ where
|
||||
},
|
||||
};
|
||||
|
||||
self.chain_sync.on_state_response(peer_id, response);
|
||||
self.strategy.on_state_response(peer_id, response);
|
||||
},
|
||||
PeerRequest::WarpProof => {
|
||||
self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp));
|
||||
self.strategy.on_warp_proof_response(&peer_id, EncodedProof(resp));
|
||||
},
|
||||
},
|
||||
Ok(Err(e)) => {
|
||||
|
||||
@@ -17,8 +17,9 @@
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::{
|
||||
chain_sync::{PeerSync, PeerSyncState},
|
||||
request_metrics::Metrics,
|
||||
strategy::chain_sync::{PeerSync, PeerSyncState},
|
||||
LOG_TARGET,
|
||||
};
|
||||
use fork_tree::ForkTree;
|
||||
use libp2p::PeerId;
|
||||
@@ -102,7 +103,7 @@ impl<B: BlockT> ExtraRequests<B> {
|
||||
// ignore the `Revert` error.
|
||||
},
|
||||
Err(err) => {
|
||||
debug!(target: "sync", "Failed to insert request {:?} into tree: {}", request, err);
|
||||
debug!(target: LOG_TARGET, "Failed to insert request {:?} into tree: {}", request, err);
|
||||
},
|
||||
_ => (),
|
||||
}
|
||||
@@ -126,7 +127,7 @@ impl<B: BlockT> ExtraRequests<B> {
|
||||
// messages to chain sync.
|
||||
if let Some(request) = self.active_requests.remove(&who) {
|
||||
if let Some(r) = resp {
|
||||
trace!(target: "sync",
|
||||
trace!(target: LOG_TARGET,
|
||||
"Queuing import of {} from {:?} for {:?}",
|
||||
self.request_type_name, who, request,
|
||||
);
|
||||
@@ -134,7 +135,7 @@ impl<B: BlockT> ExtraRequests<B> {
|
||||
self.importing_requests.insert(request);
|
||||
return Some((who, request.0, request.1, r))
|
||||
} else {
|
||||
trace!(target: "sync",
|
||||
trace!(target: LOG_TARGET,
|
||||
"Empty {} response from {:?} for {:?}",
|
||||
self.request_type_name, who, request,
|
||||
);
|
||||
@@ -142,7 +143,7 @@ impl<B: BlockT> ExtraRequests<B> {
|
||||
self.failed_requests.entry(request).or_default().push((who, Instant::now()));
|
||||
self.pending_requests.push_front(request);
|
||||
} else {
|
||||
trace!(target: "sync",
|
||||
trace!(target: LOG_TARGET,
|
||||
"No active {} request to {:?}",
|
||||
self.request_type_name, who,
|
||||
);
|
||||
@@ -217,7 +218,7 @@ impl<B: BlockT> ExtraRequests<B> {
|
||||
};
|
||||
|
||||
if self.tree.finalize_root(&finalized_hash).is_none() {
|
||||
warn!(target: "sync",
|
||||
warn!(target: LOG_TARGET,
|
||||
"‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}",
|
||||
finalized_hash, finalized_number, self.tree.roots().collect::<Vec<_>>()
|
||||
);
|
||||
@@ -322,7 +323,7 @@ impl<'a, B: BlockT> Matcher<'a, B> {
|
||||
}
|
||||
self.extras.active_requests.insert(*peer, request);
|
||||
|
||||
trace!(target: "sync",
|
||||
trace!(target: LOG_TARGET,
|
||||
"Sending {} request to {:?} for {:?}",
|
||||
self.extras.request_type_name, peer, request,
|
||||
);
|
||||
@@ -345,7 +346,7 @@ impl<'a, B: BlockT> Matcher<'a, B> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::chain_sync::PeerSync;
|
||||
use crate::strategy::chain_sync::PeerSync;
|
||||
use quickcheck::{Arbitrary, Gen, QuickCheck};
|
||||
use sp_blockchain::Error as ClientError;
|
||||
use sp_test_primitives::{Block, BlockNumber, Hash};
|
||||
|
||||
@@ -19,10 +19,10 @@
|
||||
//! Blockchain syncing implementation in Substrate.
|
||||
|
||||
pub use service::syncing_service::SyncingService;
|
||||
pub use strategy::warp::{WarpSyncParams, WarpSyncPhase, WarpSyncProgress};
|
||||
pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider};
|
||||
|
||||
mod block_announce_validator;
|
||||
mod chain_sync;
|
||||
mod extra_requests;
|
||||
mod futures_stream;
|
||||
mod pending_responses;
|
||||
@@ -36,7 +36,9 @@ pub mod blocks;
|
||||
pub mod engine;
|
||||
pub mod mock;
|
||||
pub mod service;
|
||||
pub mod state;
|
||||
pub mod state_request_handler;
|
||||
pub mod warp;
|
||||
pub mod strategy;
|
||||
pub mod warp_request_handler;
|
||||
|
||||
/// Log target for this crate.
|
||||
const LOG_TARGET: &str = "sync";
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
//! [`PendingResponses`] is responsible for keeping track of pending responses and
|
||||
//! polling them. [`Stream`] implemented by [`PendingResponses`] never terminates.
|
||||
|
||||
use crate::types::PeerRequest;
|
||||
use crate::{types::PeerRequest, LOG_TARGET};
|
||||
use futures::{
|
||||
channel::oneshot,
|
||||
future::BoxFuture,
|
||||
@@ -33,9 +33,6 @@ use sp_runtime::traits::Block as BlockT;
|
||||
use std::task::{Context, Poll, Waker};
|
||||
use tokio_stream::StreamMap;
|
||||
|
||||
/// Log target for this file.
|
||||
const LOG_TARGET: &'static str = "sync";
|
||||
|
||||
/// Response result.
|
||||
type ResponseResult = Result<Result<(Vec<u8>, ProtocolName), RequestFailure>, oneshot::Canceled>;
|
||||
|
||||
|
||||
@@ -17,7 +17,10 @@
|
||||
//! Helper for handling (i.e. answering) state requests from a remote peer via the
|
||||
//! `crate::request_responses::RequestResponsesBehaviour`.
|
||||
|
||||
use crate::schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse};
|
||||
use crate::{
|
||||
schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse},
|
||||
LOG_TARGET,
|
||||
};
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
use futures::{channel::oneshot, stream::StreamExt};
|
||||
@@ -39,7 +42,6 @@ use std::{
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
const LOG_TARGET: &str = "sync";
|
||||
const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual reponse may be bigger.
|
||||
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
|
||||
|
||||
|
||||
@@ -0,0 +1,489 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! [`SyncingStrategy`] is a proxy between [`crate::engine::SyncingEngine`]
|
||||
//! and specific syncing algorithms.
|
||||
|
||||
pub mod chain_sync;
|
||||
mod state;
|
||||
pub mod state_sync;
|
||||
pub mod warp;
|
||||
|
||||
use crate::{
|
||||
types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncStatus},
|
||||
LOG_TARGET,
|
||||
};
|
||||
use chain_sync::{ChainSync, ChainSyncAction, ChainSyncMode};
|
||||
use libp2p::PeerId;
|
||||
use log::{error, info};
|
||||
use prometheus_endpoint::Registry;
|
||||
use sc_client_api::{BlockBackend, ProofProvider};
|
||||
use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
|
||||
use sc_network_common::sync::{
|
||||
message::{BlockAnnounce, BlockData, BlockRequest},
|
||||
SyncMode,
|
||||
};
|
||||
use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
|
||||
use sp_consensus::BlockOrigin;
|
||||
use sp_runtime::{
|
||||
traits::{Block as BlockT, NumberFor},
|
||||
Justifications,
|
||||
};
|
||||
use state::{StateStrategy, StateStrategyAction};
|
||||
use std::sync::Arc;
|
||||
use warp::{EncodedProof, WarpProofRequest, WarpSync, WarpSyncAction, WarpSyncConfig};
|
||||
|
||||
/// Corresponding `ChainSync` mode.
|
||||
fn chain_sync_mode(sync_mode: SyncMode) -> ChainSyncMode {
|
||||
match sync_mode {
|
||||
SyncMode::Full => ChainSyncMode::Full,
|
||||
SyncMode::LightState { skip_proofs, storage_chain_mode } =>
|
||||
ChainSyncMode::LightState { skip_proofs, storage_chain_mode },
|
||||
SyncMode::Warp => ChainSyncMode::Full,
|
||||
}
|
||||
}
|
||||
|
||||
/// Syncing configuration containing data for all strategies.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SyncingConfig {
|
||||
/// Syncing mode.
|
||||
pub mode: SyncMode,
|
||||
/// The number of parallel downloads to guard against slow peers.
|
||||
pub max_parallel_downloads: u32,
|
||||
/// Maximum number of blocks to request.
|
||||
pub max_blocks_per_request: u32,
|
||||
/// Prometheus metrics registry.
|
||||
pub metrics_registry: Option<Registry>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum SyncingAction<B: BlockT> {
|
||||
/// Send block request to peer. Always implies dropping a stale block request to the same peer.
|
||||
SendBlockRequest { peer_id: PeerId, request: BlockRequest<B> },
|
||||
/// Drop stale block request.
|
||||
CancelBlockRequest { peer_id: PeerId },
|
||||
/// Send state request to peer.
|
||||
SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest },
|
||||
/// Send warp proof request to peer.
|
||||
SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest<B> },
|
||||
/// Peer misbehaved. Disconnect, report it and cancel any requests to it.
|
||||
DropPeer(BadPeer),
|
||||
/// Import blocks.
|
||||
ImportBlocks { origin: BlockOrigin, blocks: Vec<IncomingBlock<B>> },
|
||||
/// Import justifications.
|
||||
ImportJustifications {
|
||||
peer_id: PeerId,
|
||||
hash: B::Hash,
|
||||
number: NumberFor<B>,
|
||||
justifications: Justifications,
|
||||
},
|
||||
/// Syncing strategy has finished.
|
||||
Finished,
|
||||
}
|
||||
|
||||
/// Proxy to specific syncing strategies.
|
||||
pub enum SyncingStrategy<B: BlockT, Client> {
|
||||
WarpSyncStrategy(WarpSync<B, Client>),
|
||||
StateSyncStrategy(StateStrategy<B>),
|
||||
ChainSyncStrategy(ChainSync<B, Client>),
|
||||
}
|
||||
|
||||
impl<B: BlockT, Client> SyncingStrategy<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
Client: HeaderBackend<B>
|
||||
+ BlockBackend<B>
|
||||
+ HeaderMetadata<B, Error = sp_blockchain::Error>
|
||||
+ ProofProvider<B>
|
||||
+ Send
|
||||
+ Sync
|
||||
+ 'static,
|
||||
{
|
||||
/// Initialize a new syncing startegy.
|
||||
pub fn new(
|
||||
config: SyncingConfig,
|
||||
client: Arc<Client>,
|
||||
warp_sync_config: Option<WarpSyncConfig<B>>,
|
||||
) -> Result<Self, ClientError> {
|
||||
if let SyncMode::Warp = config.mode {
|
||||
let warp_sync_config = warp_sync_config
|
||||
.expect("Warp sync configuration must be supplied in warp sync mode.");
|
||||
Ok(Self::WarpSyncStrategy(WarpSync::new(client.clone(), warp_sync_config)))
|
||||
} else {
|
||||
Ok(Self::ChainSyncStrategy(ChainSync::new(
|
||||
chain_sync_mode(config.mode),
|
||||
client.clone(),
|
||||
config.max_parallel_downloads,
|
||||
config.max_blocks_per_request,
|
||||
config.metrics_registry,
|
||||
)?))
|
||||
}
|
||||
}
|
||||
|
||||
/// Notify that a new peer has connected.
|
||||
pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) =>
|
||||
strategy.add_peer(peer_id, best_hash, best_number),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) =>
|
||||
strategy.add_peer(peer_id, best_hash, best_number),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.add_peer(peer_id, best_hash, best_number),
|
||||
}
|
||||
}
|
||||
|
||||
/// Notify that a peer has disconnected.
|
||||
pub fn remove_peer(&mut self, peer_id: &PeerId) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) => strategy.remove_peer(peer_id),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) => strategy.remove_peer(peer_id),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.remove_peer(peer_id),
|
||||
}
|
||||
}
|
||||
|
||||
/// Submit a validated block announcement.
|
||||
///
|
||||
/// Returns new best hash & best number of the peer if they are updated.
|
||||
pub fn on_validated_block_announce(
|
||||
&mut self,
|
||||
is_best: bool,
|
||||
peer_id: PeerId,
|
||||
announce: &BlockAnnounce<B::Header>,
|
||||
) -> Option<(B::Hash, NumberFor<B>)> {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) =>
|
||||
strategy.on_validated_block_announce(is_best, peer_id, announce),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) =>
|
||||
strategy.on_validated_block_announce(is_best, peer_id, announce),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_validated_block_announce(is_best, peer_id, announce),
|
||||
}
|
||||
}
|
||||
|
||||
/// Configure an explicit fork sync request in case external code has detected that there is a
|
||||
/// stale fork missing.
|
||||
pub fn set_sync_fork_request(
|
||||
&mut self,
|
||||
peers: Vec<PeerId>,
|
||||
hash: &B::Hash,
|
||||
number: NumberFor<B>,
|
||||
) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.set_sync_fork_request(peers, hash, number),
|
||||
}
|
||||
}
|
||||
|
||||
/// Request extra justification.
|
||||
pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.request_justification(hash, number),
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear extra justification requests.
|
||||
pub fn clear_justification_requests(&mut self) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.clear_justification_requests(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Report a justification import (successful or not).
|
||||
pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_justification_import(hash, number, success),
|
||||
}
|
||||
}
|
||||
|
||||
/// Process block response.
|
||||
pub fn on_block_response(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
request: BlockRequest<B>,
|
||||
blocks: Vec<BlockData<B>>,
|
||||
) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) =>
|
||||
strategy.on_block_response(peer_id, request, blocks),
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_block_response(peer_id, request, blocks),
|
||||
}
|
||||
}
|
||||
|
||||
/// Process state response.
|
||||
pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(strategy) =>
|
||||
strategy.on_state_response(peer_id, response),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_state_response(peer_id, response),
|
||||
}
|
||||
}
|
||||
|
||||
/// Process warp proof response.
|
||||
pub fn on_warp_proof_response(&mut self, peer_id: &PeerId, response: EncodedProof) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) =>
|
||||
strategy.on_warp_proof_response(peer_id, response),
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(_) => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// A batch of blocks have been processed, with or without errors.
|
||||
pub fn on_blocks_processed(
|
||||
&mut self,
|
||||
imported: usize,
|
||||
count: usize,
|
||||
results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
|
||||
) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(strategy) =>
|
||||
strategy.on_blocks_processed(imported, count, results),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_blocks_processed(imported, count, results),
|
||||
}
|
||||
}
|
||||
|
||||
/// Notify a syncing strategy that a block has been finalized.
|
||||
pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.on_block_finalized(hash, number),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inform sync about a new best imported block.
|
||||
pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor<B>) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.update_chain_info(best_hash, best_number),
|
||||
}
|
||||
}
|
||||
|
||||
// Are we in major sync mode?
|
||||
pub fn is_major_syncing(&self) -> bool {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => true,
|
||||
SyncingStrategy::StateSyncStrategy(_) => true,
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
strategy.status().state.is_major_syncing(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the number of peers known to the syncing strategy.
|
||||
pub fn num_peers(&self) -> usize {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) => strategy.num_peers(),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) => strategy.num_peers(),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_peers(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current sync status.
|
||||
pub fn status(&self) -> SyncStatus<B> {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) => strategy.status(),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) => strategy.status(),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.status(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the total number of downloaded blocks.
|
||||
pub fn num_downloaded_blocks(&self) -> usize {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => 0,
|
||||
SyncingStrategy::StateSyncStrategy(_) => 0,
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_downloaded_blocks(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get an estimate of the number of parallel sync requests.
|
||||
pub fn num_sync_requests(&self) -> usize {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => 0,
|
||||
SyncingStrategy::StateSyncStrategy(_) => 0,
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.num_sync_requests(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Report Prometheus metrics
|
||||
pub fn report_metrics(&self) {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(_) => {},
|
||||
SyncingStrategy::StateSyncStrategy(_) => {},
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) => strategy.report_metrics(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get actions that should be performed by the owner on the strategy's behalf
|
||||
#[must_use]
|
||||
pub fn actions(&mut self) -> Box<dyn Iterator<Item = SyncingAction<B>>> {
|
||||
match self {
|
||||
SyncingStrategy::WarpSyncStrategy(strategy) =>
|
||||
Box::new(strategy.actions().map(|action| match action {
|
||||
WarpSyncAction::SendWarpProofRequest { peer_id, request } =>
|
||||
SyncingAction::SendWarpProofRequest { peer_id, request },
|
||||
WarpSyncAction::SendBlockRequest { peer_id, request } =>
|
||||
SyncingAction::SendBlockRequest { peer_id, request },
|
||||
WarpSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
|
||||
WarpSyncAction::Finished => SyncingAction::Finished,
|
||||
})),
|
||||
SyncingStrategy::StateSyncStrategy(strategy) =>
|
||||
Box::new(strategy.actions().map(|action| match action {
|
||||
StateStrategyAction::SendStateRequest { peer_id, request } =>
|
||||
SyncingAction::SendStateRequest { peer_id, request },
|
||||
StateStrategyAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
|
||||
StateStrategyAction::ImportBlocks { origin, blocks } =>
|
||||
SyncingAction::ImportBlocks { origin, blocks },
|
||||
StateStrategyAction::Finished => SyncingAction::Finished,
|
||||
})),
|
||||
SyncingStrategy::ChainSyncStrategy(strategy) =>
|
||||
Box::new(strategy.actions().map(|action| match action {
|
||||
ChainSyncAction::SendBlockRequest { peer_id, request } =>
|
||||
SyncingAction::SendBlockRequest { peer_id, request },
|
||||
ChainSyncAction::CancelBlockRequest { peer_id } =>
|
||||
SyncingAction::CancelBlockRequest { peer_id },
|
||||
ChainSyncAction::SendStateRequest { peer_id, request } =>
|
||||
SyncingAction::SendStateRequest { peer_id, request },
|
||||
ChainSyncAction::DropPeer(bad_peer) => SyncingAction::DropPeer(bad_peer),
|
||||
ChainSyncAction::ImportBlocks { origin, blocks } =>
|
||||
SyncingAction::ImportBlocks { origin, blocks },
|
||||
ChainSyncAction::ImportJustifications {
|
||||
peer_id,
|
||||
hash,
|
||||
number,
|
||||
justifications,
|
||||
} => SyncingAction::ImportJustifications {
|
||||
peer_id,
|
||||
hash,
|
||||
number,
|
||||
justifications,
|
||||
},
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
/// Switch to next strategy if the active one finished.
|
||||
pub fn switch_to_next(
|
||||
&mut self,
|
||||
config: SyncingConfig,
|
||||
client: Arc<Client>,
|
||||
connected_peers: impl Iterator<Item = (PeerId, B::Hash, NumberFor<B>)>,
|
||||
) -> Result<(), ClientError> {
|
||||
match self {
|
||||
Self::WarpSyncStrategy(warp_sync) => {
|
||||
match warp_sync.take_result() {
|
||||
Some(res) => {
|
||||
info!(
|
||||
target: LOG_TARGET,
|
||||
"Warp sync is complete, continuing with state sync."
|
||||
);
|
||||
let state_sync = StateStrategy::new(
|
||||
client,
|
||||
res.target_header,
|
||||
res.target_body,
|
||||
res.target_justifications,
|
||||
// skip proofs, only set to `true` in `FastUnsafe` sync mode
|
||||
false,
|
||||
connected_peers
|
||||
.map(|(peer_id, _best_hash, best_number)| (peer_id, best_number)),
|
||||
);
|
||||
|
||||
*self = Self::StateSyncStrategy(state_sync);
|
||||
},
|
||||
None => {
|
||||
error!(
|
||||
target: LOG_TARGET,
|
||||
"Warp sync failed. Falling back to full sync."
|
||||
);
|
||||
let mut chain_sync = match ChainSync::new(
|
||||
chain_sync_mode(config.mode),
|
||||
client,
|
||||
config.max_parallel_downloads,
|
||||
config.max_blocks_per_request,
|
||||
config.metrics_registry,
|
||||
) {
|
||||
Ok(chain_sync) => chain_sync,
|
||||
Err(e) => {
|
||||
error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
|
||||
return Err(e)
|
||||
},
|
||||
};
|
||||
// Let `ChainSync` know about connected peers.
|
||||
connected_peers.into_iter().for_each(
|
||||
|(peer_id, best_hash, best_number)| {
|
||||
chain_sync.add_peer(peer_id, best_hash, best_number)
|
||||
},
|
||||
);
|
||||
|
||||
*self = Self::ChainSyncStrategy(chain_sync);
|
||||
},
|
||||
}
|
||||
},
|
||||
Self::StateSyncStrategy(state_sync) => {
|
||||
if state_sync.is_succeded() {
|
||||
info!(target: LOG_TARGET, "State sync is complete, continuing with block sync.");
|
||||
} else {
|
||||
error!(target: LOG_TARGET, "State sync failed. Falling back to full sync.");
|
||||
}
|
||||
let mut chain_sync = match ChainSync::new(
|
||||
chain_sync_mode(config.mode),
|
||||
client,
|
||||
config.max_parallel_downloads,
|
||||
config.max_blocks_per_request,
|
||||
config.metrics_registry,
|
||||
) {
|
||||
Ok(chain_sync) => chain_sync,
|
||||
Err(e) => {
|
||||
error!(target: LOG_TARGET, "Failed to start `ChainSync`.");
|
||||
return Err(e);
|
||||
},
|
||||
};
|
||||
// Let `ChainSync` know about connected peers.
|
||||
connected_peers.into_iter().for_each(|(peer_id, best_hash, best_number)| {
|
||||
chain_sync.add_peer(peer_id, best_hash, best_number)
|
||||
});
|
||||
|
||||
*self = Self::ChainSyncStrategy(chain_sync);
|
||||
},
|
||||
Self::ChainSyncStrategy(_) => {
|
||||
error!(target: LOG_TARGET, "`ChainSyncStrategy` is final startegy, cannot switch to next.");
|
||||
debug_assert!(false);
|
||||
},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
+145
-313
@@ -32,21 +32,18 @@ use crate::{
|
||||
blocks::BlockCollection,
|
||||
extra_requests::ExtraRequests,
|
||||
schema::v1::StateResponse,
|
||||
state::{ImportResult, StateSync},
|
||||
types::{
|
||||
BadPeer, Metrics, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, SyncMode, SyncState,
|
||||
SyncStatus,
|
||||
},
|
||||
warp::{
|
||||
self, EncodedProof, WarpProofImportResult, WarpProofRequest, WarpSync, WarpSyncConfig,
|
||||
WarpSyncPhase, WarpSyncProgress,
|
||||
strategy::{
|
||||
state_sync::{ImportResult, StateSync, StateSyncProvider},
|
||||
warp::{WarpSyncPhase, WarpSyncProgress},
|
||||
},
|
||||
types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus},
|
||||
LOG_TARGET,
|
||||
};
|
||||
|
||||
use codec::Encode;
|
||||
use libp2p::PeerId;
|
||||
use log::{debug, error, info, trace, warn};
|
||||
|
||||
use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};
|
||||
use sc_client_api::{BlockBackend, ProofProvider};
|
||||
use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
|
||||
use sc_network_common::sync::message::{
|
||||
@@ -72,9 +69,6 @@ use std::{
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
/// Log target for this file.
|
||||
const LOG_TARGET: &'static str = "sync";
|
||||
|
||||
/// Maximum blocks to store in the import queue.
|
||||
const MAX_IMPORTING_BLOCKS: usize = 2048;
|
||||
|
||||
@@ -95,9 +89,6 @@ const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8;
|
||||
/// so far behind.
|
||||
const MAJOR_SYNC_BLOCKS: u8 = 5;
|
||||
|
||||
/// Number of peers that need to be connected before warp sync is started.
|
||||
const MIN_PEERS_TO_START_WARP_SYNC: usize = 3;
|
||||
|
||||
mod rep {
|
||||
use sc_network::ReputationChange as Rep;
|
||||
/// Reputation change when a peer sent us a message that led to a
|
||||
@@ -133,6 +124,38 @@ mod rep {
|
||||
pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response");
|
||||
}
|
||||
|
||||
struct Metrics {
|
||||
queued_blocks: Gauge<U64>,
|
||||
fork_targets: Gauge<U64>,
|
||||
justifications: GaugeVec<U64>,
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
fn register(r: &Registry) -> Result<Self, PrometheusError> {
|
||||
Ok(Self {
|
||||
queued_blocks: {
|
||||
let g =
|
||||
Gauge::new("substrate_sync_queued_blocks", "Number of blocks in import queue")?;
|
||||
register(g, r)?
|
||||
},
|
||||
fork_targets: {
|
||||
let g = Gauge::new("substrate_sync_fork_targets", "Number of fork sync targets")?;
|
||||
register(g, r)?
|
||||
},
|
||||
justifications: {
|
||||
let g = GaugeVec::new(
|
||||
Opts::new(
|
||||
"substrate_sync_extra_justifications",
|
||||
"Number of extra justifications requests",
|
||||
),
|
||||
&["status"],
|
||||
)?;
|
||||
register(g, r)?
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
enum AllowedRequests {
|
||||
Some(HashSet<PeerId>),
|
||||
All,
|
||||
@@ -193,8 +216,6 @@ pub enum ChainSyncAction<B: BlockT> {
|
||||
CancelBlockRequest { peer_id: PeerId },
|
||||
/// Send state request to peer.
|
||||
SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest },
|
||||
/// Send warp proof request to peer.
|
||||
SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest<B> },
|
||||
/// Peer misbehaved. Disconnect, report it and cancel the block request to it.
|
||||
DropPeer(BadPeer),
|
||||
/// Import blocks.
|
||||
@@ -208,6 +229,20 @@ pub enum ChainSyncAction<B: BlockT> {
|
||||
},
|
||||
}
|
||||
|
||||
/// Sync operation mode.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
|
||||
pub enum ChainSyncMode {
|
||||
/// Full block download and verification.
|
||||
Full,
|
||||
/// Download blocks and the latest state.
|
||||
LightState {
|
||||
/// Skip state proof download and verification.
|
||||
skip_proofs: bool,
|
||||
/// Download indexed transactions for recent blocks.
|
||||
storage_chain_mode: bool,
|
||||
},
|
||||
}
|
||||
|
||||
/// The main data structure which contains all the state for a chains
|
||||
/// active syncing strategy.
|
||||
pub struct ChainSync<B: BlockT, Client> {
|
||||
@@ -222,7 +257,7 @@ pub struct ChainSync<B: BlockT, Client> {
|
||||
/// The best block hash in our queue of blocks to import
|
||||
best_queued_hash: B::Hash,
|
||||
/// Current mode (full/light)
|
||||
mode: SyncMode,
|
||||
mode: ChainSyncMode,
|
||||
/// Any extra justification requests.
|
||||
extra_justifications: ExtraRequests<B>,
|
||||
/// A set of hashes of blocks that are being downloaded or have been
|
||||
@@ -240,14 +275,6 @@ pub struct ChainSync<B: BlockT, Client> {
|
||||
downloaded_blocks: usize,
|
||||
/// State sync in progress, if any.
|
||||
state_sync: Option<StateSync<B, Client>>,
|
||||
/// Warp sync in progress, if any.
|
||||
warp_sync: Option<WarpSync<B, Client>>,
|
||||
/// Warp sync configuration.
|
||||
///
|
||||
/// Will be `None` after `self.warp_sync` is `Some(_)`.
|
||||
warp_sync_config: Option<WarpSyncConfig<B>>,
|
||||
/// A temporary storage for warp sync target block until warp sync is initialized.
|
||||
warp_sync_target_block_header: Option<B::Header>,
|
||||
/// Enable importing existing blocks. This is used used after the state download to
|
||||
/// catch up to the latest state while re-importing blocks.
|
||||
import_existing: bool,
|
||||
@@ -255,6 +282,8 @@ pub struct ChainSync<B: BlockT, Client> {
|
||||
gap_sync: Option<GapSync<B>>,
|
||||
/// Pending actions.
|
||||
actions: Vec<ChainSyncAction<B>>,
|
||||
/// Prometheus metrics.
|
||||
metrics: Option<Metrics>,
|
||||
}
|
||||
|
||||
/// All the data we have about a Peer that we are trying to sync with
|
||||
@@ -316,10 +345,6 @@ pub(crate) enum PeerSyncState<B: BlockT> {
|
||||
DownloadingJustification(B::Hash),
|
||||
/// Downloading state.
|
||||
DownloadingState,
|
||||
/// Downloading warp proof.
|
||||
DownloadingWarpProof,
|
||||
/// Downloading warp sync target block.
|
||||
DownloadingWarpTargetBlock,
|
||||
/// Actively downloading block history after warp sync.
|
||||
DownloadingGap(NumberFor<B>),
|
||||
}
|
||||
@@ -343,11 +368,11 @@ where
|
||||
{
|
||||
/// Create a new instance.
|
||||
pub fn new(
|
||||
mode: SyncMode,
|
||||
mode: ChainSyncMode,
|
||||
client: Arc<Client>,
|
||||
max_parallel_downloads: u32,
|
||||
max_blocks_per_request: u32,
|
||||
warp_sync_config: Option<WarpSyncConfig<B>>,
|
||||
metrics_registry: Option<Registry>,
|
||||
) -> Result<Self, ClientError> {
|
||||
let mut sync = Self {
|
||||
client,
|
||||
@@ -364,25 +389,25 @@ where
|
||||
max_blocks_per_request,
|
||||
downloaded_blocks: 0,
|
||||
state_sync: None,
|
||||
warp_sync: None,
|
||||
import_existing: false,
|
||||
gap_sync: None,
|
||||
warp_sync_config,
|
||||
warp_sync_target_block_header: None,
|
||||
actions: Vec::new(),
|
||||
metrics: metrics_registry.and_then(|r| match Metrics::register(&r) {
|
||||
Ok(metrics) => Some(metrics),
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
target: LOG_TARGET,
|
||||
"Failed to register `ChainSync` metrics {err:?}",
|
||||
);
|
||||
None
|
||||
},
|
||||
}),
|
||||
};
|
||||
|
||||
sync.reset_sync_start_point()?;
|
||||
Ok(sync)
|
||||
}
|
||||
|
||||
/// Get peer's best hash & number.
|
||||
pub fn peer_info(&self, peer_id: &PeerId) -> Option<PeerInfo<B>> {
|
||||
self.peers
|
||||
.get(peer_id)
|
||||
.map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number })
|
||||
}
|
||||
|
||||
/// Returns the current sync status.
|
||||
pub fn status(&self) -> SyncStatus<B> {
|
||||
let median_seen = self.median_seen();
|
||||
@@ -407,20 +432,10 @@ where
|
||||
SyncState::Idle
|
||||
};
|
||||
|
||||
let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) {
|
||||
(_, _, Some(gap_sync)) => Some(WarpSyncProgress {
|
||||
phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number),
|
||||
total_bytes: 0,
|
||||
}),
|
||||
(None, SyncMode::Warp, _) => Some(WarpSyncProgress {
|
||||
phase: WarpSyncPhase::AwaitingPeers {
|
||||
required_peers: MIN_PEERS_TO_START_WARP_SYNC,
|
||||
},
|
||||
total_bytes: 0,
|
||||
}),
|
||||
(Some(sync), _, _) => Some(sync.progress()),
|
||||
_ => None,
|
||||
};
|
||||
let warp_sync_progress = self.gap_sync.as_ref().map(|gap_sync| WarpSyncProgress {
|
||||
phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number),
|
||||
total_bytes: 0,
|
||||
});
|
||||
|
||||
SyncStatus {
|
||||
state: sync_state,
|
||||
@@ -452,8 +467,8 @@ where
|
||||
}
|
||||
|
||||
/// Notify syncing state machine that a new sync peer has connected.
|
||||
pub fn new_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
|
||||
match self.new_peer_inner(peer_id, best_hash, best_number) {
|
||||
pub fn add_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor<B>) {
|
||||
match self.add_peer_inner(peer_id, best_hash, best_number) {
|
||||
Ok(Some(request)) =>
|
||||
self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }),
|
||||
Ok(None) => {},
|
||||
@@ -462,7 +477,7 @@ where
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn new_peer_inner(
|
||||
fn add_peer_inner(
|
||||
&mut self,
|
||||
peer_id: PeerId,
|
||||
best_hash: B::Hash,
|
||||
@@ -471,7 +486,7 @@ where
|
||||
// There is nothing sync can get from the node that has no blockchain data.
|
||||
match self.block_status(&best_hash) {
|
||||
Err(e) => {
|
||||
debug!(target:LOG_TARGET, "Error reading blockchain: {e}");
|
||||
debug!(target: LOG_TARGET, "Error reading blockchain: {e}");
|
||||
Err(BadPeer(peer_id, rep::BLOCKCHAIN_READ_ERROR))
|
||||
},
|
||||
Ok(BlockStatus::KnownBad) => {
|
||||
@@ -494,7 +509,7 @@ where
|
||||
// an ancestor search, which is what we do in the next match case below.
|
||||
if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() {
|
||||
debug!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"New peer {} with unknown best hash {} ({}), assuming common block.",
|
||||
peer_id,
|
||||
self.best_queued_hash,
|
||||
@@ -516,7 +531,7 @@ where
|
||||
// If we are at genesis, just start downloading.
|
||||
let (state, req) = if self.best_queued_number.is_zero() {
|
||||
debug!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"New peer {peer_id} with best hash {best_hash} ({best_number}).",
|
||||
);
|
||||
|
||||
@@ -525,7 +540,7 @@ where
|
||||
let common_best = std::cmp::min(self.best_queued_number, best_number);
|
||||
|
||||
debug!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"New peer {} with unknown best hash {} ({}), searching for common ancestor.",
|
||||
peer_id,
|
||||
best_hash,
|
||||
@@ -554,20 +569,6 @@ where
|
||||
},
|
||||
);
|
||||
|
||||
if let SyncMode::Warp = self.mode {
|
||||
if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none()
|
||||
{
|
||||
log::debug!(target: LOG_TARGET, "Starting warp state sync.");
|
||||
|
||||
if let Some(config) = self.warp_sync_config.take() {
|
||||
let mut warp_sync = WarpSync::new(self.client.clone(), config);
|
||||
if let Some(header) = self.warp_sync_target_block_header.take() {
|
||||
warp_sync.set_target_block(header);
|
||||
}
|
||||
self.warp_sync = Some(warp_sync);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(req)
|
||||
},
|
||||
Ok(BlockStatus::Queued) |
|
||||
@@ -831,7 +832,7 @@ where
|
||||
}
|
||||
if matching_hash.is_none() && current.is_zero() {
|
||||
trace!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"Ancestry search: genesis mismatch for peer {peer_id}",
|
||||
);
|
||||
return Err(BadPeer(*peer_id, rep::GENESIS_MISMATCH))
|
||||
@@ -886,43 +887,9 @@ where
|
||||
return Ok(())
|
||||
}
|
||||
},
|
||||
PeerSyncState::DownloadingWarpTargetBlock => {
|
||||
peer.state = PeerSyncState::Available;
|
||||
if let Some(warp_sync) = &mut self.warp_sync {
|
||||
if blocks.len() == 1 {
|
||||
validate_blocks::<B>(&blocks, peer_id, Some(request))?;
|
||||
match warp_sync.import_target_block(
|
||||
blocks.pop().expect("`blocks` len checked above."),
|
||||
) {
|
||||
warp::TargetBlockImportResult::Success => return Ok(()),
|
||||
warp::TargetBlockImportResult::BadResponse =>
|
||||
return Err(BadPeer(*peer_id, rep::VERIFICATION_FAIL)),
|
||||
}
|
||||
} else if blocks.is_empty() {
|
||||
debug!(target: LOG_TARGET, "Empty block response from {peer_id}");
|
||||
return Err(BadPeer(*peer_id, rep::NO_BLOCK))
|
||||
} else {
|
||||
debug!(
|
||||
target: LOG_TARGET,
|
||||
"Too many blocks ({}) in warp target block response from {}",
|
||||
blocks.len(),
|
||||
peer_id,
|
||||
);
|
||||
return Err(BadPeer(*peer_id, rep::NOT_REQUESTED))
|
||||
}
|
||||
} else {
|
||||
debug!(
|
||||
target: LOG_TARGET,
|
||||
"Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.",
|
||||
peer_id,
|
||||
);
|
||||
return Ok(())
|
||||
}
|
||||
},
|
||||
PeerSyncState::Available |
|
||||
PeerSyncState::DownloadingJustification(..) |
|
||||
PeerSyncState::DownloadingState |
|
||||
PeerSyncState::DownloadingWarpProof => Vec::new(),
|
||||
PeerSyncState::DownloadingState => Vec::new(),
|
||||
}
|
||||
} else {
|
||||
// When request.is_none() this is a block announcement. Just accept blocks.
|
||||
@@ -1037,7 +1004,7 @@ where
|
||||
is_descendent_of(&**client, base, block)
|
||||
});
|
||||
|
||||
if let SyncMode::LightState { skip_proofs, .. } = &self.mode {
|
||||
if let ChainSyncMode::LightState { skip_proofs, .. } = &self.mode {
|
||||
if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() {
|
||||
// Finalized a recent block.
|
||||
let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect();
|
||||
@@ -1071,12 +1038,15 @@ where
|
||||
}
|
||||
|
||||
/// Submit a validated block announcement.
|
||||
///
|
||||
/// Returns new best hash & best number of the peer if they are updated.
|
||||
#[must_use]
|
||||
pub fn on_validated_block_announce(
|
||||
&mut self,
|
||||
is_best: bool,
|
||||
peer_id: PeerId,
|
||||
announce: &BlockAnnounce<B::Header>,
|
||||
) {
|
||||
) -> Option<(B::Hash, NumberFor<B>)> {
|
||||
let number = *announce.header.number();
|
||||
let hash = announce.header.hash();
|
||||
let parent_status =
|
||||
@@ -1089,19 +1059,21 @@ where
|
||||
peer
|
||||
} else {
|
||||
error!(target: LOG_TARGET, "💔 Called `on_validated_block_announce` with a bad peer ID");
|
||||
return
|
||||
return Some((hash, number))
|
||||
};
|
||||
|
||||
if let PeerSyncState::AncestorSearch { .. } = peer.state {
|
||||
trace!(target: LOG_TARGET, "Peer {} is in the ancestor search state.", peer_id);
|
||||
return
|
||||
return None
|
||||
}
|
||||
|
||||
if is_best {
|
||||
let peer_info = is_best.then(|| {
|
||||
// update their best block
|
||||
peer.best_number = number;
|
||||
peer.best_hash = hash;
|
||||
}
|
||||
|
||||
(hash, number)
|
||||
});
|
||||
|
||||
// If the announced block is the best they have and is not ahead of us, our common number
|
||||
// is either one further ahead or it's the one they just announced, if we know about it.
|
||||
@@ -1118,27 +1090,27 @@ where
|
||||
|
||||
// known block case
|
||||
if known || self.is_already_downloading(&hash) {
|
||||
trace!(target: "sync", "Known block announce from {}: {}", peer_id, hash);
|
||||
trace!(target: LOG_TARGET, "Known block announce from {}: {}", peer_id, hash);
|
||||
if let Some(target) = self.fork_targets.get_mut(&hash) {
|
||||
target.peers.insert(peer_id);
|
||||
}
|
||||
return
|
||||
return peer_info
|
||||
}
|
||||
|
||||
if ancient_parent {
|
||||
trace!(
|
||||
target: "sync",
|
||||
target: LOG_TARGET,
|
||||
"Ignored ancient block announced from {}: {} {:?}",
|
||||
peer_id,
|
||||
hash,
|
||||
announce.header,
|
||||
);
|
||||
return
|
||||
return peer_info
|
||||
}
|
||||
|
||||
if self.status().state == SyncState::Idle {
|
||||
trace!(
|
||||
target: "sync",
|
||||
target: LOG_TARGET,
|
||||
"Added sync target for block announced from {}: {} {:?}",
|
||||
peer_id,
|
||||
hash,
|
||||
@@ -1154,10 +1126,12 @@ where
|
||||
.peers
|
||||
.insert(peer_id);
|
||||
}
|
||||
|
||||
peer_info
|
||||
}
|
||||
|
||||
/// Notify that a sync peer has disconnected.
|
||||
pub fn peer_disconnected(&mut self, peer_id: &PeerId) {
|
||||
pub fn remove_peer(&mut self, peer_id: &PeerId) {
|
||||
self.blocks.clear_peer_download(peer_id);
|
||||
if let Some(gap_sync) = &mut self.gap_sync {
|
||||
gap_sync.blocks.clear_peer_download(peer_id)
|
||||
@@ -1177,12 +1151,33 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Get prometheus metrics.
|
||||
pub fn metrics(&self) -> Metrics {
|
||||
Metrics {
|
||||
queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX),
|
||||
fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX),
|
||||
justifications: self.extra_justifications.metrics(),
|
||||
/// Report prometheus metrics.
|
||||
pub fn report_metrics(&self) {
|
||||
if let Some(metrics) = &self.metrics {
|
||||
metrics
|
||||
.fork_targets
|
||||
.set(self.fork_targets.len().try_into().unwrap_or(std::u64::MAX));
|
||||
metrics
|
||||
.queued_blocks
|
||||
.set(self.queue_blocks.len().try_into().unwrap_or(std::u64::MAX));
|
||||
|
||||
let justifications_metrics = self.extra_justifications.metrics();
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["pending"])
|
||||
.set(justifications_metrics.pending_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["active"])
|
||||
.set(justifications_metrics.active_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["failed"])
|
||||
.set(justifications_metrics.failed_requests.into());
|
||||
metrics
|
||||
.justifications
|
||||
.with_label_values(&["importing"])
|
||||
.set(justifications_metrics.importing_requests.into());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1202,11 +1197,11 @@ where
|
||||
|
||||
fn required_block_attributes(&self) -> BlockAttributes {
|
||||
match self.mode {
|
||||
SyncMode::Full =>
|
||||
ChainSyncMode::Full =>
|
||||
BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY,
|
||||
SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp =>
|
||||
ChainSyncMode::LightState { storage_chain_mode: false, .. } =>
|
||||
BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY,
|
||||
SyncMode::LightState { storage_chain_mode: true, .. } =>
|
||||
ChainSyncMode::LightState { storage_chain_mode: true, .. } =>
|
||||
BlockAttributes::HEADER |
|
||||
BlockAttributes::JUSTIFICATION |
|
||||
BlockAttributes::INDEXED_BODY,
|
||||
@@ -1215,9 +1210,8 @@ where
|
||||
|
||||
fn skip_execution(&self) -> bool {
|
||||
match self.mode {
|
||||
SyncMode::Full => false,
|
||||
SyncMode::LightState { .. } => true,
|
||||
SyncMode::Warp => true,
|
||||
ChainSyncMode::Full => false,
|
||||
ChainSyncMode::LightState { .. } => true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1243,7 +1237,7 @@ where
|
||||
.and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number())))
|
||||
{
|
||||
trace!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"Accepted {} blocks ({:?}) with origin {:?}",
|
||||
new_blocks.len(),
|
||||
h,
|
||||
@@ -1336,7 +1330,7 @@ where
|
||||
}
|
||||
|
||||
// handle peers that were in other states.
|
||||
let action = match self.new_peer_inner(peer_id, p.best_hash, p.best_number) {
|
||||
let action = match self.add_peer_inner(peer_id, p.best_hash, p.best_number) {
|
||||
// since the request is not a justification, remove it from pending responses
|
||||
Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id },
|
||||
// update the request if the new one is available
|
||||
@@ -1353,25 +1347,19 @@ where
|
||||
/// state for.
|
||||
fn reset_sync_start_point(&mut self) -> Result<(), ClientError> {
|
||||
let info = self.client.info();
|
||||
if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() {
|
||||
if matches!(self.mode, ChainSyncMode::LightState { .. }) && info.finalized_state.is_some() {
|
||||
warn!(
|
||||
target: LOG_TARGET,
|
||||
"Can't use fast sync mode with a partially synced database. Reverting to full sync mode."
|
||||
);
|
||||
self.mode = SyncMode::Full;
|
||||
}
|
||||
if matches!(self.mode, SyncMode::Warp) && info.finalized_state.is_some() {
|
||||
warn!(
|
||||
target: LOG_TARGET,
|
||||
"Can't use warp sync mode with a partially synced database. Reverting to full sync mode."
|
||||
);
|
||||
self.mode = SyncMode::Full;
|
||||
self.mode = ChainSyncMode::Full;
|
||||
}
|
||||
|
||||
self.import_existing = false;
|
||||
self.best_queued_hash = info.best_hash;
|
||||
self.best_queued_number = info.best_number;
|
||||
|
||||
if self.mode == SyncMode::Full &&
|
||||
if self.mode == ChainSyncMode::Full &&
|
||||
self.client.block_status(info.best_hash)? != BlockStatus::InChainWithState
|
||||
{
|
||||
self.import_existing = true;
|
||||
@@ -1450,44 +1438,6 @@ where
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Set the warp sync target block externally in case we skip warp proofs downloading.
|
||||
pub fn set_warp_sync_target_block(&mut self, header: B::Header) {
|
||||
if let Some(ref mut warp_sync) = self.warp_sync {
|
||||
warp_sync.set_target_block(header);
|
||||
} else {
|
||||
self.warp_sync_target_block_header = Some(header);
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate block request for downloading of the target block body during warp sync.
|
||||
fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest<B>)> {
|
||||
let sync = &self.warp_sync.as_ref()?;
|
||||
|
||||
if self.allowed_requests.is_empty() ||
|
||||
sync.is_complete() ||
|
||||
self.peers
|
||||
.iter()
|
||||
.any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock)
|
||||
{
|
||||
// Only one pending warp target block request is allowed.
|
||||
return None
|
||||
}
|
||||
|
||||
if let Some((target_number, request)) = sync.next_target_block_request() {
|
||||
// Find a random peer that has a block with the target number.
|
||||
for (id, peer) in self.peers.iter_mut() {
|
||||
if peer.state.is_available() && peer.best_number >= target_number {
|
||||
trace!(target: LOG_TARGET, "New warp target block request for {id}");
|
||||
peer.state = PeerSyncState::DownloadingWarpTargetBlock;
|
||||
self.allowed_requests.clear();
|
||||
return Some((*id, request))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Submit blocks received in a response.
|
||||
pub fn on_block_response(
|
||||
&mut self,
|
||||
@@ -1564,12 +1514,6 @@ where
|
||||
|
||||
/// Get block requests scheduled by sync to be sent out.
|
||||
fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest<B>)> {
|
||||
if self.mode == SyncMode::Warp {
|
||||
return self
|
||||
.warp_target_block_request()
|
||||
.map_or_else(|| Vec::new(), |req| Vec::from([req]))
|
||||
}
|
||||
|
||||
if self.allowed_requests.is_empty() || self.state_sync.is_some() {
|
||||
return Vec::new()
|
||||
}
|
||||
@@ -1694,7 +1638,7 @@ where
|
||||
if self.allowed_requests.is_empty() {
|
||||
return None
|
||||
}
|
||||
if (self.state_sync.is_some() || self.warp_sync.is_some()) &&
|
||||
if self.state_sync.is_some() &&
|
||||
self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState)
|
||||
{
|
||||
// Only one pending state request is allowed.
|
||||
@@ -1706,7 +1650,7 @@ where
|
||||
}
|
||||
|
||||
for (id, peer) in self.peers.iter_mut() {
|
||||
if peer.state.is_available() && peer.common_number >= sync.target_block_num() {
|
||||
if peer.state.is_available() && peer.common_number >= sync.target_number() {
|
||||
peer.state = PeerSyncState::DownloadingState;
|
||||
let request = sync.next_request();
|
||||
trace!(target: LOG_TARGET, "New StateRequest for {}: {:?}", id, request);
|
||||
@@ -1715,55 +1659,6 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(sync) = &self.warp_sync {
|
||||
if sync.is_complete() {
|
||||
return None
|
||||
}
|
||||
if let (Some(request), Some(target)) =
|
||||
(sync.next_state_request(), sync.target_block_number())
|
||||
{
|
||||
for (id, peer) in self.peers.iter_mut() {
|
||||
if peer.state.is_available() && peer.best_number >= target {
|
||||
trace!(target: LOG_TARGET, "New StateRequest for {id}: {request:?}");
|
||||
peer.state = PeerSyncState::DownloadingState;
|
||||
self.allowed_requests.clear();
|
||||
return Some((*id, OpaqueStateRequest(Box::new(request))))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Get a warp proof request scheduled by sync to be sent out (if any).
|
||||
fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest<B>)> {
|
||||
if let Some(sync) = &self.warp_sync {
|
||||
if self.allowed_requests.is_empty() ||
|
||||
sync.is_complete() ||
|
||||
self.peers
|
||||
.iter()
|
||||
.any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof)
|
||||
{
|
||||
// Only one pending state request is allowed.
|
||||
return None
|
||||
}
|
||||
if let Some(request) = sync.next_warp_proof_request() {
|
||||
let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect();
|
||||
if !targets.is_empty() {
|
||||
targets.sort();
|
||||
let median = targets[targets.len() / 2];
|
||||
// Find a random peer that is synced as much as peer majority.
|
||||
for (id, peer) in self.peers.iter_mut() {
|
||||
if peer.state.is_available() && peer.best_number >= median {
|
||||
trace!(target: LOG_TARGET, "New WarpProofRequest for {id}");
|
||||
peer.state = PeerSyncState::DownloadingWarpProof;
|
||||
self.allowed_requests.clear();
|
||||
return Some((*id, request))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
@@ -1797,15 +1692,6 @@ where
|
||||
response.proof.len(),
|
||||
);
|
||||
sync.import(*response)
|
||||
} else if let Some(sync) = &mut self.warp_sync {
|
||||
debug!(
|
||||
target: LOG_TARGET,
|
||||
"Importing state data from {} with {} keys, {} proof nodes.",
|
||||
peer_id,
|
||||
response.entries.len(),
|
||||
response.proof.len(),
|
||||
);
|
||||
sync.import_state(*response)
|
||||
} else {
|
||||
debug!(target: LOG_TARGET, "Ignored obsolete state response from {peer_id}");
|
||||
return Err(BadPeer(*peer_id, rep::NOT_REQUESTED))
|
||||
@@ -1838,43 +1724,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Submit a warp proof response received.
|
||||
pub fn on_warp_sync_response(&mut self, peer_id: &PeerId, response: EncodedProof) {
|
||||
if let Some(peer) = self.peers.get_mut(peer_id) {
|
||||
if let PeerSyncState::DownloadingWarpProof = peer.state {
|
||||
peer.state = PeerSyncState::Available;
|
||||
self.allowed_requests.set_all();
|
||||
}
|
||||
}
|
||||
let import_result = if let Some(sync) = &mut self.warp_sync {
|
||||
debug!(
|
||||
target: LOG_TARGET,
|
||||
"Importing warp proof data from {}, {} bytes.",
|
||||
peer_id,
|
||||
response.0.len(),
|
||||
);
|
||||
sync.import_warp_proof(response)
|
||||
} else {
|
||||
debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {peer_id}");
|
||||
self.actions
|
||||
.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::NOT_REQUESTED)));
|
||||
return
|
||||
};
|
||||
|
||||
match import_result {
|
||||
WarpProofImportResult::Success => {},
|
||||
WarpProofImportResult::BadResponse => {
|
||||
debug!(target: LOG_TARGET, "Bad proof data received from {peer_id}");
|
||||
self.actions.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_BLOCK)));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// A batch of blocks have been processed, with or without errors.
|
||||
///
|
||||
/// Call this when a batch of blocks have been processed by the import
|
||||
/// queue, with or without errors. If an error is returned, the pending response
|
||||
/// from the peer must be dropped.
|
||||
/// queue, with or without errors.
|
||||
pub fn on_blocks_processed(
|
||||
&mut self,
|
||||
imported: usize,
|
||||
@@ -1934,7 +1787,7 @@ where
|
||||
self.update_peer_common_number(&peer, number);
|
||||
}
|
||||
let state_sync_complete =
|
||||
self.state_sync.as_ref().map_or(false, |s| s.target() == hash);
|
||||
self.state_sync.as_ref().map_or(false, |s| s.target_hash() == hash);
|
||||
if state_sync_complete {
|
||||
info!(
|
||||
target: LOG_TARGET,
|
||||
@@ -1942,21 +1795,7 @@ where
|
||||
self.state_sync.as_ref().map_or(0, |s| s.progress().size / (1024 * 1024)),
|
||||
);
|
||||
self.state_sync = None;
|
||||
self.mode = SyncMode::Full;
|
||||
self.restart();
|
||||
}
|
||||
let warp_sync_complete = self
|
||||
.warp_sync
|
||||
.as_ref()
|
||||
.map_or(false, |s| s.target_block_hash() == Some(hash));
|
||||
if warp_sync_complete {
|
||||
info!(
|
||||
target: LOG_TARGET,
|
||||
"Warp sync is complete ({} MiB), restarting block sync.",
|
||||
self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)),
|
||||
);
|
||||
self.warp_sync = None;
|
||||
self.mode = SyncMode::Full;
|
||||
self.mode = ChainSyncMode::Full;
|
||||
self.restart();
|
||||
}
|
||||
let gap_sync_complete =
|
||||
@@ -2012,7 +1851,6 @@ where
|
||||
e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => {
|
||||
warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err());
|
||||
self.state_sync = None;
|
||||
self.warp_sync = None;
|
||||
self.restart();
|
||||
},
|
||||
Err(BlockImportError::Cancelled) => {},
|
||||
@@ -2043,12 +1881,6 @@ where
|
||||
.map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request });
|
||||
self.actions.extend(state_request);
|
||||
|
||||
let warp_proof_request = self
|
||||
.warp_sync_request()
|
||||
.into_iter()
|
||||
.map(|(peer_id, request)| ChainSyncAction::SendWarpProofRequest { peer_id, request });
|
||||
self.actions.extend(warp_proof_request);
|
||||
|
||||
std::mem::take(&mut self.actions).into_iter()
|
||||
}
|
||||
|
||||
@@ -2324,7 +2156,7 @@ where
|
||||
/// Returns the number of the first block in the sequence.
|
||||
///
|
||||
/// It is expected that `blocks` are in ascending order.
|
||||
fn validate_blocks<Block: BlockT>(
|
||||
pub fn validate_blocks<Block: BlockT>(
|
||||
blocks: &Vec<BlockData<Block>>,
|
||||
peer_id: &PeerId,
|
||||
request: Option<BlockRequest<Block>>,
|
||||
@@ -2389,7 +2221,7 @@ fn validate_blocks<Block: BlockT>(
|
||||
let hash = header.hash();
|
||||
if hash != b.hash {
|
||||
debug!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"Bad header received from {}. Expected hash {:?}, got {:?}",
|
||||
peer_id,
|
||||
b.hash,
|
||||
@@ -2406,7 +2238,7 @@ fn validate_blocks<Block: BlockT>(
|
||||
);
|
||||
if expected != got {
|
||||
debug!(
|
||||
target:LOG_TARGET,
|
||||
target: LOG_TARGET,
|
||||
"Bad extrinsic root for a block {} received from {}. Expected {:?}, got {:?}",
|
||||
b.hash,
|
||||
peer_id,
|
||||
+26
-26
@@ -38,7 +38,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() {
|
||||
let client = Arc::new(TestClientBuilder::new().build());
|
||||
let peer_id = PeerId::random();
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
|
||||
let (a1_hash, a1_number) = {
|
||||
let a1 = BlockBuilderBuilder::new(&*client)
|
||||
@@ -53,7 +53,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() {
|
||||
};
|
||||
|
||||
// add a new peer with the same best block
|
||||
sync.new_peer(peer_id, a1_hash, a1_number);
|
||||
sync.add_peer(peer_id, a1_hash, a1_number);
|
||||
|
||||
// and request a justification for the block
|
||||
sync.request_justification(&a1_hash, a1_number);
|
||||
@@ -91,7 +91,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() {
|
||||
fn restart_doesnt_affect_peers_downloading_finality_data() {
|
||||
let mut client = Arc::new(TestClientBuilder::new().build());
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
|
||||
let peer_id1 = PeerId::random();
|
||||
let peer_id2 = PeerId::random();
|
||||
@@ -117,8 +117,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
|
||||
let (b1_hash, b1_number) = new_blocks(50);
|
||||
|
||||
// add 2 peers at blocks that we don't have locally
|
||||
sync.new_peer(peer_id1, Hash::random(), 42);
|
||||
sync.new_peer(peer_id2, Hash::random(), 10);
|
||||
sync.add_peer(peer_id1, Hash::random(), 42);
|
||||
sync.add_peer(peer_id2, Hash::random(), 10);
|
||||
|
||||
// we wil send block requests to these peers
|
||||
// for these blocks we don't know about
|
||||
@@ -128,7 +128,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() {
|
||||
.all(|(p, _)| { p == peer_id1 || p == peer_id2 }));
|
||||
|
||||
// add a new peer at a known block
|
||||
sync.new_peer(peer_id3, b1_hash, b1_number);
|
||||
sync.add_peer(peer_id3, b1_hash, b1_number);
|
||||
|
||||
// we request a justification for a block we have locally
|
||||
sync.request_justification(&b1_hash, b1_number);
|
||||
@@ -181,7 +181,7 @@ fn send_block_announce(header: Header, peer_id: PeerId, sync: &mut ChainSync<Blo
|
||||
data: Some(Vec::new()),
|
||||
};
|
||||
|
||||
sync.on_validated_block_announce(true, peer_id, &announce);
|
||||
let _ = sync.on_validated_block_announce(true, peer_id, &announce);
|
||||
}
|
||||
|
||||
/// Create a block response from the given `blocks`.
|
||||
@@ -275,7 +275,7 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
|
||||
let mut client = Arc::new(TestClientBuilder::new().build());
|
||||
let info = client.info();
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
|
||||
let peer_id1 = PeerId::random();
|
||||
let peer_id2 = PeerId::random();
|
||||
@@ -283,8 +283,8 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() {
|
||||
let best_block = blocks.last().unwrap().clone();
|
||||
let max_blocks_to_request = sync.max_blocks_per_request;
|
||||
// Connect the node we will sync from
|
||||
sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number());
|
||||
sync.new_peer(peer_id2, info.best_hash, 0);
|
||||
sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number());
|
||||
sync.add_peer(peer_id2, info.best_hash, 0);
|
||||
|
||||
let mut best_block_num = 0;
|
||||
while best_block_num < MAX_DOWNLOAD_AHEAD {
|
||||
@@ -421,7 +421,7 @@ fn can_sync_huge_fork() {
|
||||
|
||||
let info = client.info();
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
|
||||
let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone();
|
||||
let just = (*b"TEST", Vec::new());
|
||||
@@ -432,7 +432,7 @@ fn can_sync_huge_fork() {
|
||||
|
||||
let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
|
||||
// Connect the node we will sync from
|
||||
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
|
||||
send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync);
|
||||
|
||||
@@ -554,7 +554,7 @@ fn syncs_fork_without_duplicate_requests() {
|
||||
|
||||
let info = client.info();
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
|
||||
let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone();
|
||||
let just = (*b"TEST", Vec::new());
|
||||
@@ -565,7 +565,7 @@ fn syncs_fork_without_duplicate_requests() {
|
||||
|
||||
let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone();
|
||||
// Connect the node we will sync from
|
||||
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
|
||||
send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync);
|
||||
|
||||
@@ -689,12 +689,12 @@ fn removes_target_fork_on_disconnect() {
|
||||
let mut client = Arc::new(TestClientBuilder::new().build());
|
||||
let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::<Vec<_>>();
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
|
||||
let peer_id1 = PeerId::random();
|
||||
let common_block = blocks[1].clone();
|
||||
// Connect the node we will sync from
|
||||
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
|
||||
// Create a "new" header and announce it
|
||||
let mut header = blocks[0].header().clone();
|
||||
@@ -702,7 +702,7 @@ fn removes_target_fork_on_disconnect() {
|
||||
send_block_announce(header, peer_id1, &mut sync);
|
||||
assert!(sync.fork_targets.len() == 1);
|
||||
|
||||
let _ = sync.peer_disconnected(&peer_id1);
|
||||
let _ = sync.remove_peer(&peer_id1);
|
||||
assert!(sync.fork_targets.len() == 0);
|
||||
}
|
||||
|
||||
@@ -714,11 +714,11 @@ fn can_import_response_with_missing_blocks() {
|
||||
|
||||
let empty_client = Arc::new(TestClientBuilder::new().build());
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, empty_client.clone(), 1, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, empty_client.clone(), 1, 64, None).unwrap();
|
||||
|
||||
let peer_id1 = PeerId::random();
|
||||
let best_block = blocks[3].clone();
|
||||
sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number());
|
||||
sync.add_peer(peer_id1, best_block.hash(), *best_block.header().number());
|
||||
|
||||
sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available;
|
||||
sync.peers.get_mut(&peer_id1).unwrap().common_number = 0;
|
||||
@@ -745,7 +745,7 @@ fn ancestor_search_repeat() {
|
||||
#[test]
|
||||
fn sync_restart_removes_block_but_not_justification_requests() {
|
||||
let mut client = Arc::new(TestClientBuilder::new().build());
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None).unwrap();
|
||||
|
||||
let peers = vec![PeerId::random(), PeerId::random()];
|
||||
|
||||
@@ -769,7 +769,7 @@ fn sync_restart_removes_block_but_not_justification_requests() {
|
||||
let (b1_hash, b1_number) = new_blocks(50);
|
||||
|
||||
// add new peer and request blocks from them
|
||||
sync.new_peer(peers[0], Hash::random(), 42);
|
||||
sync.add_peer(peers[0], Hash::random(), 42);
|
||||
|
||||
// we don't actually perform any requests, just keep track of peers waiting for a response
|
||||
let mut pending_responses = HashSet::new();
|
||||
@@ -782,7 +782,7 @@ fn sync_restart_removes_block_but_not_justification_requests() {
|
||||
}
|
||||
|
||||
// add a new peer at a known block
|
||||
sync.new_peer(peers[1], b1_hash, b1_number);
|
||||
sync.add_peer(peers[1], b1_hash, b1_number);
|
||||
|
||||
// we request a justification for a block we have locally
|
||||
sync.request_justification(&b1_hash, b1_number);
|
||||
@@ -837,7 +837,7 @@ fn sync_restart_removes_block_but_not_justification_requests() {
|
||||
sync.peers.get(&peers[1]).unwrap().state,
|
||||
PeerSyncState::DownloadingJustification(b1_hash),
|
||||
);
|
||||
let _ = sync.peer_disconnected(&peers[1]);
|
||||
let _ = sync.remove_peer(&peers[1]);
|
||||
pending_responses.remove(&peers[1]);
|
||||
assert_eq!(pending_responses.len(), 0);
|
||||
}
|
||||
@@ -887,14 +887,14 @@ fn request_across_forks() {
|
||||
fork_blocks
|
||||
};
|
||||
|
||||
let mut sync = ChainSync::new(SyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 5, 64, None).unwrap();
|
||||
|
||||
// Add the peers, all at the common ancestor 100.
|
||||
let common_block = blocks.last().unwrap();
|
||||
let peer_id1 = PeerId::random();
|
||||
sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
sync.add_peer(peer_id1, common_block.hash(), *common_block.header().number());
|
||||
let peer_id2 = PeerId::random();
|
||||
sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number());
|
||||
sync.add_peer(peer_id2, common_block.hash(), *common_block.header().number());
|
||||
|
||||
// Peer 1 announces 107 from fork 1, 100-107 get downloaded.
|
||||
{
|
||||
@@ -0,0 +1,754 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! State sync strategy.
|
||||
|
||||
use crate::{
|
||||
schema::v1::StateResponse,
|
||||
strategy::state_sync::{ImportResult, StateSync, StateSyncProvider},
|
||||
types::{BadPeer, OpaqueStateRequest, OpaqueStateResponse, SyncState, SyncStatus},
|
||||
LOG_TARGET,
|
||||
};
|
||||
use libp2p::PeerId;
|
||||
use log::{debug, error, trace};
|
||||
use sc_client_api::ProofProvider;
|
||||
use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock};
|
||||
use sc_network_common::sync::message::BlockAnnounce;
|
||||
use sp_consensus::BlockOrigin;
|
||||
use sp_runtime::{
|
||||
traits::{Block as BlockT, Header, NumberFor},
|
||||
Justifications, SaturatedConversion,
|
||||
};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
mod rep {
|
||||
use sc_network::ReputationChange as Rep;
|
||||
|
||||
/// Peer response data does not have requested bits.
|
||||
pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response");
|
||||
|
||||
/// Reputation change for peers which send us a known bad state.
|
||||
pub const BAD_STATE: Rep = Rep::new(-(1 << 29), "Bad state");
|
||||
}
|
||||
|
||||
/// Action that should be performed on [`StateStrategy`]'s behalf.
|
||||
pub enum StateStrategyAction<B: BlockT> {
|
||||
/// Send state request to peer.
|
||||
SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest },
|
||||
/// Disconnect and report peer.
|
||||
DropPeer(BadPeer),
|
||||
/// Import blocks.
|
||||
ImportBlocks { origin: BlockOrigin, blocks: Vec<IncomingBlock<B>> },
|
||||
/// State sync has finished.
|
||||
Finished,
|
||||
}
|
||||
|
||||
enum PeerState {
|
||||
Available,
|
||||
DownloadingState,
|
||||
}
|
||||
|
||||
impl PeerState {
|
||||
fn is_available(&self) -> bool {
|
||||
matches!(self, PeerState::Available)
|
||||
}
|
||||
}
|
||||
|
||||
struct Peer<B: BlockT> {
|
||||
best_number: NumberFor<B>,
|
||||
state: PeerState,
|
||||
}
|
||||
|
||||
/// Syncing strategy that downloads and imports a recent state directly.
|
||||
pub struct StateStrategy<B: BlockT> {
|
||||
state_sync: Box<dyn StateSyncProvider<B>>,
|
||||
peers: HashMap<PeerId, Peer<B>>,
|
||||
actions: Vec<StateStrategyAction<B>>,
|
||||
succeded: bool,
|
||||
}
|
||||
|
||||
impl<B: BlockT> StateStrategy<B> {
	/// Create a new instance.
	///
	/// `target_header`/`target_body`/`target_justifications` describe the block
	/// whose state is to be downloaded; `skip_proof` disables proof
	/// verification of state responses; `initial_peers` seeds the peer set with
	/// `(peer id, best number)` pairs.
	pub fn new<Client>(
		client: Arc<Client>,
		target_header: B::Header,
		target_body: Option<Vec<B::Extrinsic>>,
		target_justifications: Option<Justifications>,
		skip_proof: bool,
		initial_peers: impl Iterator<Item = (PeerId, NumberFor<B>)>,
	) -> Self
	where
		Client: ProofProvider<B> + Send + Sync + 'static,
	{
		// All initial peers start out idle.
		let peers = initial_peers
			.map(|(peer_id, best_number)| {
				(peer_id, Peer { best_number, state: PeerState::Available })
			})
			.collect();
		Self {
			state_sync: Box::new(StateSync::new(
				client,
				target_header,
				target_body,
				target_justifications,
				skip_proof,
			)),
			peers,
			actions: Vec::new(),
			succeded: false,
		}
	}

	// Create a new instance with a custom state sync provider.
	// Used in tests.
	#[cfg(test)]
	fn new_with_provider(
		state_sync_provider: Box<dyn StateSyncProvider<B>>,
		initial_peers: impl Iterator<Item = (PeerId, NumberFor<B>)>,
	) -> Self {
		Self {
			state_sync: state_sync_provider,
			peers: initial_peers
				.map(|(peer_id, best_number)| {
					(peer_id, Peer { best_number, state: PeerState::Available })
				})
				.collect(),
			actions: Vec::new(),
			succeded: false,
		}
	}

	/// Notify that a new peer has connected.
	pub fn add_peer(&mut self, peer_id: PeerId, _best_hash: B::Hash, best_number: NumberFor<B>) {
		self.peers.insert(peer_id, Peer { best_number, state: PeerState::Available });
	}

	/// Notify that a peer has disconnected.
	pub fn remove_peer(&mut self, peer_id: &PeerId) {
		self.peers.remove(peer_id);
	}

	/// Submit a validated block announcement.
	///
	/// Returns new best hash & best number of the peer if they are updated.
	#[must_use]
	pub fn on_validated_block_announce(
		&mut self,
		is_best: bool,
		peer_id: PeerId,
		announce: &BlockAnnounce<B::Header>,
	) -> Option<(B::Hash, NumberFor<B>)> {
		// NOTE(review): `bool::then_some` evaluates its argument eagerly, so the
		// block below runs (and updates the peer's `best_number`) even when
		// `is_best` is `false`; only the returned `Option` is gated. Confirm the
		// unconditional update is intended, otherwise `is_best.then(|| ...)`
		// would defer it.
		is_best.then_some({
			let best_number = *announce.header.number();
			let best_hash = announce.header.hash();
			if let Some(ref mut peer) = self.peers.get_mut(&peer_id) {
				peer.best_number = best_number;
			}
			// Let `SyncingEngine` know that we should update the peer info.
			(best_hash, best_number)
		})
	}

	/// Process state response.
	///
	/// On failure the offending peer is queued for disconnection via a
	/// [`StateStrategyAction::DropPeer`] action.
	pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) {
		if let Err(bad_peer) = self.on_state_response_inner(peer_id, response) {
			self.actions.push(StateStrategyAction::DropPeer(bad_peer));
		}
	}

	/// Inner part of [`Self::on_state_response`]: downcasts the opaque response,
	/// feeds it to the state sync machine and queues a block import once the
	/// state is complete. Returns `Err(BadPeer)` on malformed/bad responses.
	fn on_state_response_inner(
		&mut self,
		peer_id: PeerId,
		response: OpaqueStateResponse,
	) -> Result<(), BadPeer> {
		// The peer answered, so it is available for the next request again.
		if let Some(peer) = self.peers.get_mut(&peer_id) {
			peer.state = PeerState::Available;
		}

		let response: Box<StateResponse> = response.0.downcast().map_err(|_error| {
			// The opaque response is produced by our own request handler, so a
			// failed downcast indicates a local implementation bug, not a peer
			// fault — but the peer is still reported to make the failure visible.
			error!(
				target: LOG_TARGET,
				"Failed to downcast opaque state response, this is an implementation bug."
			);
			debug_assert!(false);

			BadPeer(peer_id, rep::BAD_RESPONSE)
		})?;

		debug!(
			target: LOG_TARGET,
			"Importing state data from {} with {} keys, {} proof nodes.",
			peer_id,
			response.entries.len(),
			response.proof.len(),
		);

		match self.state_sync.import(*response) {
			ImportResult::Import(hash, header, state, body, justifications) => {
				let origin = BlockOrigin::NetworkInitialSync;
				let block = IncomingBlock {
					hash,
					header: Some(header),
					body,
					indexed_body: None,
					justifications,
					origin: None,
					allow_missing_state: true,
					import_existing: true,
					skip_execution: true,
					state: Some(state),
				};
				debug!(target: LOG_TARGET, "State download is complete. Import is queued");
				self.actions
					.push(StateStrategyAction::ImportBlocks { origin, blocks: vec![block] });
				Ok(())
			},
			// Partial response; keep downloading.
			ImportResult::Continue => Ok(()),
			ImportResult::BadResponse => {
				debug!(target: LOG_TARGET, "Bad state data received from {peer_id}");
				Err(BadPeer(peer_id, rep::BAD_STATE))
			},
		}
	}

	/// A batch of blocks have been processed, with or without errors.
	///
	/// Normally this should be called when target block with state is imported.
	pub fn on_blocks_processed(
		&mut self,
		imported: usize,
		count: usize,
		results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
	) {
		trace!(target: LOG_TARGET, "State sync: imported {imported} of {count}.");

		// Only results for the target block are relevant; anything else is
		// logged and ignored.
		let results = results
			.into_iter()
			.filter_map(|(result, hash)| {
				if hash == self.state_sync.target_hash() {
					Some(result)
				} else {
					debug!(
						target: LOG_TARGET,
						"Unexpected block processed: {hash} with result {result:?}.",
					);
					None
				}
			})
			.collect::<Vec<_>>();

		if !results.is_empty() {
			// We processed the target block
			results.iter().filter_map(|result| result.as_ref().err()).for_each(|e| {
				error!(
					target: LOG_TARGET,
					"Failed to import target block with state: {e:?}."
				);
			});
			// The strategy finishes either way; `succeded` records whether the
			// target import went through.
			self.succeded |= results.into_iter().any(|result| result.is_ok());
			self.actions.push(StateStrategyAction::Finished);
		}
	}

	/// Produce state request.
	///
	/// Returns `None` when the state download is complete or another request is
	/// already in flight (only one request at a time is allowed).
	fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> {
		if self.state_sync.is_complete() {
			return None
		}

		if self
			.peers
			.values()
			.any(|peer| matches!(peer.state, PeerState::DownloadingState))
		{
			// Only one state request at a time is possible.
			return None
		}

		let peer_id =
			self.schedule_next_peer(PeerState::DownloadingState, self.state_sync.target_number())?;
		let request = self.state_sync.next_request();
		trace!(
			target: LOG_TARGET,
			"New state request to {peer_id}: {request:?}.",
		);
		Some((peer_id, OpaqueStateRequest(Box::new(request))))
	}

	/// Pick an available peer whose best number is at least
	/// `max(median of all peers, min_best_number)`, mark it with `new_state`
	/// and return its id. Returns `None` if no such peer exists.
	fn schedule_next_peer(
		&mut self,
		new_state: PeerState,
		min_best_number: NumberFor<B>,
	) -> Option<PeerId> {
		let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect();
		if targets.is_empty() {
			return None
		}
		targets.sort();
		let median = targets[targets.len() / 2];
		let threshold = std::cmp::max(median, min_best_number);
		// Find a random peer that is synced as much as peer majority and is above
		// `min_best_number`.
		for (peer_id, peer) in self.peers.iter_mut() {
			if peer.state.is_available() && peer.best_number >= threshold {
				peer.state = new_state;
				return Some(*peer_id)
			}
		}
		None
	}

	/// Returns the current sync status.
	pub fn status(&self) -> SyncStatus<B> {
		SyncStatus {
			state: if self.state_sync.is_complete() {
				SyncState::Idle
			} else {
				SyncState::Downloading { target: self.state_sync.target_number() }
			},
			best_seen_block: Some(self.state_sync.target_number()),
			num_peers: self.peers.len().saturated_into(),
			num_connected_peers: self.peers.len().saturated_into(),
			queued_blocks: 0,
			state_sync: Some(self.state_sync.progress()),
			warp_sync: None,
		}
	}

	/// Get the number of peers known to syncing.
	pub fn num_peers(&self) -> usize {
		self.peers.len()
	}

	/// Get actions that should be performed by the owner on [`StateStrategy`]'s behalf.
	///
	/// Also attempts to issue a new state request before draining the queue.
	#[must_use]
	pub fn actions(&mut self) -> impl Iterator<Item = StateStrategyAction<B>> {
		let state_request = self
			.state_request()
			.into_iter()
			.map(|(peer_id, request)| StateStrategyAction::SendStateRequest { peer_id, request });
		self.actions.extend(state_request);

		// Drain the queue; subsequent calls start from an empty action list.
		std::mem::take(&mut self.actions).into_iter()
	}

	/// Check if state sync has succeeded.
	#[must_use]
	pub fn is_succeded(&self) -> bool {
		self.succeded
	}
}
|
||||
|
||||
/// Unit tests for [`StateStrategy`], driven through a mocked
/// [`StateSyncProvider`] where the real state machine is not needed.
#[cfg(test)]
mod test {
	use super::*;
	use crate::{
		schema::v1::{StateRequest, StateResponse},
		strategy::state_sync::{ImportResult, StateSyncProgress, StateSyncProvider},
	};
	use codec::Decode;
	use sc_block_builder::BlockBuilderBuilder;
	use sc_client_api::KeyValueStates;
	use sc_consensus::{ImportedAux, ImportedState};
	use sp_runtime::traits::Zero;
	use substrate_test_runtime_client::{
		runtime::{Block, Hash},
		BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
	};

	// Mock of the state sync state machine, so tests can script `import()`
	// results and target hashes without running a real download.
	mockall::mock! {
		pub StateSync<B: BlockT> {}

		impl<B: BlockT> StateSyncProvider<B> for StateSync<B> {
			fn import(&mut self, response: StateResponse) -> ImportResult<B>;
			fn next_request(&self) -> StateRequest;
			fn is_complete(&self) -> bool;
			fn target_number(&self) -> NumberFor<B>;
			fn target_hash(&self) -> B::Hash;
			fn progress(&self) -> StateSyncProgress;
		}
	}

	#[test]
	fn no_peer_is_scheduled_if_no_peers_connected() {
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let target_block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;
		let target_header = target_block.header().clone();

		let mut state_strategy =
			StateStrategy::new(client, target_header, None, None, false, std::iter::empty());

		assert!(state_strategy
			.schedule_next_peer(PeerState::DownloadingState, Zero::zero())
			.is_none());
	}

	#[test]
	fn at_least_median_synced_peer_is_scheduled() {
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let target_block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;

		// Repeat to cover random iteration order of the peer map.
		for _ in 0..100 {
			let peers = (1..=10)
				.map(|best_number| (PeerId::random(), best_number))
				.collect::<HashMap<_, _>>();
			let initial_peers = peers.iter().map(|(p, n)| (*p, *n));

			let mut state_strategy = StateStrategy::new(
				client.clone(),
				target_block.header().clone(),
				None,
				None,
				false,
				initial_peers,
			);

			let peer_id =
				state_strategy.schedule_next_peer(PeerState::DownloadingState, Zero::zero());
			// With best numbers 1..=10 the median is 6, so the scheduled peer
			// must be at least that far along.
			assert!(*peers.get(&peer_id.unwrap()).unwrap() >= 6);
		}
	}

	#[test]
	fn min_best_number_peer_is_scheduled() {
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let target_block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;

		// Repeat to cover random iteration order of the peer map.
		for _ in 0..10 {
			let peers = (1..=10)
				.map(|best_number| (PeerId::random(), best_number))
				.collect::<HashMap<_, _>>();
			let initial_peers = peers.iter().map(|(p, n)| (*p, *n));

			let mut state_strategy = StateStrategy::new(
				client.clone(),
				target_block.header().clone(),
				None,
				None,
				false,
				initial_peers,
			);

			// Only the single peer at best number 10 satisfies the minimum.
			let peer_id = state_strategy.schedule_next_peer(PeerState::DownloadingState, 10);
			assert!(*peers.get(&peer_id.unwrap()).unwrap() == 10);
		}
	}

	#[test]
	fn state_request_contains_correct_hash() {
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let target_block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;

		let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number));

		let mut state_strategy = StateStrategy::new(
			client.clone(),
			target_block.header().clone(),
			None,
			None,
			false,
			initial_peers,
		);

		// Decode the opaque request back and check it targets the right block.
		let (_peer_id, mut opaque_request) = state_strategy.state_request().unwrap();
		let request: &mut StateRequest = opaque_request.0.downcast_mut().unwrap();
		let hash = Hash::decode(&mut &*request.block).unwrap();

		assert_eq!(hash, target_block.header().hash());
	}

	#[test]
	fn no_parallel_state_requests() {
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let target_block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;

		let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number));

		let mut state_strategy = StateStrategy::new(
			client.clone(),
			target_block.header().clone(),
			None,
			None,
			false,
			initial_peers,
		);

		// First request is sent.
		assert!(state_strategy.state_request().is_some());

		// No parallel request is sent.
		assert!(state_strategy.state_request().is_none());
	}

	#[test]
	fn received_state_response_makes_peer_available_again() {
		let mut state_sync_provider = MockStateSync::<Block>::new();
		state_sync_provider.expect_import().return_once(|_| ImportResult::Continue);
		let peer_id = PeerId::random();
		let initial_peers = std::iter::once((peer_id, 10));
		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers);
		// Manually set the peer's state.
		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;

		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
		state_strategy.on_state_response(peer_id, dummy_response);

		assert!(state_strategy.peers.get(&peer_id).unwrap().state.is_available());
	}

	#[test]
	fn bad_state_response_drops_peer() {
		let mut state_sync_provider = MockStateSync::<Block>::new();
		// Provider says that state response is bad.
		state_sync_provider.expect_import().return_once(|_| ImportResult::BadResponse);
		let peer_id = PeerId::random();
		let initial_peers = std::iter::once((peer_id, 10));
		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers);
		// Manually set the peer's state.
		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;
		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
		// Receiving response drops the peer.
		assert!(matches!(
			state_strategy.on_state_response_inner(peer_id, dummy_response),
			Err(BadPeer(id, _rep)) if id == peer_id,
		));
	}

	#[test]
	fn partial_state_response_doesnt_generate_actions() {
		let mut state_sync_provider = MockStateSync::<Block>::new();
		// Sync provider says that the response is partial.
		state_sync_provider.expect_import().return_once(|_| ImportResult::Continue);
		let peer_id = PeerId::random();
		let initial_peers = std::iter::once((peer_id, 10));
		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers);
		// Manually set the peer's state.
		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;

		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
		state_strategy.on_state_response(peer_id, dummy_response);

		// No actions generated.
		assert_eq!(state_strategy.actions.len(), 0)
	}

	#[test]
	fn complete_state_response_leads_to_block_import() {
		// Build block to use for checks.
		let client = Arc::new(TestClientBuilder::new().set_no_genesis().build());
		let mut block_builder = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap();
		block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap();
		let block = block_builder.build().unwrap().block;
		let header = block.header().clone();
		let hash = header.hash();
		let body = Some(block.extrinsics().iter().cloned().collect::<Vec<_>>());
		let state = ImportedState { block: hash, state: KeyValueStates(Vec::new()) };
		let justifications = Some(Justifications::from((*b"FRNK", Vec::new())));

		// Prepare `StateSync`
		let mut state_sync_provider = MockStateSync::<Block>::new();
		let import = ImportResult::Import(
			hash,
			header.clone(),
			state.clone(),
			body.clone(),
			justifications.clone(),
		);
		state_sync_provider.expect_import().return_once(move |_| import);

		// Reference values to check against.
		let expected_origin = BlockOrigin::NetworkInitialSync;
		let expected_block = IncomingBlock {
			hash,
			header: Some(header),
			body,
			indexed_body: None,
			justifications,
			origin: None,
			allow_missing_state: true,
			import_existing: true,
			skip_execution: true,
			state: Some(state),
		};
		let expected_blocks = vec![expected_block];

		// Prepare `StateStrategy`.
		let peer_id = PeerId::random();
		let initial_peers = std::iter::once((peer_id, 10));
		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers);
		// Manually set the peer's state.
		state_strategy.peers.get_mut(&peer_id).unwrap().state = PeerState::DownloadingState;

		// Receive response.
		let dummy_response = OpaqueStateResponse(Box::new(StateResponse::default()));
		state_strategy.on_state_response(peer_id, dummy_response);

		assert_eq!(state_strategy.actions.len(), 1);
		assert!(matches!(
			&state_strategy.actions[0],
			StateStrategyAction::ImportBlocks { origin, blocks }
				if *origin == expected_origin && *blocks == expected_blocks,
		));
	}

	#[test]
	fn importing_unknown_block_doesnt_finish_strategy() {
		let target_hash = Hash::random();
		let unknown_hash = Hash::random();
		let mut state_sync_provider = MockStateSync::<Block>::new();
		state_sync_provider.expect_target_hash().return_const(target_hash);

		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty());

		// Unknown block imported.
		state_strategy.on_blocks_processed(
			1,
			1,
			vec![(
				Ok(BlockImportStatus::ImportedUnknown(1, ImportedAux::default(), None)),
				unknown_hash,
			)],
		);

		// No actions generated.
		assert_eq!(state_strategy.actions.len(), 0);
	}

	#[test]
	fn succesfully_importing_target_block_finishes_strategy() {
		let target_hash = Hash::random();
		let mut state_sync_provider = MockStateSync::<Block>::new();
		state_sync_provider.expect_target_hash().return_const(target_hash);

		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty());

		// Target block imported.
		state_strategy.on_blocks_processed(
			1,
			1,
			vec![(
				Ok(BlockImportStatus::ImportedUnknown(1, ImportedAux::default(), None)),
				target_hash,
			)],
		);

		// Strategy finishes.
		assert_eq!(state_strategy.actions.len(), 1);
		assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished));
	}

	#[test]
	fn failure_to_import_target_block_finishes_strategy() {
		let target_hash = Hash::random();
		let mut state_sync_provider = MockStateSync::<Block>::new();
		state_sync_provider.expect_target_hash().return_const(target_hash);

		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), std::iter::empty());

		// Target block import failed. The strategy still finishes (with
		// `is_succeded() == false`).
		state_strategy.on_blocks_processed(
			1,
			1,
			vec![(
				Err(BlockImportError::VerificationFailed(None, String::from("test-error"))),
				target_hash,
			)],
		);

		// Strategy finishes.
		assert_eq!(state_strategy.actions.len(), 1);
		assert!(matches!(&state_strategy.actions[0], StateStrategyAction::Finished));
	}

	#[test]
	fn finished_strategy_doesnt_generate_more_actions() {
		let target_hash = Hash::random();
		let mut state_sync_provider = MockStateSync::<Block>::new();
		state_sync_provider.expect_target_hash().return_const(target_hash);
		state_sync_provider.expect_is_complete().return_const(true);

		// Get enough peers for possible spurious requests.
		let initial_peers = (1..=10).map(|best_number| (PeerId::random(), best_number));

		let mut state_strategy =
			StateStrategy::new_with_provider(Box::new(state_sync_provider), initial_peers);

		state_strategy.on_blocks_processed(
			1,
			1,
			vec![(
				Ok(BlockImportStatus::ImportedUnknown(1, ImportedAux::default(), None)),
				target_hash,
			)],
		);

		// Strategy finishes.
		let actions = state_strategy.actions().collect::<Vec<_>>();
		assert_eq!(actions.len(), 1);
		assert!(matches!(&actions[0], StateStrategyAction::Finished));

		// No more actions generated.
		assert_eq!(state_strategy.actions().count(), 0);
	}
}
|
||||
+86
-27
@@ -20,7 +20,7 @@
|
||||
|
||||
use crate::{
|
||||
schema::v1::{StateEntry, StateRequest, StateResponse},
|
||||
types::StateDownloadProgress,
|
||||
LOG_TARGET,
|
||||
};
|
||||
use codec::{Decode, Encode};
|
||||
use log::debug;
|
||||
@@ -32,7 +32,62 @@ use sp_runtime::{
|
||||
traits::{Block as BlockT, Header, NumberFor},
|
||||
Justifications,
|
||||
};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use std::{collections::HashMap, fmt, sync::Arc};
|
||||
|
||||
/// Generic state sync provider. Used for mocking in tests.
|
||||
pub trait StateSyncProvider<B: BlockT>: Send + Sync {
|
||||
/// Validate and import a state response.
|
||||
fn import(&mut self, response: StateResponse) -> ImportResult<B>;
|
||||
/// Produce next state request.
|
||||
fn next_request(&self) -> StateRequest;
|
||||
/// Check if the state is complete.
|
||||
fn is_complete(&self) -> bool;
|
||||
/// Returns target block number.
|
||||
fn target_number(&self) -> NumberFor<B>;
|
||||
/// Returns target block hash.
|
||||
fn target_hash(&self) -> B::Hash;
|
||||
/// Returns state sync estimated progress.
|
||||
fn progress(&self) -> StateSyncProgress;
|
||||
}
|
||||
|
||||
// Reported state sync phase.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub enum StateSyncPhase {
|
||||
// State download in progress.
|
||||
DownloadingState,
|
||||
// Download is complete, state is being imported.
|
||||
ImportingState,
|
||||
}
|
||||
|
||||
impl fmt::Display for StateSyncPhase {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Self::DownloadingState => write!(f, "Downloading state"),
|
||||
Self::ImportingState => write!(f, "Importing state"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reported state download progress.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub struct StateSyncProgress {
|
||||
/// Estimated download percentage.
|
||||
pub percentage: u32,
|
||||
/// Total state size in bytes downloaded so far.
|
||||
pub size: u64,
|
||||
/// Current state sync phase.
|
||||
pub phase: StateSyncPhase,
|
||||
}
|
||||
|
||||
/// Import state chunk result.
|
||||
pub enum ImportResult<B: BlockT> {
|
||||
/// State is complete and ready for import.
|
||||
Import(B::Hash, B::Header, ImportedState<B>, Option<Vec<B::Extrinsic>>, Option<Justifications>),
|
||||
/// Continue downloading.
|
||||
Continue,
|
||||
/// Bad state chunk.
|
||||
BadResponse,
|
||||
}
|
||||
|
||||
/// State sync state machine. Accumulates partial state data until it
|
||||
/// is ready to be imported.
|
||||
@@ -50,16 +105,6 @@ pub struct StateSync<B: BlockT, Client> {
|
||||
skip_proof: bool,
|
||||
}
|
||||
|
||||
/// Import state chunk result.
|
||||
pub enum ImportResult<B: BlockT> {
|
||||
/// State is complete and ready for import.
|
||||
Import(B::Hash, B::Header, ImportedState<B>, Option<Vec<B::Extrinsic>>, Option<Justifications>),
|
||||
/// Continue downloading.
|
||||
Continue,
|
||||
/// Bad state chunk.
|
||||
BadResponse,
|
||||
}
|
||||
|
||||
impl<B, Client> StateSync<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
@@ -87,24 +132,30 @@ where
|
||||
skip_proof,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, Client> StateSyncProvider<B> for StateSync<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
Client: ProofProvider<B> + Send + Sync + 'static,
|
||||
{
|
||||
/// Validate and import a state response.
|
||||
pub fn import(&mut self, response: StateResponse) -> ImportResult<B> {
|
||||
fn import(&mut self, response: StateResponse) -> ImportResult<B> {
|
||||
if response.entries.is_empty() && response.proof.is_empty() {
|
||||
debug!(target: "sync", "Bad state response");
|
||||
debug!(target: LOG_TARGET, "Bad state response");
|
||||
return ImportResult::BadResponse
|
||||
}
|
||||
if !self.skip_proof && response.proof.is_empty() {
|
||||
debug!(target: "sync", "Missing proof");
|
||||
debug!(target: LOG_TARGET, "Missing proof");
|
||||
return ImportResult::BadResponse
|
||||
}
|
||||
let complete = if !self.skip_proof {
|
||||
debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len());
|
||||
debug!(target: LOG_TARGET, "Importing state from {} trie nodes", response.proof.len());
|
||||
let proof_size = response.proof.len() as u64;
|
||||
let proof = match CompactProof::decode(&mut response.proof.as_ref()) {
|
||||
Ok(proof) => proof,
|
||||
Err(e) => {
|
||||
debug!(target: "sync", "Error decoding proof: {:?}", e);
|
||||
debug!(target: LOG_TARGET, "Error decoding proof: {:?}", e);
|
||||
return ImportResult::BadResponse
|
||||
},
|
||||
};
|
||||
@@ -115,7 +166,7 @@ where
|
||||
) {
|
||||
Err(e) => {
|
||||
debug!(
|
||||
target: "sync",
|
||||
target: LOG_TARGET,
|
||||
"StateResponse failed proof verification: {}",
|
||||
e,
|
||||
);
|
||||
@@ -123,11 +174,11 @@ where
|
||||
},
|
||||
Ok(values) => values,
|
||||
};
|
||||
debug!(target: "sync", "Imported with {} keys", values.len());
|
||||
debug!(target: LOG_TARGET, "Imported with {} keys", values.len());
|
||||
|
||||
let complete = completed == 0;
|
||||
if !complete && !values.update_last_key(completed, &mut self.last_key) {
|
||||
debug!(target: "sync", "Error updating key cursor, depth: {}", completed);
|
||||
debug!(target: LOG_TARGET, "Error updating key cursor, depth: {}", completed);
|
||||
};
|
||||
|
||||
for values in values.0 {
|
||||
@@ -185,7 +236,7 @@ where
|
||||
}
|
||||
for state in response.entries {
|
||||
debug!(
|
||||
target: "sync",
|
||||
target: LOG_TARGET,
|
||||
"Importing state from {:?} to {:?}",
|
||||
state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
|
||||
state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
|
||||
@@ -237,7 +288,7 @@ where
|
||||
}
|
||||
|
||||
/// Produce next state request.
|
||||
pub fn next_request(&self) -> StateRequest {
|
||||
fn next_request(&self) -> StateRequest {
|
||||
StateRequest {
|
||||
block: self.target_block.encode(),
|
||||
start: self.last_key.clone().into_vec(),
|
||||
@@ -246,24 +297,32 @@ where
|
||||
}
|
||||
|
||||
/// Check if the state is complete.
|
||||
pub fn is_complete(&self) -> bool {
|
||||
fn is_complete(&self) -> bool {
|
||||
self.complete
|
||||
}
|
||||
|
||||
/// Returns target block number.
|
||||
pub fn target_block_num(&self) -> NumberFor<B> {
|
||||
fn target_number(&self) -> NumberFor<B> {
|
||||
*self.target_header.number()
|
||||
}
|
||||
|
||||
/// Returns target block hash.
|
||||
pub fn target(&self) -> B::Hash {
|
||||
fn target_hash(&self) -> B::Hash {
|
||||
self.target_block
|
||||
}
|
||||
|
||||
/// Returns state sync estimated progress.
|
||||
pub fn progress(&self) -> StateDownloadProgress {
|
||||
fn progress(&self) -> StateSyncProgress {
|
||||
let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8);
|
||||
let percent_done = cursor as u32 * 100 / 256;
|
||||
StateDownloadProgress { percentage: percent_done, size: self.imported_bytes }
|
||||
StateSyncProgress {
|
||||
percentage: percent_done,
|
||||
size: self.imported_bytes,
|
||||
phase: if self.complete {
|
||||
StateSyncPhase::ImportingState
|
||||
} else {
|
||||
StateSyncPhase::DownloadingState
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -23,14 +23,12 @@ use sc_network_common::{role::Roles, types::ReputationChange};
|
||||
|
||||
use libp2p::PeerId;
|
||||
|
||||
use crate::warp::WarpSyncProgress;
|
||||
use crate::strategy::{state_sync::StateSyncProgress, warp::WarpSyncProgress};
|
||||
use sc_network_common::sync::message::BlockRequest;
|
||||
use sp_runtime::traits::{Block as BlockT, NumberFor};
|
||||
|
||||
use std::{any::Any, fmt, fmt::Formatter, pin::Pin, sync::Arc};
|
||||
|
||||
pub use sc_network_common::sync::SyncMode;
|
||||
|
||||
/// The sync status of a peer we are trying to sync with
|
||||
#[derive(Debug)]
|
||||
pub struct PeerInfo<Block: BlockT> {
|
||||
@@ -69,15 +67,6 @@ impl<BlockNumber> SyncState<BlockNumber> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Reported state download progress.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub struct StateDownloadProgress {
|
||||
/// Estimated download percentage.
|
||||
pub percentage: u32,
|
||||
/// Total state size in bytes downloaded so far.
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Syncing status and statistics.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SyncStatus<Block: BlockT> {
|
||||
@@ -92,7 +81,7 @@ pub struct SyncStatus<Block: BlockT> {
|
||||
/// Number of blocks queued for import
|
||||
pub queued_blocks: u32,
|
||||
/// State sync status in progress, if any.
|
||||
pub state_sync: Option<StateDownloadProgress>,
|
||||
pub state_sync: Option<StateSyncProgress>,
|
||||
/// Warp sync in progress, if any.
|
||||
pub warp_sync: Option<WarpSyncProgress<Block>>,
|
||||
}
|
||||
@@ -109,13 +98,6 @@ impl fmt::Display for BadPeer {
|
||||
|
||||
impl std::error::Error for BadPeer {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Metrics {
|
||||
pub queued_blocks: u32,
|
||||
pub fork_targets: u32,
|
||||
pub justifications: crate::request_metrics::Metrics,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum PeerRequest<B: BlockT> {
|
||||
Block(BlockRequest<B>),
|
||||
|
||||
@@ -1,405 +0,0 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Warp sync support.
|
||||
|
||||
pub use sp_consensus_grandpa::{AuthorityList, SetId};
|
||||
|
||||
use crate::{
|
||||
schema::v1::{StateRequest, StateResponse},
|
||||
state::{ImportResult, StateSync},
|
||||
};
|
||||
use codec::{Decode, Encode};
|
||||
use futures::channel::oneshot;
|
||||
use log::error;
|
||||
use sc_client_api::ProofProvider;
|
||||
use sc_network_common::sync::message::{
|
||||
BlockAttributes, BlockData, BlockRequest, Direction, FromBlock,
|
||||
};
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero};
|
||||
use std::{fmt, sync::Arc};
|
||||
|
||||
/// Log target for this file.
|
||||
const LOG_TARGET: &'static str = "sync";
|
||||
|
||||
/// Scale-encoded warp sync proof response.
|
||||
pub struct EncodedProof(pub Vec<u8>);
|
||||
|
||||
/// Warp sync request
|
||||
#[derive(Encode, Decode, Debug, Clone)]
|
||||
pub struct WarpProofRequest<B: BlockT> {
|
||||
/// Start collecting proofs from this block.
|
||||
pub begin: B::Hash,
|
||||
}
|
||||
|
||||
/// Proof verification result.
|
||||
pub enum VerificationResult<Block: BlockT> {
|
||||
/// Proof is valid, but the target was not reached.
|
||||
Partial(SetId, AuthorityList, Block::Hash),
|
||||
/// Target finality is proved.
|
||||
Complete(SetId, AuthorityList, Block::Header),
|
||||
}
|
||||
|
||||
/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
|
||||
pub trait WarpSyncProvider<Block: BlockT>: Send + Sync {
|
||||
/// Generate proof starting at given block hash. The proof is accumulated until maximum proof
|
||||
/// size is reached.
|
||||
fn generate(
|
||||
&self,
|
||||
start: Block::Hash,
|
||||
) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>;
|
||||
/// Verify warp proof against current set of authorities.
|
||||
fn verify(
|
||||
&self,
|
||||
proof: &EncodedProof,
|
||||
set_id: SetId,
|
||||
authorities: AuthorityList,
|
||||
) -> Result<VerificationResult<Block>, Box<dyn std::error::Error + Send + Sync>>;
|
||||
/// Get current list of authorities. This is supposed to be genesis authorities when starting
|
||||
/// sync.
|
||||
fn current_authorities(&self) -> AuthorityList;
|
||||
}
|
||||
|
||||
/// Reported warp sync phase.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub enum WarpSyncPhase<Block: BlockT> {
|
||||
/// Waiting for peers to connect.
|
||||
AwaitingPeers { required_peers: usize },
|
||||
/// Waiting for target block to be received.
|
||||
AwaitingTargetBlock,
|
||||
/// Downloading and verifying grandpa warp proofs.
|
||||
DownloadingWarpProofs,
|
||||
/// Downloading target block.
|
||||
DownloadingTargetBlock,
|
||||
/// Downloading state data.
|
||||
DownloadingState,
|
||||
/// Importing state.
|
||||
ImportingState,
|
||||
/// Downloading block history.
|
||||
DownloadingBlocks(NumberFor<Block>),
|
||||
}
|
||||
|
||||
impl<Block: BlockT> fmt::Display for WarpSyncPhase<Block> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Self::AwaitingPeers { required_peers } =>
|
||||
write!(f, "Waiting for {required_peers} peers to be connected"),
|
||||
Self::AwaitingTargetBlock => write!(f, "Waiting for target block to be received"),
|
||||
Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"),
|
||||
Self::DownloadingTargetBlock => write!(f, "Downloading target block"),
|
||||
Self::DownloadingState => write!(f, "Downloading state"),
|
||||
Self::ImportingState => write!(f, "Importing state"),
|
||||
Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reported warp sync progress.
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub struct WarpSyncProgress<Block: BlockT> {
|
||||
/// Estimated download percentage.
|
||||
pub phase: WarpSyncPhase<Block>,
|
||||
/// Total bytes downloaded so far.
|
||||
pub total_bytes: u64,
|
||||
}
|
||||
|
||||
/// The different types of warp syncing, passed to `build_network`.
|
||||
pub enum WarpSyncParams<Block: BlockT> {
|
||||
/// Standard warp sync for the chain.
|
||||
WithProvider(Arc<dyn WarpSyncProvider<Block>>),
|
||||
/// Skip downloading proofs and wait for a header of the state that should be downloaded.
|
||||
///
|
||||
/// It is expected that the header provider ensures that the header is trusted.
|
||||
WaitForTarget(oneshot::Receiver<<Block as BlockT>::Header>),
|
||||
}
|
||||
|
||||
/// Warp sync configuration as accepted by [`WarpSync`].
|
||||
pub enum WarpSyncConfig<Block: BlockT> {
|
||||
/// Standard warp sync for the chain.
|
||||
WithProvider(Arc<dyn WarpSyncProvider<Block>>),
|
||||
/// Skip downloading proofs and wait for a header of the state that should be downloaded.
|
||||
///
|
||||
/// It is expected that the header provider ensures that the header is trusted.
|
||||
WaitForTarget,
|
||||
}
|
||||
|
||||
impl<Block: BlockT> WarpSyncParams<Block> {
|
||||
/// Split `WarpSyncParams` into `WarpSyncConfig` and warp sync target block header receiver.
|
||||
pub fn split(
|
||||
self,
|
||||
) -> (WarpSyncConfig<Block>, Option<oneshot::Receiver<<Block as BlockT>::Header>>) {
|
||||
match self {
|
||||
WarpSyncParams::WithProvider(provider) =>
|
||||
(WarpSyncConfig::WithProvider(provider), None),
|
||||
WarpSyncParams::WaitForTarget(rx) => (WarpSyncConfig::WaitForTarget, Some(rx)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Warp sync phase.
|
||||
enum Phase<B: BlockT, Client> {
|
||||
/// Downloading warp proofs.
|
||||
WarpProof {
|
||||
set_id: SetId,
|
||||
authorities: AuthorityList,
|
||||
last_hash: B::Hash,
|
||||
warp_sync_provider: Arc<dyn WarpSyncProvider<B>>,
|
||||
},
|
||||
/// Waiting for target block to be set externally if we skip warp proofs downloading,
|
||||
/// and start straight from the target block (used by parachains warp sync).
|
||||
PendingTargetBlock,
|
||||
/// Downloading target block.
|
||||
TargetBlock(B::Header),
|
||||
/// Downloading state.
|
||||
State(StateSync<B, Client>),
|
||||
}
|
||||
|
||||
/// Import warp proof result.
|
||||
pub enum WarpProofImportResult {
|
||||
/// Import was successful.
|
||||
Success,
|
||||
/// Bad proof.
|
||||
BadResponse,
|
||||
}
|
||||
|
||||
/// Import target block result.
|
||||
pub enum TargetBlockImportResult {
|
||||
/// Import was successful.
|
||||
Success,
|
||||
/// Invalid block.
|
||||
BadResponse,
|
||||
}
|
||||
|
||||
/// Warp sync state machine. Accumulates warp proofs and state.
|
||||
pub struct WarpSync<B: BlockT, Client> {
|
||||
phase: Phase<B, Client>,
|
||||
client: Arc<Client>,
|
||||
total_proof_bytes: u64,
|
||||
}
|
||||
|
||||
impl<B, Client> WarpSync<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
Client: HeaderBackend<B> + ProofProvider<B> + 'static,
|
||||
{
|
||||
/// Create a new instance. When passing a warp sync provider we will be checking for proof and
|
||||
/// authorities. Alternatively we can pass a target block when we want to skip downloading
|
||||
/// proofs, in this case we will continue polling until the target block is known.
|
||||
pub fn new(client: Arc<Client>, warp_sync_config: WarpSyncConfig<B>) -> Self {
|
||||
let last_hash = client.hash(Zero::zero()).unwrap().expect("Genesis header always exists");
|
||||
match warp_sync_config {
|
||||
WarpSyncConfig::WithProvider(warp_sync_provider) => {
|
||||
let phase = Phase::WarpProof {
|
||||
set_id: 0,
|
||||
authorities: warp_sync_provider.current_authorities(),
|
||||
last_hash,
|
||||
warp_sync_provider: warp_sync_provider.clone(),
|
||||
};
|
||||
Self { client, phase, total_proof_bytes: 0 }
|
||||
},
|
||||
WarpSyncConfig::WaitForTarget =>
|
||||
Self { client, phase: Phase::PendingTargetBlock, total_proof_bytes: 0 },
|
||||
}
|
||||
}
|
||||
|
||||
/// Set target block externally in case we skip warp proof downloading.
|
||||
pub fn set_target_block(&mut self, header: B::Header) {
|
||||
let Phase::PendingTargetBlock = self.phase else {
|
||||
error!(
|
||||
target: LOG_TARGET,
|
||||
"Attempt to set warp sync target block in invalid phase.",
|
||||
);
|
||||
debug_assert!(false);
|
||||
return
|
||||
};
|
||||
|
||||
self.phase = Phase::TargetBlock(header);
|
||||
}
|
||||
|
||||
/// Validate and import a state response.
|
||||
pub fn import_state(&mut self, response: StateResponse) -> ImportResult<B> {
|
||||
match &mut self.phase {
|
||||
Phase::WarpProof { .. } | Phase::TargetBlock(_) | Phase::PendingTargetBlock { .. } => {
|
||||
log::debug!(target: "sync", "Unexpected state response");
|
||||
ImportResult::BadResponse
|
||||
},
|
||||
Phase::State(sync) => sync.import(response),
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate and import a warp proof response.
|
||||
pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult {
|
||||
match &mut self.phase {
|
||||
Phase::State(_) | Phase::TargetBlock(_) | Phase::PendingTargetBlock { .. } => {
|
||||
log::debug!(target: "sync", "Unexpected warp proof response");
|
||||
WarpProofImportResult::BadResponse
|
||||
},
|
||||
Phase::WarpProof { set_id, authorities, last_hash, warp_sync_provider } =>
|
||||
match warp_sync_provider.verify(&response, *set_id, authorities.clone()) {
|
||||
Err(e) => {
|
||||
log::debug!(target: "sync", "Bad warp proof response: {}", e);
|
||||
WarpProofImportResult::BadResponse
|
||||
},
|
||||
Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => {
|
||||
log::debug!(target: "sync", "Verified partial proof, set_id={:?}", new_set_id);
|
||||
*set_id = new_set_id;
|
||||
*authorities = new_authorities;
|
||||
*last_hash = new_last_hash;
|
||||
self.total_proof_bytes += response.0.len() as u64;
|
||||
WarpProofImportResult::Success
|
||||
},
|
||||
Ok(VerificationResult::Complete(new_set_id, _, header)) => {
|
||||
log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id);
|
||||
self.total_proof_bytes += response.0.len() as u64;
|
||||
self.phase = Phase::TargetBlock(header);
|
||||
WarpProofImportResult::Success
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Import the target block body.
|
||||
pub fn import_target_block(&mut self, block: BlockData<B>) -> TargetBlockImportResult {
|
||||
match &mut self.phase {
|
||||
Phase::WarpProof { .. } | Phase::State(_) | Phase::PendingTargetBlock { .. } => {
|
||||
log::debug!(target: "sync", "Unexpected target block response");
|
||||
TargetBlockImportResult::BadResponse
|
||||
},
|
||||
Phase::TargetBlock(header) =>
|
||||
if let Some(block_header) = &block.header {
|
||||
if block_header == header {
|
||||
if block.body.is_some() {
|
||||
let state_sync = StateSync::new(
|
||||
self.client.clone(),
|
||||
header.clone(),
|
||||
block.body,
|
||||
block.justifications,
|
||||
false,
|
||||
);
|
||||
self.phase = Phase::State(state_sync);
|
||||
TargetBlockImportResult::Success
|
||||
} else {
|
||||
log::debug!(
|
||||
target: "sync",
|
||||
"Importing target block failed: missing body.",
|
||||
);
|
||||
TargetBlockImportResult::BadResponse
|
||||
}
|
||||
} else {
|
||||
log::debug!(
|
||||
target: "sync",
|
||||
"Importing target block failed: different header.",
|
||||
);
|
||||
TargetBlockImportResult::BadResponse
|
||||
}
|
||||
} else {
|
||||
log::debug!(target: "sync", "Importing target block failed: missing header.");
|
||||
TargetBlockImportResult::BadResponse
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce next state request.
|
||||
pub fn next_state_request(&self) -> Option<StateRequest> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } | Phase::TargetBlock(_) | Phase::PendingTargetBlock { .. } =>
|
||||
None,
|
||||
Phase::State(sync) => Some(sync.next_request()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce next warp proof request.
|
||||
pub fn next_warp_proof_request(&self) -> Option<WarpProofRequest<B>> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }),
|
||||
Phase::TargetBlock(_) | Phase::State(_) | Phase::PendingTargetBlock { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce next target block request.
|
||||
pub fn next_target_block_request(&self) -> Option<(NumberFor<B>, BlockRequest<B>)> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } | Phase::State(_) | Phase::PendingTargetBlock { .. } => None,
|
||||
Phase::TargetBlock(header) => {
|
||||
let request = BlockRequest::<B> {
|
||||
id: 0,
|
||||
fields: BlockAttributes::HEADER |
|
||||
BlockAttributes::BODY | BlockAttributes::JUSTIFICATION,
|
||||
from: FromBlock::Hash(header.hash()),
|
||||
direction: Direction::Ascending,
|
||||
max: Some(1),
|
||||
};
|
||||
Some((*header.number(), request))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Return target block hash if it is known.
|
||||
pub fn target_block_hash(&self) -> Option<B::Hash> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } | Phase::TargetBlock(_) | Phase::PendingTargetBlock { .. } =>
|
||||
None,
|
||||
Phase::State(s) => Some(s.target()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return target block number if it is known.
|
||||
pub fn target_block_number(&self) -> Option<NumberFor<B>> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } | Phase::PendingTargetBlock { .. } => None,
|
||||
Phase::TargetBlock(header) => Some(*header.number()),
|
||||
Phase::State(s) => Some(s.target_block_num()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the state is complete.
|
||||
pub fn is_complete(&self) -> bool {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } | Phase::TargetBlock(_) | Phase::PendingTargetBlock { .. } =>
|
||||
false,
|
||||
Phase::State(sync) => sync.is_complete(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns state sync estimated progress (percentage, bytes)
|
||||
pub fn progress(&self) -> WarpSyncProgress<B> {
|
||||
match &self.phase {
|
||||
Phase::WarpProof { .. } => WarpSyncProgress {
|
||||
phase: WarpSyncPhase::DownloadingWarpProofs,
|
||||
total_bytes: self.total_proof_bytes,
|
||||
},
|
||||
Phase::TargetBlock(_) => WarpSyncProgress {
|
||||
phase: WarpSyncPhase::DownloadingTargetBlock,
|
||||
total_bytes: self.total_proof_bytes,
|
||||
},
|
||||
Phase::PendingTargetBlock { .. } => WarpSyncProgress {
|
||||
phase: WarpSyncPhase::AwaitingTargetBlock,
|
||||
total_bytes: self.total_proof_bytes,
|
||||
},
|
||||
Phase::State(sync) => WarpSyncProgress {
|
||||
phase: if self.is_complete() {
|
||||
WarpSyncPhase::ImportingState
|
||||
} else {
|
||||
WarpSyncPhase::DownloadingState
|
||||
},
|
||||
total_bytes: self.total_proof_bytes + sync.progress().size,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -20,7 +20,10 @@ use codec::Decode;
|
||||
use futures::{channel::oneshot, stream::StreamExt};
|
||||
use log::debug;
|
||||
|
||||
use crate::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider};
|
||||
use crate::{
|
||||
strategy::warp::{EncodedProof, WarpProofRequest, WarpSyncProvider},
|
||||
LOG_TARGET,
|
||||
};
|
||||
use sc_network::{
|
||||
config::ProtocolId,
|
||||
request_responses::{
|
||||
@@ -120,10 +123,10 @@ impl<TBlock: BlockT> RequestHandler<TBlock> {
|
||||
|
||||
match self.handle_request(payload, pending_response) {
|
||||
Ok(()) => {
|
||||
debug!(target: "sync", "Handled grandpa warp sync request from {}.", peer)
|
||||
debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer)
|
||||
},
|
||||
Err(e) => debug!(
|
||||
target: "sync",
|
||||
target: LOG_TARGET,
|
||||
"Failed to handle grandpa warp sync request from {}: {}",
|
||||
peer, e,
|
||||
),
|
||||
|
||||
Reference in New Issue
Block a user