mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 12:17:58 +00:00
Extract warp sync strategy from ChainSync (#2467)
Extract `WarpSync` (and `StateSync` as part of warp sync) from `ChainSync` as independent syncing strategy called by `SyncingEngine`. Introduce `SyncingStrategy` enum as a proxy between `SyncingEngine` and specific syncing strategies. ## Limitations Gap sync is kept in `ChainSync` for now because it shares the same set of peers as block syncing implementation in `ChainSync`. Extraction of a common context responsible for peer management in syncing strategies that can run in parallel is planned for a follow-up PR. ## Further improvements A possibility of conversion of `SyncingStrategy` into a trait should be evaluated. The main stopper for this is that different strategies need to communicate different actions to `SyncingEngine` and respond to different events / provide different APIs (e.g., requesting justifications is only possible via `ChainSync` and not through `WarpSync`; `SendWarpProofRequest` action is only relevant to `WarpSync`, etc.) --------- Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com>
This commit is contained in:
@@ -66,7 +66,7 @@ use sc_network_sync::{
|
||||
block_request_handler::BlockRequestHandler,
|
||||
service::{network::NetworkServiceProvider, syncing_service::SyncingService},
|
||||
state_request_handler::StateRequestHandler,
|
||||
warp::{
|
||||
strategy::warp::{
|
||||
AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider,
|
||||
},
|
||||
warp_request_handler,
|
||||
@@ -699,6 +699,8 @@ pub struct FullPeerConfig {
|
||||
pub storage_chain: bool,
|
||||
/// Optional target block header to sync to
|
||||
pub target_block: Option<<Block as BlockT>::Header>,
|
||||
/// Force genesis even in case of warp & light state sync.
|
||||
pub force_genesis: bool,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -758,7 +760,9 @@ pub trait TestNetFactory: Default + Sized + Send {
|
||||
*genesis_extra_storage = storage;
|
||||
}
|
||||
|
||||
if matches!(config.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp) {
|
||||
if !config.force_genesis &&
|
||||
matches!(config.sync_mode, SyncMode::LightState { .. } | SyncMode::Warp)
|
||||
{
|
||||
test_client_builder = test_client_builder.set_no_genesis();
|
||||
}
|
||||
let backend = test_client_builder.backend();
|
||||
|
||||
@@ -1232,12 +1232,14 @@ async fn warp_sync() {
|
||||
let target = net.peer(0).push_blocks(1, false).pop().unwrap();
|
||||
net.peer(1).push_blocks(64, false);
|
||||
net.peer(2).push_blocks(64, false);
|
||||
// Wait for peer 1 to sync state.
|
||||
// Wait for peer 3 to sync state.
|
||||
net.run_until_sync().await;
|
||||
// Make sure it was not a full sync.
|
||||
assert!(!net.peer(3).client().has_state_at(&BlockId::Number(1)));
|
||||
// Make sure warp sync was successful.
|
||||
assert!(net.peer(3).client().has_state_at(&BlockId::Number(64)));
|
||||
|
||||
// Wait for peer 1 download block history
|
||||
// Wait for peer 3 to download block history (gap sync).
|
||||
futures::future::poll_fn::<(), _>(|cx| {
|
||||
net.poll(cx);
|
||||
if net.peer(3).has_body(gap_end) && net.peer(3).has_body(target) {
|
||||
@@ -1249,6 +1251,35 @@ async fn warp_sync() {
|
||||
.await;
|
||||
}
|
||||
|
||||
/// If there is a finalized state in the DB, warp sync falls back to full sync.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn warp_sync_failover_to_full_sync() {
|
||||
sp_tracing::try_init_simple();
|
||||
let mut net = TestNet::new(0);
|
||||
// Create 3 synced peers and 1 peer trying to warp sync.
|
||||
net.add_full_peer_with_config(Default::default());
|
||||
net.add_full_peer_with_config(Default::default());
|
||||
net.add_full_peer_with_config(Default::default());
|
||||
net.add_full_peer_with_config(FullPeerConfig {
|
||||
sync_mode: SyncMode::Warp,
|
||||
// We want some finalized state in the DB to make warp sync impossible.
|
||||
force_genesis: true,
|
||||
..Default::default()
|
||||
});
|
||||
net.peer(0).push_blocks(64, false);
|
||||
net.peer(1).push_blocks(64, false);
|
||||
net.peer(2).push_blocks(64, false);
|
||||
// Even though we requested peer 3 to warp sync, it'll fall back to full sync if there is
|
||||
// a finalized state in the DB.
|
||||
assert!(net.peer(3).client().info().finalized_state.is_some());
|
||||
// Wait for peer 3 to sync.
|
||||
net.run_until_sync().await;
|
||||
// Make sure it was a full sync (peer 3 has state for all blocks).
|
||||
(1..65)
|
||||
.into_iter()
|
||||
.for_each(|i| assert!(net.peer(3).client().has_state_at(&BlockId::Number(i as u64))));
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
async fn warp_sync_to_target_block() {
|
||||
sp_tracing::try_init_simple();
|
||||
|
||||
Reference in New Issue
Block a user