Mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git, synced 2026-04-26 14:37:57 +00:00.
Network sync refactoring (part 2) (#11322)
* Move `api.v1.proto` schema into new crate `sc-network-sync` * Move `sc_network::protocol::sync::state` module into `sc_network_sync::state` * Move `sc_network::protocol::sync::blocks` module into `sc_network_sync::blocks` and some data structures from `sc_network::protocol::message` module into `sc_network_sync::message` * Move some data structures from `sc_network::config` and `sc_network::request_responses` into new `sc-network-common` crate * Move `sc_network::protocol::sync::warm` and `sc_network::warp_request_handler` modules into `sc_network_sync` * Move `client/network/sync/src/lib.rs` to `client/network/sync/src/lib_old.rs` to preserve history of changes of the file in the next commit * Move `client/network/src/protocol/sync.rs` on top of `client/network/sync/src/lib.rs` to preserve history of changes * Move `sc_network::protocol::sync` to `sc_network_sync` with submodules, move message data structures around accordingly * Move `sc_network::block_request_handler` to `sc_network_sync::block_request_handler` * Move `sc_network::state_request_handler` to `sc_network_sync::state_request_handler` * Add re-exports for compatibility reasons * Apply suggestions from code review Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com> Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
This commit is contained in:
@@ -0,0 +1,417 @@
|
||||
// Copyright 2020 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helper for handling (i.e. answering) block requests from a remote peer via the
|
||||
//! `crate::request_responses::RequestResponsesBehaviour`.
|
||||
|
||||
use crate::{
|
||||
message::BlockAttributes,
|
||||
schema::v1::{block_request::FromBlock, BlockResponse, Direction},
|
||||
};
|
||||
use codec::{Decode, Encode};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
stream::StreamExt,
|
||||
};
|
||||
use libp2p::PeerId;
|
||||
use log::debug;
|
||||
use lru::LruCache;
|
||||
use prost::Message;
|
||||
use sc_client_api::BlockBackend;
|
||||
use sc_network_common::{
|
||||
config::ProtocolId,
|
||||
request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
|
||||
};
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_runtime::{
|
||||
generic::BlockId,
|
||||
traits::{Block as BlockT, Header, One, Zero},
|
||||
};
|
||||
use std::{
|
||||
cmp::min,
|
||||
hash::{Hash, Hasher},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
const LOG_TARGET: &str = "sync";
|
||||
const MAX_BLOCKS_IN_RESPONSE: usize = 128;
|
||||
const MAX_BODY_BYTES: usize = 8 * 1024 * 1024;
|
||||
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
|
||||
|
||||
mod rep {
|
||||
use sc_peerset::ReputationChange as Rep;
|
||||
|
||||
/// Reputation change when a peer sent us the same request multiple times.
|
||||
pub const SAME_REQUEST: Rep = Rep::new_fatal("Same block request multiple times");
|
||||
|
||||
/// Reputation change when a peer sent us the same "small" request multiple times.
|
||||
pub const SAME_SMALL_REQUEST: Rep =
|
||||
Rep::new(-(1 << 10), "same small block request multiple times");
|
||||
}
|
||||
|
||||
/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests.
|
||||
pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
|
||||
ProtocolConfig {
|
||||
name: generate_protocol_name(protocol_id).into(),
|
||||
max_request_size: 1024 * 1024,
|
||||
max_response_size: 16 * 1024 * 1024,
|
||||
request_timeout: Duration::from_secs(20),
|
||||
inbound_queue: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate the block protocol name from chain specific protocol identifier.
|
||||
// Visibility `pub(crate)` to allow `crate::light_client_requests::sender` to generate block request
|
||||
// protocol name and send block requests.
|
||||
pub(crate) fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
|
||||
format!("/{}/sync/2", protocol_id.as_ref())
|
||||
}
|
||||
|
||||
/// The key of [`BlockRequestHandler::seen_requests`].
|
||||
#[derive(Eq, PartialEq, Clone)]
|
||||
struct SeenRequestsKey<B: BlockT> {
|
||||
peer: PeerId,
|
||||
from: BlockId<B>,
|
||||
max_blocks: usize,
|
||||
direction: Direction,
|
||||
attributes: BlockAttributes,
|
||||
support_multiple_justifications: bool,
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_hash_xor_eq)]
|
||||
impl<B: BlockT> Hash for SeenRequestsKey<B> {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.peer.hash(state);
|
||||
self.max_blocks.hash(state);
|
||||
self.direction.hash(state);
|
||||
self.attributes.hash(state);
|
||||
self.support_multiple_justifications.hash(state);
|
||||
match self.from {
|
||||
BlockId::Hash(h) => h.hash(state),
|
||||
BlockId::Number(n) => n.hash(state),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The value of [`BlockRequestHandler::seen_requests`].
|
||||
enum SeenRequestsValue {
|
||||
/// First time we have seen the request.
|
||||
First,
|
||||
/// We have fulfilled the request `n` times.
|
||||
Fulfilled(usize),
|
||||
}
|
||||
|
||||
/// Handler for incoming block requests from a remote peer.
|
||||
pub struct BlockRequestHandler<B: BlockT, Client> {
|
||||
client: Arc<Client>,
|
||||
request_receiver: mpsc::Receiver<IncomingRequest>,
|
||||
/// Maps from request to number of times we have seen this request.
|
||||
///
|
||||
/// This is used to check if a peer is spamming us with the same request.
|
||||
seen_requests: LruCache<SeenRequestsKey<B>, SeenRequestsValue>,
|
||||
}
|
||||
|
||||
impl<B, Client> BlockRequestHandler<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
Client: HeaderBackend<B> + BlockBackend<B> + Send + Sync + 'static,
|
||||
{
|
||||
/// Create a new [`BlockRequestHandler`].
|
||||
pub fn new(
|
||||
protocol_id: &ProtocolId,
|
||||
client: Arc<Client>,
|
||||
num_peer_hint: usize,
|
||||
) -> (Self, ProtocolConfig) {
|
||||
// Reserve enough request slots for one request per peer when we are at the maximum
|
||||
// number of peers.
|
||||
let (tx, request_receiver) = mpsc::channel(num_peer_hint);
|
||||
|
||||
let mut protocol_config = generate_protocol_config(protocol_id);
|
||||
protocol_config.inbound_queue = Some(tx);
|
||||
|
||||
let seen_requests = LruCache::new(num_peer_hint * 2);
|
||||
|
||||
(Self { client, request_receiver, seen_requests }, protocol_config)
|
||||
}
|
||||
|
||||
/// Run [`BlockRequestHandler`].
|
||||
pub async fn run(mut self) {
|
||||
while let Some(request) = self.request_receiver.next().await {
|
||||
let IncomingRequest { peer, payload, pending_response } = request;
|
||||
|
||||
match self.handle_request(payload, pending_response, &peer) {
|
||||
Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer),
|
||||
Err(e) => debug!(
|
||||
target: LOG_TARGET,
|
||||
"Failed to handle block request from {}: {}", peer, e,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
payload: Vec<u8>,
|
||||
pending_response: oneshot::Sender<OutgoingResponse>,
|
||||
peer: &PeerId,
|
||||
) -> Result<(), HandleRequestError> {
|
||||
let request = crate::schema::v1::BlockRequest::decode(&payload[..])?;
|
||||
|
||||
let from_block_id = match request.from_block.ok_or(HandleRequestError::MissingFromField)? {
|
||||
FromBlock::Hash(ref h) => {
|
||||
let h = Decode::decode(&mut h.as_ref())?;
|
||||
BlockId::<B>::Hash(h)
|
||||
},
|
||||
FromBlock::Number(ref n) => {
|
||||
let n = Decode::decode(&mut n.as_ref())?;
|
||||
BlockId::<B>::Number(n)
|
||||
},
|
||||
};
|
||||
|
||||
let max_blocks = if request.max_blocks == 0 {
|
||||
MAX_BLOCKS_IN_RESPONSE
|
||||
} else {
|
||||
min(request.max_blocks as usize, MAX_BLOCKS_IN_RESPONSE)
|
||||
};
|
||||
|
||||
let direction =
|
||||
Direction::from_i32(request.direction).ok_or(HandleRequestError::ParseDirection)?;
|
||||
|
||||
let attributes = BlockAttributes::from_be_u32(request.fields)?;
|
||||
|
||||
let support_multiple_justifications = request.support_multiple_justifications;
|
||||
|
||||
let key = SeenRequestsKey {
|
||||
peer: *peer,
|
||||
max_blocks,
|
||||
direction,
|
||||
from: from_block_id,
|
||||
attributes,
|
||||
support_multiple_justifications,
|
||||
};
|
||||
|
||||
let mut reputation_change = None;
|
||||
|
||||
match self.seen_requests.get_mut(&key) {
|
||||
Some(SeenRequestsValue::First) => {},
|
||||
Some(SeenRequestsValue::Fulfilled(ref mut requests)) => {
|
||||
*requests = requests.saturating_add(1);
|
||||
|
||||
let small_request = attributes
|
||||
.difference(BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION)
|
||||
.is_empty();
|
||||
|
||||
if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER {
|
||||
reputation_change = Some(if small_request {
|
||||
rep::SAME_SMALL_REQUEST
|
||||
} else {
|
||||
rep::SAME_REQUEST
|
||||
});
|
||||
}
|
||||
},
|
||||
None => {
|
||||
self.seen_requests.put(key.clone(), SeenRequestsValue::First);
|
||||
},
|
||||
}
|
||||
|
||||
debug!(
|
||||
target: LOG_TARGET,
|
||||
"Handling block request from {}: Starting at `{:?}` with maximum blocks \
|
||||
of `{}`, direction `{:?}` and attributes `{:?}`.",
|
||||
peer,
|
||||
from_block_id,
|
||||
max_blocks,
|
||||
direction,
|
||||
attributes,
|
||||
);
|
||||
|
||||
let result = if reputation_change.is_none() {
|
||||
let block_response = self.get_block_response(
|
||||
attributes,
|
||||
from_block_id,
|
||||
direction,
|
||||
max_blocks,
|
||||
support_multiple_justifications,
|
||||
)?;
|
||||
|
||||
// If any of the blocks contains any data, we can consider it as successful request.
|
||||
if block_response
|
||||
.blocks
|
||||
.iter()
|
||||
.any(|b| !b.header.is_empty() || !b.body.is_empty() || b.is_empty_justification)
|
||||
{
|
||||
if let Some(value) = self.seen_requests.get_mut(&key) {
|
||||
// If this is the first time we have processed this request, we need to change
|
||||
// it to `Fulfilled`.
|
||||
if let SeenRequestsValue::First = value {
|
||||
*value = SeenRequestsValue::Fulfilled(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut data = Vec::with_capacity(block_response.encoded_len());
|
||||
block_response.encode(&mut data)?;
|
||||
|
||||
Ok(data)
|
||||
} else {
|
||||
Err(())
|
||||
};
|
||||
|
||||
pending_response
|
||||
.send(OutgoingResponse {
|
||||
result,
|
||||
reputation_changes: reputation_change.into_iter().collect(),
|
||||
sent_feedback: None,
|
||||
})
|
||||
.map_err(|_| HandleRequestError::SendResponse)
|
||||
}
|
||||
|
||||
fn get_block_response(
|
||||
&self,
|
||||
attributes: BlockAttributes,
|
||||
mut block_id: BlockId<B>,
|
||||
direction: Direction,
|
||||
max_blocks: usize,
|
||||
support_multiple_justifications: bool,
|
||||
) -> Result<BlockResponse, HandleRequestError> {
|
||||
let get_header = attributes.contains(BlockAttributes::HEADER);
|
||||
let get_body = attributes.contains(BlockAttributes::BODY);
|
||||
let get_indexed_body = attributes.contains(BlockAttributes::INDEXED_BODY);
|
||||
let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION);
|
||||
|
||||
let mut blocks = Vec::new();
|
||||
|
||||
let mut total_size: usize = 0;
|
||||
while let Some(header) = self.client.header(block_id).unwrap_or_default() {
|
||||
let number = *header.number();
|
||||
let hash = header.hash();
|
||||
let parent_hash = *header.parent_hash();
|
||||
let justifications = if get_justification {
|
||||
self.client.justifications(&BlockId::Hash(hash))?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (justifications, justification, is_empty_justification) =
|
||||
if support_multiple_justifications {
|
||||
let justifications = match justifications {
|
||||
Some(v) => v.encode(),
|
||||
None => Vec::new(),
|
||||
};
|
||||
(justifications, Vec::new(), false)
|
||||
} else {
|
||||
// For now we keep compatibility by selecting precisely the GRANDPA one, and not
|
||||
// just the first one. When sending we could have just taken the first one,
|
||||
// since we don't expect there to be any other kind currently, but when
|
||||
// receiving we need to add the engine ID tag.
|
||||
// The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and
|
||||
// will be removed once we remove the backwards compatibility.
|
||||
// See: https://github.com/paritytech/substrate/issues/8172
|
||||
let justification =
|
||||
justifications.and_then(|just| just.into_justification(*b"FRNK"));
|
||||
|
||||
let is_empty_justification =
|
||||
justification.as_ref().map(|j| j.is_empty()).unwrap_or(false);
|
||||
|
||||
let justification = justification.unwrap_or_default();
|
||||
|
||||
(Vec::new(), justification, is_empty_justification)
|
||||
};
|
||||
|
||||
let body = if get_body {
|
||||
match self.client.block_body(&BlockId::Hash(hash))? {
|
||||
Some(mut extrinsics) =>
|
||||
extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(),
|
||||
None => {
|
||||
log::trace!(target: LOG_TARGET, "Missing data for block request.");
|
||||
break
|
||||
},
|
||||
}
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
let indexed_body = if get_indexed_body {
|
||||
match self.client.block_indexed_body(&BlockId::Hash(hash))? {
|
||||
Some(transactions) => transactions,
|
||||
None => {
|
||||
log::trace!(
|
||||
target: LOG_TARGET,
|
||||
"Missing indexed block data for block request."
|
||||
);
|
||||
// If the indexed body is missing we still continue returning headers.
|
||||
// Ideally `None` should distinguish a missing body from the empty body,
|
||||
// but the current protobuf based protocol does not allow it.
|
||||
Vec::new()
|
||||
},
|
||||
}
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
let block_data = crate::schema::v1::BlockData {
|
||||
hash: hash.encode(),
|
||||
header: if get_header { header.encode() } else { Vec::new() },
|
||||
body,
|
||||
receipt: Vec::new(),
|
||||
message_queue: Vec::new(),
|
||||
justification,
|
||||
is_empty_justification,
|
||||
justifications,
|
||||
indexed_body,
|
||||
};
|
||||
|
||||
total_size += block_data.body.iter().map(|ex| ex.len()).sum::<usize>();
|
||||
total_size += block_data.indexed_body.iter().map(|ex| ex.len()).sum::<usize>();
|
||||
blocks.push(block_data);
|
||||
|
||||
if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES {
|
||||
break
|
||||
}
|
||||
|
||||
match direction {
|
||||
Direction::Ascending => block_id = BlockId::Number(number + One::one()),
|
||||
Direction::Descending => {
|
||||
if number.is_zero() {
|
||||
break
|
||||
}
|
||||
block_id = BlockId::Hash(parent_hash)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
Ok(BlockResponse { blocks })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum HandleRequestError {
|
||||
#[error("Failed to decode request: {0}.")]
|
||||
DecodeProto(#[from] prost::DecodeError),
|
||||
#[error("Failed to encode response: {0}.")]
|
||||
EncodeProto(#[from] prost::EncodeError),
|
||||
#[error("Failed to decode block hash: {0}.")]
|
||||
DecodeScale(#[from] codec::Error),
|
||||
#[error("Missing `BlockRequest::from_block` field.")]
|
||||
MissingFromField,
|
||||
#[error("Failed to parse BlockRequest::direction.")]
|
||||
ParseDirection,
|
||||
#[error(transparent)]
|
||||
Client(#[from] sp_blockchain::Error),
|
||||
#[error("Failed to send response.")]
|
||||
SendResponse,
|
||||
}
|
||||
@@ -0,0 +1,347 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::message;
|
||||
use libp2p::PeerId;
|
||||
use log::trace;
|
||||
use sp_runtime::traits::{Block as BlockT, NumberFor, One};
|
||||
use std::{
|
||||
cmp,
|
||||
collections::{BTreeMap, HashMap},
|
||||
ops::Range,
|
||||
};
|
||||
|
||||
/// Block data with origin.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct BlockData<B: BlockT> {
|
||||
/// The Block Message from the wire
|
||||
pub block: message::BlockData<B>,
|
||||
/// The peer, we received this from
|
||||
pub origin: Option<PeerId>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum BlockRangeState<B: BlockT> {
|
||||
Downloading { len: NumberFor<B>, downloading: u32 },
|
||||
Complete(Vec<BlockData<B>>),
|
||||
}
|
||||
|
||||
impl<B: BlockT> BlockRangeState<B> {
|
||||
pub fn len(&self) -> NumberFor<B> {
|
||||
match *self {
|
||||
Self::Downloading { len, .. } => len,
|
||||
Self::Complete(ref blocks) => (blocks.len() as u32).into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A collection of blocks being downloaded.
|
||||
#[derive(Default)]
|
||||
pub struct BlockCollection<B: BlockT> {
|
||||
/// Downloaded blocks.
|
||||
blocks: BTreeMap<NumberFor<B>, BlockRangeState<B>>,
|
||||
peer_requests: HashMap<PeerId, NumberFor<B>>,
|
||||
}
|
||||
|
||||
impl<B: BlockT> BlockCollection<B> {
|
||||
/// Create a new instance.
|
||||
pub fn new() -> Self {
|
||||
Self { blocks: BTreeMap::new(), peer_requests: HashMap::new() }
|
||||
}
|
||||
|
||||
/// Clear everything.
|
||||
pub fn clear(&mut self) {
|
||||
self.blocks.clear();
|
||||
self.peer_requests.clear();
|
||||
}
|
||||
|
||||
/// Insert a set of blocks into collection.
|
||||
pub fn insert(&mut self, start: NumberFor<B>, blocks: Vec<message::BlockData<B>>, who: PeerId) {
|
||||
if blocks.is_empty() {
|
||||
return
|
||||
}
|
||||
|
||||
match self.blocks.get(&start) {
|
||||
Some(&BlockRangeState::Downloading { .. }) => {
|
||||
trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start);
|
||||
},
|
||||
Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => {
|
||||
trace!(target: "sync", "Ignored block data already downloaded: {}", start);
|
||||
return
|
||||
},
|
||||
_ => (),
|
||||
}
|
||||
|
||||
self.blocks.insert(
|
||||
start,
|
||||
BlockRangeState::Complete(
|
||||
blocks.into_iter().map(|b| BlockData { origin: Some(who), block: b }).collect(),
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
/// Returns a set of block hashes that require a header download. The returned set is marked as
|
||||
/// being downloaded.
|
||||
pub fn needed_blocks(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
count: usize,
|
||||
peer_best: NumberFor<B>,
|
||||
common: NumberFor<B>,
|
||||
max_parallel: u32,
|
||||
max_ahead: u32,
|
||||
) -> Option<Range<NumberFor<B>>> {
|
||||
if peer_best <= common {
|
||||
// Bail out early
|
||||
return None
|
||||
}
|
||||
// First block number that we need to download
|
||||
let first_different = common + <NumberFor<B>>::one();
|
||||
let count = (count as u32).into();
|
||||
let (mut range, downloading) = {
|
||||
let mut downloading_iter = self.blocks.iter().peekable();
|
||||
let mut prev: Option<(&NumberFor<B>, &BlockRangeState<B>)> = None;
|
||||
loop {
|
||||
let next = downloading_iter.next();
|
||||
break match (prev, next) {
|
||||
(Some((start, &BlockRangeState::Downloading { ref len, downloading })), _)
|
||||
if downloading < max_parallel =>
|
||||
(*start..*start + *len, downloading),
|
||||
(Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start =>
|
||||
(*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap
|
||||
(Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */
|
||||
(None, None) => (first_different..first_different + count, 0), /* empty */
|
||||
(None, Some((start, _))) if *start > first_different =>
|
||||
(first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */
|
||||
_ => {
|
||||
prev = next;
|
||||
continue
|
||||
},
|
||||
}
|
||||
}
|
||||
};
|
||||
// crop to peers best
|
||||
if range.start > peer_best {
|
||||
trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best);
|
||||
return None
|
||||
}
|
||||
range.end = cmp::min(peer_best + One::one(), range.end);
|
||||
|
||||
if self
|
||||
.blocks
|
||||
.iter()
|
||||
.next()
|
||||
.map_or(false, |(n, _)| range.start > *n + max_ahead.into())
|
||||
{
|
||||
trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start);
|
||||
return None
|
||||
}
|
||||
|
||||
self.peer_requests.insert(who, range.start);
|
||||
self.blocks.insert(
|
||||
range.start,
|
||||
BlockRangeState::Downloading {
|
||||
len: range.end - range.start,
|
||||
downloading: downloading + 1,
|
||||
},
|
||||
);
|
||||
if range.end <= range.start {
|
||||
panic!(
|
||||
"Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}",
|
||||
range, count, peer_best, common, self.blocks
|
||||
);
|
||||
}
|
||||
Some(range)
|
||||
}
|
||||
|
||||
/// Get a valid chain of blocks ordered in descending order and ready for importing into
|
||||
/// blockchain.
|
||||
pub fn drain(&mut self, from: NumberFor<B>) -> Vec<BlockData<B>> {
|
||||
let mut drained = Vec::new();
|
||||
let mut ranges = Vec::new();
|
||||
|
||||
let mut prev = from;
|
||||
for (start, range_data) in &mut self.blocks {
|
||||
match range_data {
|
||||
BlockRangeState::Complete(blocks) if *start <= prev => {
|
||||
prev = *start + (blocks.len() as u32).into();
|
||||
// Remove all elements from `blocks` and add them to `drained`
|
||||
drained.append(blocks);
|
||||
ranges.push(*start);
|
||||
},
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
|
||||
for r in ranges {
|
||||
self.blocks.remove(&r);
|
||||
}
|
||||
trace!(target: "sync", "Drained {} blocks from {:?}", drained.len(), from);
|
||||
drained
|
||||
}
|
||||
|
||||
pub fn clear_peer_download(&mut self, who: &PeerId) {
|
||||
if let Some(start) = self.peer_requests.remove(who) {
|
||||
let remove = match self.blocks.get_mut(&start) {
|
||||
Some(&mut BlockRangeState::Downloading { ref mut downloading, .. })
|
||||
if *downloading > 1 =>
|
||||
{
|
||||
*downloading -= 1;
|
||||
false
|
||||
},
|
||||
Some(&mut BlockRangeState::Downloading { .. }) => true,
|
||||
_ => false,
|
||||
};
|
||||
if remove {
|
||||
self.blocks.remove(&start);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::{BlockCollection, BlockData, BlockRangeState};
|
||||
use crate::message;
|
||||
use libp2p::PeerId;
|
||||
use sp_core::H256;
|
||||
use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper};
|
||||
|
||||
type Block = RawBlock<ExtrinsicWrapper<u64>>;
|
||||
|
||||
fn is_empty(bc: &BlockCollection<Block>) -> bool {
|
||||
bc.blocks.is_empty() && bc.peer_requests.is_empty()
|
||||
}
|
||||
|
||||
fn generate_blocks(n: usize) -> Vec<message::BlockData<Block>> {
|
||||
(0..n)
|
||||
.map(|_| message::generic::BlockData {
|
||||
hash: H256::random(),
|
||||
header: None,
|
||||
body: None,
|
||||
indexed_body: None,
|
||||
message_queue: None,
|
||||
receipt: None,
|
||||
justification: None,
|
||||
justifications: None,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn create_clear() {
|
||||
let mut bc = BlockCollection::new();
|
||||
assert!(is_empty(&bc));
|
||||
bc.insert(1, generate_blocks(100), PeerId::random());
|
||||
assert!(!is_empty(&bc));
|
||||
bc.clear();
|
||||
assert!(is_empty(&bc));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_blocks() {
|
||||
let mut bc = BlockCollection::new();
|
||||
assert!(is_empty(&bc));
|
||||
let peer0 = PeerId::random();
|
||||
let peer1 = PeerId::random();
|
||||
let peer2 = PeerId::random();
|
||||
|
||||
let blocks = generate_blocks(150);
|
||||
assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41));
|
||||
assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81));
|
||||
assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121));
|
||||
|
||||
bc.clear_peer_download(&peer1);
|
||||
bc.insert(41, blocks[41..81].to_vec(), peer1.clone());
|
||||
assert_eq!(bc.drain(1), vec![]);
|
||||
assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151));
|
||||
bc.clear_peer_download(&peer0);
|
||||
bc.insert(1, blocks[1..11].to_vec(), peer0.clone());
|
||||
|
||||
assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41));
|
||||
assert_eq!(
|
||||
bc.drain(1),
|
||||
blocks[1..11]
|
||||
.iter()
|
||||
.map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) })
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
bc.clear_peer_download(&peer0);
|
||||
bc.insert(11, blocks[11..41].to_vec(), peer0.clone());
|
||||
|
||||
let drained = bc.drain(12);
|
||||
assert_eq!(
|
||||
drained[..30],
|
||||
blocks[11..41]
|
||||
.iter()
|
||||
.map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) })
|
||||
.collect::<Vec<_>>()[..]
|
||||
);
|
||||
assert_eq!(
|
||||
drained[30..],
|
||||
blocks[41..81]
|
||||
.iter()
|
||||
.map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) })
|
||||
.collect::<Vec<_>>()[..]
|
||||
);
|
||||
|
||||
bc.clear_peer_download(&peer2);
|
||||
assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121));
|
||||
bc.clear_peer_download(&peer2);
|
||||
bc.insert(81, blocks[81..121].to_vec(), peer2.clone());
|
||||
bc.clear_peer_download(&peer1);
|
||||
bc.insert(121, blocks[121..150].to_vec(), peer1.clone());
|
||||
|
||||
assert_eq!(bc.drain(80), vec![]);
|
||||
let drained = bc.drain(81);
|
||||
assert_eq!(
|
||||
drained[..40],
|
||||
blocks[81..121]
|
||||
.iter()
|
||||
.map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) })
|
||||
.collect::<Vec<_>>()[..]
|
||||
);
|
||||
assert_eq!(
|
||||
drained[40..],
|
||||
blocks[121..150]
|
||||
.iter()
|
||||
.map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) })
|
||||
.collect::<Vec<_>>()[..]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn large_gap() {
|
||||
let mut bc: BlockCollection<Block> = BlockCollection::new();
|
||||
bc.blocks.insert(100, BlockRangeState::Downloading { len: 128, downloading: 1 });
|
||||
let blocks = generate_blocks(10)
|
||||
.into_iter()
|
||||
.map(|b| BlockData { block: b, origin: None })
|
||||
.collect();
|
||||
bc.blocks.insert(114305, BlockRangeState::Complete(blocks));
|
||||
|
||||
let peer0 = PeerId::random();
|
||||
assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100));
|
||||
assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead
|
||||
assert_eq!(
|
||||
bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000),
|
||||
Some(100 + 128..100 + 128 + 128)
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,595 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use crate::{PeerSync, PeerSyncState};
|
||||
use fork_tree::ForkTree;
|
||||
use libp2p::PeerId;
|
||||
use log::{debug, trace, warn};
|
||||
use sp_blockchain::Error as ClientError;
|
||||
use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet, VecDeque},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
// Time to wait before trying to get the same extra data from the same peer.
|
||||
const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10);
|
||||
|
||||
/// Pending extra data request for the given block (hash and number).
|
||||
type ExtraRequest<B> = (<B as BlockT>::Hash, NumberFor<B>);
|
||||
|
||||
/// Manages pending block extra data (e.g. justification) requests.
|
||||
///
|
||||
/// Multiple extras may be requested for competing forks, or for the same branch
|
||||
/// at different (increasing) heights. This structure will guarantee that extras
|
||||
/// are fetched in-order, and that obsolete changes are pruned (when finalizing a
|
||||
/// competing fork).
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ExtraRequests<B: BlockT> {
|
||||
tree: ForkTree<B::Hash, NumberFor<B>, ()>,
|
||||
/// best finalized block number that we have seen since restart
|
||||
best_seen_finalized_number: NumberFor<B>,
|
||||
/// requests which have been queued for later processing
|
||||
pending_requests: VecDeque<ExtraRequest<B>>,
|
||||
/// requests which are currently underway to some peer
|
||||
active_requests: HashMap<PeerId, ExtraRequest<B>>,
|
||||
/// previous requests without response
|
||||
failed_requests: HashMap<ExtraRequest<B>, Vec<(PeerId, Instant)>>,
|
||||
/// successful requests
|
||||
importing_requests: HashSet<ExtraRequest<B>>,
|
||||
/// the name of this type of extra request (useful for logging.)
|
||||
request_type_name: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Metrics {
|
||||
pub pending_requests: u32,
|
||||
pub active_requests: u32,
|
||||
pub importing_requests: u32,
|
||||
pub failed_requests: u32,
|
||||
_priv: (),
|
||||
}
|
||||
|
||||
impl<B: BlockT> ExtraRequests<B> {
|
||||
pub(crate) fn new(request_type_name: &'static str) -> Self {
|
||||
Self {
|
||||
tree: ForkTree::new(),
|
||||
best_seen_finalized_number: Zero::zero(),
|
||||
pending_requests: VecDeque::new(),
|
||||
active_requests: HashMap::new(),
|
||||
failed_requests: HashMap::new(),
|
||||
importing_requests: HashSet::new(),
|
||||
request_type_name,
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset all state as if returned from `new`.
|
||||
pub(crate) fn reset(&mut self) {
|
||||
self.tree = ForkTree::new();
|
||||
self.pending_requests.clear();
|
||||
self.active_requests.clear();
|
||||
self.failed_requests.clear();
|
||||
}
|
||||
|
||||
/// Returns an iterator-like struct that yields peers which extra
|
||||
/// requests can be sent to.
|
||||
pub(crate) fn matcher(&mut self) -> Matcher<B> {
|
||||
Matcher::new(self)
|
||||
}
|
||||
|
||||
/// Queue an extra data request to be considered by the `Matcher`.
|
||||
pub(crate) fn schedule<F>(&mut self, request: ExtraRequest<B>, is_descendent_of: F)
|
||||
where
|
||||
F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>,
|
||||
{
|
||||
match self.tree.import(request.0, request.1, (), &is_descendent_of) {
|
||||
Ok(true) => {
|
||||
// this is a new root so we add it to the current `pending_requests`
|
||||
self.pending_requests.push_back((request.0, request.1));
|
||||
},
|
||||
Err(fork_tree::Error::Revert) => {
|
||||
// we have finalized further than the given request, presumably
|
||||
// by some other part of the system (not sync). we can safely
|
||||
// ignore the `Revert` error.
|
||||
},
|
||||
Err(err) => {
|
||||
debug!(target: "sync", "Failed to insert request {:?} into tree: {}", request, err);
|
||||
},
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
/// Retry any pending request if a peer disconnected.
|
||||
pub(crate) fn peer_disconnected(&mut self, who: &PeerId) {
|
||||
if let Some(request) = self.active_requests.remove(who) {
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes the response for the request previously sent to the given peer.
|
||||
pub(crate) fn on_response<R>(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
resp: Option<R>,
|
||||
) -> Option<(PeerId, B::Hash, NumberFor<B>, R)> {
|
||||
// we assume that the request maps to the given response, this is
|
||||
// currently enforced by the outer network protocol before passing on
|
||||
// messages to chain sync.
|
||||
if let Some(request) = self.active_requests.remove(&who) {
|
||||
if let Some(r) = resp {
|
||||
trace!(target: "sync",
|
||||
"Queuing import of {} from {:?} for {:?}",
|
||||
self.request_type_name, who, request,
|
||||
);
|
||||
|
||||
self.importing_requests.insert(request);
|
||||
return Some((who, request.0, request.1, r))
|
||||
} else {
|
||||
trace!(target: "sync",
|
||||
"Empty {} response from {:?} for {:?}",
|
||||
self.request_type_name, who, request,
|
||||
);
|
||||
}
|
||||
self.failed_requests.entry(request).or_default().push((who, Instant::now()));
|
||||
self.pending_requests.push_front(request);
|
||||
} else {
|
||||
trace!(target: "sync",
|
||||
"No active {} request to {:?}",
|
||||
self.request_type_name, who,
|
||||
);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Removes any pending extra requests for blocks lower than the given best finalized.
|
||||
pub(crate) fn on_block_finalized<F>(
|
||||
&mut self,
|
||||
best_finalized_hash: &B::Hash,
|
||||
best_finalized_number: NumberFor<B>,
|
||||
is_descendent_of: F,
|
||||
) -> Result<(), fork_tree::Error<ClientError>>
|
||||
where
|
||||
F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>,
|
||||
{
|
||||
let request = (*best_finalized_hash, best_finalized_number);
|
||||
|
||||
if self.try_finalize_root::<()>(request, Ok(request), false) {
|
||||
return Ok(())
|
||||
}
|
||||
|
||||
if best_finalized_number > self.best_seen_finalized_number {
|
||||
// we receive finality notification only for the finalized branch head.
|
||||
match self.tree.finalize_with_ancestors(
|
||||
best_finalized_hash,
|
||||
best_finalized_number,
|
||||
&is_descendent_of,
|
||||
) {
|
||||
Err(fork_tree::Error::Revert) => {
|
||||
// we might have finalized further already in which case we
|
||||
// will get a `Revert` error which we can safely ignore.
|
||||
},
|
||||
Err(err) => return Err(err),
|
||||
Ok(_) => {},
|
||||
}
|
||||
|
||||
self.best_seen_finalized_number = best_finalized_number;
|
||||
}
|
||||
|
||||
let roots = self.tree.roots().collect::<HashSet<_>>();
|
||||
|
||||
self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
|
||||
self.active_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
|
||||
self.failed_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Try to finalize pending root.
|
||||
///
|
||||
/// Returns true if import of this request has been scheduled.
|
||||
pub(crate) fn try_finalize_root<E>(
|
||||
&mut self,
|
||||
request: ExtraRequest<B>,
|
||||
result: Result<ExtraRequest<B>, E>,
|
||||
reschedule_on_failure: bool,
|
||||
) -> bool {
|
||||
if !self.importing_requests.remove(&request) {
|
||||
return false
|
||||
}
|
||||
|
||||
let (finalized_hash, finalized_number) = match result {
|
||||
Ok(req) => (req.0, req.1),
|
||||
Err(_) => {
|
||||
if reschedule_on_failure {
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
return true
|
||||
},
|
||||
};
|
||||
|
||||
if self.tree.finalize_root(&finalized_hash).is_none() {
|
||||
warn!(target: "sync",
|
||||
"‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}",
|
||||
finalized_hash, finalized_number, self.tree.roots().collect::<Vec<_>>()
|
||||
);
|
||||
return true
|
||||
}
|
||||
|
||||
self.failed_requests.clear();
|
||||
self.active_requests.clear();
|
||||
self.pending_requests.clear();
|
||||
self.pending_requests.extend(self.tree.roots().map(|(&h, &n, _)| (h, n)));
|
||||
self.best_seen_finalized_number = finalized_number;
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Returns an iterator over all active (in-flight) requests and associated peer id.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn active_requests(&self) -> impl Iterator<Item = (&PeerId, &ExtraRequest<B>)> {
|
||||
self.active_requests.iter()
|
||||
}
|
||||
|
||||
/// Returns an iterator over all scheduled pending requests.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn pending_requests(&self) -> impl Iterator<Item = &ExtraRequest<B>> {
|
||||
self.pending_requests.iter()
|
||||
}
|
||||
|
||||
/// Get some key metrics.
|
||||
pub(crate) fn metrics(&self) -> Metrics {
|
||||
Metrics {
|
||||
pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX),
|
||||
active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX),
|
||||
failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX),
|
||||
importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX),
|
||||
_priv: (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Matches peers with pending extra requests.
|
||||
#[derive(Debug)]
pub(crate) struct Matcher<'a, B: BlockT> {
	/// Length of pending requests collection.
	/// Used to ensure we do not loop more than once over all pending requests.
	remaining: usize,
	/// The request state this matcher pops pending requests from and records
	/// dispatched requests into.
	extras: &'a mut ExtraRequests<B>,
}
|
||||
|
||||
impl<'a, B: BlockT> Matcher<'a, B> {
	/// Build a matcher over the current pending requests of `extras`.
	fn new(extras: &'a mut ExtraRequests<B>) -> Self {
		Self { remaining: extras.pending_requests.len(), extras }
	}

	/// Finds a peer to which a pending request can be sent.
	///
	/// Peers are filtered according to the current known best block (i.e. we won't
	/// send an extra request for block #10 to a peer at block #2), and we also
	/// throttle requests to the same peer if a previous request yielded no results.
	///
	/// This method returns as soon as it finds a peer that should be able to answer
	/// our request. If no request is pending or no peer can handle it, `None` is
	/// returned instead.
	///
	/// # Note
	///
	/// The returned `PeerId` (if any) is guaranteed to come from the given `peers`
	/// argument.
	pub(crate) fn next(
		&mut self,
		peers: &HashMap<PeerId, PeerSync<B>>,
	) -> Option<(PeerId, ExtraRequest<B>)> {
		if self.remaining == 0 {
			return None
		}

		// clean up previously failed requests so we can retry again
		for requests in self.extras.failed_requests.values_mut() {
			// drop failure records older than EXTRA_RETRY_WAIT so those peers
			// become eligible again
			requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT);
		}

		while let Some(request) = self.extras.pending_requests.pop_front() {
			for (peer, sync) in
				peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available)
			{
				// only ask peers that have synced at least up to the block number that we're asking
				// the extra for
				if sync.best_number < request.1 {
					continue
				}
				// don't request to any peers that already have pending requests
				if self.extras.active_requests.contains_key(peer) {
					continue
				}
				// only ask if the same request has not failed for this peer before
				if self
					.extras
					.failed_requests
					.get(&request)
					.map(|rr| rr.iter().any(|i| &i.0 == peer))
					.unwrap_or(false)
				{
					continue
				}
				// mark the request as in-flight to this peer and hand it out
				self.extras.active_requests.insert(*peer, request);

				trace!(target: "sync",
					"Sending {} request to {:?} for {:?}",
					self.extras.request_type_name, peer, request,
				);

				return Some((*peer, request))
			}

			// no peer can serve this request right now: rotate it to the back
			// of the queue and account for one pass over the queue
			self.extras.pending_requests.push_back(request);
			self.remaining -= 1;

			if self.remaining == 0 {
				break
			}
		}

		None
	}
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;
	use crate::PeerSync;
	use quickcheck::{Arbitrary, Gen, QuickCheck};
	use sp_blockchain::Error as ClientError;
	use sp_test_primitives::{Block, BlockNumber, Hash};
	use std::collections::{HashMap, HashSet};

	#[test]
	fn requests_are_processed_in_order() {
		fn property(mut peers: ArbitraryPeers) {
			let mut requests = ExtraRequests::<Block>::new("test");

			let num_peers_available =
				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();

			// schedule one request per available peer so each can be matched
			for i in 0..num_peers_available {
				requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0]))
			}

			let pending = requests.pending_requests.clone();
			let mut m = requests.matcher();

			// requests must come out of the matcher in scheduling order
			for p in &pending {
				let (peer, r) = m.next(&peers.0).unwrap();
				assert_eq!(p, &r);
				// mark the peer busy so the matcher moves on to another peer
				peers.0.get_mut(&peer).unwrap().state =
					PeerSyncState::DownloadingJustification(r.0);
			}
		}

		QuickCheck::new().quickcheck(property as fn(ArbitraryPeers))
	}

	#[test]
	fn new_roots_schedule_new_request() {
		fn property(data: Vec<BlockNumber>) {
			let mut requests = ExtraRequests::<Block>::new("test");
			for (i, number) in data.into_iter().enumerate() {
				let hash = [i as u8; 32].into();
				let pending = requests.pending_requests.len();
				let is_root = requests.tree.roots().any(|(&h, &n, _)| hash == h && number == n);
				requests.schedule((hash, number), |a, b| Ok(a[0] >= b[0]));
				// scheduling a non-root must add exactly one pending request
				if !is_root {
					assert_eq!(1 + pending, requests.pending_requests.len())
				}
			}
		}
		QuickCheck::new().quickcheck(property as fn(Vec<BlockNumber>))
	}

	#[test]
	fn disconnecting_implies_rescheduling() {
		fn property(mut peers: ArbitraryPeers) -> bool {
			let mut requests = ExtraRequests::<Block>::new("test");

			let num_peers_available =
				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();

			for i in 0..num_peers_available {
				requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0]))
			}

			// dispatch every request to some available peer
			let mut m = requests.matcher();
			while let Some((peer, r)) = m.next(&peers.0) {
				peers.0.get_mut(&peer).unwrap().state =
					PeerSyncState::DownloadingJustification(r.0);
			}

			assert!(requests.pending_requests.is_empty());

			let active_peers = requests.active_requests.keys().cloned().collect::<Vec<_>>();
			let previously_active =
				requests.active_requests.values().cloned().collect::<HashSet<_>>();

			// disconnecting every active peer must push each request back
			for peer in &active_peers {
				requests.peer_disconnected(peer)
			}

			assert!(requests.active_requests.is_empty());

			previously_active == requests.pending_requests.iter().cloned().collect::<HashSet<_>>()
		}

		QuickCheck::new().quickcheck(property as fn(ArbitraryPeers) -> bool)
	}

	#[test]
	fn no_response_reschedules() {
		fn property(mut peers: ArbitraryPeers) {
			let mut requests = ExtraRequests::<Block>::new("test");

			let num_peers_available =
				peers.0.values().filter(|s| s.state == PeerSyncState::Available).count();

			for i in 0..num_peers_available {
				requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0]))
			}

			let mut m = requests.matcher();
			while let Some((peer, r)) = m.next(&peers.0) {
				peers.0.get_mut(&peer).unwrap().state =
					PeerSyncState::DownloadingJustification(r.0);
			}

			let active = requests
				.active_requests
				.iter()
				.map(|(p, &r)| (p.clone(), r))
				.collect::<Vec<_>>();

			for (peer, req) in &active {
				assert!(requests.failed_requests.get(req).is_none());
				assert!(!requests.pending_requests.contains(req));
				// an empty response must re-queue the request ...
				assert!(requests.on_response::<()>(peer.clone(), None).is_none());
				assert!(requests.pending_requests.contains(req));
				// ... and record exactly one failure for this peer
				assert_eq!(
					1,
					requests
						.failed_requests
						.get(req)
						.unwrap()
						.iter()
						.filter(|(p, _)| p == peer)
						.count()
				)
			}
		}

		QuickCheck::new().quickcheck(property as fn(ArbitraryPeers))
	}

	#[test]
	fn request_is_rescheduled_when_earlier_block_is_finalized() {
		sp_tracing::try_init_simple();

		let mut finality_proofs = ExtraRequests::<Block>::new("test");

		let hash4 = [4; 32].into();
		let hash5 = [5; 32].into();
		let hash6 = [6; 32].into();
		let hash7 = [7; 32].into();

		// descendency is modelled by comparing the first hash byte
		fn is_descendent_of(base: &Hash, target: &Hash) -> Result<bool, ClientError> {
			Ok(target[0] >= base[0])
		}

		// make #4 last finalized block
		finality_proofs.tree.import(hash4, 4, (), &is_descendent_of).unwrap();
		finality_proofs.tree.finalize_root(&hash4);

		// schedule request for #6
		finality_proofs.schedule((hash6, 6), is_descendent_of);

		// receive finality proof for #5
		finality_proofs.importing_requests.insert((hash6, 6));
		finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap();
		finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash5, 5)), true);

		// ensure that request for #6 is still pending
		assert_eq!(finality_proofs.pending_requests.iter().collect::<Vec<_>>(), vec![&(hash6, 6)]);

		// receive finality proof for #7
		finality_proofs.importing_requests.insert((hash6, 6));
		finality_proofs.on_block_finalized(&hash6, 6, is_descendent_of).unwrap();
		finality_proofs.on_block_finalized(&hash7, 7, is_descendent_of).unwrap();
		finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true);

		// ensure that there's no request for #6
		assert_eq!(
			finality_proofs.pending_requests.iter().collect::<Vec<_>>(),
			Vec::<&(Hash, u64)>::new()
		);
	}

	#[test]
	fn ancestor_roots_are_finalized_when_finality_notification_is_missed() {
		let mut finality_proofs = ExtraRequests::<Block>::new("test");

		let hash4 = [4; 32].into();
		let hash5 = [5; 32].into();

		fn is_descendent_of(base: &Hash, target: &Hash) -> Result<bool, ClientError> {
			Ok(target[0] >= base[0])
		}

		// schedule request for #4
		finality_proofs.schedule((hash4, 4), is_descendent_of);

		// receive finality notification for #5 (missing notification for #4!!!)
		// NOTE(review): the number inserted here (5) does not match the
		// scheduled request (hash4, 4) — presumably irrelevant since
		// `try_finalize_root` is keyed on (hash5, 5) here; confirm.
		finality_proofs.importing_requests.insert((hash4, 5));
		finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap();
		assert_eq!(finality_proofs.tree.roots().count(), 0);
	}

	// Some Arbitrary instances to allow easy construction of random peer sets:

	#[derive(Debug, Clone)]
	struct ArbitraryPeerSyncState(PeerSyncState<Block>);

	impl Arbitrary for ArbitraryPeerSyncState {
		fn arbitrary(g: &mut Gen) -> Self {
			let s = match u8::arbitrary(g) % 4 {
				0 => PeerSyncState::Available,
				// TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState<B>),
				1 => PeerSyncState::DownloadingNew(BlockNumber::arbitrary(g)),
				2 => PeerSyncState::DownloadingStale(Hash::random()),
				_ => PeerSyncState::DownloadingJustification(Hash::random()),
			};
			ArbitraryPeerSyncState(s)
		}
	}

	#[derive(Debug, Clone)]
	struct ArbitraryPeerSync(PeerSync<Block>);

	impl Arbitrary for ArbitraryPeerSync {
		fn arbitrary(g: &mut Gen) -> Self {
			let ps = PeerSync {
				peer_id: PeerId::random(),
				common_number: u64::arbitrary(g),
				best_hash: Hash::random(),
				best_number: u64::arbitrary(g),
				state: ArbitraryPeerSyncState::arbitrary(g).0,
			};
			ArbitraryPeerSync(ps)
		}
	}

	#[derive(Debug, Clone)]
	struct ArbitraryPeers(HashMap<PeerId, PeerSync<Block>>);

	impl Arbitrary for ArbitraryPeers {
		fn arbitrary(g: &mut Gen) -> Self {
			let mut peers = HashMap::with_capacity(g.size());
			for _ in 0..g.size() {
				let ps = ArbitraryPeerSync::arbitrary(g).0;
				peers.insert(ps.peer_id, ps);
			}
			ArbitraryPeers(peers)
		}
	}
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,222 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Network packet message types. These get serialized and put into the lower level protocol
|
||||
//! payload.
|
||||
|
||||
use bitflags::bitflags;
|
||||
use codec::{Decode, Encode, Error, Input, Output};
|
||||
pub use generic::{BlockAnnounce, FromBlock};
|
||||
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
|
||||
|
||||
/// Type alias instantiating [`generic::BlockRequest`] with the hash and
/// number types of the block type `B`.
pub type BlockRequest<B> =
	generic::BlockRequest<<B as BlockT>::Hash, <<B as BlockT>::Header as HeaderT>::Number>;

/// Type alias instantiating [`generic::BlockData`] with the header, hash and
/// extrinsic types of the block type `B`.
pub type BlockData<B> =
	generic::BlockData<<B as BlockT>::Header, <B as BlockT>::Hash, <B as BlockT>::Extrinsic>;

/// Type alias instantiating [`generic::BlockResponse`] with the header, hash
/// and extrinsic types of the block type `B`.
pub type BlockResponse<B> =
	generic::BlockResponse<<B as BlockT>::Header, <B as BlockT>::Hash, <B as BlockT>::Extrinsic>;
|
||||
|
||||
// Bits of block data and associated artifacts to request.
bitflags! {
	/// Bits of block data and associated artifacts to request.
	// (Previously mislabelled "Node roles bitmask" — that doc belongs to the
	// `Roles` bitflags, not here.)
	pub struct BlockAttributes: u8 {
		/// Include block header.
		const HEADER = 0b00000001;
		/// Include block body.
		const BODY = 0b00000010;
		/// Include block receipt.
		const RECEIPT = 0b00000100;
		/// Include block message queue.
		const MESSAGE_QUEUE = 0b00001000;
		/// Include a justification for the block.
		const JUSTIFICATION = 0b00010000;
		/// Include indexed transactions for a block.
		const INDEXED_BODY = 0b00100000;
	}
}
|
||||
|
||||
impl BlockAttributes {
	/// Encodes attributes as big endian u32, compatible with SCALE-encoding (i.e the
	/// significant byte has zero index).
	pub fn to_be_u32(&self) -> u32 {
		u32::from_be_bytes([self.bits(), 0, 0, 0])
	}

	/// Decodes attributes, encoded with the [`Self::to_be_u32`] call.
	// (Doc previously referenced a nonexistent `encode_to_be_u32()`.)
	pub fn from_be_u32(encoded: u32) -> Result<Self, Error> {
		// Only the most significant byte carries flag bits; unknown bits are
		// rejected by `from_bits`.
		Self::from_bits(encoded.to_be_bytes()[0])
			.ok_or_else(|| Error::from("Invalid BlockAttributes"))
	}
}
|
||||
|
||||
impl Encode for BlockAttributes {
	fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
		// Encoded as a single raw byte carrying the flag bits.
		dest.push_byte(self.bits())
	}
}

impl codec::EncodeLike for BlockAttributes {}

impl Decode for BlockAttributes {
	fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
		// `from_bits` rejects bytes with unknown attribute bits set.
		Self::from_bits(input.read_byte()?).ok_or_else(|| Error::from("Invalid bytes"))
	}
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)]
/// Block enumeration direction.
///
/// Discriminant values match the `Direction` enum of the `api.v1` protobuf
/// schema.
pub enum Direction {
	/// Enumerate in ascending order (from child to parent).
	Ascending = 0,
	/// Enumerate in descending order (from parent to canonical child).
	Descending = 1,
}
|
||||
|
||||
/// Block state in the chain.
///
/// Carried (optionally) by [`generic::BlockAnnounce`].
#[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)]
pub enum BlockState {
	/// Block is not part of the best chain.
	Normal,
	/// Latest best block.
	Best,
}
|
||||
|
||||
/// Announcement summary used for debug logging.
#[derive(Debug)]
pub struct AnnouncementSummary<H: HeaderT> {
	/// Hash of the announced block.
	pub block_hash: H::Hash,
	/// Number of the announced block.
	pub number: H::Number,
	/// Hash of the announced block's parent.
	pub parent_hash: H::Hash,
	/// Block state carried by the announcement, if any.
	pub state: Option<BlockState>,
}
|
||||
|
||||
impl<H: HeaderT> BlockAnnounce<H> {
	/// Produce an [`AnnouncementSummary`] of this announcement, for debug
	/// logging.
	pub fn summary(&self) -> AnnouncementSummary<H> {
		AnnouncementSummary {
			block_hash: self.header.hash(),
			number: *self.header.number(),
			parent_hash: *self.header.parent_hash(),
			state: self.state,
		}
	}
}
|
||||
|
||||
/// Generic types.
pub mod generic {
	use super::{BlockAttributes, BlockState, Direction};
	use codec::{Decode, Encode, Input, Output};
	use sc_network_common::message::RequestId;
	use sp_runtime::{EncodedJustification, Justifications};

	/// Block data sent in the response.
	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
	pub struct BlockData<Header, Hash, Extrinsic> {
		/// Block header hash.
		pub hash: Hash,
		/// Block header if requested.
		pub header: Option<Header>,
		/// Block body if requested.
		pub body: Option<Vec<Extrinsic>>,
		/// Block body indexed transactions if requested.
		pub indexed_body: Option<Vec<Vec<u8>>>,
		/// Block receipt if requested.
		pub receipt: Option<Vec<u8>>,
		/// Block message queue if requested.
		pub message_queue: Option<Vec<u8>>,
		/// Justification if requested.
		pub justification: Option<EncodedJustification>,
		/// Justifications if requested.
		pub justifications: Option<Justifications>,
	}

	/// Request block data from a peer.
	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
	pub struct BlockRequest<Hash, Number> {
		/// Unique request id.
		pub id: RequestId,
		/// Bits of block data to request.
		pub fields: BlockAttributes,
		/// Start from this block.
		pub from: FromBlock<Hash, Number>,
		/// End at this block. An implementation defined maximum is used when unspecified.
		pub to: Option<Hash>,
		/// Sequence direction.
		pub direction: Direction,
		/// Maximum number of blocks to return. An implementation defined maximum is used when
		/// unspecified.
		pub max: Option<u32>,
	}

	/// Identifies starting point of a block sequence.
	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
	pub enum FromBlock<Hash, Number> {
		/// Start with given hash.
		Hash(Hash),
		/// Start with given block number.
		Number(Number),
	}

	/// Response to `BlockRequest`
	#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
	pub struct BlockResponse<Header, Hash, Extrinsic> {
		/// Id of a request this response was made for.
		pub id: RequestId,
		/// Block data for the requested sequence.
		pub blocks: Vec<BlockData<Header, Hash, Extrinsic>>,
	}

	/// Announce a new complete relay chain block on the network.
	#[derive(Debug, PartialEq, Eq, Clone)]
	pub struct BlockAnnounce<H> {
		/// New block header.
		pub header: H,
		/// Block state. TODO: Remove `Option` and custom encoding when v4 becomes common.
		pub state: Option<BlockState>,
		/// Data associated with this block announcement, e.g. a candidate message.
		pub data: Option<Vec<u8>>,
	}

	// Custom Encode/Decode impl to maintain backwards compatibility with v3.
	// This assumes that the packet contains nothing but the announcement message.
	// TODO: Get rid of it once protocol v4 is common.
	impl<H: Encode> Encode for BlockAnnounce<H> {
		fn encode_to<T: Output + ?Sized>(&self, dest: &mut T) {
			// `None` fields are simply omitted from the wire encoding; the
			// decoder below recovers them as `None` when decoding fails.
			self.header.encode_to(dest);
			if let Some(state) = &self.state {
				state.encode_to(dest);
			}
			if let Some(data) = &self.data {
				data.encode_to(dest)
			}
		}
	}

	impl<H: Decode> Decode for BlockAnnounce<H> {
		fn decode<I: Input>(input: &mut I) -> Result<Self, codec::Error> {
			let header = H::decode(input)?;
			// Trailing fields are optional: any decode failure (e.g. end of
			// input from a v3 peer) is mapped to `None` rather than an error.
			let state = BlockState::decode(input).ok();
			let data = Vec::decode(input).ok();
			Ok(Self { header, state, data })
		}
	}
}
|
||||
@@ -0,0 +1,23 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Include sources generated from protobuf definitions.
|
||||
|
||||
/// Protobuf-generated types for version 1 of the sync protocol, compiled
/// from `api.v1.proto` into `$OUT_DIR/api.v1.rs` by the build script.
pub mod v1 {
	include!(concat!(env!("OUT_DIR"), "/api.v1.rs"));
}
|
||||
@@ -0,0 +1,107 @@
|
||||
// Schema definition for block request/response messages.

syntax = "proto3";

package api.v1;

// Block enumeration direction.
enum Direction {
	// Enumerate in ascending order (from child to parent).
	Ascending = 0;
	// Enumerate in descending order (from parent to canonical child).
	Descending = 1;
}

// Request block data from a peer.
message BlockRequest {
	// Bits of block data to request.
	uint32 fields = 1;
	// Start from this block.
	oneof from_block {
		// Start with given hash.
		bytes hash = 2;
		// Start with given block number.
		bytes number = 3;
	}
	// End at this block. An implementation defined maximum is used when unspecified.
	bytes to_block = 4; // optional
	// Sequence direction.
	Direction direction = 5;
	// Maximum number of blocks to return. An implementation defined maximum is used when unspecified.
	uint32 max_blocks = 6; // optional
	// Indicate to the receiver that we support multiple justifications. If the responder also
	// supports this it will populate the multiple justifications field in `BlockData` instead of
	// the single justification field.
	bool support_multiple_justifications = 7; // optional
}

// Response to `BlockRequest`
message BlockResponse {
	// Block data for the requested sequence.
	repeated BlockData blocks = 1;
}

// Block data sent in the response.
message BlockData {
	// Block header hash.
	bytes hash = 1;
	// Block header if requested.
	bytes header = 2; // optional
	// Block body if requested.
	repeated bytes body = 3; // optional
	// Block receipt if requested.
	bytes receipt = 4; // optional
	// Block message queue if requested.
	bytes message_queue = 5; // optional
	// Justification if requested.
	bytes justification = 6; // optional
	// True if justification should be treated as present but empty.
	// This hack is unfortunately necessary because shortcomings in the protobuf format otherwise
	// don't make it possible to differentiate between a lack of justification and an empty
	// justification.
	bool is_empty_justification = 7; // optional, false if absent
	// Justifications if requested.
	// Unlike the field for a single justification, this field does not require an associated
	// boolean to differentiate between the lack of justifications and empty justification(s). This
	// is because empty justifications, like all justifications, are paired with a non-empty
	// consensus engine ID.
	bytes justifications = 8; // optional
	// Indexed block body if requested.
	repeated bytes indexed_body = 9; // optional
}

// Request storage data from a peer.
message StateRequest {
	// Block header hash.
	bytes block = 1;
	// Start from this key.
	// Multiple keys used for nested state start.
	repeated bytes start = 2; // optional
	// if 'true' indicates that response should contain raw key-values, rather than proof.
	bool no_proof = 3;
}

message StateResponse {
	// A collection of keys-values states. Only populated if `no_proof` is `true`
	repeated KeyValueStateEntry entries = 1;
	// If `no_proof` is false in request, this contains proof nodes.
	bytes proof = 2;
}

// A key value state.
message KeyValueStateEntry {
	// Root for this level, zero-length bytes if top level.
	bytes state_root = 1;
	// A collection of keys-values.
	repeated StateEntry entries = 2;
	// Set to true when there are no more keys to return.
	bool complete = 3;
}

// A key-value pair.
message StateEntry {
	bytes key = 1;
	bytes value = 2;
}
|
||||
|
||||
@@ -0,0 +1,260 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! State sync support.
|
||||
|
||||
use crate::schema::v1::{StateEntry, StateRequest, StateResponse};
|
||||
use codec::{Decode, Encode};
|
||||
use log::debug;
|
||||
use sc_client_api::{CompactProof, ProofProvider};
|
||||
use sc_consensus::ImportedState;
|
||||
use smallvec::SmallVec;
|
||||
use sp_core::storage::well_known_keys;
|
||||
use sp_runtime::traits::{Block as BlockT, Header, NumberFor};
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
/// State sync state machine. Accumulates partial state data until it
/// is ready to be imported.
pub struct StateSync<B: BlockT, Client> {
	/// Hash of the block whose state is being downloaded.
	target_block: B::Hash,
	/// Header of the target block; kept so it can be handed over on import.
	target_header: B::Header,
	/// State root of the target block, used to verify range proofs.
	target_root: B::Hash,
	/// Resume cursor: last key(s) received. Two entries when resuming inside
	/// a child trie (parent cursor + child cursor).
	last_key: SmallVec<[Vec<u8>; 2]>,
	/// Accumulated state, keyed by state root: (key-value pairs, child storage keys
	/// referencing this root).
	state: HashMap<Vec<u8>, (Vec<(Vec<u8>, Vec<u8>)>, Vec<Vec<u8>>)>,
	/// Set once the full state has been received.
	complete: bool,
	/// Client used to verify range proofs.
	client: Arc<Client>,
	/// Total bytes accounted so far (keys and/or proof bytes), for progress reporting.
	imported_bytes: u64,
	/// If `true`, request raw key-values instead of verified proofs.
	skip_proof: bool,
}
|
||||
|
||||
/// Reported state download progress.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct StateDownloadProgress {
	/// Estimated download percentage.
	pub percentage: u32,
	/// Total state size in bytes downloaded so far.
	pub size: u64,
}
|
||||
|
||||
/// Import state chunk result.
pub enum ImportResult<B: BlockT> {
	/// State is complete and ready for import.
	Import(B::Hash, B::Header, ImportedState<B>),
	/// Continue downloading.
	Continue,
	/// Bad state chunk.
	BadResponse,
}
|
||||
|
||||
impl<B, Client> StateSync<B, Client>
where
	B: BlockT,
	Client: ProofProvider<B> + Send + Sync + 'static,
{
	/// Create a new instance targeting the state of `target`.
	///
	/// `skip_proof` selects the unverified raw key-value download mode.
	pub fn new(client: Arc<Client>, target: B::Header, skip_proof: bool) -> Self {
		Self {
			client,
			target_block: target.hash(),
			target_root: *target.state_root(),
			target_header: target,
			last_key: SmallVec::default(),
			state: HashMap::default(),
			complete: false,
			imported_bytes: 0,
			skip_proof,
		}
	}

	/// Validate and import a state response.
	///
	/// In proof mode the response's `proof` is decoded and verified against
	/// `target_root`; in `skip_proof` mode raw `entries` are accumulated without
	/// verification. Returns `Import` once the whole state has been received,
	/// `Continue` if more chunks are needed, `BadResponse` on a malformed chunk.
	pub fn import(&mut self, response: StateResponse) -> ImportResult<B> {
		// A response must carry either entries or a proof.
		if response.entries.is_empty() && response.proof.is_empty() {
			debug!(target: "sync", "Bad state response");
			return ImportResult::BadResponse
		}
		// In proof mode an empty proof is never acceptable.
		if !self.skip_proof && response.proof.is_empty() {
			debug!(target: "sync", "Missing proof");
			return ImportResult::BadResponse
		}
		let complete = if !self.skip_proof {
			debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len());
			let proof_size = response.proof.len() as u64;
			let proof = match CompactProof::decode(&mut response.proof.as_ref()) {
				Ok(proof) => proof,
				Err(e) => {
					debug!(target: "sync", "Error decoding proof: {:?}", e);
					return ImportResult::BadResponse
				},
			};
			// Verify the range proof against the target state root, resuming at the
			// cursor from the previous chunk.
			let (values, completed) = match self.client.verify_range_proof(
				self.target_root,
				proof,
				self.last_key.as_slice(),
			) {
				Err(e) => {
					debug!(
						target: "sync",
						"StateResponse failed proof verification: {}",
						e,
					);
					return ImportResult::BadResponse
				},
				Ok(values) => values,
			};
			debug!(target: "sync", "Imported with {} keys", values.len());

			// `completed == 0` signals that the proof covered the remainder of the state.
			let complete = completed == 0;
			if !complete && !values.update_last_key(completed, &mut self.last_key) {
				debug!(target: "sync", "Error updating key cursor, depth: {}", completed);
			};

			for values in values.0 {
				let key_values = if values.state_root.is_empty() {
					// Top trie: divert child-trie root keys into `self.state` and keep
					// only plain key-values.
					values
						.key_values
						.into_iter()
						.filter(|key_value| {
							if well_known_keys::is_child_storage_key(key_value.0.as_slice()) {
								self.state
									.entry(key_value.1.clone())
									.or_default()
									.1
									.push(key_value.0.clone());
								false
							} else {
								true
							}
						})
						.collect()
				} else {
					values.key_values
				};
				let mut entry = self.state.entry(values.state_root).or_default();
				if entry.0.len() > 0 && entry.1.len() > 1 {
					// Already imported child_trie with same root.
					// Warning this will not work with parallel download.
				} else if entry.0.is_empty() {
					// First chunk for this root: adopt the whole batch.
					for (key, _value) in key_values.iter() {
						self.imported_bytes += key.len() as u64;
					}

					entry.0 = key_values;
				} else {
					// Subsequent chunk for this root: append.
					for (key, value) in key_values {
						self.imported_bytes += key.len() as u64;
						entry.0.push((key, value))
					}
				}
			}
			self.imported_bytes += proof_size;
			complete
		} else {
			let mut complete = true;
			// if the trie is a child trie and one of its parent trie is empty,
			// the parent cursor stays valid.
			// Empty parent trie content only happens when all the response content
			// is part of a single child trie.
			// NOTE(review): `response.entries[0]` panics if `entries` is empty here;
			// the only guard above requires entries OR proof non-empty — confirm a
			// proof-only response cannot reach this branch with `last_key.len() == 2`.
			if self.last_key.len() == 2 && response.entries[0].entries.is_empty() {
				// Do not remove the parent trie position.
				self.last_key.pop();
			} else {
				self.last_key.clear();
			}
			for state in response.entries {
				debug!(
					target: "sync",
					"Importing state from {:?} to {:?}",
					state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
					state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
				);

				// An incomplete trie chunk advances the cursor to its last key.
				if !state.complete {
					if let Some(e) = state.entries.last() {
						self.last_key.push(e.key.clone());
					}
					complete = false;
				}
				let is_top = state.state_root.is_empty();
				let entry = self.state.entry(state.state_root).or_default();
				if entry.0.len() > 0 && entry.1.len() > 1 {
					// Already imported child trie with same root.
				} else {
					let mut child_roots = Vec::new();
					for StateEntry { key, value } in state.entries {
						// Skip all child key root (will be recalculated on import).
						if is_top && well_known_keys::is_child_storage_key(key.as_slice()) {
							child_roots.push((value, key));
						} else {
							self.imported_bytes += key.len() as u64;
							entry.0.push((key, value))
						}
					}
					// Record which storage keys reference each child-trie root.
					for (root, storage_key) in child_roots {
						self.state.entry(root).or_default().1.push(storage_key);
					}
				}
			}
			complete
		};
		if complete {
			self.complete = true;
			ImportResult::Import(
				self.target_block,
				self.target_header.clone(),
				ImportedState {
					block: self.target_block,
					// Hand over the accumulated state, leaving an empty map behind.
					state: std::mem::take(&mut self.state).into(),
				},
			)
		} else {
			ImportResult::Continue
		}
	}

	/// Produce next state request, resuming from the current key cursor.
	pub fn next_request(&self) -> StateRequest {
		StateRequest {
			block: self.target_block.encode(),
			start: self.last_key.clone().into_vec(),
			no_proof: self.skip_proof,
		}
	}

	/// Check if the state is complete.
	pub fn is_complete(&self) -> bool {
		self.complete
	}

	/// Returns target block number.
	pub fn target_block_num(&self) -> NumberFor<B> {
		*self.target_header.number()
	}

	/// Returns target block hash.
	pub fn target(&self) -> B::Hash {
		self.target_block
	}

	/// Returns state sync estimated progress.
	///
	/// Percentage is estimated from the first byte of the resume cursor
	/// (keys are roughly uniformly distributed over the key space).
	pub fn progress(&self) -> StateDownloadProgress {
		let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8);
		let percent_done = cursor as u32 * 100 / 256;
		StateDownloadProgress { percentage: percent_done, size: self.imported_bytes }
	}
}
|
||||
@@ -0,0 +1,259 @@
|
||||
// Copyright 2020 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helper for handling (i.e. answering) state requests from a remote peer via the
|
||||
//! `crate::request_responses::RequestResponsesBehaviour`.
|
||||
|
||||
use crate::schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse};
|
||||
use codec::{Decode, Encode};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
stream::StreamExt,
|
||||
};
|
||||
use libp2p::PeerId;
|
||||
use log::{debug, trace};
|
||||
use lru::LruCache;
|
||||
use prost::Message;
|
||||
use sc_client_api::ProofProvider;
|
||||
use sc_network_common::{
|
||||
config::ProtocolId,
|
||||
request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
|
||||
};
|
||||
use sp_runtime::{generic::BlockId, traits::Block as BlockT};
|
||||
use std::{
|
||||
hash::{Hash, Hasher},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
/// Log target used by this module.
const LOG_TARGET: &str = "sync";
/// Soft cap on response size. Actual response may be bigger.
const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024;
/// How often an identical request from the same peer is served before it is
/// considered spam.
const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2;
|
||||
|
||||
mod rep {
	//! Peer reputation changes issued by the state request handler.
	use sc_peerset::ReputationChange as Rep;

	/// Reputation change when a peer sent us the same request multiple times.
	pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times");
}
|
||||
|
||||
/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests.
///
/// The returned config has no inbound queue; callers that want to serve requests must set
/// `inbound_queue` themselves (see [`StateRequestHandler::new`]).
pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
	ProtocolConfig {
		name: generate_protocol_name(protocol_id).into(),
		max_request_size: 1024 * 1024,
		max_response_size: 16 * 1024 * 1024,
		request_timeout: Duration::from_secs(40),
		inbound_queue: None,
	}
}
|
||||
|
||||
/// Generate the state protocol name from chain specific protocol identifier.
|
||||
fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
|
||||
format!("/{}/state/2", protocol_id.as_ref())
|
||||
}
|
||||
|
||||
/// The key of [`StateRequestHandler::seen_requests`].
#[derive(Eq, PartialEq, Clone)]
struct SeenRequestsKey<B: BlockT> {
	/// Peer the request came from.
	peer: PeerId,
	/// Requested block hash.
	block: B::Hash,
	/// Requested start key(s).
	start: Vec<Vec<u8>>,
}
|
||||
|
||||
#[allow(clippy::derive_hash_xor_eq)]
|
||||
impl<B: BlockT> Hash for SeenRequestsKey<B> {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.peer.hash(state);
|
||||
self.block.hash(state);
|
||||
self.start.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
/// The value of [`StateRequestHandler::seen_requests`].
enum SeenRequestsValue {
	/// First time we have seen the request.
	First,
	/// We have fulfilled the request `n` times.
	Fulfilled(usize),
}
|
||||
|
||||
/// Handler for incoming state requests from a remote peer.
pub struct StateRequestHandler<B: BlockT, Client> {
	/// Client used to read proofs / storage for the requested block.
	client: Arc<Client>,
	/// Inbound queue of requests delivered by the network behaviour.
	request_receiver: mpsc::Receiver<IncomingRequest>,
	/// Maps from request to number of times we have seen this request.
	///
	/// This is used to check if a peer is spamming us with the same request.
	seen_requests: LruCache<SeenRequestsKey<B>, SeenRequestsValue>,
}
|
||||
|
||||
impl<B, Client> StateRequestHandler<B, Client>
|
||||
where
|
||||
B: BlockT,
|
||||
Client: ProofProvider<B> + Send + Sync + 'static,
|
||||
{
|
||||
/// Create a new [`StateRequestHandler`].
|
||||
pub fn new(
|
||||
protocol_id: &ProtocolId,
|
||||
client: Arc<Client>,
|
||||
num_peer_hint: usize,
|
||||
) -> (Self, ProtocolConfig) {
|
||||
// Reserve enough request slots for one request per peer when we are at the maximum
|
||||
// number of peers.
|
||||
let (tx, request_receiver) = mpsc::channel(num_peer_hint);
|
||||
|
||||
let mut protocol_config = generate_protocol_config(protocol_id);
|
||||
protocol_config.inbound_queue = Some(tx);
|
||||
|
||||
let seen_requests = LruCache::new(num_peer_hint * 2);
|
||||
|
||||
(Self { client, request_receiver, seen_requests }, protocol_config)
|
||||
}
|
||||
|
||||
/// Run [`StateRequestHandler`].
|
||||
pub async fn run(mut self) {
|
||||
while let Some(request) = self.request_receiver.next().await {
|
||||
let IncomingRequest { peer, payload, pending_response } = request;
|
||||
|
||||
match self.handle_request(payload, pending_response, &peer) {
|
||||
Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer),
|
||||
Err(e) => debug!(
|
||||
target: LOG_TARGET,
|
||||
"Failed to handle state request from {}: {}", peer, e,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
payload: Vec<u8>,
|
||||
pending_response: oneshot::Sender<OutgoingResponse>,
|
||||
peer: &PeerId,
|
||||
) -> Result<(), HandleRequestError> {
|
||||
let request = StateRequest::decode(&payload[..])?;
|
||||
let block: B::Hash = Decode::decode(&mut request.block.as_ref())?;
|
||||
|
||||
let key = SeenRequestsKey { peer: *peer, block, start: request.start.clone() };
|
||||
|
||||
let mut reputation_changes = Vec::new();
|
||||
|
||||
match self.seen_requests.get_mut(&key) {
|
||||
Some(SeenRequestsValue::First) => {},
|
||||
Some(SeenRequestsValue::Fulfilled(ref mut requests)) => {
|
||||
*requests = requests.saturating_add(1);
|
||||
|
||||
if *requests > MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER {
|
||||
reputation_changes.push(rep::SAME_REQUEST);
|
||||
}
|
||||
},
|
||||
None => {
|
||||
self.seen_requests.put(key.clone(), SeenRequestsValue::First);
|
||||
},
|
||||
}
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}",
|
||||
peer,
|
||||
request.block,
|
||||
&request.start,
|
||||
request.no_proof,
|
||||
);
|
||||
|
||||
let result = if reputation_changes.is_empty() {
|
||||
let mut response = StateResponse::default();
|
||||
|
||||
if !request.no_proof {
|
||||
let (proof, _count) = self.client.read_proof_collection(
|
||||
&BlockId::hash(block),
|
||||
request.start.as_slice(),
|
||||
MAX_RESPONSE_BYTES,
|
||||
)?;
|
||||
response.proof = proof.encode();
|
||||
} else {
|
||||
let entries = self.client.storage_collection(
|
||||
&BlockId::hash(block),
|
||||
request.start.as_slice(),
|
||||
MAX_RESPONSE_BYTES,
|
||||
)?;
|
||||
response.entries = entries
|
||||
.into_iter()
|
||||
.map(|(state, complete)| KeyValueStateEntry {
|
||||
state_root: state.state_root,
|
||||
entries: state
|
||||
.key_values
|
||||
.into_iter()
|
||||
.map(|(key, value)| StateEntry { key, value })
|
||||
.collect(),
|
||||
complete,
|
||||
})
|
||||
.collect();
|
||||
}
|
||||
|
||||
trace!(
|
||||
target: LOG_TARGET,
|
||||
"StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}",
|
||||
response.entries.len(),
|
||||
response.proof.len(),
|
||||
response.entries.get(0).and_then(|top| top
|
||||
.entries
|
||||
.first()
|
||||
.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
|
||||
response.entries.get(0).and_then(|top| top
|
||||
.entries
|
||||
.last()
|
||||
.map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
|
||||
);
|
||||
if let Some(value) = self.seen_requests.get_mut(&key) {
|
||||
// If this is the first time we have processed this request, we need to change
|
||||
// it to `Fulfilled`.
|
||||
if let SeenRequestsValue::First = value {
|
||||
*value = SeenRequestsValue::Fulfilled(1);
|
||||
}
|
||||
}
|
||||
|
||||
let mut data = Vec::with_capacity(response.encoded_len());
|
||||
response.encode(&mut data)?;
|
||||
Ok(data)
|
||||
} else {
|
||||
Err(())
|
||||
};
|
||||
|
||||
pending_response
|
||||
.send(OutgoingResponse { result, reputation_changes, sent_feedback: None })
|
||||
.map_err(|_| HandleRequestError::SendResponse)
|
||||
}
|
||||
}
|
||||
|
||||
/// Failures that can occur while answering a state request.
#[derive(Debug, thiserror::Error)]
enum HandleRequestError {
	#[error("Failed to decode request: {0}.")]
	DecodeProto(#[from] prost::DecodeError),

	#[error("Failed to encode response: {0}.")]
	EncodeProto(#[from] prost::EncodeError),

	#[error("Failed to decode block hash: {0}.")]
	InvalidHash(#[from] codec::Error),

	#[error(transparent)]
	Client(#[from] sp_blockchain::Error),

	#[error("Failed to send response.")]
	SendResponse,
}
|
||||
@@ -0,0 +1,208 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Warp sync support.
|
||||
|
||||
pub use crate::warp_request_handler::{
|
||||
EncodedProof, Request as WarpProofRequest, VerificationResult, WarpSyncProvider,
|
||||
};
|
||||
use crate::{
|
||||
schema::v1::{StateRequest, StateResponse},
|
||||
state::{ImportResult, StateSync},
|
||||
};
|
||||
use sc_client_api::ProofProvider;
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_finality_grandpa::{AuthorityList, SetId};
|
||||
use sp_runtime::traits::{Block as BlockT, NumberFor, Zero};
|
||||
use std::{fmt, sync::Arc};
|
||||
|
||||
/// Internal warp sync phase: first download/verify warp proofs, then download state.
enum Phase<B: BlockT, Client> {
	/// Downloading warp proofs, tracking the current authority set and the last
	/// verified block hash to resume from.
	WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash },
	/// Downloading the state of the proven target block.
	State(StateSync<B, Client>),
}
|
||||
|
||||
/// Reported warp sync phase.
#[derive(Clone, Eq, PartialEq, Debug)]
pub enum WarpSyncPhase<B: BlockT> {
	/// Waiting for peers to connect.
	AwaitingPeers,
	/// Downloading and verifying grandpa warp proofs.
	DownloadingWarpProofs,
	/// Downloading state data.
	DownloadingState,
	/// Importing state.
	ImportingState,
	/// Downloading block history.
	DownloadingBlocks(NumberFor<B>),
}
|
||||
|
||||
impl<B: BlockT> fmt::Display for WarpSyncPhase<B> {
	/// Render a human-readable description of the phase, e.g. for status lines.
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			Self::AwaitingPeers => f.write_str("Waiting for peers"),
			Self::DownloadingWarpProofs => f.write_str("Downloading finality proofs"),
			Self::DownloadingState => f.write_str("Downloading state"),
			Self::ImportingState => f.write_str("Importing state"),
			Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n),
		}
	}
}
|
||||
|
||||
/// Reported warp sync progress.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct WarpSyncProgress<B: BlockT> {
	/// Current phase of the warp sync process.
	pub phase: WarpSyncPhase<B>,
	/// Total bytes downloaded so far.
	pub total_bytes: u64,
}
|
||||
|
||||
/// Import warp proof result.
pub enum WarpProofImportResult {
	/// Import was successful.
	Success,
	/// Bad proof.
	BadResponse,
}
|
||||
|
||||
/// Warp sync state machine. Accumulates warp proofs and state.
pub struct WarpSync<B: BlockT, Client> {
	/// Current phase (warp proof download, then state download).
	phase: Phase<B, Client>,
	/// Client used for header lookups and, later, state proof verification.
	client: Arc<Client>,
	/// Backend that generates/verifies warp proofs (e.g. GRANDPA).
	warp_sync_provider: Arc<dyn WarpSyncProvider<B>>,
	/// Total warp proof bytes received so far, for progress reporting.
	total_proof_bytes: u64,
}
|
||||
|
||||
impl<B, Client> WarpSync<B, Client>
where
	B: BlockT,
	Client: HeaderBackend<B> + ProofProvider<B> + 'static,
{
	/// Create a new instance, starting warp proof download from genesis with the
	/// provider's current (genesis) authority set.
	pub fn new(client: Arc<Client>, warp_sync_provider: Arc<dyn WarpSyncProvider<B>>) -> Self {
		// NOTE(review): the outer `.unwrap()` treats a backend read error as fatal;
		// only the inner `expect` is covered by the "genesis always exists" invariant.
		let last_hash = client.hash(Zero::zero()).unwrap().expect("Genesis header always exists");
		let phase = Phase::WarpProof {
			set_id: 0,
			authorities: warp_sync_provider.current_authorities(),
			last_hash,
		};
		Self { client, warp_sync_provider, phase, total_proof_bytes: 0 }
	}

	/// Validate and import a state response.
	///
	/// Only valid during the `State` phase; a response during the warp proof phase
	/// is rejected as `BadResponse`.
	pub fn import_state(&mut self, response: StateResponse) -> ImportResult<B> {
		match &mut self.phase {
			Phase::WarpProof { .. } => {
				log::debug!(target: "sync", "Unexpected state response");
				ImportResult::BadResponse
			},
			Phase::State(sync) => sync.import(response),
		}
	}

	/// Validate and import a warp proof response.
	///
	/// A `Partial` verification advances the authority set and resume hash; a
	/// `Complete` verification switches into the state download phase targeting
	/// the proven header.
	pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult {
		match &mut self.phase {
			Phase::State(_) => {
				log::debug!(target: "sync", "Unexpected warp proof response");
				WarpProofImportResult::BadResponse
			},
			Phase::WarpProof { set_id, authorities, last_hash } => {
				match self.warp_sync_provider.verify(&response, *set_id, authorities.clone()) {
					Err(e) => {
						log::debug!(target: "sync", "Bad warp proof response: {}", e);
						WarpProofImportResult::BadResponse
					},
					Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => {
						log::debug!(target: "sync", "Verified partial proof, set_id={:?}", new_set_id);
						*set_id = new_set_id;
						*authorities = new_authorities;
						*last_hash = new_last_hash;
						self.total_proof_bytes += response.0.len() as u64;
						WarpProofImportResult::Success
					},
					Ok(VerificationResult::Complete(new_set_id, _, header)) => {
						log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id);
						self.total_proof_bytes += response.0.len() as u64;
						let state_sync = StateSync::new(self.client.clone(), header, false);
						self.phase = Phase::State(state_sync);
						WarpProofImportResult::Success
					},
				}
			},
		}
	}

	/// Produce next state request, or `None` while still downloading warp proofs.
	pub fn next_state_request(&self) -> Option<StateRequest> {
		match &self.phase {
			Phase::WarpProof { .. } => None,
			Phase::State(sync) => Some(sync.next_request()),
		}
	}

	/// Produce next warp proof request, or `None` once in the state phase.
	// NOTE(review): "poof" is a typo, but the name is public API; renaming would
	// break callers, so it is kept as-is.
	pub fn next_warp_poof_request(&self) -> Option<WarpProofRequest<B>> {
		match &self.phase {
			Phase::State(_) => None,
			Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }),
		}
	}

	/// Return target block hash if it is known.
	pub fn target_block_hash(&self) -> Option<B::Hash> {
		match &self.phase {
			Phase::State(s) => Some(s.target()),
			Phase::WarpProof { .. } => None,
		}
	}

	/// Return target block number if it is known.
	pub fn target_block_number(&self) -> Option<NumberFor<B>> {
		match &self.phase {
			Phase::State(s) => Some(s.target_block_num()),
			Phase::WarpProof { .. } => None,
		}
	}

	/// Check if the state is complete.
	pub fn is_complete(&self) -> bool {
		match &self.phase {
			Phase::WarpProof { .. } => false,
			Phase::State(sync) => sync.is_complete(),
		}
	}

	/// Returns state sync estimated progress (percentage, bytes)
	pub fn progress(&self) -> WarpSyncProgress<B> {
		match &self.phase {
			Phase::WarpProof { .. } => WarpSyncProgress {
				phase: WarpSyncPhase::DownloadingWarpProofs,
				total_bytes: self.total_proof_bytes,
			},
			Phase::State(sync) => WarpSyncProgress {
				phase: if self.is_complete() {
					WarpSyncPhase::ImportingState
				} else {
					WarpSyncPhase::DownloadingState
				},
				total_bytes: self.total_proof_bytes + sync.progress().size,
			},
		}
	}
}
|
||||
@@ -0,0 +1,172 @@
|
||||
// Copyright 2021 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer.
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
use futures::{
|
||||
channel::{mpsc, oneshot},
|
||||
stream::StreamExt,
|
||||
};
|
||||
use log::debug;
|
||||
use sc_network_common::{
|
||||
config::ProtocolId,
|
||||
request_responses::{
|
||||
IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig,
|
||||
},
|
||||
};
|
||||
use sp_runtime::traits::Block as BlockT;
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
pub use sp_finality_grandpa::{AuthorityList, SetId};
|
||||
|
||||
/// Scale-encoded warp sync proof response.
pub struct EncodedProof(pub Vec<u8>);
|
||||
|
||||
/// Warp sync request
#[derive(Encode, Decode, Debug)]
pub struct Request<B: BlockT> {
	/// Start collecting proofs from this block.
	pub begin: B::Hash,
}
|
||||
|
||||
/// Maximum size (in bytes) of a warp proof response.
const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024;
|
||||
|
||||
/// Proof verification result.
pub enum VerificationResult<Block: BlockT> {
	/// Proof is valid, but the target was not reached.
	Partial(SetId, AuthorityList, Block::Hash),
	/// Target finality is proved.
	Complete(SetId, AuthorityList, Block::Header),
}
|
||||
|
||||
/// Warp sync backend. Handles retrieving and verifying warp sync proofs.
pub trait WarpSyncProvider<B: BlockT>: Send + Sync {
	/// Generate proof starting at given block hash. The proof is accumulated until maximum proof
	/// size is reached.
	fn generate(
		&self,
		start: B::Hash,
	) -> Result<EncodedProof, Box<dyn std::error::Error + Send + Sync>>;
	/// Verify warp proof against current set of authorities.
	fn verify(
		&self,
		proof: &EncodedProof,
		set_id: SetId,
		authorities: AuthorityList,
	) -> Result<VerificationResult<B>, Box<dyn std::error::Error + Send + Sync>>;
	/// Get current list of authorities. This is supposed to be genesis authorities when starting
	/// sync.
	fn current_authorities(&self) -> AuthorityList;
}
|
||||
|
||||
/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing
/// incoming requests.
///
/// The returned config has no inbound queue; [`RequestHandler::new`] wires one in for nodes that
/// serve warp proofs.
pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig {
	RequestResponseConfig {
		name: generate_protocol_name(protocol_id).into(),
		// Requests only carry a single block hash, so 32 bytes suffice.
		max_request_size: 32,
		max_response_size: MAX_RESPONSE_SIZE,
		request_timeout: Duration::from_secs(10),
		inbound_queue: None,
	}
}
|
||||
|
||||
/// Generate the grandpa warp sync protocol name from chain specific protocol identifier.
|
||||
fn generate_protocol_name(protocol_id: ProtocolId) -> String {
|
||||
format!("/{}/sync/warp", protocol_id.as_ref())
|
||||
}
|
||||
|
||||
/// Handler for incoming grandpa warp sync requests from a remote peer.
pub struct RequestHandler<TBlock: BlockT> {
	/// Backend that generates the warp proofs served to peers.
	backend: Arc<dyn WarpSyncProvider<TBlock>>,
	/// Inbound queue of requests delivered by the network behaviour.
	request_receiver: mpsc::Receiver<IncomingRequest>,
}
|
||||
|
||||
impl<TBlock: BlockT> RequestHandler<TBlock> {
	/// Create a new [`RequestHandler`].
	///
	/// Returns the handler together with a [`RequestResponseConfig`] whose inbound
	/// queue is wired to this handler; the config must be registered with the network.
	pub fn new(
		protocol_id: ProtocolId,
		backend: Arc<dyn WarpSyncProvider<TBlock>>,
	) -> (Self, RequestResponseConfig) {
		// Fixed-size inbound buffer; excess concurrent requests apply backpressure.
		let (tx, request_receiver) = mpsc::channel(20);

		let mut request_response_config = generate_request_response_config(protocol_id);
		request_response_config.inbound_queue = Some(tx);

		(Self { backend, request_receiver }, request_response_config)
	}

	/// Decode one request, generate the warp proof from the requested start block,
	/// and send it back through `pending_response`.
	fn handle_request(
		&self,
		payload: Vec<u8>,
		pending_response: oneshot::Sender<OutgoingResponse>,
	) -> Result<(), HandleRequestError> {
		let request = Request::<TBlock>::decode(&mut &payload[..])?;

		let EncodedProof(proof) = self
			.backend
			.generate(request.begin)
			.map_err(HandleRequestError::InvalidRequest)?;

		pending_response
			.send(OutgoingResponse {
				result: Ok(proof),
				reputation_changes: Vec::new(),
				sent_feedback: None,
			})
			.map_err(|_| HandleRequestError::SendResponse)
	}

	/// Run [`RequestHandler`].
	///
	/// Drains the inbound queue until it is closed, answering each request.
	pub async fn run(mut self) {
		while let Some(request) = self.request_receiver.next().await {
			let IncomingRequest { peer, payload, pending_response } = request;

			match self.handle_request(payload, pending_response) {
				Ok(()) => {
					debug!(target: "sync", "Handled grandpa warp sync request from {}.", peer)
				},
				Err(e) => debug!(
					target: "sync",
					"Failed to handle grandpa warp sync request from {}: {}",
					peer, e,
				),
			}
		}
	}
}
|
||||
|
||||
/// Failures that can occur while answering a warp sync request.
#[derive(Debug, thiserror::Error)]
enum HandleRequestError {
	#[error("Failed to decode request: {0}.")]
	DecodeProto(#[from] prost::DecodeError),

	#[error("Failed to encode response: {0}.")]
	EncodeProto(#[from] prost::EncodeError),

	#[error("Failed to decode block hash: {0}.")]
	DecodeScale(#[from] codec::Error),

	#[error(transparent)]
	Client(#[from] sp_blockchain::Error),

	#[error("Invalid request {0}.")]
	InvalidRequest(#[from] Box<dyn std::error::Error + Send + Sync>),

	#[error("Failed to send response.")]
	SendResponse,
}
|
||||
Reference in New Issue
Block a user