Mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git, synced 2026-05-09 09:27:59 +00:00
Request based availability distribution (#2423)
* WIP
* availability distribution, still very wip. Work on the requesting side of things.
* Some docs on what I intend to do.
* Checkpoint of session cache implementation as I will likely replace it with something smarter.
* More work, mostly on cache and getting things to type check.
* Only derive MallocSizeOf and Debug for std.
* availability-distribution: Cache feature complete.
* Sketch out logic in `FetchTask` for actual fetching. - Compile fixes. - Cleanup.
* Format cleanup.
* More format fixes.
* Almost feature complete `fetch_task`. Missing: - Check for cancel - Actual querying of peer ids.
* Finish FetchTask so far.
* Directly use AuthorityDiscoveryId in protocol and cache.
* Resolve `AuthorityDiscoveryId` on sending requests.
* Rework fetch_task - also make it impossible to check the wrong chunk index. - Export needed function in validator_discovery.
* From<u32> implementation for `ValidatorIndex`.
* Fixes and more integration work.
* Make session cache proper lru cache.
* Use proper lru cache.
* Requester finished.
* ProtocolState -> Requester. Also make sure to not fetch our own chunk.
* Cleanup + fixes.
* Remove unused functions - FetchTask::is_finished - SessionCache::fetch_session_info
* availability-distribution responding side.
* Cleanup + Fixes.
* More fixes.
* More fixes. adder-collator is running!
* Some docs.
* Docs.
* Fix reporting of bad guys.
* Fix tests
* Make all tests compile.
* Fix test.
* Cleanup + get rid of some warnings.
* state -> requester
* Mostly doc fixes.
* Fix test suite.
* Get rid of now redundant message types.
* WIP
* Rob's review remarks.
* Fix test suite.
* core.relay_parent -> leaf for session request.
* Style fix.
* Decrease request timeout.
* Cleanup obsolete errors.
* Metrics + don't fail on non fatal errors.
* requester.rs -> requester/mod.rs
* Panic on invalid BadValidator report.
* Fix indentation.
* Use typed default timeout constant.
* Make channel size 0, as each sender gets one slot anyways.
* Fix incorrect metrics initialization.
* Fix build after merge.
* More fixes.
* Hopefully valid metrics names.
* Better metrics names.
* Some tests that already work.
* Slightly better docs.
* Some more tests.
* Fix network bridge test.
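The diff below adds the requester-side `FetchTask`. As a rough usage sketch (not part of this commit; the wrapper function and its surrounding context are assumptions for illustration), a requester would prepare one config per occupied core and then spawn the task:

// Hypothetical illustration of the API introduced in the diff below; `ctx` is any
// type implementing `SubsystemContext`, the other values come from the requester.
async fn fetch_our_chunk<Context: SubsystemContext>(
	ctx: &mut Context,
	leaf: Hash,
	core: &OccupiedCore,
	session_info: &SessionInfo,
	sender: mpsc::Sender<FromFetchTask>,
	metrics: Metrics,
) -> Result<FetchTask> {
	// Preparation is separate from starting, so `session_info` can be taken by reference.
	let config = FetchTaskConfig::new(leaf, core, sender, metrics, session_info);
	// Spawns a background task that asks the backing group for our erasure chunk.
	FetchTask::start(config, ctx).await
}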
@@ -0,0 +1,421 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use std::collections::HashSet;

use futures::channel::mpsc;
use futures::channel::oneshot;
use futures::future::select;
use futures::{FutureExt, SinkExt};

use polkadot_erasure_coding::branch_hash;
use polkadot_node_network_protocol::request_response::{
	request::{OutgoingRequest, RequestError, Requests},
	v1::{AvailabilityFetchingRequest, AvailabilityFetchingResponse},
};
use polkadot_primitives::v1::{
	AuthorityDiscoveryId, BlakeTwo256, ErasureChunk, GroupIndex, Hash, HashT, OccupiedCore,
	SessionIndex,
};
use polkadot_subsystem::messages::{
	AllMessages, AvailabilityStoreMessage, NetworkBridgeMessage,
};
use polkadot_subsystem::SubsystemContext;

use crate::{
	error::{Error, Result},
	session_cache::{BadValidators, SessionInfo},
	LOG_TARGET,
	metrics::{Metrics, SUCCEEDED, FAILED},
};

#[cfg(test)]
mod tests;

/// Configuration for a `FetchTask`.
///
/// This exists to separate preparation of a `FetchTask` from actually starting it, which is
/// beneficial as it allows for taking session info by reference.
pub struct FetchTaskConfig {
	prepared_running: Option<RunningTask>,
	live_in: HashSet<Hash>,
}

/// Information about a task fetching an erasure chunk.
pub struct FetchTask {
	/// For what relay parents this task is relevant.
	///
	/// In other words, for which relay chain parents this candidate is considered live.
	/// This is updated on every `ActiveLeavesUpdate` and enables us to know when we can safely
	/// stop keeping track of that candidate/chunk.
	live_in: HashSet<Hash>,

	/// We keep the task around until `live_in` becomes empty, to make
	/// sure we won't re-fetch an already fetched candidate.
	state: FetchedState,
}

/// State of a particular candidate chunk fetching process.
enum FetchedState {
	/// Chunk fetch has started.
	///
	/// Once the contained `Sender` is dropped, any still running task will be canceled.
	Started(oneshot::Sender<()>),
	/// All relevant `live_in` entries have been removed before we were able to get our chunk.
	Canceled,
}

/// Messages sent from `FetchTask`s to be handled/forwarded.
pub enum FromFetchTask {
	/// Message to another subsystem.
	Message(AllMessages),

	/// Concluded with result.
	///
	/// In case of `None` everything was fine, in case of `Some`, some validators in the group
	/// did not serve us our chunk as expected.
	Concluded(Option<BadValidators>),
}

/// Information a running task needs.
struct RunningTask {
	/// For what session we have been spawned.
	session_index: SessionIndex,

	/// Index of the validator group to fetch the chunk from.
	///
	/// Needed for reporting bad validators.
	group_index: GroupIndex,

	/// Validators to request the chunk from.
	///
	/// This vector gets drained during execution of the task (it will be empty afterwards).
	group: Vec<AuthorityDiscoveryId>,

	/// The request to send.
	request: AvailabilityFetchingRequest,

	/// Root hash, for verifying the chunk's validity.
	erasure_root: Hash,

	/// Relay parent of the candidate to fetch.
	relay_parent: Hash,

	/// Sender for communicating with other subsystems and reporting results.
	sender: mpsc::Sender<FromFetchTask>,

	/// Prometheus metrics for reporting results.
	metrics: Metrics,
}

impl FetchTaskConfig {
	/// Create a new configuration for a [`FetchTask`].
	///
	/// The result of this function can be passed into [`FetchTask::start`].
	pub fn new(
		leaf: Hash,
		core: &OccupiedCore,
		sender: mpsc::Sender<FromFetchTask>,
		metrics: Metrics,
		session_info: &SessionInfo,
	) -> Self {
		let live_in = vec![leaf].into_iter().collect();

		// Don't run tasks for our backing group:
		if session_info.our_group == core.group_responsible {
			return FetchTaskConfig {
				live_in,
				prepared_running: None,
			};
		}

		let prepared_running = RunningTask {
			session_index: session_info.session_index,
			group_index: core.group_responsible,
			group: session_info.validator_groups.get(core.group_responsible.0 as usize)
				.expect("The responsible group of a candidate should be available in the corresponding session. qed.")
				.clone(),
			request: AvailabilityFetchingRequest {
				candidate_hash: core.candidate_hash,
				index: session_info.our_index,
			},
			erasure_root: core.candidate_descriptor.erasure_root,
			relay_parent: core.candidate_descriptor.relay_parent,
			metrics,
			sender,
		};
		FetchTaskConfig {
			live_in,
			prepared_running: Some(prepared_running),
		}
	}
}

impl FetchTask {
	/// Start fetching a chunk.
	///
	/// A task handling the fetching of the configured chunk will be spawned.
	pub async fn start<Context>(config: FetchTaskConfig, ctx: &mut Context) -> Result<Self>
	where
		Context: SubsystemContext,
	{
		let FetchTaskConfig {
			prepared_running,
			live_in,
		} = config;

		if let Some(running) = prepared_running {
			let (handle, kill) = oneshot::channel();

			ctx.spawn("chunk-fetcher", running.run(kill).boxed())
				.await
				.map_err(|e| Error::SpawnTask(e))?;

			Ok(FetchTask {
				live_in,
				state: FetchedState::Started(handle),
			})
		} else {
			Ok(FetchTask {
				live_in,
				state: FetchedState::Canceled,
			})
		}
	}

	/// Add the given leaf to the relay parents which are making this task relevant.
	///
	/// This is for bookkeeping, so we know we are already fetching a given chunk.
	pub fn add_leaf(&mut self, leaf: Hash) {
		self.live_in.insert(leaf);
	}

	/// Remove leaves and cancel the task if the last relevant leaf was removed while the task
	/// was still fetching.
	pub fn remove_leaves(&mut self, leaves: &HashSet<Hash>) {
		self.live_in.retain(|leaf| !leaves.contains(leaf));
		if self.live_in.is_empty() && !self.is_finished() {
			self.state = FetchedState::Canceled
		}
	}

	/// Whether or not there are still relay parents around with this candidate pending
	/// availability.
	pub fn is_live(&self) -> bool {
		!self.live_in.is_empty()
	}

	/// Whether or not this task can be considered finished.
	///
	/// That is, it is either canceled, succeeded or failed.
	pub fn is_finished(&self) -> bool {
		match &self.state {
			FetchedState::Canceled => true,
			FetchedState::Started(sender) => sender.is_canceled(),
		}
	}
}

/// Things that can go wrong in task execution.
#[derive(Debug)]
enum TaskError {
	/// The peer failed to deliver a correct chunk for some reason (has been reported as
	/// appropriate).
	PeerError,
	/// This very node is seemingly shutting down (sending of message failed).
	ShuttingDown,
}

impl RunningTask {
	async fn run(self, kill: oneshot::Receiver<()>) {
		// Wait for completion or cancellation.
		let run_it = self.run_inner();
		futures::pin_mut!(run_it);
		let _ = select(run_it, kill).await;
	}

	/// Fetch and store chunk.
	///
	/// Try validators in the backing group in order.
	async fn run_inner(mut self) {
		let mut bad_validators = Vec::new();
		let mut label = FAILED;
		let mut count: u32 = 0;
		// Try validators in reverse order:
		while let Some(validator) = self.group.pop() {
			// Report retries:
			if count > 0 {
				self.metrics.on_retry();
			}
			count += 1;

			// Send request:
			let resp = match self.do_request(&validator).await {
				Ok(resp) => resp,
				Err(TaskError::ShuttingDown) => {
					tracing::info!(
						target: LOG_TARGET,
						"Node seems to be shutting down, canceling fetch task"
					);
					self.metrics.on_fetch(FAILED);
					return
				}
				Err(TaskError::PeerError) => {
					bad_validators.push(validator);
					continue
				}
			};
			let chunk = match resp {
				AvailabilityFetchingResponse::Chunk(resp) => {
					resp.recombine_into_chunk(&self.request)
				}
				AvailabilityFetchingResponse::NoSuchChunk => {
					tracing::debug!(
						target: LOG_TARGET,
						validator = ?validator,
						"Validator did not have our chunk"
					);
					bad_validators.push(validator);
					continue
				}
			};

			// Data genuine?
			if !self.validate_chunk(&validator, &chunk) {
				bad_validators.push(validator);
				continue;
			}

			// Ok, let's store it and be happy:
			self.store_chunk(chunk).await;
			label = SUCCEEDED;
			break;
		}
		self.metrics.on_fetch(label);
		self.conclude(bad_validators).await;
	}

	/// Do the request and return the response, if successful.
	async fn do_request(
		&mut self,
		validator: &AuthorityDiscoveryId,
	) -> std::result::Result<AvailabilityFetchingResponse, TaskError> {
		let (full_request, response_recv) =
			OutgoingRequest::new(validator.clone(), self.request);
		let requests = Requests::AvailabilityFetching(full_request);

		self.sender
			.send(FromFetchTask::Message(AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendRequests(vec![requests]),
			)))
			.await
			.map_err(|_| TaskError::ShuttingDown)?;

		match response_recv.await {
			Ok(resp) => Ok(resp),
			Err(RequestError::InvalidResponse(err)) => {
				tracing::warn!(
					target: LOG_TARGET,
					origin = ?validator,
					err = ?err,
					"Peer sent us invalid erasure chunk data"
				);
				Err(TaskError::PeerError)
			}
			Err(RequestError::NetworkError(err)) => {
				tracing::warn!(
					target: LOG_TARGET,
					origin = ?validator,
					err = ?err,
					"Some network error occurred when fetching erasure chunk"
				);
				Err(TaskError::PeerError)
			}
			Err(RequestError::Canceled(oneshot::Canceled)) => {
				tracing::warn!(
					target: LOG_TARGET,
					origin = ?validator,
					"Erasure chunk request got canceled"
				);
				Err(TaskError::PeerError)
			}
		}
	}

	fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool {
		let anticipated_hash =
			match branch_hash(&self.erasure_root, &chunk.proof, chunk.index.0 as usize) {
				Ok(hash) => hash,
				Err(e) => {
					tracing::warn!(
						target: LOG_TARGET,
						candidate_hash = ?self.request.candidate_hash,
						origin = ?validator,
						error = ?e,
						"Failed to calculate chunk merkle proof",
					);
					return false;
				}
			};
		let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
		if anticipated_hash != erasure_chunk_hash {
			tracing::warn!(target: LOG_TARGET, origin = ?validator, "Received chunk does not match merkle tree");
			return false;
		}
		true
	}

	/// Store the given chunk and log any error.
	async fn store_chunk(&mut self, chunk: ErasureChunk) {
		let (tx, rx) = oneshot::channel();
		let r = self
			.sender
			.send(FromFetchTask::Message(AllMessages::AvailabilityStore(
				AvailabilityStoreMessage::StoreChunk {
					candidate_hash: self.request.candidate_hash,
					relay_parent: self.relay_parent,
					chunk,
					tx,
				},
			)))
			.await;
		if let Err(err) = r {
			tracing::error!(target: LOG_TARGET, err = ?err, "Storing erasure chunk failed, system shutting down?");
		}

		if let Err(oneshot::Canceled) = rx.await {
			tracing::error!(target: LOG_TARGET, "Storing erasure chunk failed");
		}
	}

	/// Tell the subsystem we are done.
	async fn conclude(&mut self, bad_validators: Vec<AuthorityDiscoveryId>) {
		let payload = if bad_validators.is_empty() {
			None
		} else {
			Some(BadValidators {
				session_index: self.session_index,
				group_index: self.group_index,
				bad_validators,
			})
		};
		if let Err(err) = self.sender.send(FromFetchTask::Concluded(payload)).await {
			tracing::warn!(
				target: LOG_TARGET,
				err = ?err,
				"Sending concluded message for task failed"
			);
		}
	}
}
@@ -0,0 +1,315 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use std::collections::HashMap;
use std::sync::Arc;

use parity_scale_codec::Encode;

use futures::channel::{mpsc, oneshot};
use futures::{executor, Future, FutureExt, StreamExt, select};
use futures::task::{Poll, Context, noop_waker};

use polkadot_erasure_coding::{obtain_chunks_v1 as obtain_chunks, branches};
use sc_network as network;
use sp_keyring::Sr25519Keyring;

use polkadot_primitives::v1::{AvailableData, BlockData, CandidateHash, HeadData, PersistedValidationData, PoV, ValidatorIndex};
use polkadot_node_network_protocol::request_response::v1;
use polkadot_subsystem::messages::AllMessages;

use crate::metrics::Metrics;
use super::*;

#[test]
fn task_can_be_canceled() {
	let (task, _rx) = get_test_running_task();
	let (handle, kill) = oneshot::channel();
	std::mem::drop(handle);
	let running_task = task.run(kill);
	futures::pin_mut!(running_task);
	let waker = noop_waker();
	let mut ctx = Context::from_waker(&waker);
	assert!(running_task.poll(&mut ctx) == Poll::Ready(()), "Task is immediately finished");
}

/// Make sure the task won't accept a chunk that is invalid.
#[test]
fn task_does_not_accept_invalid_chunk() {
	let (mut task, rx) = get_test_running_task();
	let validators = vec![Sr25519Keyring::Alice.public().into()];
	task.group = validators;
	let test = TestRun {
		chunk_responses: {
			let mut m = HashMap::new();
			m.insert(
				Sr25519Keyring::Alice.public().into(),
				AvailabilityFetchingResponse::Chunk(
					v1::ChunkResponse {
						chunk: vec![1, 2, 3],
						proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
					}
				)
			);
			m
		},
		valid_chunks: HashSet::new(),
	};
	test.run(task, rx);
}

#[test]
fn task_stores_valid_chunk() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	task.request.index = chunk.index;

	let validators = vec![Sr25519Keyring::Alice.public().into()];
	task.group = validators;

	let test = TestRun {
		chunk_responses: {
			let mut m = HashMap::new();
			m.insert(
				Sr25519Keyring::Alice.public().into(),
				AvailabilityFetchingResponse::Chunk(
					v1::ChunkResponse {
						chunk: chunk.chunk.clone(),
						proof: chunk.proof,
					}
				)
			);
			m
		},
		valid_chunks: {
			let mut s = HashSet::new();
			s.insert(chunk.chunk);
			s
		},
	};
	test.run(task, rx);
}

#[test]
fn task_does_not_accept_wrongly_indexed_chunk() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	task.request.index = ValidatorIndex(chunk.index.0 + 1);

	let validators = vec![Sr25519Keyring::Alice.public().into()];
	task.group = validators;

	let test = TestRun {
		chunk_responses: {
			let mut m = HashMap::new();
			m.insert(
				Sr25519Keyring::Alice.public().into(),
				AvailabilityFetchingResponse::Chunk(
					v1::ChunkResponse {
						chunk: chunk.chunk.clone(),
						proof: chunk.proof,
					}
				)
			);
			m
		},
		valid_chunks: HashSet::new(),
	};
	test.run(task, rx);
}

/// Task stores the chunk if there is at least one validator with a valid chunk.
#[test]
fn task_stores_valid_chunk_if_there_is_one() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	task.request.index = chunk.index;

	let validators = [
		// Only Alice has the valid chunk - should succeed, even though she is tried last.
		Sr25519Keyring::Alice,
		Sr25519Keyring::Bob, Sr25519Keyring::Charlie,
		Sr25519Keyring::Dave, Sr25519Keyring::Eve,
	]
	.iter().map(|v| v.public().into()).collect::<Vec<_>>();
	task.group = validators;

	let test = TestRun {
		chunk_responses: {
			let mut m = HashMap::new();
			m.insert(
				Sr25519Keyring::Alice.public().into(),
				AvailabilityFetchingResponse::Chunk(
					v1::ChunkResponse {
						chunk: chunk.chunk.clone(),
						proof: chunk.proof,
					}
				)
			);
			m.insert(
				Sr25519Keyring::Bob.public().into(),
				AvailabilityFetchingResponse::NoSuchChunk
			);
			m.insert(
				Sr25519Keyring::Charlie.public().into(),
				AvailabilityFetchingResponse::Chunk(
					v1::ChunkResponse {
						chunk: vec![1, 2, 3],
						proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
					}
				)
			);

			m
		},
		valid_chunks: {
			let mut s = HashSet::new();
			s.insert(chunk.chunk);
			s
		},
	};
	test.run(task, rx);
}

struct TestRun {
	/// Response to deliver for a given validator.
	///
	/// `None` means: answer with `NetworkError`.
	chunk_responses: HashMap<AuthorityDiscoveryId, AvailabilityFetchingResponse>,
	/// Set of chunks that should be considered valid:
	valid_chunks: HashSet<Vec<u8>>,
}

impl TestRun {
	fn run(self, task: RunningTask, rx: mpsc::Receiver<FromFetchTask>) {
		sp_tracing::try_init_simple();
		let mut rx = rx.fuse();
		let task = task.run_inner().fuse();
		futures::pin_mut!(task);
		executor::block_on(async {
			let mut end_ok = false;
			loop {
				let msg = select!(
					from_task = rx.next() => {
						match from_task {
							Some(msg) => msg,
							None => break,
						}
					},
					() = task =>
						break,
				);
				match msg {
					FromFetchTask::Concluded(_) => break,
					FromFetchTask::Message(msg) =>
						end_ok = self.handle_message(msg).await,
				}
			}
			if !end_ok {
				panic!("Task ended prematurely (failed to store valid chunk)!");
			}
		});
	}

	/// Returns true if, after processing the given message, it would be ok for the stream to
	/// end.
	async fn handle_message(&self, msg: AllMessages) -> bool {
		match msg {
			AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(reqs)) => {
				let mut valid_responses = 0;
				for req in reqs {
					let req = match req {
						Requests::AvailabilityFetching(req) => req,
					};
					let response = self.chunk_responses.get(&req.peer)
						.ok_or(network::RequestFailure::Refused);

					if let Ok(AvailabilityFetchingResponse::Chunk(resp)) = &response {
						if self.valid_chunks.contains(&resp.chunk) {
							valid_responses += 1;
						}
					}
					req.pending_response.send(response.map(Encode::encode))
						.expect("Sending response should succeed");
				}
				return (valid_responses == 0) && self.valid_chunks.is_empty()
			}
			AllMessages::AvailabilityStore(
				AvailabilityStoreMessage::StoreChunk { chunk, tx, .. }
			) => {
				assert!(self.valid_chunks.contains(&chunk.chunk));
				tx.send(Ok(())).expect("Answering fetching task should work");
				return true
			}
			_ => {
				tracing::debug!(target: LOG_TARGET, "Unexpected message");
				return false
			}
		}
	}
}

/// Get a `RunningTask` filled with dummy values.
fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
	let (tx, rx) = mpsc::channel(0);

	(
		RunningTask {
			session_index: 0,
			group_index: GroupIndex(0),
			group: Vec::new(),
			request: AvailabilityFetchingRequest {
				candidate_hash: CandidateHash([43u8; 32].into()),
				index: ValidatorIndex(0),
			},
			erasure_root: Hash::repeat_byte(99),
			relay_parent: Hash::repeat_byte(71),
			sender: tx,
			metrics: Metrics::new_dummy(),
		},
		rx
	)
}

fn get_valid_chunk_data() -> (Hash, ErasureChunk) {
	let fake_validator_count = 10;
	let persisted = PersistedValidationData {
		parent_head: HeadData(vec![7, 8, 9]),
		relay_parent_number: Default::default(),
		max_pov_size: 1024,
		relay_parent_storage_root: Default::default(),
	};
	let pov_block = PoV {
		block_data: BlockData(vec![45, 46, 47]),
	};
	let available_data = AvailableData {
		validation_data: persisted, pov: Arc::new(pov_block),
	};
	let chunks = obtain_chunks(fake_validator_count, &available_data).unwrap();
	let branches = branches(chunks.as_ref());
	let root = branches.root();
	let chunk = branches.enumerate()
		.map(|(index, (proof, chunk))| ErasureChunk {
			chunk: chunk.to_vec(),
			index: ValidatorIndex(index as _),
			proof,
		})
		.next().expect("There really should be 10 chunks.");
	(root, chunk)
}