cargo +nightly fmt (#3540)

* cargo +nightly fmt

* add cargo-fmt check to ci

* update ci

* fmt

* fmt

* skip macro

* ignore bridges
Author: Shawn Tabrizi
Date: 2021-08-02 12:47:33 +02:00
Committed by: GitHub
Parent: 30e3012270
Commit: ff5d56fb76
350 changed files with 20617 additions and 21266 deletions
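The diff below was produced mechanically by rustfmt. The commit message above also mentions adding a formatting gate to CI and excluding a macro and the bridges subtree from formatting. A minimal sketch of how such a setup is typically expressed — illustrative only, since neither the CI job definition nor the repository's rustfmt.toml appears in this diff, and `heavy_macros` is a hypothetical placeholder name:

# rustfmt.toml (sketch): the nightly-only `ignore` option excludes whole paths ("ignore bridges")
ignore = ["bridges"]

// In Rust source, a single macro-heavy item can be exempted instead ("skip macro"):
#[rustfmt::skip]
mod heavy_macros;

# Local equivalent of the CI check ("add cargo-fmt check to ci"):
rustup component add rustfmt --toolchain nightly
cargo +nightly fmt --all -- --check   # exits non-zero if any file would be reformatted

Running the gate in `--check` mode means CI fails on unformatted code without modifying the tree, while contributors apply the same formatting locally with plain `cargo +nightly fmt`.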
@@ -22,7 +22,7 @@ use thiserror::Error;
 use futures::channel::oneshot;
-use polkadot_node_subsystem_util::{Fault, runtime, unwrap_non_fatal};
+use polkadot_node_subsystem_util::{runtime, unwrap_non_fatal, Fault};
 use polkadot_subsystem::SubsystemError;
 use crate::LOG_TARGET;
@@ -113,9 +113,7 @@ pub type Result<T> = std::result::Result<T, Error>;
 ///
 /// We basically always want to try and continue on error. This utility function is meant to
 /// consume top-level errors by simply logging them
-pub fn log_error(result: Result<()>, ctx: &'static str)
--> std::result::Result<(), Fatal>
-{
+pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), Fatal> {
 if let Some(error) = unwrap_non_fatal(result.map_err(|e| e.0))? {
 tracing::warn!(target: LOG_TARGET, error = ?error, ctx);
 }
@@ -19,15 +19,13 @@ use futures::{future::Either, FutureExt, StreamExt, TryFutureExt};
 use sp_keystore::SyncCryptoStorePtr;
 use polkadot_subsystem::{
-messages::AvailabilityDistributionMessage, FromOverseer, OverseerSignal, SpawnedSubsystem,
-SubsystemContext, SubsystemError,
-overseer,
+messages::AvailabilityDistributionMessage, overseer, FromOverseer, OverseerSignal,
+SpawnedSubsystem, SubsystemContext, SubsystemError,
 };
 /// Error and [`Result`] type for this subsystem.
 mod error;
-use error::Fatal;
-use error::{Result, log_error};
+use error::{log_error, Fatal, Result};
 use polkadot_node_subsystem_util::runtime::RuntimeInfo;
@@ -70,19 +68,15 @@ where
 .map_err(|e| SubsystemError::with_origin("availability-distribution", e))
 .boxed();
-SpawnedSubsystem {
-name: "availability-distribution-subsystem",
-future,
-}
+SpawnedSubsystem { name: "availability-distribution-subsystem", future }
 }
 }
 impl AvailabilityDistributionSubsystem {
 /// Create a new instance of the availability distribution.
 pub fn new(keystore: SyncCryptoStorePtr, metrics: Metrics) -> Self {
 let runtime = RuntimeInfo::new(Some(keystore));
-Self { runtime, metrics }
+Self { runtime, metrics }
 }
 /// Start processing work as passed on from the Overseer.
@@ -103,44 +97,41 @@ impl AvailabilityDistributionSubsystem {
 // Handle task messages sending:
 let message = match action {
-Either::Left(subsystem_msg) => {
-subsystem_msg.map_err(|e| Fatal::IncomingMessageChannel(e))?
-}
+Either::Left(subsystem_msg) =>
+subsystem_msg.map_err(|e| Fatal::IncomingMessageChannel(e))?,
 Either::Right(from_task) => {
 let from_task = from_task.ok_or(Fatal::RequesterExhausted)?;
 ctx.send_message(from_task).await;
-continue;
-}
+continue
+},
 };
 match message {
 FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
 log_error(
-requester.get_mut().update_fetching_heads(&mut ctx, &mut self.runtime, update).await,
-"Error in Requester::update_fetching_heads"
+requester
+.get_mut()
+.update_fetching_heads(&mut ctx, &mut self.runtime, update)
+.await,
+"Error in Requester::update_fetching_heads",
 )?;
-}
-FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}
-FromOverseer::Signal(OverseerSignal::Conclude) => {
-return Ok(());
-}
+},
+FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {},
+FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
 FromOverseer::Communication {
 msg: AvailabilityDistributionMessage::ChunkFetchingRequest(req),
-} => {
-answer_chunk_request_log(&mut ctx, req, &self.metrics).await
-}
+} => answer_chunk_request_log(&mut ctx, req, &self.metrics).await,
 FromOverseer::Communication {
 msg: AvailabilityDistributionMessage::PoVFetchingRequest(req),
-} => {
-answer_pov_request_log(&mut ctx, req, &self.metrics).await
-}
+} => answer_pov_request_log(&mut ctx, req, &self.metrics).await,
 FromOverseer::Communication {
-msg: AvailabilityDistributionMessage::FetchPoV {
-relay_parent,
-from_validator,
-candidate_hash,
-pov_hash,
-tx,
-},
+msg:
+AvailabilityDistributionMessage::FetchPoV {
+relay_parent,
+from_validator,
+candidate_hash,
+pov_hash,
+tx,
+},
 } => {
 log_error(
 pov_requester::fetch_pov(
@@ -151,10 +142,11 @@ impl AvailabilityDistributionSubsystem {
 candidate_hash,
 pov_hash,
 tx,
-).await,
-"pov_requester::fetch_pov"
+)
+.await,
+"pov_requester::fetch_pov",
 )?;
-}
+},
 }
 }
 }
@@ -14,9 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use polkadot_node_subsystem_util::metrics::prometheus::{Counter, U64, Registry, PrometheusError, CounterVec, Opts};
-use polkadot_node_subsystem_util::metrics::prometheus;
-use polkadot_node_subsystem_util::metrics;
+use polkadot_node_subsystem_util::{
+metrics,
+metrics::{
+prometheus,
+prometheus::{Counter, CounterVec, Opts, PrometheusError, Registry, U64},
+},
+};
 /// Label for success counters.
 pub const SUCCEEDED: &'static str = "succeeded";
@@ -31,7 +35,6 @@ pub const NOT_FOUND: &'static str = "not-found";
 #[derive(Clone, Default)]
 pub struct Metrics(Option<MetricsInner>);
-
 #[derive(Clone)]
 struct MetricsInner {
 /// Number of chunks fetched.
@@ -137,4 +140,3 @@ impl metrics::Metrics for Metrics {
 Ok(Metrics(Some(metrics)))
 }
 }
-
@@ -16,23 +16,26 @@
 //! PoV requester takes care of requesting PoVs from validators of a backing group.
-use futures::{FutureExt, channel::oneshot, future::BoxFuture};
+use futures::{channel::oneshot, future::BoxFuture, FutureExt};
-use polkadot_subsystem::jaeger;
-use polkadot_node_network_protocol::request_response::{OutgoingRequest, Recipient, request::{RequestError, Requests},
-v1::{PoVFetchingRequest, PoVFetchingResponse}};
-use polkadot_primitives::v1::{
-CandidateHash, Hash, ValidatorIndex,
+use polkadot_node_network_protocol::request_response::{
+request::{RequestError, Requests},
+v1::{PoVFetchingRequest, PoVFetchingResponse},
+OutgoingRequest, Recipient,
 };
 use polkadot_node_primitives::PoV;
-use polkadot_subsystem::{
-SubsystemContext,
-messages::{NetworkBridgeMessage, IfDisconnected}
-};
 use polkadot_node_subsystem_util::runtime::RuntimeInfo;
+use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex};
+use polkadot_subsystem::{
+jaeger,
+messages::{IfDisconnected, NetworkBridgeMessage},
+SubsystemContext,
+};
-use crate::error::{Fatal, NonFatal};
-use crate::LOG_TARGET;
+use crate::{
+error::{Fatal, NonFatal},
+LOG_TARGET,
+};
 /// Start background worker for taking care of fetching the requested `PoV` from the network.
 pub async fn fetch_pov<Context>(
@@ -42,32 +45,31 @@ pub async fn fetch_pov<Context>(
 from_validator: ValidatorIndex,
 candidate_hash: CandidateHash,
 pov_hash: Hash,
-tx: oneshot::Sender<PoV>
+tx: oneshot::Sender<PoV>,
 ) -> super::Result<()>
 where
 Context: SubsystemContext,
 {
 let info = &runtime.get_session_info(ctx.sender(), parent).await?.session_info;
-let authority_id = info.discovery_keys.get(from_validator.0 as usize)
+let authority_id = info
+.discovery_keys
+.get(from_validator.0 as usize)
 .ok_or(NonFatal::InvalidValidatorIndex)?
 .clone();
 let (req, pending_response) = OutgoingRequest::new(
 Recipient::Authority(authority_id),
-PoVFetchingRequest {
-candidate_hash,
-},
+PoVFetchingRequest { candidate_hash },
 );
 let full_req = Requests::PoVFetching(req);
-ctx.send_message(
-NetworkBridgeMessage::SendRequests(
-vec![full_req],
-// We are supposed to be connected to validators of our group via `PeerSet`,
-// but at session boundaries that is kind of racy, in case a connection takes
-// longer to get established, so we try to connect in any case.
-IfDisconnected::TryConnect
-)
-).await;
+ctx.send_message(NetworkBridgeMessage::SendRequests(
+vec![full_req],
+// We are supposed to be connected to validators of our group via `PeerSet`,
+// but at session boundaries that is kind of racy, in case a connection takes
+// longer to get established, so we try to connect in any case.
+IfDisconnected::TryConnect,
+))
+.await;
 let span = jaeger::Span::new(candidate_hash, "fetch-pov")
 .with_validator_index(from_validator)
@@ -85,11 +87,7 @@ async fn fetch_pov_job(
 tx: oneshot::Sender<PoV>,
 ) {
 if let Err(err) = do_fetch_pov(pov_hash, pending_response, span, tx).await {
-tracing::warn!(
-target: LOG_TARGET,
-?err,
-"fetch_pov_job"
-);
+tracing::warn!(target: LOG_TARGET, ?err, "fetch_pov_job");
 }
 }
@@ -99,15 +97,11 @@ async fn do_fetch_pov(
 pending_response: BoxFuture<'static, Result<PoVFetchingResponse, RequestError>>,
 _span: jaeger::Span,
 tx: oneshot::Sender<PoV>,
-)
--> std::result::Result<(), NonFatal>
-{
+) -> std::result::Result<(), NonFatal> {
 let response = pending_response.await.map_err(NonFatal::FetchPoV)?;
 let pov = match response {
 PoVFetchingResponse::PoV(pov) => pov,
-PoVFetchingResponse::NoSuchPoV => {
-return Err(NonFatal::NoSuchPoV)
-}
+PoVFetchingResponse::NoSuchPoV => return Err(NonFatal::NoSuchPoV),
 };
 if pov.hash() == pov_hash {
 tx.send(pov).map_err(|_| NonFatal::SendResponse)
@@ -124,38 +118,37 @@ mod tests {
 use parity_scale_codec::Encode;
 use sp_core::testing::TaskExecutor;
-use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex};
 use polkadot_node_primitives::BlockData;
+use polkadot_primitives::v1::{CandidateHash, Hash, ValidatorIndex};
+use polkadot_subsystem::messages::{
+AllMessages, AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest,
+};
 use polkadot_subsystem_testhelpers as test_helpers;
-use polkadot_subsystem::messages::{AvailabilityDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, AllMessages};
 use test_helpers::mock::make_ferdie_keystore;
 use super::*;
-use crate::LOG_TARGET;
-use crate::tests::mock::{make_session_info};
+use crate::{tests::mock::make_session_info, LOG_TARGET};
 #[test]
 fn rejects_invalid_pov() {
 sp_tracing::try_init_simple();
-let pov = PoV {
-block_data: BlockData(vec![1,2,3,4,5,6]),
-};
+let pov = PoV { block_data: BlockData(vec![1, 2, 3, 4, 5, 6]) };
 test_run(Hash::default(), pov);
 }
 #[test]
 fn accepts_valid_pov() {
 sp_tracing::try_init_simple();
-let pov = PoV {
-block_data: BlockData(vec![1,2,3,4,5,6]),
-};
+let pov = PoV { block_data: BlockData(vec![1, 2, 3, 4, 5, 6]) };
 test_run(pov.hash(), pov);
 }
 fn test_run(pov_hash: Hash, pov: PoV) {
 let pool = TaskExecutor::new();
-let (mut context, mut virtual_overseer) =
-test_helpers::make_subsystem_context::<AvailabilityDistributionMessage, TaskExecutor>(pool.clone());
+let (mut context, mut virtual_overseer) = test_helpers::make_subsystem_context::<
+AvailabilityDistributionMessage,
+TaskExecutor,
+>(pool.clone());
 let keystore = make_ferdie_keystore();
 let mut runtime = polkadot_node_subsystem_util::runtime::RuntimeInfo::new(Some(keystore));
@@ -169,34 +162,33 @@ mod tests {
 CandidateHash::default(),
 pov_hash,
 tx,
-).await.expect("Should succeed");
+)
+.await
+.expect("Should succeed");
 };
 let tester = async move {
 loop {
 match virtual_overseer.recv().await {
-AllMessages::RuntimeApi(
-RuntimeApiMessage::Request(
-_,
-RuntimeApiRequest::SessionIndexForChild(tx)
-)
-) => {
+AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+_,
+RuntimeApiRequest::SessionIndexForChild(tx),
+)) => {
 tx.send(Ok(0)).unwrap();
-}
-AllMessages::RuntimeApi(
-RuntimeApiMessage::Request(
-_,
-RuntimeApiRequest::SessionInfo(_, tx)
-)
-) => {
+},
+AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+_,
+RuntimeApiRequest::SessionInfo(_, tx),
+)) => {
 tx.send(Ok(Some(make_session_info()))).unwrap();
-}
+},
 AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(mut reqs, _)) => {
 let req = assert_matches!(
 reqs.pop(),
 Some(Requests::PoVFetching(outgoing)) => {outgoing}
 );
-req.pending_response.send(Ok(PoVFetchingResponse::PoV(pov.clone()).encode()))
+req.pending_response
+.send(Ok(PoVFetchingResponse::PoV(pov.clone()).encode()))
 .unwrap();
 break
 },
@@ -16,28 +16,33 @@
 use std::collections::HashSet;
-use futures::channel::mpsc;
-use futures::channel::oneshot;
-use futures::future::select;
-use futures::{FutureExt, SinkExt};
+use futures::{
+channel::{mpsc, oneshot},
+future::select,
+FutureExt, SinkExt,
+};
 use polkadot_erasure_coding::branch_hash;
 use polkadot_node_network_protocol::request_response::{
-request::{OutgoingRequest, RequestError, Requests, Recipient},
+request::{OutgoingRequest, Recipient, RequestError, Requests},
 v1::{ChunkFetchingRequest, ChunkFetchingResponse},
 };
-use polkadot_primitives::v1::{AuthorityDiscoveryId, BlakeTwo256, CandidateHash, GroupIndex, Hash, HashT, OccupiedCore, SessionIndex};
 use polkadot_node_primitives::ErasureChunk;
-use polkadot_subsystem::messages::{
-AllMessages, AvailabilityStoreMessage, NetworkBridgeMessage, IfDisconnected,
+use polkadot_primitives::v1::{
+AuthorityDiscoveryId, BlakeTwo256, CandidateHash, GroupIndex, Hash, HashT, OccupiedCore,
+SessionIndex,
 };
+use polkadot_subsystem::{
+jaeger,
+messages::{AllMessages, AvailabilityStoreMessage, IfDisconnected, NetworkBridgeMessage},
+SubsystemContext,
+};
-use polkadot_subsystem::{SubsystemContext, jaeger};
 use crate::{
 error::{Fatal, Result},
+metrics::{Metrics, FAILED, SUCCEEDED},
 requester::session_cache::{BadValidators, SessionInfo},
 LOG_TARGET,
-metrics::{Metrics, SUCCEEDED, FAILED},
 };
 #[cfg(test)]
@@ -140,10 +145,7 @@ impl FetchTaskConfig {
 // Don't run tasks for our backing group:
 if session_info.our_group == Some(core.group_responsible) {
-return FetchTaskConfig {
-live_in,
-prepared_running: None,
-};
+return FetchTaskConfig { live_in, prepared_running: None }
 }
 let span = jaeger::Span::new(core.candidate_hash, "availability-distribution")
@@ -165,10 +167,7 @@ impl FetchTaskConfig {
 sender,
 span,
 };
-FetchTaskConfig {
-live_in,
-prepared_running: Some(prepared_running),
-}
+FetchTaskConfig { live_in, prepared_running: Some(prepared_running) }
 }
 }
@@ -180,10 +179,7 @@ impl FetchTask {
 where
 Context: SubsystemContext,
 {
-let FetchTaskConfig {
-prepared_running,
-live_in,
-} = config;
+let FetchTaskConfig { prepared_running, live_in } = config;
 if let Some(running) = prepared_running {
 let (handle, kill) = oneshot::channel();
@@ -191,15 +187,9 @@ impl FetchTask {
 ctx.spawn("chunk-fetcher", running.run(kill).boxed())
 .map_err(|e| Fatal::SpawnTask(e))?;
-Ok(FetchTask {
-live_in,
-state: FetchedState::Started(handle),
-})
+Ok(FetchTask { live_in, state: FetchedState::Started(handle) })
 } else {
-Ok(FetchTask {
-live_in,
-state: FetchedState::Canceled,
-})
+Ok(FetchTask { live_in, state: FetchedState::Canceled })
 }
 }
@@ -261,7 +251,9 @@ impl RunningTask {
 let mut bad_validators = Vec::new();
 let mut succeeded = false;
 let mut count: u32 = 0;
-let mut _span = self.span.child("fetch-task")
+let mut _span = self
+.span
+.child("fetch-task")
 .with_chunk_index(self.request.index.0)
 .with_relay_parent(self.relay_parent);
 // Try validators in reverse order:
@@ -271,7 +263,7 @@
 if count > 0 {
 self.metrics.on_retry();
 }
-count +=1;
+count += 1;
 // Send request:
 let resp = match self.do_request(&validator).await {
@@ -283,16 +275,14 @@
 );
 self.metrics.on_fetch(FAILED);
 return
-}
+},
 Err(TaskError::PeerError) => {
 bad_validators.push(validator);
 continue
-}
+},
 };
 let chunk = match resp {
-ChunkFetchingResponse::Chunk(resp) => {
-resp.recombine_into_chunk(&self.request)
-}
+ChunkFetchingResponse::Chunk(resp) => resp.recombine_into_chunk(&self.request),
 ChunkFetchingResponse::NoSuchChunk => {
 tracing::debug!(
 target: LOG_TARGET,
@@ -301,20 +291,20 @@
 );
 bad_validators.push(validator);
 continue
-}
+},
 };
 // Data genuine?
 if !self.validate_chunk(&validator, &chunk) {
 bad_validators.push(validator);
-continue;
+continue
 }
 // Ok, let's store it and be happy:
 self.store_chunk(chunk).await;
 succeeded = true;
 _span.add_string_tag("success", "true");
-break;
+break
 }
 _span.add_int_tag("tries", count as _);
 if succeeded {
@@ -337,7 +327,7 @@
 self.sender
 .send(FromFetchTask::Message(AllMessages::NetworkBridge(
-NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::TryConnect)
+NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::TryConnect),
 )))
 .await
 .map_err(|_| TaskError::ShuttingDown)?;
@@ -352,7 +342,7 @@
 "Peer sent us invalid erasure chunk data"
 );
 Err(TaskError::PeerError)
-}
+},
 Err(RequestError::NetworkError(err)) => {
 tracing::warn!(
 target: LOG_TARGET,
@@ -361,13 +351,13 @@
 "Some network error occurred when fetching erasure chunk"
 );
 Err(TaskError::PeerError)
-}
+},
 Err(RequestError::Canceled(oneshot::Canceled)) => {
 tracing::warn!(target: LOG_TARGET,
 origin= ?validator,
 "Erasure chunk request got canceled");
 Err(TaskError::PeerError)
-}
+},
 }
 }
@@ -383,13 +373,13 @@
 error = ?e,
 "Failed to calculate chunk merkle proof",
 );
-return false;
-}
+return false
+},
 };
 let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
 if anticipated_hash != erasure_chunk_hash {
 tracing::warn!(target: LOG_TARGET, origin = ?validator, "Received chunk does not match merkle tree");
-return false;
+return false
 }
 true
 }
@@ -437,12 +427,9 @@
 }
 async fn conclude_fail(&mut self) {
-if let Err(err) = self.sender.send(FromFetchTask::Failed(self.request.candidate_hash)).await {
-tracing::warn!(
-target: LOG_TARGET,
-?err,
-"Sending `Failed` message for task failed"
-);
+if let Err(err) = self.sender.send(FromFetchTask::Failed(self.request.candidate_hash)).await
+{
+tracing::warn!(target: LOG_TARGET, ?err, "Sending `Failed` message for task failed");
 }
 }
 }
@@ -16,24 +16,24 @@
 use std::collections::HashMap;
 use parity_scale_codec::Encode;
-use futures::channel::{mpsc, oneshot};
-use futures::{executor, Future, FutureExt, StreamExt, select};
-use futures::task::{Poll, Context, noop_waker};
+use futures::{
+channel::{mpsc, oneshot},
+executor, select,
+task::{noop_waker, Context, Poll},
+Future, FutureExt, StreamExt,
+};
 use sc_network as network;
 use sp_keyring::Sr25519Keyring;
-use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
+use polkadot_node_network_protocol::request_response::{v1, Recipient};
 use polkadot_node_primitives::{BlockData, PoV};
-use polkadot_node_network_protocol::request_response::v1;
-use polkadot_node_network_protocol::request_response::Recipient;
+use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
-use crate::metrics::Metrics;
-use crate::tests::mock::get_valid_chunk_data;
 use super::*;
+use crate::{metrics::Metrics, tests::mock::get_valid_chunk_data};
 #[test]
 fn task_can_be_canceled() {
@@ -54,16 +54,14 @@ fn task_does_not_accept_invalid_chunk() {
 let validators = vec![Sr25519Keyring::Alice.public().into()];
 task.group = validators;
 let test = TestRun {
-chunk_responses: {
+chunk_responses: {
 let mut m = HashMap::new();
 m.insert(
 Recipient::Authority(Sr25519Keyring::Alice.public().into()),
-ChunkFetchingResponse::Chunk(
-v1::ChunkResponse {
-chunk: vec![1,2,3],
-proof: vec![vec![9,8,2], vec![2,3,4]],
-}
-)
+ChunkFetchingResponse::Chunk(v1::ChunkResponse {
+chunk: vec![1, 2, 3],
+proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+}),
 );
 m
 },
@@ -75,9 +73,7 @@
 #[test]
 fn task_stores_valid_chunk() {
 let (mut task, rx) = get_test_running_task();
-let pov = PoV {
-block_data: BlockData(vec![45, 46, 47]),
-};
+let pov = PoV { block_data: BlockData(vec![45, 46, 47]) };
 let (root_hash, chunk) = get_valid_chunk_data(pov);
 task.erasure_root = root_hash;
 task.request.index = chunk.index;
@@ -86,16 +82,14 @@
 task.group = validators;
 let test = TestRun {
-chunk_responses: {
+chunk_responses: {
 let mut m = HashMap::new();
 m.insert(
 Recipient::Authority(Sr25519Keyring::Alice.public().into()),
-ChunkFetchingResponse::Chunk(
-v1::ChunkResponse {
-chunk: chunk.chunk.clone(),
-proof: chunk.proof,
-}
-)
+ChunkFetchingResponse::Chunk(v1::ChunkResponse {
+chunk: chunk.chunk.clone(),
+proof: chunk.proof,
+}),
 );
 m
 },
@@ -111,27 +105,23 @@
 #[test]
 fn task_does_not_accept_wrongly_indexed_chunk() {
 let (mut task, rx) = get_test_running_task();
-let pov = PoV {
-block_data: BlockData(vec![45, 46, 47]),
-};
+let pov = PoV { block_data: BlockData(vec![45, 46, 47]) };
 let (root_hash, chunk) = get_valid_chunk_data(pov);
 task.erasure_root = root_hash;
-task.request.index = ValidatorIndex(chunk.index.0+1);
+task.request.index = ValidatorIndex(chunk.index.0 + 1);
 let validators = vec![Sr25519Keyring::Alice.public().into()];
 task.group = validators;
 let test = TestRun {
-chunk_responses: {
+chunk_responses: {
 let mut m = HashMap::new();
 m.insert(
 Recipient::Authority(Sr25519Keyring::Alice.public().into()),
-ChunkFetchingResponse::Chunk(
-v1::ChunkResponse {
-chunk: chunk.chunk.clone(),
-proof: chunk.proof,
-}
-)
+ChunkFetchingResponse::Chunk(v1::ChunkResponse {
+chunk: chunk.chunk.clone(),
+proof: chunk.proof,
+}),
 );
 m
 },
@@ -144,46 +134,44 @@
 #[test]
 fn task_stores_valid_chunk_if_there_is_one() {
 let (mut task, rx) = get_test_running_task();
-let pov = PoV {
-block_data: BlockData(vec![45, 46, 47]),
-};
+let pov = PoV { block_data: BlockData(vec![45, 46, 47]) };
 let (root_hash, chunk) = get_valid_chunk_data(pov);
 task.erasure_root = root_hash;
 task.request.index = chunk.index;
 let validators = [
-// Only Alice has valid chunk - should succeed, even though she is tried last.
-Sr25519Keyring::Alice,
-Sr25519Keyring::Bob, Sr25519Keyring::Charlie,
-Sr25519Keyring::Dave, Sr25519Keyring::Eve,
-]
-.iter().map(|v| v.public().into()).collect::<Vec<_>>();
+// Only Alice has valid chunk - should succeed, even though she is tried last.
+Sr25519Keyring::Alice,
+Sr25519Keyring::Bob,
+Sr25519Keyring::Charlie,
+Sr25519Keyring::Dave,
+Sr25519Keyring::Eve,
+]
+.iter()
+.map(|v| v.public().into())
+.collect::<Vec<_>>();
 task.group = validators;
 let test = TestRun {
-chunk_responses: {
+chunk_responses: {
 let mut m = HashMap::new();
 m.insert(
 Recipient::Authority(Sr25519Keyring::Alice.public().into()),
-ChunkFetchingResponse::Chunk(
-v1::ChunkResponse {
-chunk: chunk.chunk.clone(),
-proof: chunk.proof,
-}
-)
+ChunkFetchingResponse::Chunk(v1::ChunkResponse {
+chunk: chunk.chunk.clone(),
+proof: chunk.proof,
+}),
 );
 m.insert(
 Recipient::Authority(Sr25519Keyring::Bob.public().into()),
-ChunkFetchingResponse::NoSuchChunk
+ChunkFetchingResponse::NoSuchChunk,
 );
 m.insert(
 Recipient::Authority(Sr25519Keyring::Charlie.public().into()),
-ChunkFetchingResponse::Chunk(
-v1::ChunkResponse {
-chunk: vec![1,2,3],
-proof: vec![vec![9,8,2], vec![2,3,4]],
-}
-)
+ChunkFetchingResponse::Chunk(v1::ChunkResponse {
+chunk: vec![1, 2, 3],
+proof: vec![vec![9, 8, 2], vec![2, 3, 4]],
+}),
 );
 m
@@ -205,7 +193,6 @@ struct TestRun {
 valid_chunks: HashSet<Vec<u8>>,
 }
-
 impl TestRun {
 fn run(self, task: RunningTask, rx: mpsc::Receiver<FromFetchTask>) {
 sp_tracing::try_init_simple();
@@ -228,8 +215,7 @@ impl TestRun {
 match msg {
 FromFetchTask::Concluded(_) => break,
 FromFetchTask::Failed(_) => break,
-FromFetchTask::Message(msg) =>
-end_ok = self.handle_message(msg).await,
+FromFetchTask::Message(msg) => end_ok = self.handle_message(msg).await,
 }
 }
 if !end_ok {
@@ -242,44 +228,50 @@
 /// end.
 async fn handle_message(&self, msg: AllMessages) -> bool {
 match msg {
-AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(reqs, IfDisconnected::TryConnect)) => {
+AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
+reqs,
+IfDisconnected::TryConnect,
+)) => {
 let mut valid_responses = 0;
 for req in reqs {
 let req = match req {
 Requests::ChunkFetching(req) => req,
 _ => panic!("Unexpected request"),
 };
-let response = self.chunk_responses.get(&req.peer)
-.ok_or(network::RequestFailure::Refused);
+let response =
+self.chunk_responses.get(&req.peer).ok_or(network::RequestFailure::Refused);
 if let Ok(ChunkFetchingResponse::Chunk(resp)) = &response {
 if self.valid_chunks.contains(&resp.chunk) {
 valid_responses += 1;
 }
 }
-req.pending_response.send(response.map(Encode::encode))
+req.pending_response
+.send(response.map(Encode::encode))
 .expect("Sending response should succeed");
 }
 return (valid_responses == 0) && self.valid_chunks.is_empty()
-}
-AllMessages::AvailabilityStore(
-AvailabilityStoreMessage::StoreChunk { chunk, tx, .. }
-) => {
+},
+AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreChunk {
+chunk,
+tx,
+..
+}) => {
 assert!(self.valid_chunks.contains(&chunk.chunk));
 tx.send(Ok(())).expect("Answering fetching task should work");
 return true
-}
+},
 _ => {
 tracing::debug!(target: LOG_TARGET, "Unexpected message");
 return false
-}
+},
 }
 }
 }
 /// Get a `RunningTask` filled with dummy values.
 fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
-let (tx,rx) = mpsc::channel(0);
+let (tx, rx) = mpsc::channel(0);
 (
 RunningTask {
@@ -287,7 +279,7 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
 group_index: GroupIndex(0),
 group: Vec::new(),
 request: ChunkFetchingRequest {
-candidate_hash: CandidateHash([43u8;32].into()),
+candidate_hash: CandidateHash([43u8; 32].into()),
 index: ValidatorIndex(0),
 },
 erasure_root: Hash::repeat_byte(99),
@@ -296,6 +288,6 @@ fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
 metrics: Metrics::new_dummy(),
 span: jaeger::Span::Disabled,
 },
-rx
+rx,
 )
 }
@@ -17,12 +17,14 @@
 //! Requester takes care of requesting erasure chunks for candidates that are pending
 //! availability.
-use std::collections::{
-hash_map::{Entry, HashMap},
-hash_set::HashSet,
+use std::{
+collections::{
+hash_map::{Entry, HashMap},
+hash_set::HashSet,
+},
+iter::IntoIterator,
+pin::Pin,
 };
-use std::iter::IntoIterator;
-use std::pin::Pin;
 use futures::{
 channel::mpsc,
@@ -30,20 +32,18 @@
 Stream,
 };
-use polkadot_node_subsystem_util::runtime::{RuntimeInfo, get_occupied_cores};
+use polkadot_node_subsystem_util::runtime::{get_occupied_cores, RuntimeInfo};
 use polkadot_primitives::v1::{CandidateHash, Hash, OccupiedCore};
 use polkadot_subsystem::{
-messages::AllMessages,
-ActiveLeavesUpdate, SubsystemContext, ActivatedLeaf,
+messages::AllMessages, ActivatedLeaf, ActiveLeavesUpdate, SubsystemContext,
 };
-use super::{LOG_TARGET, Metrics};
+use super::{Metrics, LOG_TARGET};
 /// Cache for session information.
 mod session_cache;
 use session_cache::SessionCache;
 /// A task fetching a particular chunk.
 mod fetch_task;
 use fetch_task::{FetchTask, FetchTaskConfig, FromFetchTask};
@@ -81,13 +81,7 @@ impl Requester {
 /// by advancing the stream.
 pub fn new(metrics: Metrics) -> Self {
 let (tx, rx) = mpsc::channel(1);
-Requester {
-fetches: HashMap::new(),
-session_cache: SessionCache::new(),
-tx,
-rx,
-metrics,
-}
+Requester { fetches: HashMap::new(), session_cache: SessionCache::new(), tx, rx, metrics }
 }
 /// Update heads that need availability distribution.
 ///
@@ -101,15 +95,8 @@ impl Requester {
 where
 Context: SubsystemContext,
 {
-tracing::trace!(
-target: LOG_TARGET,
-?update,
-"Update fetching heads"
-);
-let ActiveLeavesUpdate {
-activated,
-deactivated,
-} = update;
+tracing::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
+let ActiveLeavesUpdate { activated, deactivated } = update;
 // Order important! We need to handle activated, prior to deactivated, otherwise we might
 // cancel still needed jobs.
 self.start_requesting_chunks(ctx, runtime, activated.into_iter()).await?;
@@ -194,7 +181,7 @@ impl Requester {
 e.insert(FetchTask::start(task_cfg, ctx).await?);
 }
 // Not a validator, nothing to do.
-}
+},
 }
 }
 Ok(())
@@ -204,28 +191,21 @@ impl Requester {
 impl Stream for Requester {
 type Item = AllMessages;
-fn poll_next(
-mut self: Pin<&mut Self>,
-ctx: &mut Context,
-) -> Poll<Option<AllMessages>> {
+fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Option<AllMessages>> {
 loop {
 match Pin::new(&mut self.rx).poll_next(ctx) {
-Poll::Ready(Some(FromFetchTask::Message(m))) =>
-return Poll::Ready(Some(m)),
+Poll::Ready(Some(FromFetchTask::Message(m))) => return Poll::Ready(Some(m)),
 Poll::Ready(Some(FromFetchTask::Concluded(Some(bad_boys)))) => {
 self.session_cache.report_bad_log(bad_boys);
 continue
-}
-Poll::Ready(Some(FromFetchTask::Concluded(None))) =>
-continue,
+},
+Poll::Ready(Some(FromFetchTask::Concluded(None))) => continue,
 Poll::Ready(Some(FromFetchTask::Failed(candidate_hash))) => {
 // Make sure we retry on next block still pending availability.
 self.fetches.remove(&candidate_hash);
-}
-Poll::Ready(None) =>
-return Poll::Ready(None),
-Poll::Pending =>
-return Poll::Pending,
+},
+Poll::Ready(None) => return Poll::Ready(None),
+Poll::Pending => return Poll::Pending,
 }
 }
 }
@@ -34,7 +34,6 @@ use crate::{
 ///
 /// It should be ensured that a cached session stays live in the cache as long as we might need it.
 pub struct SessionCache {
-
 /// Look up cached sessions by `SessionIndex`.
 ///
 /// Note: Performance of fetching is really secondary here, but we need to ensure we are going
@@ -110,12 +109,11 @@
 if let Some(o_info) = self.session_info_cache.get(&session_index) {
 tracing::trace!(target: LOG_TARGET, session_index, "Got session from lru");
-return Ok(Some(with_info(o_info)));
+return Ok(Some(with_info(o_info)))
 }
-if let Some(info) = self
-.query_info_from_runtime(ctx, runtime, parent, session_index)
-.await?
+if let Some(info) =
+self.query_info_from_runtime(ctx, runtime, parent, session_index).await?
 {
 tracing::trace!(target: LOG_TARGET, session_index, "Calling `with_info`");
 let r = with_info(&info);
@@ -132,7 +130,7 @@
 /// Not being able to report bad validators is not fatal, so we should not shutdown the
 /// subsystem on this.
 pub fn report_bad_log(&mut self, report: BadValidators) {
-if let Err(err) = self.report_bad(report) {
+if let Err(err) = self.report_bad(report) {
 tracing::warn!(
 target: LOG_TARGET,
 err = ?err,
@@ -150,10 +148,9 @@ impl SessionCache {
 .session_info_cache
 .get_mut(&report.session_index)
 .ok_or(NonFatal::NoSuchCachedSession)?;
-let group = session
-.validator_groups
-.get_mut(report.group_index.0 as usize)
-.expect("A bad validator report must contain a valid group for the reported session. qed.");
+let group = session.validator_groups.get_mut(report.group_index.0 as usize).expect(
+"A bad validator report must contain a valid group for the reported session. qed.",
+);
 let bad_set = report.bad_validators.iter().collect::<HashSet<_>>();
 // Get rid of bad boys:
@@ -212,12 +209,7 @@ impl SessionCache {
 })
 .collect();
-let info = SessionInfo {
-validator_groups,
-our_index,
-session_index,
-our_group,
-};
+let info = SessionInfo { validator_groups, our_index, session_index, our_group };
 return Ok(Some(info))
 }
 return Ok(None)
@@ -21,15 +21,15 @@ use std::sync::Arc;
 use futures::channel::oneshot;
 use polkadot_node_network_protocol::request_response::{request::IncomingRequest, v1};
-use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
 use polkadot_node_primitives::{AvailableData, ErasureChunk};
-use polkadot_subsystem::{
-messages::AvailabilityStoreMessage,
-SubsystemContext, jaeger,
-};
+use polkadot_primitives::v1::{CandidateHash, ValidatorIndex};
+use polkadot_subsystem::{jaeger, messages::AvailabilityStoreMessage, SubsystemContext};
-use crate::error::{NonFatal, Result};
-use crate::{LOG_TARGET, metrics::{Metrics, SUCCEEDED, FAILED, NOT_FOUND}};
+use crate::{
+error::{NonFatal, Result},
+metrics::{Metrics, FAILED, NOT_FOUND, SUCCEEDED},
+LOG_TARGET,
+};
 /// Variant of `answer_pov_request` that does Prometheus metric and logging on errors.
 ///
@@ -38,14 +38,12 @@ pub async fn answer_pov_request_log<Context>(
 ctx: &mut Context,
 req: IncomingRequest<v1::PoVFetchingRequest>,
 metrics: &Metrics,
-)
-where
+) where
 Context: SubsystemContext,
 {
 let res = answer_pov_request(ctx, req).await;
 match res {
-Ok(result) =>
-metrics.on_served_pov(if result {SUCCEEDED} else {NOT_FOUND}),
+Ok(result) => metrics.on_served_pov(if result { SUCCEEDED } else { NOT_FOUND }),
 Err(err) => {
 tracing::warn!(
 target: LOG_TARGET,
@@ -53,7 +51,7 @@ where
 "Serving PoV failed with error"
 );
 metrics.on_served_pov(FAILED);
-}
+},
 }
 }
@@ -70,8 +68,7 @@ where
 {
 let res = answer_chunk_request(ctx, req).await;
 match res {
-Ok(result) =>
-metrics.on_served_chunk(if result {SUCCEEDED} else {NOT_FOUND}),
+Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }),
 Err(err) => {
 tracing::warn!(
 target: LOG_TARGET,
@@ -79,7 +76,7 @@ where
 "Serving chunk failed with error"
 );
 metrics.on_served_chunk(FAILED);
-}
+},
 }
 }
@@ -104,7 +101,7 @@ where
 Some(av_data) => {
 let pov = Arc::try_unwrap(av_data.pov).unwrap_or_else(|a| (&*a).clone());
 v1::PoVFetchingResponse::PoV(pov)
-}
+},
 };
 req.send_response(response).map_err(|_| NonFatal::SendResponse)?;
@@ -123,8 +120,7 @@ where
 {
 let span = jaeger::Span::new(req.payload.candidate_hash, "answer-chunk-request");
-let _child_span = span.child("answer-chunk-request")
-.with_chunk_index(req.payload.index.0);
+let _child_span = span.child("answer-chunk-request").with_chunk_index(req.payload.index.0);
 let chunk = query_chunk(ctx, req.payload.candidate_hash, req.payload.index).await?;
@@ -158,10 +154,8 @@ where
 Context: SubsystemContext,
 {
 let (tx, rx) = oneshot::channel();
-ctx.send_message(
-AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
-)
-.await;
+ctx.send_message(AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx))
+.await;
 let result = rx.await.map_err(|e| {
 tracing::trace!(
@@ -185,10 +179,8 @@ where
 Context: SubsystemContext,
 {
 let (tx, rx) = oneshot::channel();
-ctx.send_message(
-AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx),
-)
-.await;
+ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx))
+.await;
 let result = rx.await.map_err(|e| NonFatal::QueryAvailableDataResponseChannel(e))?;
 Ok(result)
@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-
 //! Helper functions and tools to generate mock data useful for testing this subsystem.
 use std::sync::Arc;
@@ -22,43 +21,44 @@
 use sp_keyring::Sr25519Keyring;
 use polkadot_erasure_coding::{branches, obtain_chunks_v1 as obtain_chunks};
+use polkadot_node_primitives::{AvailableData, BlockData, ErasureChunk, PoV};
 use polkadot_primitives::v1::{
-CandidateCommitments, CandidateDescriptor, CandidateHash,
-CommittedCandidateReceipt, GroupIndex, Hash, HeadData, Id as ParaId,
-OccupiedCore, PersistedValidationData, SessionInfo, ValidatorIndex
+CandidateCommitments, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt,
+GroupIndex, Hash, HeadData, Id as ParaId, OccupiedCore, PersistedValidationData, SessionInfo,
+ValidatorIndex,
 };
-use polkadot_node_primitives::{PoV, ErasureChunk, AvailableData, BlockData};
 /// Create dummy session info with two validator groups.
 pub fn make_session_info() -> SessionInfo {
-let validators = vec![
-Sr25519Keyring::Ferdie, // <- this node, role: validator
-Sr25519Keyring::Alice,
-Sr25519Keyring::Bob,
-Sr25519Keyring::Charlie,
-Sr25519Keyring::Dave,
-Sr25519Keyring::Eve,
-Sr25519Keyring::One,
-];
+let validators = vec![
+Sr25519Keyring::Ferdie, // <- this node, role: validator
+Sr25519Keyring::Alice,
+Sr25519Keyring::Bob,
+Sr25519Keyring::Charlie,
+Sr25519Keyring::Dave,
+Sr25519Keyring::Eve,
+Sr25519Keyring::One,
+];
-let validator_groups: Vec<Vec<ValidatorIndex>> = [vec![5, 0, 3], vec![1, 6, 2, 4]]
-.iter().map(|g| g.into_iter().map(|v| ValidatorIndex(*v)).collect()).collect();
+let validator_groups: Vec<Vec<ValidatorIndex>> = [vec![5, 0, 3], vec![1, 6, 2, 4]]
+.iter()
+.map(|g| g.into_iter().map(|v| ValidatorIndex(*v)).collect())
+.collect();
-SessionInfo {
-discovery_keys: validators.iter().map(|k| k.public().into()).collect(),
-// Not used:
-n_cores: validator_groups.len() as u32,
-validator_groups,
-// Not used values:
-validators: validators.iter().map(|k| k.public().into()).collect(),
-assignment_keys: Vec::new(),
-zeroth_delay_tranche_width: 0,
-relay_vrf_modulo_samples: 0,
-n_delay_tranches: 0,
-no_show_slots: 0,
-needed_approvals: 0,
-}
+SessionInfo {
+discovery_keys: validators.iter().map(|k| k.public().into()).collect(),
+// Not used:
+n_cores: validator_groups.len() as u32,
+validator_groups,
+// Not used values:
+validators: validators.iter().map(|k| k.public().into()).collect(),
+assignment_keys: Vec::new(),
+zeroth_delay_tranche_width: 0,
+relay_vrf_modulo_samples: 0,
+n_delay_tranches: 0,
+no_show_slots: 0,
+needed_approvals: 0,
+}
 }
 /// Builder for constructing occupied cores.
@@ -72,9 +72,7 @@ pub struct OccupiedCoreBuilder {
 impl OccupiedCoreBuilder {
 pub fn build(self) -> (OccupiedCore, (CandidateHash, ErasureChunk)) {
-let pov = PoV {
-block_data: BlockData(vec![45, 46, 47]),
-};
+let pov = PoV { block_data: BlockData(vec![45, 46, 47]) };
 let pov_hash = pov.hash();
 let (erasure_root, chunk) = get_valid_chunk_data(pov.clone());
 let candidate_receipt = TestCandidateBuilder {
@@ -83,7 +81,8 @@ impl OccupiedCoreBuilder {
 relay_parent: self.relay_parent,
 erasure_root,
 ..Default::default()
-}.build();
+}
+.build();
 let core = OccupiedCore {
 next_up_on_available: None,
 occupied_since: 0,
@@ -117,10 +116,7 @@ impl TestCandidateBuilder {
 erasure_root: self.erasure_root,
 ..Default::default()
 },
-commitments: CandidateCommitments {
-head_data: self.head_data,
-..Default::default()
-},
+commitments: CandidateCommitments { head_data: self.head_data, ..Default::default() },
 }
 }
 }
@@ -134,18 +130,18 @@ pub fn get_valid_chunk_data(pov: PoV) -> (Hash, ErasureChunk) {
 max_pov_size: 1024,
 relay_parent_storage_root: Default::default(),
 };
-let available_data = AvailableData {
-validation_data: persisted, pov: Arc::new(pov),
-};
+let available_data = AvailableData { validation_data: persisted, pov: Arc::new(pov) };
 let chunks = obtain_chunks(fake_validator_count, &available_data).unwrap();
 let branches = branches(chunks.as_ref());
 let root = branches.root();
-let chunk = branches.enumerate()
-.map(|(index, (proof, chunk))| ErasureChunk {
-chunk: chunk.to_vec(),
-index: ValidatorIndex(index as _),
-proof,
-})
-.next().expect("There really should be 10 chunks.");
+let chunk = branches
+.enumerate()
+.map(|(index, (proof, chunk))| ErasureChunk {
+chunk: chunk.to_vec(),
+index: ValidatorIndex(index as _),
+proof,
+})
+.next()
+.expect("There really should be 10 chunks.");
 (root, chunk)
 }
@@ -27,7 +27,7 @@ use super::*;
 mod state;
 /// State for test harnesses.
-use state::{TestState, TestHarness};
+use state::{TestHarness, TestState};
 /// Mock data useful for testing.
 pub(crate) mod mock;
@@ -60,9 +60,7 @@ fn test_harness<T: Future<Output = ()>>(
 #[test]
 fn check_basic() {
 let state = TestState::default();
-test_harness(state.keystore.clone(), move |harness| {
-state.run(harness)
-});
+test_harness(state.keystore.clone(), move |harness| state.run(harness));
 }
 /// Check whether requester tries all validators in group.
@@ -75,9 +73,7 @@ fn check_fetch_tries_all() {
 v.push(None);
 v.push(None);
 }
-test_harness(state.keystore.clone(), move |harness| {
-state.run(harness)
-});
+test_harness(state.keystore.clone(), move |harness| state.run(harness));
 }
 /// Check whether requester tries all validators in group
@@ -87,10 +83,9 @@
 #[test]
 fn check_fetch_retry() {
 let mut state = TestState::default();
-state.cores.insert(
-state.relay_chain[2],
-state.cores.get(&state.relay_chain[1]).unwrap().clone(),
-);
+state
+.cores
+.insert(state.relay_chain[2], state.cores.get(&state.relay_chain[1]).unwrap().clone());
 // We only care about the first three blocks.
 // 1. scheduled
 // 2. occupied
@@ -98,20 +93,18 @@
 state.relay_chain.truncate(3);
 // Get rid of unused valid chunks:
-let valid_candidate_hashes: HashSet<_> = state.cores
+let valid_candidate_hashes: HashSet<_> = state
+.cores
 .get(&state.relay_chain[1])
 .iter()
 .flat_map(|v| v.iter())
-.filter_map(|c| {
-match c {
-CoreState::Occupied(core) => Some(core.candidate_hash),
-_ => None,
-}
+.filter_map(|c| match c {
+CoreState::Occupied(core) => Some(core.candidate_hash),
+_ => None,
 })
 .collect();
 state.valid_chunks.retain(|(ch, _)| valid_candidate_hashes.contains(ch));
 for (_, v) in state.chunks.iter_mut() {
 // This should still succeed as cores are still pending availability on next block.
 v.push(None);
@@ -120,7 +113,5 @@
 v.push(None);
 v.push(None);
 }
-test_harness(state.keystore.clone(), move |harness| {
-state.run(harness)
-});
+test_harness(state.keystore.clone(), move |harness| state.run(harness));
 }
@@ -14,38 +14,44 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use std::{collections::{HashMap, HashSet}, sync::Arc, time::Duration};
+use std::{
+collections::{HashMap, HashSet},
+sync::Arc,
+time::Duration,
+};
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_subsystem_testhelpers::TestSubsystemContextHandle;
-use futures::{FutureExt, channel::oneshot, SinkExt, channel::mpsc, StreamExt};
+use futures::{
+channel::{mpsc, oneshot},
+FutureExt, SinkExt, StreamExt,
+};
 use futures_timer::Delay;
-use sp_keystore::SyncCryptoStorePtr;
-use sp_core::{traits::SpawnNamed, testing::TaskExecutor};
 use sc_network as network;
-use sc_network::IfDisconnected;
-use sc_network::config as netconfig;
+use sc_network::{config as netconfig, IfDisconnected};
+use sp_core::{testing::TaskExecutor, traits::SpawnNamed};
+use sp_keystore::SyncCryptoStorePtr;
-use polkadot_subsystem::{
-ActiveLeavesUpdate, FromOverseer, OverseerSignal, ActivatedLeaf, LeafStatus,
-messages::{
-AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, NetworkBridgeMessage,
-RuntimeApiMessage, RuntimeApiRequest,
-}
-};
-use polkadot_primitives::v1::{CandidateHash, CoreState, GroupIndex, Hash, Id
-as ParaId, ScheduledCore, SessionInfo,
-ValidatorIndex
-};
-use polkadot_node_primitives::ErasureChunk;
 use polkadot_node_network_protocol::{
 jaeger,
-request_response::{IncomingRequest, OutgoingRequest, Requests, v1}
+request_response::{v1, IncomingRequest, OutgoingRequest, Requests},
 };
+use polkadot_node_primitives::ErasureChunk;
+use polkadot_primitives::v1::{
+CandidateHash, CoreState, GroupIndex, Hash, Id as ParaId, ScheduledCore, SessionInfo,
+ValidatorIndex,
+};
+use polkadot_subsystem::{
+messages::{
+AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage,
+NetworkBridgeMessage, RuntimeApiMessage, RuntimeApiRequest,
+},
+ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal,
+};
 use polkadot_subsystem_testhelpers as test_helpers;
-use test_helpers::{SingleItemSink, mock::make_ferdie_keystore};
+use test_helpers::{mock::make_ferdie_keystore, SingleItemSink};
 use super::mock::{make_session_info, OccupiedCoreBuilder};
 use crate::LOG_TARGET;
@@ -94,35 +100,32 @@ impl Default for TestState {
 let mut cores = HashMap::new();
 let mut chunks = HashMap::new();
-cores.insert(relay_chain[0],
+cores.insert(
+relay_chain[0],
 vec![
-CoreState::Scheduled(ScheduledCore {
-para_id: chain_ids[0],
-collator: None,
-}),
-CoreState::Scheduled(ScheduledCore {
-para_id: chain_ids[1],
-collator: None,
-}),
-]
+CoreState::Scheduled(ScheduledCore { para_id: chain_ids[0], collator: None }),
+CoreState::Scheduled(ScheduledCore { para_id: chain_ids[1], collator: None }),
+],
 );
-let heads = {
+let heads = {
 let mut advanced = relay_chain.iter();
 advanced.next();
 relay_chain.iter().zip(advanced)
 };
 for (relay_parent, relay_child) in heads {
-let (p_cores, p_chunks): (Vec<_>, Vec<_>) = chain_ids.iter().enumerate()
+let (p_cores, p_chunks): (Vec<_>, Vec<_>) = chain_ids
+.iter()
+.enumerate()
 .map(|(i, para_id)| {
 let (core, chunk) = OccupiedCoreBuilder {
 group_responsible: GroupIndex(i as _),
 para_id: *para_id,
 relay_parent: relay_parent.clone(),
-}.build();
+}
+.build();
 (CoreState::Occupied(core), chunk)
-}
-)
+})
 .unzip();
 cores.insert(relay_child.clone(), p_cores);
 // Skip chunks for our own group (won't get fetched):
@@ -146,11 +149,12 @@
 }
 impl TestState {
 /// Run, but fail after some timeout.
 pub async fn run(self, harness: TestHarness) {
 // Make sure test won't run forever.
-let f = self.run_inner(harness.pool, harness.virtual_overseer).timeout(Duration::from_secs(10));
+let f = self
+.run_inner(harness.pool, harness.virtual_overseer)
+.timeout(Duration::from_secs(10));
 assert!(f.await.is_some(), "Test ran into timeout");
 }
@@ -167,17 +171,19 @@
 let updates = {
 let mut advanced = self.relay_chain.iter();
 advanced.next();
-self
-.relay_chain.iter().zip(advanced)
-.map(|(old, new)| ActiveLeavesUpdate {
-activated: Some(ActivatedLeaf {
-hash: new.clone(),
-number: 1,
-status: LeafStatus::Fresh,
-span: Arc::new(jaeger::Span::Disabled),
-}),
-deactivated: vec![old.clone()].into(),
-}).collect::<Vec<_>>()
+self.relay_chain
+.iter()
+.zip(advanced)
+.map(|(old, new)| ActiveLeavesUpdate {
+activated: Some(ActivatedLeaf {
+hash: new.clone(),
+number: 1,
+status: LeafStatus::Fresh,
+span: Arc::new(jaeger::Span::Disabled),
+}),
+deactivated: vec![old.clone()].into(),
+})
+.collect::<Vec<_>>()
 };
 // We should be storing all valid chunks during execution:
@@ -190,24 +196,27 @@
 // Spawning necessary as incoming queue can only hold a single item, we don't want to dead
 // lock ;-)
 let update_tx = tx.clone();
-executor.spawn("Sending active leaves updates", async move {
-for update in updates {
-overseer_signal(
-update_tx.clone(),
-OverseerSignal::ActiveLeaves(update)
-).await;
-// We need to give the subsystem a little time to do its job, otherwise it will
-// cancel jobs as obsolete:
-Delay::new(Duration::from_millis(20)).await;
+executor.spawn(
+"Sending active leaves updates",
+async move {
+for update in updates {
+overseer_signal(update_tx.clone(), OverseerSignal::ActiveLeaves(update)).await;
+// We need to give the subsystem a little time to do its job, otherwise it will
+// cancel jobs as obsolete:
+Delay::new(Duration::from_millis(20)).await;
+}
 }
+}
-}.boxed());
+.boxed(),
+);
-while remaining_stores > 0
-{
+while remaining_stores > 0 {
 tracing::trace!(target: LOG_TARGET, remaining_stores, "Stores left to go");
 let msg = overseer_recv(&mut rx).await;
 match msg {
-AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(reqs, IfDisconnected::TryConnect)) => {
+AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
+reqs,
+IfDisconnected::TryConnect,
+)) => {
 for req in reqs {
 // Forward requests:
 let in_req = to_incoming_req(&executor, req);
@@ -215,50 +224,61 @@
 executor.spawn(
 "Request forwarding",
 overseer_send(
-tx.clone(),
-AvailabilityDistributionMessage::ChunkFetchingRequest(in_req)
-).boxed()
+tx.clone(),
+AvailabilityDistributionMessage::ChunkFetchingRequest(in_req),
+)
+.boxed(),
 );
 }
-}
-AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx)) => {
-let chunk = self.chunks.get_mut(&(candidate_hash, validator_index)).map(Vec::pop).flatten().flatten();
-tx.send(chunk)
-.expect("Receiver is expected to be alive");
-}
-AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreChunk{candidate_hash, chunk, tx, ..}) => {
+},
+AllMessages::AvailabilityStore(AvailabilityStoreMessage::QueryChunk(
+candidate_hash,
+validator_index,
+tx,
+)) => {
+let chunk = self
+.chunks
+.get_mut(&(candidate_hash, validator_index))
+.map(Vec::pop)
+.flatten()
+.flatten();
+tx.send(chunk).expect("Receiver is expected to be alive");
+},
+AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreChunk {
+candidate_hash,
+chunk,
+tx,
+..
+}) => {
 assert!(
 self.valid_chunks.contains(&(candidate_hash, chunk.index)),
 "Only valid chunks should ever get stored."
 );
-tx.send(Ok(()))
-.expect("Receiver is expected to be alive");
+tx.send(Ok(())).expect("Receiver is expected to be alive");
 tracing::trace!(target: LOG_TARGET, "'Stored' fetched chunk.");
 remaining_stores -= 1;
-}
+},
 AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, req)) => {
 match req {
 RuntimeApiRequest::SessionIndexForChild(tx) => {
 // Always session index 1 for now:
-tx.send(Ok(1))
-.expect("Receiver should still be alive");
-}
+tx.send(Ok(1)).expect("Receiver should still be alive");
+},
 RuntimeApiRequest::SessionInfo(_, tx) => {
 tx.send(Ok(Some(self.session_info.clone())))
-.expect("Receiver should be alive.");
-}
+.expect("Receiver should be alive.");
+},
 RuntimeApiRequest::AvailabilityCores(tx) => {
 tracing::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash");
 tx.send(Ok(self.cores[&hash].clone()))
-.expect("Receiver should still be alive");
-}
+.expect("Receiver should still be alive");
+},
 _ => {
 panic!("Unexpected runtime request: {:?}", req);
-}
+},
 }
-}
-_ => {
-}
+},
+_ => {},
 }
 }
@@ -272,9 +292,7 @@ async fn overseer_signal(
 ) {
 let msg = msg.into();
 tracing::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
-tx.send(FromOverseer::Signal(msg))
-.await
-.expect("Test subsystem no longer live");
+tx.send(FromOverseer::Signal(msg)).await.expect("Test subsystem no longer live");
 }
 async fn overseer_send(
@@ -283,42 +301,44 @@ async fn overseer_send(
 ) {
 let msg = msg.into();
 tracing::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
-tx.send(FromOverseer::Communication { msg }).await
+tx.send(FromOverseer::Communication { msg })
+.await
 .expect("Test subsystem no longer live");
 tracing::trace!(target: LOG_TARGET, "sent message");
 }
-async fn overseer_recv(
-rx: &mut mpsc::UnboundedReceiver<AllMessages>,
-) -> AllMessages {
+async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver<AllMessages>) -> AllMessages {
 tracing::trace!(target: LOG_TARGET, "waiting for message ...");
 rx.next().await.expect("Test subsystem no longer live")
 }
 fn to_incoming_req(
 executor: &TaskExecutor,
-outgoing: Requests
+outgoing: Requests,
 ) -> IncomingRequest<v1::ChunkFetchingRequest> {
 match outgoing {
 Requests::ChunkFetching(OutgoingRequest { payload, pending_response, .. }) => {
-let (tx, rx): (oneshot::Sender<netconfig::OutgoingResponse>, oneshot::Receiver<_>)
-= oneshot::channel();
-executor.spawn("Message forwarding", async {
-let response = rx.await;
-let payload = response.expect("Unexpected canceled request").result;
-pending_response.send(payload.map_err(|_| network::RequestFailure::Refused))
-.expect("Sending response is expected to work");
-}.boxed()
+let (tx, rx): (oneshot::Sender<netconfig::OutgoingResponse>, oneshot::Receiver<_>) =
+oneshot::channel();
+executor.spawn(
+"Message forwarding",
+async {
+let response = rx.await;
+let payload = response.expect("Unexpected canceled request").result;
+pending_response
+.send(payload.map_err(|_| network::RequestFailure::Refused))
+.expect("Sending response is expected to work");
+}
+.boxed(),
 );
 IncomingRequest::new(
 // We don't really care:
 network::PeerId::random(),
 payload,
-tx
+tx,
 )
-}
+},
 _ => panic!("Unexpected request!"),
 }
 }