WIP: CI: add spellcheck (#3421)

* CI: add spellcheck

* revert me

* CI: explicit command for spellchecker

* spellcheck: edit misspells

* CI: run spellcheck on diff

* spellcheck: edits

* spellcheck: edit misspells

* spellcheck: add rules

* spellcheck: mv configs

* spellcheck: more edits

* spellcheck: chore

* spellcheck: one more thing

* spellcheck: and another one

* spellcheck: seems like it doesn't get to an end

* spellcheck: new words after rebase

* spellcheck: new words appearing out of nowhere

* chore

* review edits

* more review edits

* more edits

* wonky behavior

* wonky behavior 2

* wonky behavior 3

* change git behavior

* spellcheck: another bunch of new edits

* spellcheck: new words are coming out of nowhere

* CI: finding the master

* CI: fetching master implicitly

* CI: undebug

* new errors

* a bunch of new edits

* and some more

* Update node/core/approval-voting/src/approval_db/v1/mod.rs

Co-authored-by: Andronik Ordian <write@reusable.software>

* Update xcm/xcm-executor/src/assets.rs

Co-authored-by: Andronik Ordian <write@reusable.software>

* Apply suggestions from code review

Co-authored-by: Andronik Ordian <write@reusable.software>

* Suggestions from the code review

* CI: scan only changed files

Co-authored-by: Andronik Ordian <write@reusable.software>
This commit is contained in:
Denis Pisarev
2021-07-14 19:22:58 +02:00
committed by GitHub
parent f6305d29be
commit fc253e6e4d
239 changed files with 927 additions and 761 deletions
-111
View File
@@ -1,111 +0,0 @@
150
adversary/SM
annualised/MS
Apache-2.0/M
api/SM
API/SM
APIs
assignee/SM
async
BFT/M
bitfield/MS
blake2/MS
blockchain/MS
borked
BTC
BTC/S
CLI/MS
codec/SM
config/MS
crypto/MS
customizable/B
debian/M
decodable/MS
dispatchable/SM
DMP/SM
DOTs
DOT/SM
ed25519
enum/MS
ERC-20
ETH
ethereum/MS
externality/MS
extrinsic/MS
extrinsics
fedora/M
GiB/S
GPL/M
GPLv3/M
Handler/MS
HMP/SM
https
include/BG
inherent/MS
initialize/RG
instantiate/B
intrinsic/MS
intrinsics
io
isolate/BG
jaeger/MS
js
keccak256/M
KSM/S
kusama/S
KYC/M
merkle/MS
misbehavior/SM
misbehaviors
MIT/M
MQC/SM
multivalidator/SM
NFT/SM
oneshot/MS
others'
parablock/MS
parachain/MS
parameterize/D
picosecond/SM
polkadot/MS
pov-block/MS
PoV/MS
promethius
promethius'
provisioner/MS
PVF/S
redhat/M
repo/MS
RPC/MS
runtime/MS
rustc/MS
sr25519
struct/MS
subsystem/MS
subsystems'
taskmanager/MS
TCP
teleport/RG
teleportation/SM
teleporter/SM
teleporters
testnet/MS
trie/MS
trustless/Y
tuple/SM
ubuntu/M
UDP
UI
unfinalize/BD
union/MSG
unservable/B
validator/SM
VMP/SM
VRF/SM
w3f/MS
wasm/M
WND/S
XCM/S
XCMP/M
instantiation/SM
NFA
+14 -2
View File
@@ -164,6 +164,18 @@ check-runtime-benchmarks:
- ./scripts/gitlab/check_runtime_benchmarks.sh
- sccache -s
spellcheck:
stage: test
<<: *docker-env
<<: *rules-pr-only
script:
- cargo spellcheck --version
# compare with the commit parent to the PR, given it's from a default branch
- git fetch origin +${CI_DEFAULT_BRANCH}:${CI_DEFAULT_BRANCH}
- time cargo spellcheck check -vvv --cfg=scripts/gitlab/spellcheck.toml --checkers hunspell --code 1
-r $(git diff --name-only ${CI_COMMIT_SHA} $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH}))
allow_failure: true
build-adder-collator:
stage: test
<<: *collect-artifacts
@@ -383,9 +395,9 @@ trigger-simnet:
variables:
TRGR_PROJECT: ${CI_PROJECT_NAME}
TRGR_REF: ${CI_COMMIT_REF_NAME}
# simnet project ID
# Simnet project ID
DWNSTRM_ID: 332
script:
# API trigger for a simnet job, argument value is set in the project variables
# API trigger for a Simnet job, argument value is set in the project variables
- ./scripts/gitlab/trigger_pipeline.sh --simnet-version=${SIMNET_REF}
allow_failure: true
+3 -7
View File
@@ -1,6 +1,6 @@
# Polkadot
Implementation of a https://polkadot.network node in Rust based on the Substrate framework.
Implementation of a <https://polkadot.network> node in Rust based on the Substrate framework.
> **NOTE:** In 2018, we split our implementation of "Polkadot" from its development framework
> "Substrate". See the [Substrate][substrate-repo] repo for git history prior to 2018.
@@ -19,7 +19,7 @@ either run the latest binary from our
[releases](https://github.com/paritytech/polkadot/releases) page, or install
Polkadot from one of our package repositories.
Installation from the debian or rpm repositories will create a `systemd`
Installation from the Debian or rpm repositories will create a `systemd`
service that can be used to run a Polkadot node. This is disabled by default,
and can be started by running `systemctl start polkadot` on demand (use
`systemctl enable polkadot` to make it auto-start after reboot). By default, it
@@ -207,7 +207,7 @@ You can run a simple single-node development "network" on your machine by runnin
polkadot --dev
```
You can muck around by heading to https://polkadot.js.org/apps and choose "Local Node" from the
You can muck around by heading to <https://polkadot.js.org/apps> and choose "Local Node" from the
Settings menu.
### Local Two-node Testnet
@@ -246,7 +246,3 @@ Ensure you replace `ALICE_BOOTNODE_ID_HERE` with the node ID from the output of
## License
Polkadot is [GPL 3.0 licensed](LICENSE).
## Important Notice
https://polkadot.network/testnetdisclaimer
+1 -1
View File
@@ -32,7 +32,7 @@ choosen
config/MS
crypto/MS
customizable/B
debian/M
Debian/M
decodable/MS
DOT/S
doesn
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for {{pallet}}
//! Autogenerated weights for {{cmd.pallet}}
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}
@@ -199,7 +199,7 @@ impl frame_system::Config for Runtime {
type BlockLength = bp_millau::BlockLength;
/// The weight of database operations that the runtime can invoke.
type DbWeight = DbWeight;
/// The designated SS58 prefix of this chain.
/// The designated `SS58` prefix of this chain.
type SS58Prefix = SS58Prefix;
/// The set code logic, just the default since we're not a parachain.
type OnSetCode = ();
@@ -239,7 +239,7 @@ parameter_types! {
}
impl pallet_timestamp::Config for Runtime {
/// A timestamp: milliseconds since the unix epoch.
/// A timestamp: milliseconds since the Unix epoch.
type Moment = u64;
type OnTimestampSet = Aura;
type MinimumPeriod = MinimumPeriod;
@@ -421,9 +421,9 @@ pub type Header = generic::Header<BlockNumber, Hashing>;
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
/// `BlockId` type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
/// The `SignedExtension` to the basic transaction logic.
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
@@ -55,8 +55,8 @@ pub struct EthereumTransactionInclusionProof {
///
/// The assumption is that this pair will never appear more than once in
/// transactions included into finalized blocks. This is obviously true
/// for any existing eth-like chain (that keep current tx format), because
/// otherwise transaction can be replayed over and over.
/// for any existing eth-like chain (that keep current transaction format),
/// because otherwise transaction can be replayed over and over.
#[derive(Encode, Decode, PartialEq, RuntimeDebug)]
pub struct EthereumTransactionTag {
/// Account that has locked funds.
@@ -34,8 +34,8 @@ frame_support::parameter_types! {
kovan_validators_configuration();
}
/// Max number of finalized headers to keep. It is equivalent of ~24 hours of
/// finalized blocks on current Kovan chain.
/// Max number of finalized headers to keep. It is equivalent of approximately
/// 24 hours of finalized blocks on current Kovan chain.
const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000;
/// Aura engine configuration for Kovan chain.
@@ -206,7 +206,7 @@ impl frame_system::Config for Runtime {
type BlockLength = bp_rialto::BlockLength;
/// The weight of database operations that the runtime can invoke.
type DbWeight = DbWeight;
/// The designated SS58 prefix of this chain.
/// The designated `SS58` prefix of this chain.
type SS58Prefix = SS58Prefix;
/// The set code logic, just the default since we're not a parachain.
type OnSetCode = ();
@@ -346,7 +346,7 @@ parameter_types! {
}
impl pallet_timestamp::Config for Runtime {
/// A timestamp: milliseconds since the unix epoch.
/// A timestamp: milliseconds since the Unix epoch.
type Moment = u64;
type OnTimestampSet = Aura;
type MinimumPeriod = MinimumPeriod;
@@ -531,9 +531,9 @@ pub type Header = generic::Header<BlockNumber, Hashing>;
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
/// `BlockId` type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
/// The `SignedExtension` to the basic transaction logic.
pub type SignedExtra = (
frame_system::CheckSpecVersion<Runtime>,
frame_system::CheckTxVersion<Runtime>,
@@ -1060,7 +1060,7 @@ impl_runtime_apis! {
/// Millau account ownership digest from Rialto.
///
/// The byte vector returned by this function should be signed with a Millau account private key.
/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key
/// This way, the owner of `rialto_account_id` on Rialto proves that the Millau account private key
/// is also under his control.
pub fn rialto_to_millau_account_ownership_digest<Call, AccountId, SpecVersion>(
millau_call: &Call,
@@ -110,7 +110,7 @@ impl TPruningStrategy for PruningStrategy {
}
}
/// ChainTime provider
/// `ChainTime` provider
#[derive(Default)]
pub struct ChainTime;
@@ -40,10 +40,10 @@ pub struct ProofParams<Recipient> {
/// When true, recipient must exists before import.
pub recipient_exists: bool,
/// When 0, transaction should have minimal possible size. When this value has non-zero value n,
/// transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
/// transaction size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`.
pub transaction_size_factor: u32,
/// When 0, proof should have minimal possible size. When this value has non-zero value n,
/// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
/// proof size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`.
pub proof_size_factor: u32,
}
+5 -5
View File
@@ -19,7 +19,7 @@
//! The messages are interpreted directly as runtime `Call`. We attempt to decode
//! them and then dispatch as usual. To prevent compatibility issues, the Calls have
//! to include a `spec_version`. This will be checked before dispatch. In the case of
//! a succesful dispatch an event is emitted.
//! a successful dispatch an event is emitted.
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
@@ -52,7 +52,7 @@ pub trait Config<I = DefaultInstance>: frame_system::Config {
/// The overarching event type.
type Event: From<Event<Self, I>> + Into<<Self as frame_system::Config>::Event>;
/// Id of the message. Whenever message is passed to the dispatch module, it emits
/// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if
/// event with this id + dispatch result. Could be e.g. (`LaneId`, `MessageNonce`) if
/// it comes from the messages module.
type MessageId: Parameter;
/// Type of account ID on source chain.
@@ -77,13 +77,13 @@ pub trait Config<I = DefaultInstance>: frame_system::Config {
/// The type that is used to wrap the `Self::Call` when it is moved over bridge.
///
/// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure
/// that all other stuff (like `spec_version`) is ok. If we would try to decode
/// that all other stuff (like `spec_version`) is OK. If we would try to decode
/// `Call` which has been encoded using previous `spec_version`, then we might end
/// up with decoding error, instead of `MessageVersionSpecMismatch`.
type EncodedCall: Decode + Encode + Into<Result<<Self as Config<I>>::Call, ()>>;
/// A type which can be turned into an AccountId from a 256-bit hash.
/// A type which can be turned into an `AccountId` from a 256-bit hash.
///
/// Used when deriving target chain AccountIds from source chain AccountIds.
/// Used when deriving target chain `AccountId`s from source chain `AccountId`s.
type AccountIdConverter: sp_runtime::traits::Convert<sp_core::hash::H256, Self::AccountId>;
}
@@ -16,7 +16,7 @@
//! Pallet for checking GRANDPA Finality Proofs.
//!
//! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin
//! Adapted copy of `substrate/client/finality-grandpa/src/justification.rs`. If origin
//! will ever be moved to the sp_finality_grandpa, we should reuse that implementation.
use codec::{Decode, Encode};
@@ -57,7 +57,7 @@ pub enum Error {
InvalidJustificationTarget,
/// The authority has provided an invalid signature.
InvalidAuthoritySignature,
/// The justification contains precommit for header that is not a descendant of the commit header.
/// The justification contains pre-commit for header that is not a descendant of the commit header.
PrecommitIsNotCommitDescendant,
/// The cumulative weight of all votes in the justification is not enough to justify commit
/// header finalization.
@@ -119,7 +119,7 @@ where
if signed.precommit.target_number < justification.commit.target_number {
return Err(Error::PrecommitIsNotCommitDescendant);
}
// all precommits must be for target block descendents
// all precommits must be for target block descendants
chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
// since we know now that the precommit target is the descendant of the justification target,
// we may increase 'weight' of the justification target
@@ -154,7 +154,7 @@ where
}
// check that the cumulative weight of validators voted for the justification target (or one
// of its descendents) is larger than required threshold.
// of its descendants) is larger than required threshold.
let threshold = authorities_set.threshold().0.into();
if cumulative_weight >= threshold {
Ok(())
+1 -1
View File
@@ -65,7 +65,7 @@ pub enum Subcommand {
#[cfg(not(feature = "try-runtime"))]
TryRuntime,
/// Key management cli utilities
/// Key management CLI utilities
Key(sc_cli::KeySubcommand),
}
+7 -7
View File
@@ -81,11 +81,11 @@ impl sp_std::fmt::Debug for CandidateHash {
pub type Nonce = u32;
/// The balance of an account.
/// 128-bits (or 38 significant decimal figures) will allow for 10m currency (10^7) at a resolution
/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (10^11 unit
/// denomination), or 10^18 total atomic units, to grow at 50%/year for 51 years (10^9 multiplier)
/// for an eventual total of 10^27 units (27 significant decimal figures).
/// We round denomination to 10^12 (12 sdf), and leave the other redundancy at the upper end so
/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution
/// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit
/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier)
/// for an eventual total of `10^27` units (27 significant decimal figures).
/// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so
/// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow.
pub type Balance = u128;
@@ -99,7 +99,7 @@ pub type BlockId = generic::BlockId<Block>;
/// Opaque, encoded, unchecked extrinsic.
pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
/// The information that goes alongside a transfer_into_parachain operation. Entirely opaque, it
/// The information that goes alongside a `transfer_into_parachain` operation. Entirely opaque, it
/// will generally be used for identifying the reason for the transfer. Typically it will hold the
/// destination account to which the transfer should be credited. If still more information is
/// needed, then this should be a hash with the pre-image presented via an off-chain mechanism on
@@ -144,7 +144,7 @@ pub struct OutboundHrmpMessage<Id> {
pub data: sp_std::vec::Vec<u8>,
}
/// V1 primitives.
/// `V1` primitives.
pub mod v1 {
pub use super::*;
}
+19 -22
View File
@@ -20,7 +20,7 @@ One particular subsystem (subsystem under test) interacts with a
mocked overseer that is made to assert incoming and outgoing messages
of the subsystem under test.
This is largely present today, but has some fragmentation in the evolved
integration test implementation. A proc-macro/macro_rules would allow
integration test implementation. A `proc-macro`/`macro_rules` would allow
for more consistent implementation and structure.
#### Behavior tests (3)
@@ -29,27 +29,25 @@ Launching small scale networks, with multiple adversarial nodes without any furt
This should include tests around the thresholds in order to evaluate the error handling once certain
assumed invariants fail.
For this purpose based on `AllSubsystems` and proc-macro `AllSubsystemsGen`.
For this purpose based on `AllSubsystems` and `proc-macro` `AllSubsystemsGen`.
This assumes a simplistic test runtime.
#### Testing at scale (4)
Launching many nodes with configurable network speed and node features in a cluster of nodes.
At this scale the [`simnet`][simnet] comes into play which launches a full cluster of nodes.
At this scale the [Simnet][simnet] comes into play which launches a full cluster of nodes.
The scale is handled by spawning a kubernetes cluster and the meta description
is covered by [`gurke`][gurke].
Asserts are made using grafana rules, based on the existing prometheus metrics. This can
is covered by [Gurke][Gurke].
Asserts are made using Grafana rules, based on the existing prometheus metrics. This can
be extended by adding an additional service translating `jaeger` spans into addition
prometheus avoiding additional polkadot source changes.
_Behavior tests_ and _testing at scale_ have naturally soft boundary.
The most significant difference is the presence of a real network and
the number of nodes, since a single host often not capable to run
multiple nodes at once.
---
## Coverage
@@ -93,15 +91,15 @@ miniserve -r ./coverage
grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info
```
The test coverage in `lcov` can the be published to <codecov.io>.
The test coverage in `lcov` can the be published to <https://codecov.io>.
```sh
bash <(curl -s https://codecov.io/bash) -f lcov.info
```
or just printed as part of the PR using a github action i.e. [jest-lcov-reporter](https://github.com/marketplace/actions/jest-lcov-reporter).
or just printed as part of the PR using a github action i.e. [`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter).
For full examples on how to use [grcov /w polkadot specifics see the github repo](https://github.com/mozilla/grcov#coverallscodecov-output).
For full examples on how to use [`grcov` /w polkadot specifics see the github repo](https://github.com/mozilla/grcov#coverallscodecov-output).
## Fuzzing
@@ -146,13 +144,12 @@ Requirements:
* spawn nodes with preconfigured behaviors
* allow multiple types of configuration to be specified
* allow extensability via external crates
* allow extendability via external crates
* ...
---
## Implementation of different behavior strain nodes.
## Implementation of different behavior strain nodes
### Goals
@@ -166,21 +163,21 @@ well as shorting the block time and epoch times down to a few `100ms` and a few
#### MVP
A simple small scale builder pattern would suffice for stage one impl of allowing to
A simple small scale builder pattern would suffice for stage one implementation of allowing to
replace individual subsystems.
An alternative would be to harness the existing `AllSubsystems` type
and replace the subsystems as needed.
#### Full proc-macro impl
#### Full `proc-macro` implementation
`Overseer` is a common pattern.
It could be extracted as proc macro and generative proc-macro.
It could be extracted as `proc` macro and generative `proc-macro`.
This would replace the `AllSubsystems` type as well as implicitly create
the `AllMessages` enum as `AllSubsystemsGen` does today.
The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) for details.
##### Declare an overseer impl
##### Declare an overseer implementation
```rust
struct BehaveMaleficient;
@@ -237,8 +234,8 @@ fn main() -> eyre::Result<()> {
#### Simnet
Spawn a kubernetes cluster based on a meta description using [gurke] with the
[simnet] scripts.
Spawn a kubernetes cluster based on a meta description using [Gurke] with the
[Simnet] scripts.
Coordinated attacks of multiple nodes or subsystems must be made possible via
a side-channel, that is out of scope for this document.
@@ -246,11 +243,11 @@ a side-channel, that is out of scope for this document.
The individual node configurations are done as targets with a particular
builder configuration.
#### Behavior tests w/o simnet
#### Behavior tests w/o Simnet
Commonly this will require multiple nodes, and most machines are limited to
running two or three nodes concurrently.
Hence, this is not the common case and is just an impl _idea_.
Hence, this is not the common case and is just an implementation _idea_.
```rust
behavior_testcase!{
@@ -263,5 +260,5 @@ behavior_testcase!{
}
```
[gurke]: https://github.com/paritytech/gurke
[Gurke]: https://github.com/paritytech/gurke
[simnet]: https://github.com/paritytech/simnet_scripts
+2 -2
View File
@@ -20,7 +20,7 @@
//! The way we accomplish this is by erasure coding the data into n pieces
//! and constructing a merkle root of the data.
//!
//! Each of n validators stores their piece of data. We assume n=3f+k, 0 < k ≤ 3.
//! Each of n validators stores their piece of data. We assume `n = 3f + k`, `0 < k ≤ 3`.
//! f is the maximum number of faulty validators in the system.
//! The data is coded so any f+1 chunks can be used to reconstruct the full data.
@@ -58,7 +58,7 @@ pub enum Error {
/// Chunks not of uniform length or the chunks are empty.
#[error("Chunks are not unform, mismatch in length or are zero sized")]
NonUniformChunks,
/// An uneven byte-length of a shard is not valid for GF(2^16) encoding.
/// An uneven byte-length of a shard is not valid for `GF(2^16)` encoding.
#[error("Uneven length is not valid for field GF(2^16)")]
UnevenLength,
/// Chunk index out of bounds.
+1 -1
View File
@@ -1,4 +1,4 @@
// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// Copyright 2017-2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
@@ -443,7 +443,7 @@ struct MetricsInner {
new_activations_per_availability_core: prometheus::Histogram,
}
/// CollationGenerationSubsystem metrics.
/// `CollationGenerationSubsystem` metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
@@ -297,8 +297,8 @@ fn filled_tranche_iterator<'a>(
pre.chain(approval_entries_filled).chain(post)
}
/// Computes the number of no_show validators in a set of assignments given the relevant approvals
/// and tick parameters. This method also returns the next tick at which a no_show will occur
/// Computes the number of `no_show` validators in a set of assignments given the relevant approvals
/// and tick parameters. This method also returns the next tick at which a `no_show` will occur
/// amongst the set of validators that have not submitted an approval.
///
/// If the returned `next_no_show` is not None, there are two possible cases for the value of
@@ -38,7 +38,7 @@ const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks";
#[cfg(test)]
pub mod tests;
/// DbBackend is a concrete implementation of the higher-level Backend trait
/// `DbBackend` is a concrete implementation of the higher-level Backend trait
pub struct DbBackend {
inner: Arc<dyn KeyValueDB>,
config: Config,
@@ -152,7 +152,7 @@ pub(crate) struct Config {
n_cores: u32,
/// The zeroth delay tranche width.
zeroth_delay_tranche_width: u32,
/// The number of samples we do of relay_vrf_modulo.
/// The number of samples we do of `relay_vrf_modulo`.
relay_vrf_modulo_samples: u32,
/// The number of delay tranches in total.
n_delay_tranches: u32,
@@ -121,7 +121,7 @@ enum Mode {
/// The approval voting subsystem.
pub struct ApprovalVotingSubsystem {
/// LocalKeystore is needed for assignment keys, but not necessarily approval keys.
/// `LocalKeystore` is needed for assignment keys, but not necessarily approval keys.
///
/// We do a lot of VRF signing and need the keys to have low latency.
keystore: Arc<LocalKeystore>,
@@ -145,7 +145,7 @@ struct MetricsInner {
time_recover_and_approve: prometheus::Histogram,
}
/// Aproval Voting metrics.
/// Approval Voting metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
@@ -24,7 +24,7 @@ use std::pin::Pin;
const TICK_DURATION_MILLIS: u64 = 500;
/// A base unit of time, starting from the unix epoch, split into half-second intervals.
/// A base unit of time, starting from the Unix epoch, split into half-second intervals.
pub(crate) type Tick = u64;
/// A clock which allows querying of the current tick as well as
+1 -1
View File
@@ -285,7 +285,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() {
}
);
// runtime api call fails
// runtime API call fails
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+2 -2
View File
@@ -104,7 +104,7 @@ pub enum Error {
/// PoV data to validate.
enum PoVData {
/// Allready available (from candidate selection).
/// Already available (from candidate selection).
Ready(Arc<PoV>),
/// Needs to be fetched from validator (we are checking a signed statement).
FetchFromValidator {
@@ -856,7 +856,7 @@ impl CandidateBackingJob {
/// This also does bounds-checking on the validator index and will return an error if the
/// validator index is out of bounds for the current validator set. It's expected that
/// this should never happen due to the interface of the candidate backing subsystem -
/// the networking component repsonsible for feeding statements to the backing subsystem
/// the networking component responsible for feeding statements to the backing subsystem
/// is meant to check the signature and provenance of all statements before submission.
async fn dispatch_new_statement_to_dispute_coordinator(
&self,
@@ -312,5 +312,5 @@ impl JobTrait for BitfieldSigningJob {
}
}
/// BitfieldSigningSubsystem manages a number of bitfield signing jobs.
/// `BitfieldSigningSubsystem` manages a number of bitfield signing jobs.
pub type BitfieldSigningSubsystem<Spawner> = JobSubsystem<BitfieldSigningJob, Spawner>;
@@ -45,7 +45,7 @@ pub(super) trait Backend {
fn load_leaves(&self) -> Result<LeafEntrySet, Error>;
/// Load the stagnant list at the given timestamp.
fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error>;
/// Load all stagnant lists up to and including the given UNIX timestamp
/// Load all stagnant lists up to and including the given Unix timestamp
/// in ascending order.
fn load_stagnant_at_up_to(&self, up_to: Timestamp)
-> Result<Vec<(Timestamp, Vec<Hash>)>, Error>;
@@ -26,7 +26,7 @@
//! ```
//!
//! The big-endian encoding is used for creating iterators over the key-value DB which are
//! accessible by prefix, to find the earlist block number stored as well as the all stagnant
//! accessible by prefix, to find the earliest block number stored as well as the all stagnant
//! blocks.
//!
//! The `Vec`s stored are always non-empty. Empty `Vec`s are not stored on disk so there is no
@@ -534,7 +534,7 @@ async fn handle_active_leaf(
);
// If we don't know the weight, we can't import the block.
// And none of its descendents either.
// And none of its descendants either.
break;
}
Some(w) => w,
@@ -57,7 +57,7 @@ pub trait Backend {
where I: IntoIterator<Item = BackendWriteOp>;
}
/// An in-memory overllay for the backend.
/// An in-memory overlay for the backend.
///
/// This maintains read-only access to the underlying backend, but can be converted into a set of
/// write operations which will, when written to the underlying backend, give the same view as the
@@ -121,7 +121,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
self.inner.load_candidate_votes(session, candidate_hash)
}
/// Prepare a write to the 'earliest session' field of the DB.
/// Prepare a write to the "earliest session" field of the DB.
///
/// Later calls to this function will override earlier ones.
pub fn write_earliest_session(&mut self, session: SessionIndex) {
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! V1 database for the dispute coordinator.
//! `V1` database for the dispute coordinator.
use polkadot_primitives::v1::{
CandidateReceipt, ValidDisputeStatementKind, InvalidDisputeStatementKind, ValidatorIndex,
@@ -212,7 +212,7 @@ pub enum DisputeStatus {
/// since the given timestamp.
#[codec(index = 1)]
ConcludedFor(Timestamp),
/// The dispute has been concluded agains the candidate
/// The dispute has been concluded against the candidate
/// since the given timestamp.
///
/// This takes precedence over `ConcludedFor` in the case that
@@ -144,7 +144,7 @@ async fn fetch_validation_code(virtual_overseer: &mut VirtualOverseer) {
)) => {
tx.send(Ok(Some(validation_code))).unwrap();
},
"overseer did not receive runtime api request for validation code",
"overseer did not receive runtime API request for validation code",
);
}
@@ -243,7 +243,7 @@ fn cannot_participate_if_cannot_recover_validation_code() {
)) => {
tx.send(Ok(None)).unwrap();
},
"overseer did not receive runtime api request for validation code",
"overseer did not receive runtime API request for validation code",
);
virtual_overseer
+2 -2
View File
@@ -40,13 +40,13 @@ pub enum InvalidCandidate {
///
/// (b) The candidate triggered a code path that has lead to the process death. For example,
/// the PVF found a way to consume unbounded amount of resources and then it either exceeded
/// an rlimit (if set) or, again, invited OOM killer. Another possibility is a bug in
/// an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a bug in
/// wasmtime allowed the PVF to gain control over the execution worker.
///
/// We attribute such an event to an invalid candidate in either case.
///
/// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single
/// validator. If the glitch is somewhat more persistant the validator will reject all candidate
/// validator. If the glitch is somewhat more persistent the validator will reject all candidate
/// thrown at it and hopefully the operator notices it by decreased reward performance of the
/// validator. On the other hand, if the worker died because of (b) we would have better chances
/// to stop the attack.
+1 -1
View File
@@ -185,7 +185,7 @@ impl Response {
}
}
/// The entrypoint that the spawned execute worker should start with. The socket_path specifies
/// The entrypoint that the spawned execute worker should start with. The `socket_path` specifies
/// the path to the socket used to communicate with the host.
pub fn worker_entrypoint(socket_path: &str) {
worker_event_loop("execute", socket_path, |mut stream| async move {
+1 -1
View File
@@ -54,7 +54,7 @@ const CONFIG: Config = Config {
},
};
/// Runs the prevaldation on the given code. Returns a [`RuntimeBlob`] if it succeeds.
/// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds.
pub fn prevalidate(code: &[u8]) -> Result<RuntimeBlob, sc_executor_common::error::WasmError> {
let blob = RuntimeBlob::new(code)?;
// It's assumed this function will take care of any prevalidation logic
+6 -6
View File
@@ -49,7 +49,7 @@ pub struct ValidationHost {
}
impl ValidationHost {
/// Execute PVF with the given code, params and priority. The result of execution will be sent
/// Execute PVF with the given code, parameters and priority. The result of execution will be sent
/// to the provided result sender.
///
/// This is async to accommodate the possibility of back-pressure. In the vast majority of
@@ -106,7 +106,7 @@ pub struct Config {
pub cache_path: PathBuf,
/// The path to the program that can be used to spawn the prepare workers.
pub prepare_worker_program_path: PathBuf,
/// The time alloted for a prepare worker to spawn and report to the host.
/// The time allotted for a prepare worker to spawn and report to the host.
pub prepare_worker_spawn_timeout: Duration,
/// The maximum number of workers that can be spawned in the prepare pool for tasks with the
/// priority below critical.
@@ -115,7 +115,7 @@ pub struct Config {
pub prepare_workers_hard_max_num: usize,
/// The path to the program that can be used to spawn the execute workers.
pub execute_worker_program_path: PathBuf,
/// The time alloted for an execute worker to spawn and report to the host.
/// The time allotted for an execute worker to spawn and report to the host.
pub execute_worker_spawn_timeout: Duration,
/// The maximum number of execute workers that can run at the same time.
pub execute_workers_max_num: usize,
@@ -147,7 +147,7 @@ impl Config {
/// must be polled in order for validation host to function.
///
/// The future should not return normally but if it does then that indicates an unrecoverable error.
/// In that case all pending requests will be cancelled, dropping the result senders and new ones
/// In that case all pending requests will be canceled, dropping the result senders and new ones
/// will be rejected.
pub fn start(config: Config) -> (ValidationHost, impl Future<Output = ()>) {
let (to_host_tx, to_host_rx) = mpsc::channel(10);
@@ -220,7 +220,7 @@ struct PendingExecutionRequest {
}
/// A mapping from an artifact ID which is in preparation state to the list of pending execution
/// requests that should be executed once the artifact's prepration is finished.
/// requests that should be executed once the artifact's preparation is finished.
#[derive(Default)]
struct AwaitingPrepare(HashMap<ArtifactId, Vec<PendingExecutionRequest>>);
@@ -628,7 +628,7 @@ mod tests {
}
}
/// Creates a new pvf which artifact id can be uniquely identified by the given number.
/// Creates a new PVF whose artifact ID can be uniquely identified by the given number.
fn artifact_id(descriminator: u32) -> ArtifactId {
Pvf::from_discriminator(descriminator).as_artifact_id()
}
+4 -4
View File
@@ -23,14 +23,14 @@
//!
//! Then using the handle the client can send two types of requests:
//!
//! (a) PVF execution. This accepts the PVF [params][`polkadot_parachain::primitives::ValidationParams`]
//! (a) PVF execution. This accepts the PVF [`params`][`polkadot_parachain::primitives::ValidationParams`]
//! and the PVF [code][`Pvf`], prepares (verifies and compiles) the code, and then executes PVF
//! with the params.
//! with the `params`.
//!
//! (b) Heads up. This request allows to signal that the given PVF may be needed soon and that it
//! should be prepared for execution.
//!
//! The preparation results are cached for some time after they either used or was signalled in heads up.
//! The preparation results are cached for some time after they are either used or signaled in a heads-up.
//! All requests that depend on preparation of the same PVF are bundled together and will be executed
//! as soon as the artifact is prepared.
//!
@@ -70,7 +70,7 @@
//!
//! The execute workers will be fed by the requests from the execution queue, which is basically a
//! combination of a path to the compiled artifact and the
//! [params][`polkadot_parachain::primitives::ValidationParams`].
//! [`params`][`polkadot_parachain::primitives::ValidationParams`].
//!
//! Each fixed interval of time a pruning task will run. This task will remove all artifacts that
//! weren't used or received a heads up signal for a while.
+1 -1
View File
@@ -80,7 +80,7 @@ pub enum FromPool {
Spawned(Worker),
/// The given worker either succeeded or failed the given job. Under any circumstances the
/// artifact file has been written. The bool says whether the worker ripped.
/// artifact file has been written. The `bool` says whether the worker ripped.
Concluded(Worker, bool),
/// The given worker ceased to exist.
+1 -1
View File
@@ -530,7 +530,7 @@ mod tests {
use std::task::Poll;
use super::*;
/// Creates a new pvf which artifact id can be uniquely identified by the given number.
/// Creates a new PVF whose artifact ID can be uniquely identified by the given number.
fn pvf(descriminator: u32) -> Pvf {
Pvf::from_discriminator(descriminator)
}
+1 -1
View File
@@ -273,7 +273,7 @@ fn renice(pid: u32, niceness: i32) {
}
}
/// The entrypoint that the spawned prepare worker should start with. The socket_path specifies
/// The entrypoint that the spawned prepare worker should start with. The `socket_path` specifies
/// the path to the socket used to communicate with the host.
pub fn worker_entrypoint(socket_path: &str) {
worker_event_loop("prepare", socket_path, |mut stream| async move {
+1 -1
View File
@@ -42,7 +42,7 @@ impl Pvf {
Self { code, code_hash }
}
/// Creates a new pvf which artifact id can be uniquely identified by the given number.
/// Creates a new PVF whose artifact ID can be uniquely identified by the given number.
#[cfg(test)]
pub(crate) fn from_discriminator(num: u32) -> Self {
let descriminator_buf = num.to_le_bytes().to_vec();
+2 -2
View File
@@ -177,7 +177,7 @@ pub enum SpawnErr {
Accept,
/// An error happened during spawning the process.
ProcessSpawn,
/// The deadline alloted for the worker spawning and connecting to the socket has elapsed.
/// The deadline allotted for the worker spawning and connecting to the socket has elapsed.
AcceptTimeout,
}
@@ -187,7 +187,7 @@ pub enum SpawnErr {
/// has been terminated. Since the worker is running in another process it is obviously not necessary
/// to poll this future to make the worker run, it's only for termination detection.
///
/// This future relies on the fact that a child process's stdout fd is closed upon it's termination.
/// This future relies on the fact that a child process's stdout `fd` is closed upon its termination.
#[pin_project]
pub struct WorkerHandle {
child: async_process::Child,
+6 -6
View File
@@ -51,10 +51,10 @@ mod tests;
const LOG_TARGET: &str = "parachain::runtime-api";
/// The number of maximum runtime api requests can be executed in parallel. Further requests will be buffered.
/// The maximum number of runtime API requests that can be executed in parallel. Further requests will be buffered.
const MAX_PARALLEL_REQUESTS: usize = 4;
/// The name of the blocking task that executes a runtime api request.
/// The name of the blocking task that executes a runtime API request.
const API_REQUEST_TASK_NAME: &str = "polkadot-runtime-api-request";
/// The `RuntimeApiSubsystem`. See module docs for more details.
@@ -67,7 +67,7 @@ pub struct RuntimeApiSubsystem<Client> {
Pin<Box<dyn Future<Output = ()> + Send>>,
oneshot::Receiver<Option<RequestResult>>,
)>,
/// All the active runtime api requests that are currently being executed.
/// All the active runtime API requests that are currently being executed.
active_requests: FuturesUnordered<oneshot::Receiver<Option<RequestResult>>>,
/// Requests results cache
requests_cache: RequestResultCache,
@@ -210,7 +210,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
}
}
/// Spawn a runtime api request.
/// Spawn a runtime API request.
///
/// If there are already [`MAX_PARALLEL_REQUESTS`] requests being executed, the request will be buffered.
fn spawn_request(&mut self, relay_parent: Hash, request: Request) {
@@ -239,7 +239,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 {
tracing::warn!(
target: LOG_TARGET,
"{} runtime api requests waiting to be executed.",
"{} runtime API requests waiting to be executed.",
self.waiting_requests.len(),
)
}
@@ -249,7 +249,7 @@ impl<Client> RuntimeApiSubsystem<Client> where
}
}
/// Poll the active runtime api requests.
/// Poll the active runtime API requests.
async fn poll_requests(&mut self) {
// If there are no active requests, this future should be pending forever.
if self.active_requests.len() == 0 {
+2 -2
View File
@@ -84,13 +84,13 @@ impl Jaeger {
Jaeger::Prep(cfg)
}
/// Spawn the background task in order to send the tracing information out via udp
/// Spawn the background task in order to send the tracing information out via UDP
#[cfg(target_os = "unknown")]
pub fn launch<S: SpawnNamed>(self, _spawner: S) -> result::Result<(), JaegerError> {
Ok(())
}
/// Spawn the background task in order to send the tracing information out via udp
/// Spawn the background task in order to send the tracing information out via UDP
#[cfg(not(target_os = "unknown"))]
pub fn launch<S: SpawnNamed>(self, spawner: S) -> result::Result<(), JaegerError> {
let cfg = match self {
+3 -3
View File
@@ -326,7 +326,7 @@ impl Span {
/// Add an additional int tag to the span without consuming.
///
/// Should be used sparingly, introduction of new types is prefered.
/// Should be used sparingly, introduction of new types is preferred.
#[inline(always)]
pub fn with_int_tag(mut self, tag: &'static str, i: i64) -> Self {
self.add_int_tag(tag, i);
@@ -354,11 +354,11 @@ impl Span {
}
}
/// Add a pov hash meta tag with lazy hash eval, without consuming the span.
/// Add a PoV hash meta tag with lazy hash evaluation, without consuming the span.
#[inline(always)]
pub fn add_pov(&mut self, pov: &PoV) {
if self.is_enabled() {
// avoid computing the pov hash if jaeger is not enabled
// avoid computing the PoV hash if jaeger is not enabled
self.add_string_fmt_debug_tag("pov", pov.hash());
}
}
+1 -1
View File
@@ -8,7 +8,7 @@ path = "src/variant-a.rs"
[package]
name = "polkadot-test-malus"
description = "Misbehaving nodes for local testnets, system and simnet tests."
description = "Misbehaving nodes for local testnets, system and Simnet tests."
license = "GPL-3.0-only"
version = "0.9.8"
authors = ["Parity Technologies <admin@parity.io>"]
+1 -1
View File
@@ -27,7 +27,7 @@ use std::pin::Pin;
/// Filter incoming and outgoing messages.
pub trait MsgFilter: Send + Sync + Clone + 'static {
/// The message type the original subsystm handles incoming.
/// The incoming message type handled by the original subsystem.
type Message: Send + 'static;
/// Filter messages that are to be received by
+1 -1
View File
@@ -18,7 +18,7 @@
//!
//! An example on how to use the `OverseerGen` pattern to
//! instantiate a modified subsystem implementation
//! for usage with simnet/gurke.
//! for usage with `simnet`/Gurke.
#![allow(missing_docs)]
+1 -1
View File
@@ -49,7 +49,7 @@ pub mod metrics {
/// Try to register metrics in the Prometheus registry.
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError>;
/// Convenience method to register metrics in the optional Promethius registry.
/// Convenience method to register metrics in the optional Prometheus registry.
///
/// If no registry is provided, returns `Default::default()`. Otherwise, returns the same
/// thing that `try_register` does.
@@ -274,11 +274,11 @@ fn try_import_the_same_assignment() {
});
}
/// https://github.com/paritytech/polkadot/pull/2160#discussion_r547594835
/// <https://github.com/paritytech/polkadot/pull/2160#discussion_r547594835>
///
/// 1. Send a view update that removes block B from their view.
/// 2. Send a message from B that they incur COST_UNEXPECTED_MESSAGE for,
/// but then they receive BENEFIT_VALID_MESSAGE.
/// 2. Send a message from B that they incur `COST_UNEXPECTED_MESSAGE` for,
/// but then they receive `BENEFIT_VALID_MESSAGE`.
/// 3. Send all other messages related to B.
#[test]
fn spam_attack_results_in_negative_reputation_change() {
@@ -360,7 +360,7 @@ fn spam_attack_results_in_negative_reputation_change() {
/// Upon receiving them, they both will try to send the message each other.
/// This test makes sure they will not punish each other for such duplicate messages.
///
/// See https://github.com/paritytech/polkadot/issues/2499.
/// See <https://github.com/paritytech/polkadot/issues/2499>.
#[test]
fn peer_sending_us_the_same_we_just_sent_them_is_ok() {
let parent_hash = Hash::repeat_byte(0xFF);
@@ -72,7 +72,7 @@ enum FetchedState {
///
/// Once the contained `Sender` is dropped, any still running task will be canceled.
Started(oneshot::Sender<()>),
/// All relevant live_in have been removed, before we were able to get our chunk.
/// All relevant `live_in` have been removed, before we were able to get our chunk.
Canceled,
}
@@ -118,7 +118,7 @@ struct RunningTask {
/// Sender for communicating with other subsystems and reporting results.
sender: mpsc::Sender<FromFetchTask>,
/// Prometheues metrics for reporting results.
/// Prometheus metrics for reporting results.
metrics: Metrics,
/// Span tracking the fetching of this chunk.
@@ -199,7 +199,7 @@ fn task_stores_valid_chunk_if_there_is_one() {
struct TestRun {
/// Response to deliver for a given validator index.
/// None means, answer with NetworkError.
/// None means, answer with `NetworkError`.
chunk_responses: HashMap<Recipient, ChunkFetchingResponse>,
/// Set of chunks that should be considered valid:
valid_chunks: HashSet<Vec<u8>>,
@@ -238,7 +238,7 @@ impl TestRun {
});
}
/// Returns true, if after processing of the given message it would be ok for the stream to
/// Returns true, if after processing of the given message it would be OK for the stream to
/// end.
async fn handle_message(&self, msg: AllMessages) -> bool {
match msg {
@@ -153,8 +153,8 @@ impl Requester {
///
/// Starting requests where necessary.
///
/// Note: The passed in `leaf` is not the same as CandidateDescriptor::relay_parent in the
/// given cores. The latter is the relay_parent this candidate considers its parent, while the
/// Note: The passed in `leaf` is not the same as `CandidateDescriptor::relay_parent` in the
/// given cores. The latter is the `relay_parent` this candidate considers its parent, while the
/// passed in leaf might be some later block where the candidate is still pending availability.
async fn add_cores<Context>(
&mut self,
@@ -35,7 +35,7 @@ use crate::{
/// It should be ensured that a cached session stays live in the cache as long as we might need it.
pub struct SessionCache {
/// Look up cached sessions by SessionIndex.
/// Look up cached sessions by `SessionIndex`.
///
/// Note: Performance of fetching is really secondary here, but we need to ensure we are going
/// to get any existing cache entry, before fetching new information, as we should not mess up
@@ -85,7 +85,7 @@ where
/// Answer an incoming PoV fetch request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
/// Returns: `Ok(true)` if chunk was found and served.
pub async fn answer_pov_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::PoVFetchingRequest>,
@@ -113,7 +113,7 @@ where
/// Answer an incoming chunk request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
/// Returns: `Ok(true)` if chunk was found and served.
pub async fn answer_chunk_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::ChunkFetchingRequest>,
@@ -57,7 +57,7 @@ pub struct TestHarness {
pub pool: TaskExecutor,
}
/// TestState for mocking execution of this subsystem.
/// `TestState` for mocking execution of this subsystem.
///
/// The `Default` instance provides data, which makes the system succeed by providing a couple of
/// valid occupied cores. You can tune the data before calling `TestState::run`. E.g. modify some
+1 -1
View File
@@ -53,7 +53,7 @@ use polkadot_node_network_protocol::{
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
/// Peer set infos for network initialization.
/// Peer set info for network initialization.
///
/// To be added to [`NetworkConfiguration::extra_sets`].
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
@@ -39,7 +39,7 @@ use polkadot_overseer::AllMessages;
///
/// The resulting stream will end once any of its input ends.
///
/// TODO: Get rid of this: https://github.com/paritytech/polkadot/issues/2842
// TODO: Get rid of this: <https://github.com/paritytech/polkadot/issues/2842>
pub struct RequestMultiplexer {
receivers: Vec<(Protocol, mpsc::Receiver<network::IncomingRequest>)>,
statement_fetching: Option<mpsc::Receiver<network::IncomingRequest>>,
+1 -1
View File
@@ -232,7 +232,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
}
}
/// We assume one peer_id per authority_id.
/// We assume one `peer_id` per `authority_id`.
pub async fn get_peer_id_by_authority_id<AD: AuthorityDiscovery>(
authority_discovery: &mut AD,
authority: AuthorityDiscoveryId,
@@ -56,7 +56,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
///
/// This method will also disconnect from previously connected validators not in the `validator_ids` set.
/// it takes `network_service` and `authority_discovery_service` by value
/// and returns them as a workaround for the Future: Send requirement imposed by async fn impl.
/// and returns them as a workaround for the Future: Send requirement imposed by async function implementation.
pub async fn on_request(
&mut self,
validator_ids: Vec<AuthorityDiscoveryId>,
@@ -58,7 +58,7 @@ const COST_APPARENT_FLOOD: Rep = Rep::CostMinor("Message received when previous
///
/// This is to protect from a single slow validator preventing collations from happening.
///
/// With a collation size of 5Meg and bandwidth of 500Mbit/s (requirement for Kusama validators),
/// With a collation size of 5MB and bandwidth of 500Mbit/s (requirement for Kusama validators),
/// the transfer should be possible within 0.1 seconds. 400 milliseconds should therefore be
/// plenty and should be low enough for later validators to still be able to finish on time.
///
@@ -863,7 +863,7 @@ fn collators_reject_declare_messages() {
///
/// After the first response is done, the passed in lambda will be called with the receiver for the
/// next response and a sender for giving feedback on the response of the first transmission. After
/// the lamda has passed it is assumed that the second response is sent, which is checked by this
/// the lambda has passed it is assumed that the second response is sent, which is checked by this
/// function.
///
/// The lambda can trigger occasions on which the second response should be sent, like timeouts,
@@ -71,7 +71,7 @@ const BENEFIT_NOTIFY_GOOD: Rep = Rep::BenefitMinor("A collator was noted good by
///
/// This is to protect from a single slow collator preventing collations from happening.
///
/// With a collation size of 5Meg and bandwidth of 500Mbit/s (requirement for Kusama validators),
/// With a collation size of 5MB and bandwidth of 500Mbit/s (requirement for Kusama validators),
/// the transfer should be possible within 0.1 seconds. 400 milliseconds should therefore be
/// plenty, even with multiple heads and should be low enough for later collators to still be able
/// to finish on time.
@@ -718,7 +718,7 @@ where
}
/// A peer's view has changed. A number of things should be done:
/// - Ongoing collation requests have to be cancelled.
/// - Ongoing collation requests have to be canceled.
/// - Advertisements by this peer that are no longer relevant have to be removed.
async fn handle_peer_view_change(
state: &mut State,
@@ -738,7 +738,7 @@ async fn handle_peer_view_change(
/// This function will
/// - Check for duplicate requests.
/// - Check if the requested collation is in our view.
/// - Update PerRequest records with the `result` field if necessary.
/// - Update `PerRequest` records with the `result` field if necessary.
/// And as such invocations of this function may rely on that.
async fn request_collation<Context>(
ctx: &mut Context,
@@ -62,15 +62,15 @@ pub enum Fatal {
#[error("Spawning subsystem task failed")]
SpawnTask(#[source] SubsystemError),
/// DisputeSender mpsc receiver exhausted.
/// `DisputeSender` mpsc receiver exhausted.
#[error("Erasure chunk requester stream exhausted")]
SenderExhausted,
/// Errors coming from runtime::Runtime.
/// Errors coming from `runtime::Runtime`.
#[error("Error while accessing runtime information")]
Runtime(#[from] runtime::Fatal),
/// Errors coming from DisputeSender
/// Errors coming from `DisputeSender`
#[error("Error while accessing runtime information")]
Sender(#[from] sender::Fatal),
}
@@ -78,7 +78,7 @@ pub enum Fatal {
/// Non-fatal errors of this subsystem.
#[derive(Debug, Error)]
pub enum NonFatal {
/// Errors coming from DisputeSender
/// Errors coming from `DisputeSender`
#[error("Error while accessing runtime information")]
Sender(#[from] sender::NonFatal),
}
@@ -103,7 +103,7 @@ enum MuxedMessage {
///
/// - We need to make sure responses are actually sent (therefore we need to await futures
/// promptly).
/// - We need to update banned_peers accordingly to the result.
/// - We need to update `banned_peers` accordingly to the result.
ConfirmedImport(NonFatalResult<(PeerId, ImportStatementsResult)>),
/// A new request has arrived and should be handled.
@@ -56,7 +56,7 @@ pub const ALICE_INDEX: ValidatorIndex = ValidatorIndex(1);
lazy_static! {
/// Mocked AuthorityDiscovery service.
/// Mocked `AuthorityDiscovery` service.
pub static ref MOCK_AUTHORITY_DISCOVERY: MockAuthorityDiscovery = MockAuthorityDiscovery::new();
// Creating an innocent looking `SessionInfo` is really expensive in a debug build. Around
// 700ms on my machine, We therefore cache those keys here:
@@ -80,7 +80,7 @@ pub static ref MOCK_SESSION_INFO: SessionInfo =
..Default::default()
};
/// SessionInfo for the second session. (No more validators, but two more authorities.
/// `SessionInfo` for the second session. (No more validators, but two more authorities.)
pub static ref MOCK_NEXT_SESSION_INFO: SessionInfo =
SessionInfo {
discovery_keys:
@@ -199,12 +199,12 @@ where
failed_rx
}
/// We partition the list of all sorted `authorities` into sqrt(len) groups of sqrt(len) size
/// We partition the list of all sorted `authorities` into `sqrt(len)` groups of `sqrt(len)` size
/// and form a matrix where each validator is connected to all validators in its row and column.
/// This is similar to [web3] research proposed topology, except for the groups are not parachain
/// This is similar to `[web3]` research proposed topology, except for the groups are not parachain
/// groups (because not all validators are parachain validators and the group size is small),
/// but formed randomly via BABE randomness from two epochs ago.
/// This limits the amount of gossip peers to 2 * sqrt(len) and ensures the diameter of 2.
/// This limits the amount of gossip peers to 2 * `sqrt(len)` and ensures the diameter of 2.
///
/// [web3]: https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology
async fn update_gossip_topology<Context>(
+2 -2
View File
@@ -284,7 +284,7 @@ impl View {
/// Check if two views have the same heads.
///
/// Equivalent to the `PartialEq` fn,
/// Equivalent to the `PartialEq` function,
/// but ignores the `finalized_number` field.
pub fn check_heads_eq(&self, other: &Self) -> bool {
self.heads == other.heads
@@ -325,7 +325,7 @@ pub mod v1 {
/// Seconded statement with large payload (e.g. containing a runtime upgrade).
///
/// We only gossip the hash in that case, actual payloads can be fetched from sending node
/// via req/response.
/// via request/response.
#[codec(index = 1)]
LargeStatement(StatementMetadata),
}
@@ -16,18 +16,18 @@
//! Overview over request/responses as used in `Polkadot`.
//!
//! enum Protocol .... List of all supported protocols.
//! `enum Protocol` .... List of all supported protocols.
//!
//! enum Requests .... List of all supported requests, each entry matches one in protocols, but
//! `enum Requests` .... List of all supported requests, each entry matches one in protocols, but
//! has the actual request as payload.
//!
//! struct IncomingRequest .... wrapper for incoming requests, containing a sender for sending
//! `struct IncomingRequest` .... wrapper for incoming requests, containing a sender for sending
//! responses.
//!
//! struct OutgoingRequest .... wrapper for outgoing requests, containing a sender used by the
//! `struct OutgoingRequest` .... wrapper for outgoing requests, containing a sender used by the
//! networking code for delivering responses/delivery errors.
//!
//! trait `IsRequest` .... A trait describing a particular request. It is used for gathering meta
//! `trait IsRequest` .... A trait describing a particular request. It is used for gathering meta
//! data, like what is the corresponding response type.
//!
//! Versioned (v1 module): The actual requests and responses as sent over the network.
@@ -72,7 +72,7 @@ pub enum Protocol {
/// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately
/// 50Meg bytes per second:
/// 50MB per second:
const MIN_BANDWIDTH_BYTES: u64 = 50 * 1024 * 1024;
/// Default request timeout in seconds.
@@ -79,7 +79,7 @@ impl Requests {
///
/// Note: `Requests` is just an enum collecting all supported requests supported by network
/// bridge, it is never sent over the wire. This function just encodes the individual requests
/// contained in the enum.
/// contained in the `enum`.
pub fn encode_request(self) -> (Protocol, OutgoingRequest<Vec<u8>>) {
match self {
Self::ChunkFetching(r) => r.encode_request(),
@@ -219,7 +219,7 @@ impl From<oneshot::Canceled> for RequestError {
/// `IncomingRequest`s are produced by `RequestMultiplexer` on behalf of the network bridge.
#[derive(Debug)]
pub struct IncomingRequest<Req> {
/// PeerId of sending peer.
/// `PeerId` of sending peer.
pub peer: PeerId,
/// The sent request.
pub payload: Req,
@@ -227,7 +227,7 @@ pub struct IncomingRequest<Req> {
pub pending_response: OutgoingResponseSender<Req>,
}
/// Sender for sendinb back responses on an `IncomingRequest`.
/// Sender for sending back responses on an `IncomingRequest`.
#[derive(Debug)]
pub struct OutgoingResponseSender<Req>{
pending_response: oneshot::Sender<netconfig::OutgoingResponse>,
@@ -241,9 +241,9 @@ where
{
/// Send the response back.
///
/// On success we return Ok(()), on error we return the not sent `Response`.
/// On success we return `Ok(())`, on error we return the not sent `Response`.
///
/// netconfig::OutgoingResponse exposes a way of modifying the peer's reputation. If needed we
/// `netconfig::OutgoingResponse` exposes a way of modifying the peer's reputation. If needed we
/// can change this function to expose this feature as well.
pub fn send_response(self, resp: Req::Response) -> Result<(), Req::Response> {
self.pending_response
@@ -375,7 +375,7 @@ where
}
}
/// Future for actually receiving a typed response for an OutgoingRequest.
/// Future for actually receiving a typed response for an `OutgoingRequest`.
async fn receive_response<Req>(
rec: oneshot::Receiver<Result<Vec<u8>, network::RequestFailure>>,
) -> OutgoingResult<Req::Response>
@@ -172,7 +172,7 @@ impl IsRequest for AvailableDataFetchingRequest {
pub struct StatementFetchingRequest {
/// Data needed to locate and identify the needed statement.
pub relay_parent: Hash,
/// Hash of candidate that was used create the CommitedCandidateRecept.
/// Hash of the candidate that was used to create the `CommittedCandidateReceipt`.
pub candidate_hash: CandidateHash,
}
@@ -17,7 +17,7 @@
//! The Statement Distribution Subsystem.
//!
//! This is responsible for distributing signed statements about candidate
//! validity amongst validators.
//! validity among validators.
#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]
@@ -208,7 +208,7 @@ struct PeerRelayParentKnowledge {
/// How many large statements this peer already sent us.
///
/// Flood protection for large statements is rather hard and as soon as we get
/// https://github.com/paritytech/polkadot/issues/2979 implemented also no longer necessary.
/// `https://github.com/paritytech/polkadot/issues/2979` implemented also no longer necessary.
/// Reason: We keep messages around until we fetched the payload, but if a node makes up
/// statements and never provides the data, we will keep it around for the slot duration. Not
/// even signature checking would help, as the sender, if a validator, can just sign arbitrary
@@ -290,7 +290,7 @@ impl PeerRelayParentKnowledge {
/// Provide the maximum message count that we can receive per candidate. In practice we should
/// not receive more statements for any one candidate than there are members in the group assigned
/// to that para, but this maximum needs to be lenient to account for equivocations that may be
/// cross-group. As such, a maximum of 2 * n_validators is recommended.
/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
///
/// This returns an error if the peer should not have sent us this message according to protocol
/// rules for flood protection.
@@ -459,7 +459,7 @@ impl PeerData {
/// Provide the maximum message count that we can receive per candidate. In practice we should
/// not receive more statements for any one candidate than there are members in the group assigned
/// to that para, but this maximum needs to be lenient to account for equivocations that may be
/// cross-group. As such, a maximum of 2 * n_validators is recommended.
/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
///
/// This returns an error if the peer should not have sent us this message according to protocol
/// rules for flood protection.
@@ -45,7 +45,7 @@ pub enum RequesterMessage {
candidate_hash: CandidateHash,
tx: oneshot::Sender<Vec<PeerId>>
},
/// Fetching finished, ask for verification. If verification failes, task will continue asking
/// Fetching finished, ask for verification. If verification fails, task will continue asking
/// peers for data.
Finished {
/// Relay parent this candidate is in the context of.
@@ -42,7 +42,7 @@ pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream {
signals_received: SignalsReceived,
}
/// impl for wrapping message type...
/// implementation for wrapping message type...
#[#support_crate ::async_trait]
impl SubsystemSender< #wrapper_message > for #subsystem_sender_name {
async fn send_message(&mut self, msg: #wrapper_message) {
@@ -98,7 +98,7 @@ pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStr
}
impl #generics #overseer_name #generics #where_clause {
/// Send the given signal, a terminatin signal, to all subsystems
/// Send the given signal, a termination signal, to all subsystems
/// and wait for all subsystems to go down.
///
/// The definition of a termination signal is up to the user and
@@ -86,14 +86,14 @@ pub(crate) struct SubSysField {
/// Type to be consumed by the subsystem.
pub(crate) consumes: Path,
/// If `no_dispatch` is present, if the message is incoming via
/// an extern `Event`, it will not be dispatched to all subsystems.
/// an `extern` `Event`, it will not be dispatched to all subsystems.
pub(crate) no_dispatch: bool,
/// If the subsystem implementation is blocking execution and hence
/// has to be spawned on a separate thread or thread pool.
pub(crate) blocking: bool,
/// The subsystem is a work in progress.
/// Avoids dispatching `Wrapper` type messages, but generates the variants.
/// Does not require the subsystem to be instanciated with the builder pattern.
/// Does not require the subsystem to be instantiated with the builder pattern.
pub(crate) wip: bool,
}
@@ -133,7 +133,7 @@ pub(crate) struct SubSystemTags {
pub(crate) attrs: Vec<Attribute>,
#[allow(dead_code)]
pub(crate) no_dispatch: bool,
/// The subsystem is WIP, only generate the `Wrapper` variant, but do not forward messages
/// The subsystem is in progress, only generate the `Wrapper` variant, but do not forward messages
/// and also not include the subsystem in the list of subsystems.
pub(crate) wip: bool,
pub(crate) blocking: bool,
@@ -225,7 +225,7 @@ pub trait AnnotateErrorOrigin: 'static + Send + Sync + std::error::Error {
/// An asynchronous subsystem task..
///
/// In essence it's just a newtype wrapping a `BoxFuture`.
/// In essence it's just a new type wrapping a `BoxFuture`.
pub struct SpawnedSubsystem<E>
where
E: std::error::Error
@@ -366,12 +366,12 @@ impl<Signal, Message> From<Signal> for FromOverseer<Message, Signal> {
#[async_trait::async_trait]
pub trait SubsystemContext: Send + 'static {
/// The message type of this context. Subsystems launched with this context will expect
/// to receive messages of this type. Commonly uses the wrapping enum commonly called
/// to receive messages of this type. Commonly uses the wrapping `enum` commonly called
/// `AllMessages`.
type Message: std::fmt::Debug + Send + 'static;
/// And the same for signals.
type Signal: std::fmt::Debug + Send + 'static;
/// The overarching all messages enum.
/// The overarching all messages `enum`.
/// In some cases can be identical to `Self::Message`.
type AllMessages: From<Self::Message> + Send + 'static;
/// The sender type as provided by `sender()` and underlying.
+1 -1
View File
@@ -34,7 +34,7 @@ struct MetricsInner {
}
/// A sharable metrics type for usage with the overseer.
/// A shareable metrics type for usage with the overseer.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
+2 -2
View File
@@ -17,7 +17,7 @@
//! Legacy way of defining subsystems.
//!
//! In the future, everything should be set up using the generated
//! overeseer builder pattern instead.
//! overseer builder pattern instead.
use polkadot_node_subsystem_types::errors::SubsystemError;
use polkadot_overseer_gen::{
@@ -170,7 +170,7 @@ impl<CV, CB, SD, AD, AR, BS, BD, P, RA, AS, NB, CA, CG, CP, ApD, ApV, GS>
}
}
/// Reference every indidviudal subsystem.
/// Reference every individual subsystem.
pub fn as_ref(&self) -> AllSubsystems<&'_ CV, &'_ CB, &'_ SD, &'_ AD, &'_ AR, &'_ BS, &'_ BD, &'_ P, &'_ RA, &'_ AS, &'_ NB, &'_ CA, &'_ CG, &'_ CP, &'_ ApD, &'_ ApV, &'_ GS> {
AllSubsystems {
candidate_validation: &self.candidate_validation,
+1 -1
View File
@@ -86,7 +86,7 @@ pub struct AssignmentCert {
pub vrf: (VRFOutput, VRFProof),
}
/// An assignment crt which refers to the candidate under which the assignment is
/// An assignment criterion which refers to the candidate under which the assignment is
/// relevant by block hash.
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub struct IndirectAssignmentCert {
@@ -27,7 +27,7 @@ use polkadot_primitives::v1::{CandidateReceipt, DisputeStatement, SessionIndex,
use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote};
/// A dispute initiating/participtating message that is guaranteed to have been built from signed
/// A dispute initiating/participating message that is guaranteed to have been built from signed
/// statements.
///
/// And most likely has been constructed correctly. This is used with
@@ -102,7 +102,7 @@ impl DisputeMessage {
/// - the invalid statement is indeed an invalid one
/// - the valid statement is indeed a valid one
/// - The passed `CandidateReceipt` has the correct hash (as signed in the statements).
/// - the given validator indeces match with the given `ValidatorId`s in the statements,
/// - the given validator indices match with the given `ValidatorId`s in the statements,
/// given a `SessionInfo`.
///
/// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the
@@ -210,7 +210,7 @@ impl DisputeMessage {
}
impl UncheckedDisputeMessage {
/// Try to recover the two signed dispute votes from an UncheckedDisputeMessage.
/// Try to recover the two signed dispute votes from an `UncheckedDisputeMessage`.
pub fn try_into_signed_votes(self, session_info: &SessionInfo)
-> Result<(CandidateReceipt, (SignedDisputeStatement, ValidatorIndex), (SignedDisputeStatement, ValidatorIndex)), ()>
{
+4 -4
View File
@@ -1166,7 +1166,7 @@ fn testnet_accounts() -> Vec<AccountId> {
]
}
/// Helper function to create polkadot GenesisConfig for testing
/// Helper function to create polkadot `GenesisConfig` for testing
pub fn polkadot_testnet_genesis(
wasm_binary: &[u8],
initial_authorities: Vec<(
@@ -1264,7 +1264,7 @@ pub fn polkadot_testnet_genesis(
}
}
/// Helper function to create kusama GenesisConfig for testing
/// Helper function to create kusama `GenesisConfig` for testing
#[cfg(feature = "kusama-native")]
pub fn kusama_testnet_genesis(
wasm_binary: &[u8],
@@ -1368,7 +1368,7 @@ pub fn kusama_testnet_genesis(
}
}
/// Helper function to create westend GenesisConfig for testing
/// Helper function to create westend `GenesisConfig` for testing
#[cfg(feature = "westend-native")]
pub fn westend_testnet_genesis(
wasm_binary: &[u8],
@@ -1456,7 +1456,7 @@ pub fn westend_testnet_genesis(
}
}
/// Helper function to create rococo GenesisConfig for testing
/// Helper function to create rococo `GenesisConfig` for testing
#[cfg(feature = "rococo-native")]
pub fn rococo_testnet_genesis(
wasm_binary: &[u8],
+2 -2
View File
@@ -227,7 +227,7 @@ where
target_hash = *target_header.parent_hash();
target_header = backend
.header(BlockId::Hash(target_hash))?
.expect("Header known to exist due to the existence of one of its descendents; qed");
.expect("Header known to exist due to the existence of one of its descendants; qed");
}
}
@@ -281,7 +281,7 @@ where
}
/// GRANDPA hard forks due to borked migration of session keys after a runtime
/// upgrade (at #1491596), the signalled authority set changes were invalid
/// upgrade (at #1491596), the signaled authority set changes were invalid
/// (blank keys) and were impossible to finalize. The authorities for these
/// intermediary pending changes are replaced with a static list comprised of
/// w3f validators and randomly selected validators from the latest session (at
@@ -11,7 +11,7 @@
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//! A RocksDB instance for storing parachain data; availability data, and approvals.
//! A `RocksDB` instance for storing parachain data; availability data, and approvals.
#[cfg(feature = "full-node")]
use {
@@ -245,7 +245,7 @@ impl<B> SelectChain<PolkadotBlock> for SelectRelayChain<B>
self.block_header(best_leaf)
}
/// Get the best descendent of `target_hash` that we should attempt to
/// Get the best descendant of `target_hash` that we should attempt to
/// finalize next, if any. It is valid to return the `target_hash` if
/// no better block exists.
///
+5 -5
View File
@@ -52,7 +52,7 @@ pub enum LeafStatus {
}
impl LeafStatus {
/// Returns a bool indicating fresh status.
/// Returns a `bool` indicating fresh status.
pub fn is_fresh(&self) -> bool {
match *self {
LeafStatus::Fresh => true,
@@ -60,7 +60,7 @@ impl LeafStatus {
}
}
/// Returns a bool indicating stale status.
/// Returns a `bool` indicating stale status.
pub fn is_stale(&self) -> bool {
match *self {
LeafStatus::Fresh => false,
@@ -97,12 +97,12 @@ pub struct ActiveLeavesUpdate {
}
impl ActiveLeavesUpdate {
/// Create a ActiveLeavesUpdate with a single activated hash
/// Create a `ActiveLeavesUpdate` with a single activated hash
pub fn start_work(activated: ActivatedLeaf) -> Self {
Self { activated: [activated][..].into(), ..Default::default() }
}
/// Create a ActiveLeavesUpdate with a single deactivated hash
/// Create a `ActiveLeavesUpdate` with a single deactivated hash
pub fn stop_work(hash: Hash) -> Self {
Self { deactivated: [hash][..].into(), ..Default::default() }
}
@@ -114,7 +114,7 @@ impl ActiveLeavesUpdate {
}
impl PartialEq for ActiveLeavesUpdate {
/// Equality for `ActiveLeavesUpdate` doesnt imply bitwise equality.
/// Equality for `ActiveLeavesUpdate` doesn't imply bitwise equality.
///
/// Instead, it means equality when `activated` and `deactivated` are considered as sets.
fn eq(&self, other: &Self) -> bool {
@@ -217,7 +217,7 @@ pub enum DisputeCoordinatorMessage {
/// `InvalidImport`)
/// - or were known already (in that case the result will still be `ValidImport`)
/// - or we recorded them because (`ValidImport`)
/// - we casted our own vote already on that dispute
/// - we cast our own vote already on that dispute
/// - or we have approval votes on that candidate
/// - or other explicit votes on that candidate already recorded
/// - or recovered availability for the candidate
@@ -490,7 +490,7 @@ pub enum AvailabilityStoreMessage {
}
impl AvailabilityStoreMessage {
/// In fact, none of the AvailabilityStore messages assume a particular relay parent.
/// In fact, none of the `AvailabilityStore` messages assume a particular relay parent.
pub fn relay_parent(&self) -> Option<Hash> {
match self {
_ => None,
@@ -697,8 +697,8 @@ pub enum ProvisionerMessage {
/// This message allows external subsystems to request the set of bitfields and backed candidates
/// associated with a particular potential block hash.
///
/// This is expected to be used by a proposer, to inject that information into the InherentData
/// where it can be assembled into the ParaInherent.
/// This is expected to be used by a proposer, to inject that information into the `InherentData`
/// where it can be assembled into the `ParaInherent`.
RequestInherentData(Hash, oneshot::Sender<ProvisionerInherentData>),
/// This data should become part of a relay chain block
ProvisionableData(Hash, ProvisionableData),
@@ -59,8 +59,8 @@ impl<M> NetworkBridgeEvent<M> {
/// for example into a `BitfieldDistributionMessage` in case of the `BitfieldDistribution`
/// constructor.
///
/// Therefore a NetworkBridgeEvent<ValidationProtocol> will become for example a
/// NetworkBridgeEvent<BitfieldDistributionMessage>, with the more specific message type
/// Therefore a `NetworkBridgeEvent<ValidationProtocol>` will become for example a
/// `NetworkBridgeEvent<BitfieldDistributionMessage>`, with the more specific message type
/// `BitfieldDistributionMessage`.
///
/// This acts as a call to `clone`, except in the case where the event is a message event,
+3 -3
View File
@@ -126,7 +126,7 @@ pub enum Error {
/// The type system wants this even though it doesn't make sense
#[error(transparent)]
Infallible(#[from] std::convert::Infallible),
/// Attempted to convert from an AllMessages to a FromJob, and failed.
/// Attempted to convert from an `AllMessages` to a `FromJob`, and failed.
#[error("AllMessage not relevant to Job")]
SenderConversion(String),
/// The local node is not a validator.
@@ -276,7 +276,7 @@ pub fn choose_random_subset<T, F: FnMut(&T) -> bool>(is_priority: F, mut v: Vec<
v
}
/// Returns a bool with a probability of `a / b` of being true.
/// Returns a `bool` with a probability of `a / b` of being true.
pub fn gen_ratio(a: usize, b: usize) -> bool {
use rand::Rng as _;
let mut rng = rand::thread_rng();
@@ -372,7 +372,7 @@ impl Drop for AbortOnDrop {
}
}
/// A JobHandle manages a particular job for a subsystem.
/// A `JobHandle` manages a particular job for a subsystem.
struct JobHandle<ToJob> {
_abort_handle: AbortOnDrop,
to_job: mpsc::Sender<ToJob>,
@@ -62,14 +62,14 @@ pub struct RuntimeInfo {
/// overseer seems sensible.
session_index_cache: LruCache<Hash, SessionIndex>,
/// Look up cached sessions by SessionIndex.
/// Look up cached sessions by `SessionIndex`.
session_info_cache: LruCache<SessionIndex, ExtendedSessionInfo>,
/// Key store for determining whether we are a validator and what `ValidatorIndex` we have.
keystore: Option<SyncCryptoStorePtr>,
}
/// SessionInfo with additional useful data for validator nodes.
/// `SessionInfo` with additional useful data for validator nodes.
pub struct ExtendedSessionInfo {
/// Actual session info as fetched from the runtime.
pub session_info: SessionInfo,
@@ -303,7 +303,7 @@ where
)
}
/// Get group rotation info based on the given relay_parent.
/// Get group rotation info based on the given `relay_parent`.
pub async fn get_group_rotation_info<Context>(ctx: &mut Context, relay_parent: Hash)
-> Result<GroupRotationInfo>
where
@@ -24,7 +24,7 @@ use sc_block_builder::{BlockBuilderProvider, BlockBuilder};
use sp_state_machine::BasicExternalities;
use parity_scale_codec::{Encode, Decode};
/// An extension for the test client to init a Polkadot specific block builder.
/// An extension for the test client to initialize a Polkadot specific block builder.
pub trait InitPolkadotBlockBuilder {
/// Init a Polkadot specific block builder that works for the test runtime.
///
+1 -1
View File
@@ -38,7 +38,7 @@ pub type Executor = client::LocalCallExecutor<Block, FullBackend, sc_executor::N
/// Test client builder for Polkadot.
pub type TestClientBuilder = substrate_test_client::TestClientBuilder<Block, Executor, FullBackend, GenesisParameters>;
/// LongestChain type for the test runtime/client.
/// `LongestChain` type for the test runtime/client.
pub type LongestChain = sc_consensus::LongestChain<FullBackend, Block>;
/// Parameters of test-client builder with test-runtime.
@@ -47,7 +47,7 @@ sc_executor::native_executor_instance!(
(benchmarking::benchmarking::HostFunctions, SignatureVerificationOverride),
);
/// ChainInfo implementation.
/// `ChainInfo` implementation.
pub struct PolkadotChainInfo;
impl ChainInfo for PolkadotChainInfo {
@@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Binary used for simnet nodes, supports all runtimes, although only polkadot is implemented currently.
//! This binary accepts all the cli args the polkadot binary does, Only difference is it uses
//! Binary used for Simnet nodes, supports all runtimes, although only polkadot is implemented currently.
//! This binary accepts all the CLI args the polkadot binary does, Only difference is it uses
//! manual-seal™ and babe for block authorship, it has a no-op verifier, so all blocks received over the network
//! are imported and executed straight away. Block authorship/Finalization maybe done by calling the
//! `engine_createBlock` & `engine_FinalizeBlock` rpc methods respectively.
@@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Attempts to upgrade the polkadot runtime, in a simnet environment
//! Attempts to upgrade the polkadot runtime, in a Simnet environment
use std::{error::Error, str::FromStr};
use polkadot_simnet::{run, dispatch_with_root};
+1 -1
View File
@@ -92,7 +92,7 @@ fn testnet_accounts() -> Vec<AccountId> {
]
}
/// Helper function to create polkadot GenesisConfig for testing
/// Helper function to create polkadot `GenesisConfig` for testing
fn polkadot_testnet_genesis(
initial_authorities: Vec<(
AccountId,
+2 -2
View File
@@ -285,7 +285,7 @@ pub fn run_collator_node(
/// A Polkadot test node instance used for testing.
pub struct PolkadotTestNode {
/// TaskManager's instance.
/// `TaskManager`'s instance.
pub task_manager: TaskManager,
/// Client's instance.
pub client: Arc<Client>,
@@ -293,7 +293,7 @@ pub struct PolkadotTestNode {
pub overseer_handler: Handle,
/// The `MultiaddrWithPeerId` to this node. This is useful if you want to pass it as "boot node" to other nodes.
pub addr: MultiaddrWithPeerId,
/// RPCHandlers to make RPC queries.
/// `RPCHandlers` to make RPC queries.
pub rpc_handlers: RpcHandlers,
}
+5 -5
View File
@@ -427,7 +427,7 @@ pub struct AbridgedCandidateReceipt<H = Hash> {
pub collator: CollatorId,
/// Signature on blake2-256 of the block data by collator.
pub signature: CollatorSignature,
/// The hash of the pov-block.
/// The hash of the `pov-block`.
pub pov_block_hash: H,
/// Commitments made as a result of validation.
pub commitments: CandidateCommitments<H>,
@@ -561,9 +561,9 @@ pub struct CandidateDescriptor<H = Hash> {
/// The collator's relay-chain account ID
pub collator: CollatorId,
/// Signature on blake2-256 of components of this receipt:
/// The para ID, the relay parent, and the pov_hash.
/// The para ID, the relay parent, and the `pov_hash`.
pub signature: CollatorSignature,
/// The hash of the pov-block.
/// The hash of the `pov-block`.
pub pov_hash: H,
}
@@ -582,12 +582,12 @@ pub struct CollationInfo {
pub signature: CollatorSignature,
/// The head-data
pub head_data: HeadData,
/// blake2-256 Hash of the pov-block
/// blake2-256 Hash of the `pov-block`
pub pov_block_hash: Hash,
}
impl CollationInfo {
/// Check integrity vs. a pov-block.
/// Check integrity vs. a `pov-block`.
pub fn check_signature(&self) -> Result<(), ()> {
check_collator_signature(
&self.relay_parent,

Some files were not shown because too many files have changed in this diff Show More