Use same fmt and clippy configs as in Substrate (#7611)

* Use same rustfmt.toml as Substrate

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* format format file

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Format with new config

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Add Substrate Clippy config

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Print Clippy version in CI

Otherwise it's difficult to reproduce locally.

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Make fmt happy

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>

* Update node/core/pvf/src/error.rs

Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>

* Update node/core/pvf/src/error.rs

Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>

---------

Signed-off-by: Oliver Tale-Yazdi <oliver.tale-yazdi@parity.io>
Co-authored-by: Tsvetomir Dimitrov <tsvetomir@parity.io>
This commit is contained in:
Oliver Tale-Yazdi
2023-08-14 16:29:29 +02:00
committed by GitHub
parent ac435c96cf
commit 342d720573
203 changed files with 1880 additions and 1504 deletions
@@ -225,8 +225,8 @@ struct State {
/// Our validator groups per active leaf.
our_validators_groups: HashMap<Hash, ValidatorGroup>,
/// The mapping from [`PeerId`] to [`HashSet<AuthorityDiscoveryId>`]. This is filled over time as we learn the [`PeerId`]'s
/// by `PeerConnected` events.
/// The mapping from [`PeerId`] to [`HashSet<AuthorityDiscoveryId>`]. This is filled over time
/// as we learn the [`PeerId`]'s by `PeerConnected` events.
peer_ids: HashMap<PeerId, HashSet<AuthorityDiscoveryId>>,
/// Tracks which validators we want to stay connected to.
@@ -241,8 +241,8 @@ struct State {
/// All collation fetching requests that are still waiting to be answered.
///
/// They are stored per relay parent, when our view changes and the relay parent moves out, we will cancel the fetch
/// request.
/// They are stored per relay parent, when our view changes and the relay parent moves out, we
/// will cancel the fetch request.
waiting_collation_fetches: HashMap<Hash, WaitingCollationFetches>,
/// Active collation fetches.
@@ -526,8 +526,8 @@ async fn connect_to_validators<Context>(
/// Advertise collation to the given `peer`.
///
/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
/// set as validator for our para at the given `relay_parent`.
/// This will only advertise a collation if there exists one for the given `relay_parent` and the
/// given `peer` is set as validator for our para at the given `relay_parent`.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn advertise_collation<Context>(
ctx: &mut Context,
@@ -638,7 +638,8 @@ async fn process_msg<Context>(
);
},
NetworkBridgeUpdate(event) => {
// We should count only this shoulder in the histogram, as other shoulders are just introducing noise
// We should count only this shoulder in the histogram, as other shoulders are just
// introducing noise
let _ = state.metrics.time_process_msg();
if let Err(e) = handle_network_msg(ctx, runtime, state, event).await {
@@ -160,8 +160,8 @@ impl TestState {
/// Generate a new relay parent and inform the subsystem about the new view.
///
/// If `merge_views == true` it means the subsystem will be informed that we are working on the old `relay_parent`
/// and the new one.
/// If `merge_views == true` it means the subsystem will be informed that we are working on the
/// old `relay_parent` and the new one.
async fn advance_to_new_round(
&mut self,
virtual_overseer: &mut VirtualOverseer,
@@ -901,7 +901,8 @@ fn collate_on_two_different_relay_chain_blocks() {
let old_relay_parent = test_state.relay_parent;
// Advance to a new round, while informing the subsystem that the old and the new relay parent are active.
// Advance to a new round, while informing the subsystem that the old and the new relay
// parent are active.
test_state.advance_to_new_round(virtual_overseer, true).await;
distribute_collation(virtual_overseer, &test_state, true).await;
@@ -1085,7 +1086,8 @@ where
.await
.unwrap();
// Keep the feedback channel alive because we need to use it to inform about the finished transfer.
// Keep the feedback channel alive because we need to use it to inform about the
// finished transfer.
let feedback_tx = assert_matches!(
rx.await,
Ok(full_response) => {
@@ -23,9 +23,9 @@
//! We keep a simple FIFO buffer of N validator groups and a bitvec for each advertisement,
//! 1 indicating we want to be connected to i-th validator in a buffer, 0 otherwise.
//!
//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a relay
//! parent, one can reset a bit back to 0 for particular **validator**. For example, if a collation
//! was fetched or some timeout has been hit.
//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a
//! relay parent, one can reset a bit back to 0 for particular **validator**. For example, if a
//! collation was fetched or some timeout has been hit.
//!
//! The bitwise OR over known advertisements gives us validators indices for connection request.
@@ -730,7 +730,8 @@ fn reject_connection_to_next_group() {
})
}
// Ensure that we fetch a second collation, after the first checked collation was found to be invalid.
// Ensure that we fetch a second collation, after the first checked collation was found to be
// invalid.
#[test]
fn fetch_next_collation_on_invalid_collation() {
let test_state = TestState::default();