* Add clippy config and remove .cargo from gitignore

* first fixes

* Clippyfied

* Add clippy CI job

* comment out rusty-cachier

* minor

* fix ci

* remove DAG from check-dependent-project

* add DAG to clippy

Co-authored-by: alvicsam <alvicsam@gmail.com>
This commit is contained in:
alexgparity
2022-11-30 09:34:06 +01:00
committed by GitHub
parent b76086c617
commit 9ea14e66c8
67 changed files with 338 additions and 351 deletions
+32
View File
@@ -0,0 +1,32 @@
#
# An auto defined `clippy` feature was introduced,
# but it was found to clash with user defined features,
# so was renamed to `cargo-clippy`.
#
# If you want standard clippy run:
# RUSTFLAGS= cargo clippy
[target.'cfg(feature = "cargo-clippy")']
rustflags = [
"-Aclippy::all",
"-Dclippy::correctness",
"-Aclippy::if-same-then-else",
"-Aclippy::clone-double-ref",
"-Dclippy::complexity",
"-Aclippy::zero-prefixed-literal", # 00_1000_000
"-Aclippy::type_complexity", # raison d'etre
"-Aclippy::nonminimal-bool", # maybe
"-Aclippy::borrowed-box", # Reasonable to fix this one
"-Aclippy::too-many-arguments", # (Turning this on would lead to)
"-Aclippy::unnecessary_cast", # Types may change
"-Aclippy::identity-op", # One case where we do 0 +
"-Aclippy::useless_conversion", # Types may change
"-Aclippy::unit_arg", # stylistic.
"-Aclippy::option-map-unit-fn", # stylistic
"-Aclippy::bind_instead_of_map", # stylistic
"-Aclippy::erasing_op", # E.g. 0 * DOLLARS
"-Aclippy::eq_op", # In tests we test equality.
"-Aclippy::while_immutable_condition", # false positives
"-Aclippy::needless_option_as_deref", # false positives
"-Aclippy::derivable_impls", # false positives
"-Aclippy::stable_sort_primitive", # prefer stable sort
]
-1
View File
@@ -10,5 +10,4 @@ polkadot.*
!polkadot.service
!.rpm/*
.DS_Store
.cargo
.env
+6 -6
View File
@@ -591,27 +591,27 @@ pub fn run() -> Result<()> {
#[cfg(feature = "kusama-native")]
if chain_spec.is_kusama() {
return Ok(runner.sync_run(|config| {
return runner.sync_run(|config| {
cmd.run::<service::kusama_runtime::Block, service::KusamaExecutorDispatch>(config)
.map_err(|e| Error::SubstrateCli(e))
})?)
})
}
#[cfg(feature = "westend-native")]
if chain_spec.is_westend() {
return Ok(runner.sync_run(|config| {
return runner.sync_run(|config| {
cmd.run::<service::westend_runtime::Block, service::WestendExecutorDispatch>(config)
.map_err(|e| Error::SubstrateCli(e))
})?)
})
}
// else we assume it is polkadot.
#[cfg(feature = "polkadot-native")]
{
return Ok(runner.sync_run(|config| {
return runner.sync_run(|config| {
cmd.run::<service::polkadot_runtime::Block, service::PolkadotExecutorDispatch>(config)
.map_err(|e| Error::SubstrateCli(e))
})?)
})
}
#[cfg(not(feature = "polkadot-native"))]
+1 -1
View File
@@ -216,7 +216,7 @@ pub struct Branches<'a, I> {
impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
/// Get the trie root.
pub fn root(&self) -> H256 {
self.root.clone()
self.root
}
}
+4 -4
View File
@@ -165,7 +165,7 @@ impl BenchmarkCallSigner<polkadot_runtime::RuntimeCall, sp_core::sr25519::Pair>
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis.clone(),
genesis,
genesis,
(),
(),
@@ -220,7 +220,7 @@ impl BenchmarkCallSigner<westend_runtime::RuntimeCall, sp_core::sr25519::Pair>
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis.clone(),
genesis,
genesis,
(),
(),
@@ -274,7 +274,7 @@ impl BenchmarkCallSigner<kusama_runtime::RuntimeCall, sp_core::sr25519::Pair>
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis.clone(),
genesis,
genesis,
(),
(),
@@ -328,7 +328,7 @@ impl BenchmarkCallSigner<rococo_runtime::RuntimeCall, sp_core::sr25519::Pair>
(),
runtime::VERSION.spec_version,
runtime::VERSION.transaction_version,
genesis.clone(),
genesis,
genesis,
(),
(),
@@ -282,8 +282,8 @@ impl State {
/// Constructs an infinite iterator from an array of `TrancheEntry` values. Any missing tranches
/// are filled with empty assignments, as they are needed to compute the approved tranches.
fn filled_tranche_iterator<'a>(
tranches: &'a [TrancheEntry],
fn filled_tranche_iterator(
tranches: &[TrancheEntry],
) -> impl Iterator<Item = (DelayTranche, &[(ValidatorIndex, Tick)])> {
let mut gap_end = None;
@@ -155,10 +155,10 @@ impl<'a> From<&'a SessionInfo> for Config {
Config {
assignment_keys: s.assignment_keys.clone(),
validator_groups: s.validator_groups.clone(),
n_cores: s.n_cores.clone(),
zeroth_delay_tranche_width: s.zeroth_delay_tranche_width.clone(),
relay_vrf_modulo_samples: s.relay_vrf_modulo_samples.clone(),
n_delay_tranches: s.n_delay_tranches.clone(),
n_cores: s.n_cores,
zeroth_delay_tranche_width: s.zeroth_delay_tranche_width,
relay_vrf_modulo_samples: s.relay_vrf_modulo_samples,
n_delay_tranches: s.n_delay_tranches,
}
}
}
@@ -415,11 +415,8 @@ pub(crate) async fn handle_new_head<Context, B: Backend>(
Err(error) => {
// It's possible that we've lost a race with finality.
let (tx, rx) = oneshot::channel();
ctx.send_message(ChainApiMessage::FinalizedBlockHash(
block_header.number.clone(),
tx,
))
.await;
ctx.send_message(ChainApiMessage::FinalizedBlockHash(block_header.number, tx))
.await;
let lost_to_finality = match rx.await {
Ok(Ok(Some(h))) if h != block_hash => true,
@@ -621,10 +621,7 @@ impl CurrentlyCheckingSet {
.candidate_hash_map
.remove(&approval_state.candidate_hash)
.unwrap_or_default();
approvals_cache.put(
approval_state.candidate_hash.clone(),
approval_state.approval_outcome.clone(),
);
approvals_cache.put(approval_state.candidate_hash, approval_state.approval_outcome);
return (out, approval_state)
}
}
@@ -768,7 +765,7 @@ async fn run<B, Context>(
where
B: Backend,
{
if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config.clone()) {
if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config) {
gum::warn!(target: LOG_TARGET, ?err, "Could not run approval vote DB sanity check");
}
@@ -1278,7 +1275,7 @@ async fn get_approval_signatures_for_candidate<Context>(
Some(e) => e,
};
let relay_hashes = entry.block_assignments.iter().map(|(relay_hash, _)| relay_hash);
let relay_hashes = entry.block_assignments.keys();
let mut candidate_indices = HashSet::new();
// Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution:
@@ -2502,7 +2499,7 @@ async fn issue_approval<Context>(
};
let candidate_hash = match block_entry.candidate(candidate_index as usize) {
Some((_, h)) => h.clone(),
Some((_, h)) => *h,
None => {
gum::warn!(
target: LOG_TARGET,
+1 -1
View File
@@ -61,7 +61,7 @@ const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time";
// We have some keys we want to map to empty values because existence of the key is enough. We use this because
// rocksdb doesn't support empty values.
const TOMBSTONE_VALUE: &[u8] = &*b" ";
const TOMBSTONE_VALUE: &[u8] = b" ";
/// Unavailable blocks are kept for 1 hour.
const KEEP_UNAVAILABLE_FOR: Duration = Duration::from_secs(60 * 60);
+2 -4
View File
@@ -482,9 +482,7 @@ impl TableContextTrait for TableContext {
}
fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool {
self.groups
.get(group)
.map_or(false, |g| g.iter().position(|a| a == authority).is_some())
self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority))
}
fn requisite_votes(&self, group: &ParaId) -> usize {
@@ -499,7 +497,7 @@ struct InvalidErasureRoot;
fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement {
let statement = match s.payload() {
Statement::Seconded(c) => TableStatement::Seconded(c.clone()),
Statement::Valid(h) => TableStatement::Valid(h.clone()),
Statement::Valid(h) => TableStatement::Valid(*h),
};
TableSignedStatement {
@@ -502,7 +502,7 @@ async fn validate_candidate_exhaustive(
let _timer = metrics.time_validate_candidate_exhaustive();
let validation_code_hash = validation_code.hash();
let para_id = candidate_receipt.descriptor.para_id.clone();
let para_id = candidate_receipt.descriptor.para_id;
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
@@ -513,7 +513,7 @@ async fn validate_candidate_exhaustive(
if let Err(e) = perform_basic_checks(
&candidate_receipt.descriptor,
persisted_validation_data.max_pov_size,
&*pov,
&pov,
&validation_code_hash,
) {
gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (basic checks)");
@@ -381,6 +381,7 @@ async fn run<Context, B>(
) where
B: Backend,
{
#![allow(clippy::all)]
loop {
let res = run_until_error(
&mut ctx,
@@ -169,7 +169,7 @@ impl CandidateVoteState<CandidateVotes> {
}
/// Create a new `CandidateVoteState` from already existing votes.
pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>, now: Timestamp) -> Self {
pub fn new(votes: CandidateVotes, env: &CandidateEnvironment, now: Timestamp) -> Self {
let own_vote = OwnVoteState::new(&votes, env);
let n_validators = env.validators().len();
@@ -713,20 +713,22 @@ impl Initialized {
return Ok(ImportStatementsResult::InvalidImport)
}
let env =
match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session)
{
None => {
gum::warn!(
target: LOG_TARGET,
session,
"We are lacking a `SessionInfo` for handling import of statements."
);
let env = match CandidateEnvironment::new(
&self.keystore,
&self.rolling_session_window,
session,
) {
None => {
gum::warn!(
target: LOG_TARGET,
session,
"We are lacking a `SessionInfo` for handling import of statements."
);
return Ok(ImportStatementsResult::InvalidImport)
},
Some(env) => env,
};
return Ok(ImportStatementsResult::InvalidImport)
},
Some(env) => env,
};
let candidate_hash = candidate_receipt.hash();
@@ -1075,20 +1077,22 @@ impl Initialized {
"Issuing local statement for candidate!"
);
// Load environment:
let env =
match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session)
{
None => {
gum::warn!(
target: LOG_TARGET,
session,
"Missing info for session which has an active dispute",
);
let env = match CandidateEnvironment::new(
&self.keystore,
&self.rolling_session_window,
session,
) {
None => {
gum::warn!(
target: LOG_TARGET,
session,
"Missing info for session which has an active dispute",
);
return Ok(())
},
Some(env) => env,
};
return Ok(())
},
Some(env) => env,
};
let votes = overlay_db
.load_candidate_votes(session, &candidate_hash)?
@@ -1257,7 +1261,7 @@ fn make_dispute_message(
votes.invalid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?;
let other_vote = SignedDisputeStatement::new_checked(
DisputeStatement::Invalid(*statement_kind),
our_vote.candidate_hash().clone(),
*our_vote.candidate_hash(),
our_vote.session_index(),
validators
.get(*validator_index)
@@ -1272,7 +1276,7 @@ fn make_dispute_message(
votes.valid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?;
let other_vote = SignedDisputeStatement::new_checked(
DisputeStatement::Valid(*statement_kind),
our_vote.candidate_hash().clone(),
*our_vote.candidate_hash(),
our_vote.session_index(),
validators
.get(*validator_index)
@@ -235,7 +235,7 @@ impl Participation {
req: ParticipationRequest,
recent_head: Hash,
) -> FatalResult<()> {
if self.running_participations.insert(req.candidate_hash().clone()) {
if self.running_participations.insert(*req.candidate_hash()) {
let sender = ctx.sender().clone();
ctx.spawn(
"participation-worker",
@@ -204,7 +204,7 @@ impl Queues {
// Once https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple:
// target.pop_first().
if let Some((comparator, _)) = target.iter().next() {
let comparator = comparator.clone();
let comparator = *comparator;
target
.remove(&comparator)
.map(|participation_request| (comparator, participation_request))
@@ -99,7 +99,7 @@ where
);
// Fetch the onchain disputes. We'll do a prioritization based on them.
let onchain = match get_onchain_disputes(sender, leaf.hash.clone()).await {
let onchain = match get_onchain_disputes(sender, leaf.hash).await {
Ok(r) => r,
Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => {
// Runtime version is checked before calling this method, so the error below should never happen!
+2 -2
View File
@@ -373,7 +373,7 @@ async fn send_inherent_data(
let disputes = match has_required_runtime(
from_job,
leaf.hash.clone(),
leaf.hash,
PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT,
)
.await
@@ -506,7 +506,7 @@ fn select_availability_bitfields(
bitfields.len()
);
selected.into_iter().map(|(_, b)| b).collect()
selected.into_values().collect()
}
/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
+1 -3
View File
@@ -225,10 +225,8 @@ fn handle_job_finish(
result_tx: ResultSender,
) {
let (idle_worker, result) = match outcome {
Outcome::Ok { result_descriptor, duration_ms, idle_worker } => {
Outcome::Ok { result_descriptor, duration_ms: _, idle_worker } => {
// TODO: propagate the soft timeout
drop(duration_ms);
(Some(idle_worker), Ok(result_descriptor))
},
Outcome::InvalidCandidate { err, idle_worker } => (
+1 -1
View File
@@ -424,7 +424,7 @@ impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion {
use parity_scale_codec::Encode;
Ok(version.encode())
},
None => Err(format!("runtime version section is not found")),
None => Err("runtime version section is not found".to_string()),
}
}
}
+1 -1
View File
@@ -219,7 +219,7 @@ async fn send_request(
code: Arc<Vec<u8>>,
tmp_file: &Path,
) -> io::Result<()> {
framed_send(stream, &*code).await?;
framed_send(stream, &code).await?;
framed_send(stream, path_to_bytes(tmp_file)).await?;
Ok(())
}
+1 -1
View File
@@ -34,7 +34,7 @@ pub fn validate_candidate(
let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024)
.expect("Decompressing code failed");
let blob = prevalidate(&*code)?;
let blob = prevalidate(&code)?;
let artifact = prepare(blob)?;
let tmpdir = tempfile::tempdir()?;
let artifact_path = tmpdir.path().join("blob");
+1 -1
View File
@@ -268,7 +268,7 @@ where
let (sender, receiver) = oneshot::channel();
// TODO: make the cache great again https://github.com/paritytech/polkadot/issues/5546
let request = match self.query_cache(relay_parent.clone(), request) {
let request = match self.query_cache(relay_parent, request) {
Some(request) => request,
None => return,
};
+1 -1
View File
@@ -49,7 +49,7 @@ impl futures::Stream for Metronome {
loop {
match self.state {
MetronomeState::SetAlarm => {
let val = self.period.clone();
let val = self.period;
self.delay.reset(val);
self.state = MetronomeState::Snooze;
},
@@ -309,7 +309,7 @@ enum MessageSource {
impl MessageSource {
fn peer_id(&self) -> Option<PeerId> {
match self {
Self::Peer(id) => Some(id.clone()),
Self::Peer(id) => Some(*id),
Self::Local => None,
}
}
@@ -389,7 +389,7 @@ impl State {
) {
let mut new_hashes = HashSet::new();
for meta in &metas {
match self.blocks.entry(meta.hash.clone()) {
match self.blocks.entry(meta.hash) {
hash_map::Entry::Vacant(entry) => {
let candidates_count = meta.candidates.len();
let mut candidates = Vec::with_capacity(candidates_count);
@@ -398,7 +398,7 @@ impl State {
entry.insert(BlockEntry {
known_by: HashMap::new(),
number: meta.number,
parent_hash: meta.parent_hash.clone(),
parent_hash: meta.parent_hash,
knowledge: Knowledge::default(),
candidates,
session: meta.session,
@@ -406,7 +406,7 @@ impl State {
self.topologies.inc_session_refs(meta.session);
new_hashes.insert(meta.hash.clone());
new_hashes.insert(meta.hash);
// In case there are duplicates, we should only set this if the entry
// was vacant.
@@ -433,7 +433,7 @@ impl State {
&mut self.blocks,
&self.topologies,
self.peer_views.len(),
peer_id.clone(),
*peer_id,
view_intersection,
rng,
)
@@ -563,10 +563,8 @@ impl State {
"Pending assignment",
);
pending.push((
peer_id.clone(),
PendingMessage::Assignment(assignment, claimed_index),
));
pending
.push((peer_id, PendingMessage::Assignment(assignment, claimed_index)));
continue
}
@@ -574,7 +572,7 @@ impl State {
self.import_and_circulate_assignment(
ctx,
metrics,
MessageSource::Peer(peer_id.clone()),
MessageSource::Peer(peer_id),
assignment,
claimed_index,
rng,
@@ -604,7 +602,7 @@ impl State {
"Pending approval",
);
pending.push((peer_id.clone(), PendingMessage::Approval(approval_vote)));
pending.push((peer_id, PendingMessage::Approval(approval_vote)));
continue
}
@@ -612,7 +610,7 @@ impl State {
self.import_and_circulate_approval(
ctx,
metrics,
MessageSource::Peer(peer_id.clone()),
MessageSource::Peer(peer_id),
approval_vote,
)
.await;
@@ -663,7 +661,7 @@ impl State {
&mut self.blocks,
&self.topologies,
self.peer_views.len(),
peer_id.clone(),
peer_id,
view,
rng,
)
@@ -709,7 +707,7 @@ impl State {
) where
R: CryptoRng + Rng,
{
let block_hash = assignment.block_hash.clone();
let block_hash = assignment.block_hash;
let validator_index = assignment.validator;
let entry = match self.blocks.get_mut(&block_hash) {
@@ -737,7 +735,7 @@ impl State {
if let Some(peer_id) = source.peer_id() {
// check if our knowledge of the peer already contains this assignment
match entry.known_by.entry(peer_id.clone()) {
match entry.known_by.entry(peer_id) {
hash_map::Entry::Occupied(mut peer_knowledge) => {
let peer_knowledge = peer_knowledge.get_mut();
if peer_knowledge.contains(&message_subject, message_kind) {
@@ -761,13 +759,13 @@ impl State {
?message_subject,
"Assignment from a peer is out of view",
);
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
},
}
// if the assignment is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment");
peer_knowledge.received.insert(message_subject, message_kind);
@@ -803,8 +801,7 @@ impl State {
);
match result {
AssignmentCheckResult::Accepted => {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await;
entry.knowledge.known_messages.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
@@ -970,7 +967,7 @@ impl State {
source: MessageSource,
vote: IndirectSignedApprovalVote,
) {
let block_hash = vote.block_hash.clone();
let block_hash = vote.block_hash;
let validator_index = vote.validator;
let candidate_index = vote.candidate_index;
@@ -1003,7 +1000,7 @@ impl State {
}
// check if our knowledge of the peer already contains this approval
match entry.known_by.entry(peer_id.clone()) {
match entry.known_by.entry(peer_id) {
hash_map::Entry::Occupied(mut knowledge) => {
let peer_knowledge = knowledge.get_mut();
if peer_knowledge.contains(&message_subject, message_kind) {
@@ -1027,14 +1024,14 @@ impl State {
?message_subject,
"Approval from a peer is out of view",
);
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
},
}
// if the approval is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval");
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
}
@@ -1065,8 +1062,7 @@ impl State {
);
match result {
ApprovalCheckResult::Accepted => {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await;
entry.knowledge.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
@@ -1301,7 +1297,7 @@ impl State {
break
}
let peer_knowledge = entry.known_by.entry(peer_id.clone()).or_default();
let peer_knowledge = entry.known_by.entry(peer_id).or_default();
let topology = topologies.get_topology(entry.session);
@@ -1335,13 +1331,12 @@ impl State {
}
}
let message_subject =
MessageSubject(block.clone(), candidate_index, validator.clone());
let message_subject = MessageSubject(block, candidate_index, *validator);
let assignment_message = (
IndirectAssignmentCert {
block_hash: block.clone(),
validator: validator.clone(),
block_hash: block,
validator: *validator,
cert: message_state.approval_state.assignment_cert().clone(),
},
candidate_index,
@@ -1350,8 +1345,8 @@ impl State {
let approval_message =
message_state.approval_state.approval_signature().map(|signature| {
IndirectSignedApprovalVote {
block_hash: block.clone(),
validator: validator.clone(),
block_hash: block,
validator: *validator,
candidate_index,
signature,
}
@@ -1374,7 +1369,7 @@ impl State {
}
}
block = entry.parent_hash.clone();
block = entry.parent_hash;
}
}
@@ -1388,7 +1383,7 @@ impl State {
sender
.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer_id.clone()],
vec![peer_id],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send),
)),
@@ -1558,13 +1553,12 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
};
// Propagate the message to all peers in the required routing set.
let message_subject =
MessageSubject(block_hash.clone(), candidate_index, validator.clone());
let message_subject = MessageSubject(*block_hash, candidate_index, *validator);
let assignment_message = (
IndirectAssignmentCert {
block_hash: block_hash.clone(),
validator: validator.clone(),
block_hash: *block_hash,
validator: *validator,
cert: message_state.approval_state.assignment_cert().clone(),
},
candidate_index,
@@ -1572,8 +1566,8 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
let approval_message =
message_state.approval_state.approval_signature().map(|signature| {
IndirectSignedApprovalVote {
block_hash: block_hash.clone(),
validator: validator.clone(),
block_hash: *block_hash,
validator: *validator,
candidate_index,
signature,
}
@@ -1590,7 +1584,7 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) {
peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Assignment);
peer_assignments
.entry(peer.clone())
.entry(*peer)
.or_insert_with(Vec::new)
.push(assignment_message.clone());
}
@@ -1599,7 +1593,7 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
if !peer_knowledge.contains(&message_subject, MessageKind::Approval) {
peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Approval);
peer_approvals
.entry(peer.clone())
.entry(*peer)
.or_insert_with(Vec::new)
.push(approval_message.clone());
}
@@ -338,8 +338,7 @@ impl RequestChunksFromValidators {
index: validator_index,
};
let (req, res) =
OutgoingRequest::new(Recipient::Authority(validator), raw_request.clone());
let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request);
requests.push(Requests::ChunkFetchingV1(req));
params.metrics.on_chunk_request_issued();
@@ -973,7 +972,7 @@ async fn query_full_data<Context>(
ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx))
.await;
Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?)
rx.await.map_err(error::Error::CanceledQueryFullData)
}
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
@@ -319,7 +319,7 @@ async fn handle_bitfield_distribution<Context>(
}
let validator_index = signed_availability.validator_index();
let validator = if let Some(validator) = validator_set.get(*&validator_index.0 as usize) {
let validator = if let Some(validator) = validator_set.get(validator_index.0 as usize) {
validator.clone()
} else {
gum::debug!(target: LOG_TARGET, validator_index = ?validator_index.0, "Could not find a validator for index");
@@ -395,7 +395,7 @@ async fn relay_message<Context>(
};
if need_routing {
Some(peer.clone())
Some(*peer)
} else {
None
}
@@ -412,7 +412,7 @@ async fn relay_message<Context>(
// track the message as sent for this peer
job_data
.message_sent_to_peer
.entry(peer.clone())
.entry(*peer)
.or_default()
.insert(validator.clone());
});
@@ -497,7 +497,7 @@ async fn process_incoming_peer_message<Context>(
// Check if the peer already sent us a message for the validator denoted in the message earlier.
// Must be done after validator index verification, in order to avoid storing an unbounded
// number of set entries.
let received_set = job_data.message_received_from_peer.entry(origin.clone()).or_default();
let received_set = job_data.message_received_from_peer.entry(origin).or_default();
if !received_set.contains(&validator) {
received_set.insert(validator.clone());
@@ -656,7 +656,7 @@ async fn handle_peer_view_change<Context>(
) {
let added = state
.peer_views
.entry(origin.clone())
.entry(origin)
.or_default()
.replace_difference(view)
.cloned()
@@ -681,11 +681,10 @@ async fn handle_peer_view_change<Context>(
let delta_set: Vec<(ValidatorId, BitfieldGossipMessage)> = added
.into_iter()
.filter_map(|new_relay_parent_interest| {
if let Some(job_data) = (&*state).per_relay_parent.get(&new_relay_parent_interest) {
if let Some(job_data) = state.per_relay_parent.get(&new_relay_parent_interest) {
// Send all jointly known messages for a validator (given the current relay parent)
// to the peer `origin`...
let one_per_validator = job_data.one_per_validator.clone();
let origin = origin.clone();
Some(one_per_validator.into_iter().filter(move |(validator, _message)| {
// ..except for the ones the peer already has.
job_data.message_from_validator_needed_by_peer(&origin, validator)
@@ -699,7 +698,7 @@ async fn handle_peer_view_change<Context>(
.collect();
for (validator, message) in delta_set.into_iter() {
send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await;
send_tracked_gossip_message(ctx, state, origin, validator, message).await;
}
}
@@ -727,11 +726,7 @@ async fn send_tracked_gossip_message<Context>(
"Sending gossip message"
);
job_data
.message_sent_to_peer
.entry(dest.clone())
.or_default()
.insert(validator.clone());
job_data.message_sent_to_peer.entry(dest).or_default().insert(validator.clone());
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![dest],
@@ -760,14 +755,14 @@ async fn query_basics<Context>(
// query validators
ctx.send_message(RuntimeApiMessage::Request(
relay_parent.clone(),
relay_parent,
RuntimeApiRequest::Validators(validators_tx),
))
.await;
// query signing context
ctx.send_message(RuntimeApiMessage::Request(
relay_parent.clone(),
relay_parent,
RuntimeApiRequest::SessionIndexForChild(session_tx),
))
.await;
+2 -2
View File
@@ -174,7 +174,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
Ok(v) => v,
Err(_) => continue,
};
NetworkService::add_known_address(&*self, peer_id.clone(), addr);
NetworkService::add_known_address(self, peer_id, addr);
found_peer_id = Some(peer_id);
}
found_peer_id
@@ -197,7 +197,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
};
NetworkService::start_request(
&*self,
self,
peer_id,
req_protocol_names.get_name(protocol),
payload,
+11 -11
View File
@@ -213,7 +213,7 @@ where
PeerSet::Collation => &mut shared.collation_peers,
};
match peer_map.entry(peer.clone()) {
match peer_map.entry(peer) {
hash_map::Entry::Occupied(_) => continue,
hash_map::Entry::Vacant(vacant) => {
vacant.insert(PeerData { view: View::default(), version });
@@ -234,12 +234,12 @@ where
dispatch_validation_events_to_all(
vec![
NetworkBridgeEvent::PeerConnected(
peer.clone(),
peer,
role,
version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
NetworkBridgeEvent::PeerViewChange(peer, View::default()),
],
&mut sender,
)
@@ -259,12 +259,12 @@ where
dispatch_collation_events_to_all(
vec![
NetworkBridgeEvent::PeerConnected(
peer.clone(),
peer,
role,
version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
NetworkBridgeEvent::PeerViewChange(peer, View::default()),
],
&mut sender,
)
@@ -421,7 +421,7 @@ where
Some(ValidationVersion::V1.into())
{
handle_v1_peer_messages::<protocol_v1::ValidationProtocol, _>(
remote.clone(),
remote,
PeerSet::Validation,
&mut shared.0.lock().validation_peers,
v_messages,
@@ -442,7 +442,7 @@ where
};
for report in reports {
network_service.report_peer(remote.clone(), report);
network_service.report_peer(remote, report);
}
dispatch_validation_events_to_all(events, &mut sender).await;
@@ -454,7 +454,7 @@ where
Some(CollationVersion::V1.into())
{
handle_v1_peer_messages::<protocol_v1::CollationProtocol, _>(
remote.clone(),
remote,
PeerSet::Collation,
&mut shared.0.lock().collation_peers,
c_messages,
@@ -475,7 +475,7 @@ where
};
for report in reports {
network_service.report_peer(remote.clone(), report);
network_service.report_peer(remote, report);
}
dispatch_collation_events_to_all(events, &mut sender).await;
@@ -795,11 +795,11 @@ fn handle_v1_peer_messages<RawMessage: Decode, OutMessage: From<RawMessage>>(
} else {
peer_data.view = new_view;
NetworkBridgeEvent::PeerViewChange(peer.clone(), peer_data.view.clone())
NetworkBridgeEvent::PeerViewChange(peer, peer_data.view.clone())
}
},
WireMessage::ProtocolMessage(message) =>
NetworkBridgeEvent::PeerMessage(peer.clone(), message.into()),
NetworkBridgeEvent::PeerMessage(peer, message.into()),
})
}
@@ -561,7 +561,7 @@ async fn advertise_collation<Context>(
let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(
vec![peer.clone()],
vec![peer],
Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
))
.await;
@@ -707,11 +707,8 @@ async fn handle_incoming_peer_message<Context>(
"AdvertiseCollation message is not expected on the collator side of the protocol",
);
ctx.send_message(NetworkBridgeTxMessage::ReportPeer(
origin.clone(),
COST_UNEXPECTED_MESSAGE,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::ReportPeer(origin, COST_UNEXPECTED_MESSAGE))
.await;
// If we are advertised to, this is another collator, and we should disconnect.
ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation))
@@ -838,14 +835,14 @@ async fn handle_peer_view_change<Context>(
peer_id: PeerId,
view: View,
) {
let current = state.peer_views.entry(peer_id.clone()).or_default();
let current = state.peer_views.entry(peer_id).or_default();
let added: Vec<Hash> = view.difference(&*current).cloned().collect();
*current = view;
for added in added.into_iter() {
advertise_collation(ctx, state, added, peer_id.clone()).await;
advertise_collation(ctx, state, added, peer_id).await;
}
}
@@ -287,7 +287,7 @@ impl PeerData {
PeerState::Collating(ref mut state) =>
if state.advertisements.insert(on_relay_parent) {
state.last_active = Instant::now();
Ok((state.collator_id.clone(), state.para_id.clone()))
Ok((state.collator_id.clone(), state.para_id))
} else {
Err(AdvertisementError::Duplicate)
},
@@ -375,22 +375,19 @@ impl ActiveParas {
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let mg = polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender)
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let mc = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender)
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let (validators, groups, rotation_info, cores) = match (mv, mg, mc) {
(Some(v), Some((g, r)), Some(c)) => (v, g, r, c),
@@ -486,12 +483,7 @@ struct PendingCollation {
impl PendingCollation {
fn new(relay_parent: Hash, para_id: &ParaId, peer_id: &PeerId) -> Self {
Self {
relay_parent,
para_id: para_id.clone(),
peer_id: peer_id.clone(),
commitments_hash: None,
}
Self { relay_parent, para_id: *para_id, peer_id: *peer_id, commitments_hash: None }
}
}
@@ -629,9 +621,9 @@ fn collator_peer_id(
peer_data: &HashMap<PeerId, PeerData>,
collator_id: &CollatorId,
) -> Option<PeerId> {
peer_data.iter().find_map(|(peer, data)| {
data.collator_id().filter(|c| c == &collator_id).map(|_| peer.clone())
})
peer_data
.iter()
.find_map(|(peer, data)| data.collator_id().filter(|c| c == &collator_id).map(|_| *peer))
}
async fn disconnect_peer(sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId) {
@@ -655,9 +647,7 @@ async fn fetch_collation(
Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await;
(collator_id, relay_parent)
};
state
.collation_fetch_timeouts
.push(timeout(id.clone(), relay_parent.clone()).boxed());
state.collation_fetch_timeouts.push(timeout(id.clone(), relay_parent).boxed());
if let Some(peer_data) = state.peer_data.get(&peer_id) {
if peer_data.has_advertised(&relay_parent) {
@@ -729,7 +719,7 @@ async fn notify_collation_seconded(
/// - Ongoing collation requests have to be canceled.
/// - Advertisements by this peer that are no longer relevant have to be removed.
async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> {
let peer_data = state.peer_data.entry(peer_id.clone()).or_default();
let peer_data = state.peer_data.entry(peer_id).or_default();
peer_data.update_view(view);
state
@@ -883,7 +873,7 @@ async fn process_incoming_peer_message<Context>(
"Declared as collator for unneeded para",
);
modify_reputation(ctx.sender(), origin.clone(), COST_UNNEEDED_COLLATOR).await;
modify_reputation(ctx.sender(), origin, COST_UNNEEDED_COLLATOR).await;
gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
disconnect_peer(ctx.sender(), origin).await;
}
@@ -1013,7 +1003,7 @@ async fn handle_our_view_change<Context>(
.span_per_head()
.iter()
.filter(|v| !old_view.contains(&v.0))
.map(|v| (v.0.clone(), v.1.clone()))
.map(|v| (*v.0, v.1.clone()))
.collect();
added.into_iter().for_each(|(h, s)| {
@@ -1046,7 +1036,7 @@ async fn handle_our_view_change<Context>(
?para_id,
"Disconnecting peer on view change (not current parachain id)"
);
disconnect_peer(ctx.sender(), peer_id.clone()).await;
disconnect_peer(ctx.sender(), *peer_id).await;
}
}
}
@@ -1254,7 +1244,7 @@ async fn poll_requests(
retained_requested.insert(pending_collation.clone());
}
if let CollationFetchResult::Error(Some(rep)) = result {
reputation_changes.push((pending_collation.peer_id.clone(), rep));
reputation_changes.push((pending_collation.peer_id, rep));
}
}
requested_collations.retain(|k, _| retained_requested.contains(k));
@@ -1337,11 +1327,7 @@ async fn handle_collation_fetched_result<Context>(
if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) {
collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash);
ctx.sender()
.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
pov,
))
.send_message(CandidateBackingMessage::Second(relay_parent, candidate_receipt, pov))
.await;
entry.insert(collation_event);
@@ -1366,7 +1352,7 @@ async fn disconnect_inactive_peers(
for (peer, peer_data) in peers {
if peer_data.is_inactive(&eviction_policy) {
gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
disconnect_peer(sender, peer.clone()).await;
disconnect_peer(sender, *peer).await;
}
}
}
@@ -430,7 +430,7 @@ where
);
return
},
Some(vote) => (vote.0.session_index(), vote.0.candidate_hash().clone()),
Some(vote) => (vote.0.session_index(), *vote.0.candidate_hash()),
};
let (pending_confirmation, confirmation_rx) = oneshot::channel();
@@ -304,7 +304,7 @@ impl DisputeSender {
.get(*valid_index)
.ok_or(JfyiError::InvalidStatementFromCoordinator)?;
let valid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Valid(kind.clone()),
DisputeStatement::Valid(*kind),
candidate_hash,
session_index,
valid_public.clone(),
@@ -319,7 +319,7 @@ impl DisputeSender {
.get(*invalid_index)
.ok_or(JfyiError::InvalidValidatorIndexFromCoordinator)?;
let invalid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Invalid(kind.clone()),
DisputeStatement::Invalid(*kind),
candidate_hash,
session_index,
invalid_public.clone(),
@@ -94,7 +94,7 @@ impl SessionGridTopology {
let n = &self.canonical_shuffling[r_n];
grid_subset.validator_indices_x.insert(n.validator_index);
for p in &n.peer_ids {
grid_subset.peers_x.insert(p.clone());
grid_subset.peers_x.insert(*p);
}
}
@@ -102,7 +102,7 @@ impl SessionGridTopology {
let n = &self.canonical_shuffling[c_n];
grid_subset.validator_indices_y.insert(n.validator_index);
for p in &n.peer_ids {
grid_subset.peers_y.insert(p.clone());
grid_subset.peers_y.insert(*p);
}
}
+1 -1
View File
@@ -207,7 +207,7 @@ impl View {
}
/// Obtain an iterator over all heads.
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Hash> {
pub fn iter(&self) -> impl Iterator<Item = &Hash> {
self.heads.iter()
}
@@ -278,10 +278,10 @@ impl PeerRelayParentKnowledge {
let new_known = match fingerprint.0 {
CompactStatement::Seconded(ref h) => {
self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone());
self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h);
let was_known = self.is_known_candidate(h);
self.sent_candidates.insert(h.clone());
self.sent_candidates.insert(*h);
!was_known
},
CompactStatement::Valid(_) => false,
@@ -345,7 +345,7 @@ impl PeerRelayParentKnowledge {
.seconded_counts
.entry(fingerprint.1)
.or_insert_with(Default::default)
.note_remote(h.clone());
.note_remote(*h);
if !allowed_remote {
return Err(COST_UNEXPECTED_STATEMENT_REMOTE)
@@ -374,7 +374,7 @@ impl PeerRelayParentKnowledge {
}
self.received_statements.insert(fingerprint.clone());
self.received_candidates.insert(candidate_hash.clone());
self.received_candidates.insert(*candidate_hash);
Ok(fresh)
}
@@ -1025,13 +1025,15 @@ async fn circulate_statement<'a, Context>(
let mut peers_to_send: Vec<PeerId> = peers
.iter()
.filter_map(|(peer, data)| {
if data.can_send(&relay_parent, &fingerprint) {
Some(peer.clone())
} else {
None
}
})
.filter_map(
|(peer, data)| {
if data.can_send(&relay_parent, &fingerprint) {
Some(*peer)
} else {
None
}
},
)
.collect();
let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect();
@@ -1087,7 +1089,7 @@ async fn circulate_statement<'a, Context>(
"Sending statement",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
peers_to_send.iter().map(|(p, _)| p.clone()).collect(),
peers_to_send.iter().map(|(p, _)| *p).collect(),
payload,
))
.await;
@@ -1126,11 +1128,8 @@ async fn send_statements_about<Context>(
statement = ?statement.statement,
"Sending statement",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer.clone()],
payload,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload))
.await;
metrics.on_statement_distributed();
}
@@ -1161,11 +1160,8 @@ async fn send_statements<Context>(
statement = ?statement.statement,
"Sending statement"
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer.clone()],
payload,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload))
.await;
metrics.on_statement_distributed();
}
@@ -1431,7 +1427,7 @@ async fn handle_incoming_message<'a, Context>(
}
let fingerprint = message.get_fingerprint();
let candidate_hash = fingerprint.0.candidate_hash().clone();
let candidate_hash = *fingerprint.0.candidate_hash();
let handle_incoming_span = active_head
.span
.child("handle-incoming")
@@ -1551,7 +1547,7 @@ async fn handle_incoming_message<'a, Context>(
// Send the peer all statements concerning the candidate that we have,
// since it appears to have just learned about the candidate.
send_statements_about(
peer.clone(),
peer,
peer_data,
ctx,
relay_parent,
@@ -1627,7 +1623,7 @@ async fn update_peer_view_and_maybe_send_unlocked<Context, R>(
continue
}
if let Some(active_head) = active_heads.get(&new) {
send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await;
send_statements(peer, peer_data, ctx, new, active_head, metrics).await;
}
}
}
@@ -1710,7 +1706,7 @@ async fn handle_network_update<Context, R>(
topology_storage,
peers,
active_heads,
&*recent_outdated_heads,
recent_outdated_heads,
ctx,
message,
req_sender,
+4 -4
View File
@@ -56,10 +56,10 @@ where
/// Create an overseer with all subsystem being `Sub`.
///
/// Preferred way of initializing a dummy overseer for subsystem tests.
pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>(
pub fn dummy_overseer_builder<Spawner, SupportsParachains>(
spawner: Spawner,
supports_parachains: SupportsParachains,
registry: Option<&'a Registry>,
registry: Option<&Registry>,
) -> Result<
InitializedOverseerBuilder<
SpawnGlue<Spawner>,
@@ -97,11 +97,11 @@ where
}
/// Create an overseer with all subsystem being `Sub`.
pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>(
pub fn one_for_all_overseer_builder<Spawner, SupportsParachains, Sub>(
spawner: Spawner,
supports_parachains: SupportsParachains,
subsystem: Sub,
registry: Option<&'a Registry>,
registry: Option<&Registry>,
) -> Result<
InitializedOverseerBuilder<
SpawnGlue<Spawner>,
+2 -2
View File
@@ -686,7 +686,7 @@ where
subsystem_meters
.iter()
.cloned()
.filter_map(|x| x)
.flatten()
.map(|(name, ref meters)| (name, meters.read())),
);
@@ -861,7 +861,7 @@ where
let mut span = jaeger::Span::new(*hash, "leaf-activated");
if let Some(parent_span) = parent_hash.and_then(|h| self.span_per_active_leaf.get(&h)) {
span.add_follows_from(&*parent_span);
span.add_follows_from(parent_span);
}
let span = Arc::new(span);
@@ -170,13 +170,13 @@ impl DisputeMessage {
let valid_vote = ValidDisputeVote {
validator_index: valid_index,
signature: valid_statement.validator_signature().clone(),
kind: valid_kind.clone(),
kind: *valid_kind,
};
let invalid_vote = InvalidDisputeVote {
validator_index: invalid_index,
signature: invalid_statement.validator_signature().clone(),
kind: invalid_kind.clone(),
kind: *invalid_kind,
};
Ok(DisputeMessage(UncheckedDisputeMessage {
+7 -7
View File
@@ -129,7 +129,7 @@ where
/// Obtain a prepared `OverseerBuilder`, that is initialized
/// with all default values.
pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
pub fn prepared_overseer_builder<Spawner, RuntimeClient>(
OverseerGenArgs {
leaves,
keystore,
@@ -155,7 +155,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
overseer_message_channel_capacity_override,
req_protocol_names,
peerset_protocol_names,
}: OverseerGenArgs<'a, Spawner, RuntimeClient>,
}: OverseerGenArgs<Spawner, RuntimeClient>,
) -> Result<
InitializedOverseerBuilder<
SpawnGlue<Spawner>,
@@ -257,7 +257,7 @@ where
.collator_protocol({
let side = match is_collator {
IsCollator::Yes(collator_pair) => ProtocolSide::Collator(
network_service.local_peer_id().clone(),
network_service.local_peer_id(),
collator_pair,
collation_req_receiver,
Metrics::register(registry)?,
@@ -334,10 +334,10 @@ where
/// would do.
pub trait OverseerGen {
/// Overwrite the full generation of the overseer, including the subsystems.
fn generate<'a, Spawner, RuntimeClient>(
fn generate<Spawner, RuntimeClient>(
&self,
connector: OverseerConnector,
args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
args: OverseerGenArgs<Spawner, RuntimeClient>,
) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
where
RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
@@ -358,10 +358,10 @@ use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE;
pub struct RealOverseerGen;
impl OverseerGen for RealOverseerGen {
fn generate<'a, Spawner, RuntimeClient>(
fn generate<Spawner, RuntimeClient>(
&self,
connector: OverseerConnector,
args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
args: OverseerGenArgs<Spawner, RuntimeClient>,
) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
where
RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
@@ -343,12 +343,11 @@ where
// The Chain Selection subsystem is supposed to treat the finalized
// block as the best leaf in the case that there are no viable
// leaves, so this should not happen in practice.
let best_leaf = self
let best_leaf = *self
.leaves()
.await?
.first()
.ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?
.clone();
.ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?;
gum::trace!(target: LOG_TARGET, ?best_leaf, "Best chain");
+6 -1
View File
@@ -79,7 +79,12 @@ pub enum RecoveryError {
impl std::fmt::Display for RecoveryError {
fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
write!(f, "{}", self)
let msg = match self {
RecoveryError::Invalid => "Invalid",
RecoveryError::Unavailable => "Unavailable",
};
write!(f, "{}", msg)
}
}
@@ -541,9 +541,7 @@ pub enum AvailabilityStoreMessage {
impl AvailabilityStoreMessage {
/// In fact, none of the `AvailabilityStore` messages assume a particular relay parent.
pub fn relay_parent(&self) -> Option<Hash> {
match self {
_ => None,
}
None
}
}
@@ -86,24 +86,19 @@ impl<M> NetworkBridgeEvent<M> {
{
Ok(match *self {
NetworkBridgeEvent::PeerMessage(ref peer, ref msg) =>
NetworkBridgeEvent::PeerMessage(peer.clone(), T::try_from(msg)?),
NetworkBridgeEvent::PeerMessage(*peer, T::try_from(msg)?),
NetworkBridgeEvent::PeerConnected(
ref peer,
ref role,
ref version,
ref authority_id,
) => NetworkBridgeEvent::PeerConnected(
peer.clone(),
role.clone(),
*version,
authority_id.clone(),
),
) => NetworkBridgeEvent::PeerConnected(*peer, *role, *version, authority_id.clone()),
NetworkBridgeEvent::PeerDisconnected(ref peer) =>
NetworkBridgeEvent::PeerDisconnected(peer.clone()),
NetworkBridgeEvent::PeerDisconnected(*peer),
NetworkBridgeEvent::NewGossipTopology(ref topology) =>
NetworkBridgeEvent::NewGossipTopology(topology.clone()),
NetworkBridgeEvent::PeerViewChange(ref peer, ref view) =>
NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()),
NetworkBridgeEvent::PeerViewChange(*peer, view.clone()),
NetworkBridgeEvent::OurViewChange(ref view) =>
NetworkBridgeEvent::OurViewChange(view.clone()),
})
+1 -1
View File
@@ -766,7 +766,7 @@ pub fn check_candidate_backing<H: AsRef<[u8]> + Clone + Encode>(
.zip(backed.validity_votes.iter())
{
let validator_id = validator_lookup(val_in_group_idx).ok_or(())?;
let payload = attestation.signed_payload(hash.clone(), signing_context);
let payload = attestation.signed_payload(hash, signing_context);
let sig = attestation.signature();
if sig.verify(&payload[..], &validator_id) {
+10 -14
View File
@@ -247,12 +247,9 @@ pub mod pallet {
impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
fn build(&self) {
// build `Claims`
self.claims
.iter()
.map(|(a, b, _, _)| (a.clone(), b.clone()))
.for_each(|(a, b)| {
Claims::<T>::insert(a, b);
});
self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| {
Claims::<T>::insert(a, b);
});
// build `Total`
Total::<T>::put(
self.claims
@@ -266,17 +263,16 @@ pub mod pallet {
// build `Signing`
self.claims
.iter()
.filter_map(|(a, _, _, s)| Some((a.clone(), s.clone()?)))
.filter_map(|(a, _, _, s)| Some((*a, (*s)?)))
.for_each(|(a, s)| {
Signing::<T>::insert(a, s);
});
// build `Preclaims`
self.claims
.iter()
.filter_map(|(a, _, i, _)| Some((i.clone()?, a.clone())))
.for_each(|(i, a)| {
self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each(
|(i, a)| {
Preclaims::<T>::insert(i, a);
});
},
);
}
}
@@ -538,7 +534,7 @@ impl<T: Config> Pallet<T> {
}
let mut v = b"\x19Ethereum Signed Message:\n".to_vec();
v.extend(rev.into_iter().rev());
v.extend_from_slice(&prefix[..]);
v.extend_from_slice(prefix);
v.extend_from_slice(what);
v.extend_from_slice(extra);
v
@@ -645,7 +641,7 @@ where
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> Result<Self::Pre, TransactionValidityError> {
Ok(self.validate(who, call, info, len).map(|_| ())?)
self.validate(who, call, info, len).map(|_| ())
}
// <weight>
@@ -67,12 +67,10 @@ pub mod crowdloan_index_migration {
let leases = Leases::<T>::get(para_id).unwrap_or_default();
let mut found_lease_deposit = false;
for maybe_deposit in leases.iter() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
found_lease_deposit = true;
break
}
for (who, _amount) in leases.iter().flatten() {
if *who == old_fund_account {
found_lease_deposit = true;
break
}
}
if found_lease_deposit {
@@ -112,11 +110,9 @@ pub mod crowdloan_index_migration {
weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 2));
let mut leases = Leases::<T>::get(para_id).unwrap_or_default();
for maybe_deposit in leases.iter_mut() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
*who = new_fund_account.clone();
}
for (who, _amount) in leases.iter_mut().flatten() {
if *who == old_fund_account {
*who = new_fund_account.clone();
}
}
@@ -162,13 +158,11 @@ pub mod crowdloan_index_migration {
let leases = Leases::<T>::get(para_id).unwrap_or_default();
let mut new_account_found = false;
for maybe_deposit in leases.iter() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
panic!("Old fund account found after migration!");
} else if *who == new_fund_account {
new_account_found = true;
}
for (who, _amount) in leases.iter().flatten() {
if *who == old_fund_account {
panic!("Old fund account found after migration!");
} else if *who == new_fund_account {
new_account_found = true;
}
}
if new_account_found {
+16 -22
View File
@@ -31,18 +31,16 @@ pub mod slots_crowdloan_index_migration {
for (para_id, leases) in Leases::<T>::iter() {
let old_fund_account = old_fund_account_id::<T>(para_id);
for maybe_deposit in leases.iter() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
let crowdloan =
crowdloan::Funds::<T>::get(para_id).ok_or("no crowdloan found")?;
log::info!(
target: "runtime",
"para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}",
para_id, old_fund_account, crowdloan.fund_index, leases,
);
break
}
for (who, _amount) in leases.iter().flatten() {
if *who == old_fund_account {
let crowdloan =
crowdloan::Funds::<T>::get(para_id).ok_or("no crowdloan found")?;
log::info!(
target: "runtime",
"para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}",
para_id, old_fund_account, crowdloan.fund_index, leases,
);
break
}
}
}
@@ -61,11 +59,9 @@ pub mod slots_crowdloan_index_migration {
let new_fund_account = crowdloan::Pallet::<T>::fund_account_id(fund.fund_index);
// look for places the old account is used, and replace with the new account.
for maybe_deposit in leases.iter_mut() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
*who = new_fund_account.clone();
}
for (who, _amount) in leases.iter_mut().flatten() {
if *who == old_fund_account {
*who = new_fund_account.clone();
}
}
@@ -83,11 +79,9 @@ pub mod slots_crowdloan_index_migration {
let old_fund_account = old_fund_account_id::<T>(para_id);
log::info!(target: "runtime", "checking para_id: {:?}", para_id);
// check the old fund account doesn't exist anywhere.
for maybe_deposit in leases.iter() {
if let Some((who, _amount)) = maybe_deposit {
if *who == old_fund_account {
panic!("old fund account found after migration!");
}
for (who, _amount) in leases.iter().flatten() {
if *who == old_fund_account {
panic!("old fund account found after migration!");
}
}
}
+1 -1
View File
@@ -1112,7 +1112,7 @@ impl<T: Config> Pallet<T> {
// it's sufficient to count the votes in the statement set after they
set.statements.iter().for_each(|(statement, v_i, _signature)| {
if Some(true) ==
summary.new_participants.get(v_i.0 as usize).map(|b| b.as_ref().clone())
summary.new_participants.get(v_i.0 as usize).map(|b| *b.as_ref())
{
match statement {
// `summary.new_flags` contains the spam free votes.
+3 -3
View File
@@ -751,10 +751,10 @@ impl<T: Config> Pallet<T> {
let ingress = <Self as Store>::HrmpIngressChannelsIndex::take(outgoing_para)
.into_iter()
.map(|sender| HrmpChannelId { sender, recipient: outgoing_para.clone() });
.map(|sender| HrmpChannelId { sender, recipient: *outgoing_para });
let egress = <Self as Store>::HrmpEgressChannelsIndex::take(outgoing_para)
.into_iter()
.map(|recipient| HrmpChannelId { sender: outgoing_para.clone(), recipient });
.map(|recipient| HrmpChannelId { sender: *outgoing_para, recipient });
let mut to_close = ingress.chain(egress).collect::<Vec<_>>();
to_close.sort();
to_close.dedup();
@@ -1075,7 +1075,7 @@ impl<T: Config> Pallet<T> {
channel.total_size += inbound.data.len() as u32;
// compute the new MQC head of the channel
let prev_head = channel.mqc_head.clone().unwrap_or(Default::default());
let prev_head = channel.mqc_head.unwrap_or(Default::default());
let new_head = BlakeTwo256::hash_of(&(
prev_head,
inbound.sent_at,
@@ -102,7 +102,7 @@ impl<H, N> CandidatePendingAvailability<H, N> {
/// Get the core index.
pub(crate) fn core_occupied(&self) -> CoreIndex {
self.core.clone()
self.core
}
/// Get the candidate hash.
@@ -383,7 +383,7 @@ impl<T: Config> Pallet<T> {
let mut freed_cores = Vec::with_capacity(expected_bits);
for (para_id, pending_availability) in assigned_paras_record
.into_iter()
.filter_map(|x| x)
.flatten()
.filter_map(|(id, p)| p.map(|p| (id, p)))
{
if pending_availability.availability_votes.count_ones() >= threshold {
@@ -644,8 +644,7 @@ impl<T: Config> Pallet<T> {
};
// one more sweep for actually writing to storage.
let core_indices =
core_indices_and_backers.iter().map(|&(ref c, _, _)| c.clone()).collect();
let core_indices = core_indices_and_backers.iter().map(|&(ref c, _, _)| *c).collect();
for (candidate, (core, backers, group)) in
candidates.into_iter().zip(core_indices_and_backers)
{
@@ -247,7 +247,7 @@ impl<T: Config> Pallet<T> {
let validators = shared::Pallet::<T>::initializer_on_new_session(
session_index,
random_seed.clone(),
random_seed,
&new_config,
all_validators,
);
@@ -513,7 +513,7 @@ impl<T: Config> Pallet<T> {
METRICS.on_candidates_sanitized(backed_candidates.len() as u64);
// Process backed candidates according to scheduled cores.
let parent_storage_root = parent_header.state_root().clone();
let parent_storage_root = *parent_header.state_root();
let inclusion::ProcessedCandidates::<<T::Header as HeaderT>::Hash> {
core_indices: occupied,
candidate_receipt_with_backing_validator_indices,
@@ -711,7 +711,7 @@ impl<T: Config> Pallet<T> {
let scheduled = <scheduler::Pallet<T>>::scheduled();
let relay_parent_number = now - One::one();
let parent_storage_root = parent_header.state_root().clone();
let parent_storage_root = *parent_header.state_root();
let check_ctx = CandidateCheckContext::<T>::new(now, relay_parent_number);
let backed_candidates = sanitize_backed_candidates::<T, _>(
@@ -1201,7 +1201,7 @@ fn compute_entropy<T: Config>(parent_hash: T::Hash) -> [u8; 32] {
// known 2 epochs ago. it is marginally better than using the parent block
// hash since it's harder to influence the VRF output than the block hash.
let vrf_random = ParentBlockRandomness::<T>::random(&CANDIDATE_SEED_SUBJECT[..]).0;
let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT.clone();
let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT;
if let Some(vrf_random) = vrf_random {
entropy.as_mut().copy_from_slice(vrf_random.as_ref());
} else {
@@ -107,7 +107,7 @@ pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, T:
<inclusion::Pallet<T>>::pending_availability(para_id)
.expect("Occupied core always has pending availability; qed");
let backed_in_number = pending_availability.backed_in_number().clone();
let backed_in_number = *pending_availability.backed_in_number();
OccupiedCore {
next_up_on_available: <scheduler::Pallet<T>>::next_up_on_available(
CoreIndex(i as u32),
@@ -135,7 +135,7 @@ pub fn availability_cores<T: initializer::Config>() -> Vec<CoreState<T::Hash, T:
<inclusion::Pallet<T>>::pending_availability(para_id)
.expect("Occupied core always has pending availability; qed");
let backed_in_number = pending_availability.backed_in_number().clone();
let backed_in_number = *pending_availability.backed_in_number();
OccupiedCore {
next_up_on_available: <scheduler::Pallet<T>>::next_up_on_available(
CoreIndex(i as u32),
+5 -7
View File
@@ -483,7 +483,7 @@ impl<T: Config> Pallet<T> {
Some(CoreAssignment {
kind: AssignmentKind::Parachain,
para_id: parachains[core_index],
core: core.clone(),
core,
group_idx: Self::group_assigned_to_core(core, now).expect(
"core is not out of bounds and we are guaranteed \
to be after the most recent session start; qed",
@@ -496,7 +496,7 @@ impl<T: Config> Pallet<T> {
parathread_queue.take_next_on_core(core_offset).map(|entry| CoreAssignment {
kind: AssignmentKind::Parathread(entry.claim.1, entry.retries),
para_id: entry.claim.0,
core: core.clone(),
core,
group_idx: Self::group_assigned_to_core(core, now).expect(
"core is not out of bounds and we are guaranteed \
to be after the most recent session start; qed",
@@ -610,11 +610,9 @@ impl<T: Config> Pallet<T> {
(at - session_start_block) / config.group_rotation_frequency.into();
let rotations_since_session_start =
match <T::BlockNumber as TryInto<u32>>::try_into(rotations_since_session_start) {
Ok(i) => i,
Err(_) => 0, // can only happen if rotations occur only once every u32::max(),
// so functionally no difference in behavior.
};
<T::BlockNumber as TryInto<u32>>::try_into(rotations_since_session_start).unwrap_or(0);
// Error case can only happen if rotations occur only once every u32::max(),
// so functionally no difference in behavior.
let group_idx =
(core.0 as usize + rotations_since_session_start as usize) % validator_groups.len();
+1 -1
View File
@@ -107,7 +107,7 @@ impl<XcmExecutor: xcm::latest::ExecuteXcm<C::RuntimeCall>, C: Config> UmpSink
VersionedXcm,
};
let id = upward_message_id(&data[..]);
let id = upward_message_id(data);
let maybe_msg_and_weight = VersionedXcm::<C::RuntimeCall>::decode_all_with_depth_limit(
xcm::MAX_XCM_DECODE_DEPTH,
&mut data,
@@ -72,3 +72,18 @@ test-deterministic-wasm:
- .compiler-info
script:
- ./scripts/ci/gitlab/test_deterministic_wasm.sh
cargo-clippy:
stage: test
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
variables:
RUSTY_CACHIER_TOOLCHAIN: nightly
extends:
- .docker-env
- .test-refs
script:
- SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets
+3 -3
View File
@@ -32,7 +32,7 @@ use tempfile::tempdir;
pub mod common;
static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"];
static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"];
/// `benchmark block` works for all dev runtimes using the wasm executor.
#[tokio::test]
@@ -54,7 +54,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> {
let mut cmd = Command::new(cargo_bin("polkadot"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(["--chain", &runtime, "--force-authoring", "--alice"])
.args(["--chain", runtime, "--force-authoring", "--alice"])
.arg("-d")
.arg(base_path)
.arg("--no-hardware-benchmarks")
@@ -77,7 +77,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> {
fn benchmark_block(runtime: &str, base_path: &Path, block: u32) -> Result<(), String> {
// Invoke `benchmark block` with all options to make sure that they are valid.
let status = Command::new(cargo_bin("polkadot"))
.args(["benchmark", "block", "--chain", &runtime])
.args(["benchmark", "block", "--chain", runtime])
.arg("-d")
.arg(base_path)
.args(["--from", &block.to_string(), "--to", &block.to_string()])
+4 -5
View File
@@ -17,10 +17,9 @@
use assert_cmd::cargo::cargo_bin;
use std::{process::Command, result::Result};
static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"];
static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"];
static EXTRINSICS: [(&'static str, &'static str); 2] =
[("system", "remark"), ("balances", "transfer_keep_alive")];
static EXTRINSICS: [(&str, &str); 2] = [("system", "remark"), ("balances", "transfer_keep_alive")];
/// `benchmark extrinsic` works for all dev runtimes and some extrinsics.
#[test]
@@ -43,8 +42,8 @@ fn benchmark_extrinsic_rejects_non_dev_runtimes() {
fn benchmark_extrinsic(runtime: &str, pallet: &str, extrinsic: &str) -> Result<(), String> {
let status = Command::new(cargo_bin("polkadot"))
.args(["benchmark", "extrinsic", "--chain", &runtime])
.args(&["--pallet", pallet, "--extrinsic", extrinsic])
.args(["benchmark", "extrinsic", "--chain", runtime])
.args(["--pallet", pallet, "--extrinsic", extrinsic])
// Run with low repeats for faster execution.
.args(["--repeat=1", "--warmup=1", "--max-ext-per-block=1"])
.status()
+1 -1
View File
@@ -18,7 +18,7 @@ use assert_cmd::cargo::cargo_bin;
use std::{process::Command, result::Result};
use tempfile::tempdir;
static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"];
static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"];
/// `benchmark overhead` works for all dev runtimes.
#[test]
+1 -1
View File
@@ -38,7 +38,7 @@ fn benchmark_storage_works() {
/// Invoke the `benchmark storage` sub-command.
fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus {
Command::new(cargo_bin("polkadot"))
.args(&["benchmark", "storage", "--dev"])
.args(["benchmark", "storage", "--dev"])
.arg("--db")
.arg(db)
.arg("--weight-path")
+4 -2
View File
@@ -91,11 +91,13 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
// does the line contain our port (we expect this specific output from substrate).
let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") {
None => return None,
Some((_, after)) => after.split_once(",").unwrap().0,
Some((_, after)) => after.split_once(',').unwrap().0,
};
Some(format!("ws://{}", sock_addr))
})
.expect(&format!("Could not find WebSocket address in process output:\n{}", &data));
.unwrap_or_else(|| {
panic!("Could not find WebSocket address in process output:\n{}", &data)
});
(ws_url, data)
}
+1 -1
View File
@@ -24,7 +24,7 @@ fn invalid_order_arguments() {
let tmpdir = tempdir().expect("could not create temp dir");
let status = Command::new(cargo_bin("polkadot"))
.args(&["--dev", "invalid_order_arguments", "-d"])
.args(["--dev", "invalid_order_arguments", "-d"])
.arg(tmpdir.path())
.arg("-y")
.status()
+4 -4
View File
@@ -36,7 +36,7 @@ async fn purge_chain_rocksdb_works() {
let mut cmd = Command::new(cargo_bin("polkadot"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "-d"])
.args(["--dev", "-d"])
.arg(tmpdir.path())
.arg("--port")
.arg("33034")
@@ -61,7 +61,7 @@ async fn purge_chain_rocksdb_works() {
// Purge chain
let status = Command::new(cargo_bin("polkadot"))
.args(&["purge-chain", "--dev", "-d"])
.args(["purge-chain", "--dev", "-d"])
.arg(tmpdir.path())
.arg("-y")
.status()
@@ -86,7 +86,7 @@ async fn purge_chain_paritydb_works() {
let mut cmd = Command::new(cargo_bin("polkadot"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "-d"])
.args(["--dev", "-d"])
.arg(tmpdir.path())
.arg("--database")
.arg("paritydb-experimental")
@@ -111,7 +111,7 @@ async fn purge_chain_paritydb_works() {
// Purge chain
let status = Command::new(cargo_bin("polkadot"))
.args(&["purge-chain", "--dev", "-d"])
.args(["purge-chain", "--dev", "-d"])
.arg(tmpdir.path())
.arg("--database")
.arg("paritydb-experimental")
@@ -40,7 +40,7 @@ async fn running_the_node_works_and_can_be_interrupted() {
let mut cmd = Command::new(cargo_bin("polkadot"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(&["--dev", "-d"])
.args(["--dev", "-d"])
.arg(tmpdir.path())
.arg("--no-hardware-benchmarks")
.spawn()
+3 -3
View File
@@ -100,14 +100,14 @@ impl Assets {
}
/// A borrowing iterator over the fungible assets.
pub fn fungible_assets_iter<'a>(&'a self) -> impl Iterator<Item = MultiAsset> + 'a {
pub fn fungible_assets_iter(&self) -> impl Iterator<Item = MultiAsset> + '_ {
self.fungible
.iter()
.map(|(id, &amount)| MultiAsset { fun: Fungible(amount), id: id.clone() })
}
/// A borrowing iterator over the non-fungible assets.
pub fn non_fungible_assets_iter<'a>(&'a self) -> impl Iterator<Item = MultiAsset> + 'a {
pub fn non_fungible_assets_iter(&self) -> impl Iterator<Item = MultiAsset> + '_ {
self.non_fungible
.iter()
.map(|(id, instance)| MultiAsset { fun: NonFungible(instance.clone()), id: id.clone() })
@@ -126,7 +126,7 @@ impl Assets {
}
/// A borrowing iterator over all assets.
pub fn assets_iter<'a>(&'a self) -> impl Iterator<Item = MultiAsset> + 'a {
pub fn assets_iter(&self) -> impl Iterator<Item = MultiAsset> + '_ {
self.fungible_assets_iter().chain(self.non_fungible_assets_iter())
}