* Add clippy config and remove .cargo from gitignore

* first fixes

* Clippyfied

* Add clippy CI job

* comment out rusty-cachier

* minor

* fix ci

* remove DAG from check-dependent-project

* add DAG to clippy

Co-authored-by: alvicsam <alvicsam@gmail.com>
Author: alexgparity (committed by GitHub)
Date: 2022-11-30 09:34:06 +01:00
Parent: b76086c617
Commit: 9ea14e66c8
67 changed files with 338 additions and 351 deletions
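
Most of the hunks below replace .clone() calls on types that are Copy (PeerId, Hash, ValidatorIndex and similar) with a plain dereference, which is what clippy's clone_on_copy lint asks for. A minimal sketch of the pattern, not part of the diff; the PeerId here is a stand-in type:

#[derive(Clone, Copy)]
struct PeerId([u8; 32]);

fn remember(id: &PeerId) -> PeerId {
    // Before (flagged by clippy::clone_on_copy): id.clone()
    // After: a dereference copies the value, since PeerId is Copy
    *id
}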
@@ -309,7 +309,7 @@ enum MessageSource {
impl MessageSource {
fn peer_id(&self) -> Option<PeerId> {
match self {
Self::Peer(id) => Some(id.clone()),
Self::Peer(id) => Some(*id),
Self::Local => None,
}
}
@@ -389,7 +389,7 @@ impl State {
) {
let mut new_hashes = HashSet::new();
for meta in &metas {
match self.blocks.entry(meta.hash.clone()) {
match self.blocks.entry(meta.hash) {
hash_map::Entry::Vacant(entry) => {
let candidates_count = meta.candidates.len();
let mut candidates = Vec::with_capacity(candidates_count);
@@ -398,7 +398,7 @@ impl State {
entry.insert(BlockEntry {
known_by: HashMap::new(),
number: meta.number,
parent_hash: meta.parent_hash.clone(),
parent_hash: meta.parent_hash,
knowledge: Knowledge::default(),
candidates,
session: meta.session,
@@ -406,7 +406,7 @@ impl State {
self.topologies.inc_session_refs(meta.session);
new_hashes.insert(meta.hash.clone());
new_hashes.insert(meta.hash);
// In case there are duplicates, we should only set this if the entry
// was vacant.
@@ -433,7 +433,7 @@ impl State {
&mut self.blocks,
&self.topologies,
self.peer_views.len(),
peer_id.clone(),
*peer_id,
view_intersection,
rng,
)
@@ -563,10 +563,8 @@ impl State {
"Pending assignment",
);
pending.push((
peer_id.clone(),
PendingMessage::Assignment(assignment, claimed_index),
));
pending
.push((peer_id, PendingMessage::Assignment(assignment, claimed_index)));
continue
}
@@ -574,7 +572,7 @@ impl State {
self.import_and_circulate_assignment(
ctx,
metrics,
MessageSource::Peer(peer_id.clone()),
MessageSource::Peer(peer_id),
assignment,
claimed_index,
rng,
@@ -604,7 +602,7 @@ impl State {
"Pending approval",
);
pending.push((peer_id.clone(), PendingMessage::Approval(approval_vote)));
pending.push((peer_id, PendingMessage::Approval(approval_vote)));
continue
}
@@ -612,7 +610,7 @@ impl State {
self.import_and_circulate_approval(
ctx,
metrics,
MessageSource::Peer(peer_id.clone()),
MessageSource::Peer(peer_id),
approval_vote,
)
.await;
@@ -663,7 +661,7 @@ impl State {
&mut self.blocks,
&self.topologies,
self.peer_views.len(),
peer_id.clone(),
peer_id,
view,
rng,
)
@@ -709,7 +707,7 @@ impl State {
) where
R: CryptoRng + Rng,
{
let block_hash = assignment.block_hash.clone();
let block_hash = assignment.block_hash;
let validator_index = assignment.validator;
let entry = match self.blocks.get_mut(&block_hash) {
@@ -737,7 +735,7 @@ impl State {
if let Some(peer_id) = source.peer_id() {
// check if our knowledge of the peer already contains this assignment
match entry.known_by.entry(peer_id.clone()) {
match entry.known_by.entry(peer_id) {
hash_map::Entry::Occupied(mut peer_knowledge) => {
let peer_knowledge = peer_knowledge.get_mut();
if peer_knowledge.contains(&message_subject, message_kind) {
@@ -761,13 +759,13 @@ impl State {
?message_subject,
"Assignment from a peer is out of view",
);
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
},
}
// if the assignment is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment");
peer_knowledge.received.insert(message_subject, message_kind);
@@ -803,8 +801,7 @@ impl State {
);
match result {
AssignmentCheckResult::Accepted => {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await;
entry.knowledge.known_messages.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
@@ -970,7 +967,7 @@ impl State {
source: MessageSource,
vote: IndirectSignedApprovalVote,
) {
let block_hash = vote.block_hash.clone();
let block_hash = vote.block_hash;
let validator_index = vote.validator;
let candidate_index = vote.candidate_index;
@@ -1003,7 +1000,7 @@ impl State {
}
// check if our knowledge of the peer already contains this approval
match entry.known_by.entry(peer_id.clone()) {
match entry.known_by.entry(peer_id) {
hash_map::Entry::Occupied(mut knowledge) => {
let peer_knowledge = knowledge.get_mut();
if peer_knowledge.contains(&message_subject, message_kind) {
@@ -1027,14 +1024,14 @@ impl State {
?message_subject,
"Approval from a peer is out of view",
);
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
},
}
// if the approval is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval");
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
}
@@ -1065,8 +1062,7 @@ impl State {
);
match result {
ApprovalCheckResult::Accepted => {
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;
modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await;
entry.knowledge.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
@@ -1301,7 +1297,7 @@ impl State {
break
}
let peer_knowledge = entry.known_by.entry(peer_id.clone()).or_default();
let peer_knowledge = entry.known_by.entry(peer_id).or_default();
let topology = topologies.get_topology(entry.session);
@@ -1335,13 +1331,12 @@ impl State {
}
}
let message_subject =
MessageSubject(block.clone(), candidate_index, validator.clone());
let message_subject = MessageSubject(block, candidate_index, *validator);
let assignment_message = (
IndirectAssignmentCert {
block_hash: block.clone(),
validator: validator.clone(),
block_hash: block,
validator: *validator,
cert: message_state.approval_state.assignment_cert().clone(),
},
candidate_index,
@@ -1350,8 +1345,8 @@ impl State {
let approval_message =
message_state.approval_state.approval_signature().map(|signature| {
IndirectSignedApprovalVote {
block_hash: block.clone(),
validator: validator.clone(),
block_hash: block,
validator: *validator,
candidate_index,
signature,
}
@@ -1374,7 +1369,7 @@ impl State {
}
}
block = entry.parent_hash.clone();
block = entry.parent_hash;
}
}
@@ -1388,7 +1383,7 @@ impl State {
sender
.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer_id.clone()],
vec![peer_id],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send),
)),
@@ -1558,13 +1553,12 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
};
// Propagate the message to all peers in the required routing set.
let message_subject =
MessageSubject(block_hash.clone(), candidate_index, validator.clone());
let message_subject = MessageSubject(*block_hash, candidate_index, *validator);
let assignment_message = (
IndirectAssignmentCert {
block_hash: block_hash.clone(),
validator: validator.clone(),
block_hash: *block_hash,
validator: *validator,
cert: message_state.approval_state.assignment_cert().clone(),
},
candidate_index,
@@ -1572,8 +1566,8 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
let approval_message =
message_state.approval_state.approval_signature().map(|signature| {
IndirectSignedApprovalVote {
block_hash: block_hash.clone(),
validator: validator.clone(),
block_hash: *block_hash,
validator: *validator,
candidate_index,
signature,
}
@@ -1590,7 +1584,7 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
if !peer_knowledge.contains(&message_subject, MessageKind::Assignment) {
peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Assignment);
peer_assignments
.entry(peer.clone())
.entry(*peer)
.or_insert_with(Vec::new)
.push(assignment_message.clone());
}
@@ -1599,7 +1593,7 @@ async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModi
if !peer_knowledge.contains(&message_subject, MessageKind::Approval) {
peer_knowledge.sent.insert(message_subject.clone(), MessageKind::Approval);
peer_approvals
.entry(peer.clone())
.entry(*peer)
.or_insert_with(Vec::new)
.push(approval_message.clone());
}
@@ -338,8 +338,7 @@ impl RequestChunksFromValidators {
index: validator_index,
};
let (req, res) =
OutgoingRequest::new(Recipient::Authority(validator), raw_request.clone());
let (req, res) = OutgoingRequest::new(Recipient::Authority(validator), raw_request);
requests.push(Requests::ChunkFetchingV1(req));
params.metrics.on_chunk_request_issued();
@@ -973,7 +972,7 @@ async fn query_full_data<Context>(
ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx))
.await;
Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?)
rx.await.map_err(error::Error::CanceledQueryFullData)
}
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
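
The change just above drops a redundant Ok(...?) wrapper around an expression that already is the function's Result; clippy reports this as needless_question_mark. A minimal sketch under assumed names, not taken from the commit:

fn parse_port(s: &str) -> Result<u16, std::num::ParseIntError> {
    // Before (flagged by clippy::needless_question_mark): Ok(s.parse::<u16>()?)
    // After: the Result can be returned directly
    s.parse::<u16>()
}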
@@ -319,7 +319,7 @@ async fn handle_bitfield_distribution<Context>(
}
let validator_index = signed_availability.validator_index();
let validator = if let Some(validator) = validator_set.get(*&validator_index.0 as usize) {
let validator = if let Some(validator) = validator_set.get(validator_index.0 as usize) {
validator.clone()
} else {
gum::debug!(target: LOG_TARGET, validator_index = ?validator_index.0, "Could not find a validator for index");
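
The get(*&validator_index.0 as usize) call above takes a reference and immediately dereferences it; clippy's deref_addrof lint reduces *&x to x. Tiny sketch with an assumed helper name:

fn as_index(validator_index: u32) -> usize {
    // Before (flagged by clippy::deref_addrof): *&validator_index as usize
    validator_index as usize
}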
@@ -395,7 +395,7 @@ async fn relay_message<Context>(
};
if need_routing {
Some(peer.clone())
Some(*peer)
} else {
None
}
@@ -412,7 +412,7 @@ async fn relay_message<Context>(
// track the message as sent for this peer
job_data
.message_sent_to_peer
.entry(peer.clone())
.entry(*peer)
.or_default()
.insert(validator.clone());
});
@@ -497,7 +497,7 @@ async fn process_incoming_peer_message<Context>(
// Check if the peer already sent us a message for the validator denoted in the message earlier.
// Must be done after validator index verification, in order to avoid storing an unbounded
// number of set entries.
let received_set = job_data.message_received_from_peer.entry(origin.clone()).or_default();
let received_set = job_data.message_received_from_peer.entry(origin).or_default();
if !received_set.contains(&validator) {
received_set.insert(validator.clone());
@@ -656,7 +656,7 @@ async fn handle_peer_view_change<Context>(
) {
let added = state
.peer_views
.entry(origin.clone())
.entry(origin)
.or_default()
.replace_difference(view)
.cloned()
@@ -681,11 +681,10 @@ async fn handle_peer_view_change<Context>(
let delta_set: Vec<(ValidatorId, BitfieldGossipMessage)> = added
.into_iter()
.filter_map(|new_relay_parent_interest| {
if let Some(job_data) = (&*state).per_relay_parent.get(&new_relay_parent_interest) {
if let Some(job_data) = state.per_relay_parent.get(&new_relay_parent_interest) {
// Send all jointly known messages for a validator (given the current relay parent)
// to the peer `origin`...
let one_per_validator = job_data.one_per_validator.clone();
let origin = origin.clone();
Some(one_per_validator.into_iter().filter(move |(validator, _message)| {
// ..except for the ones the peer already has.
job_data.message_from_validator_needed_by_peer(&origin, validator)
@@ -699,7 +698,7 @@ async fn handle_peer_view_change<Context>(
.collect();
for (validator, message) in delta_set.into_iter() {
send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await;
send_tracked_gossip_message(ctx, state, origin, validator, message).await;
}
}
@@ -727,11 +726,7 @@ async fn send_tracked_gossip_message<Context>(
"Sending gossip message"
);
job_data
.message_sent_to_peer
.entry(dest.clone())
.or_default()
.insert(validator.clone());
job_data.message_sent_to_peer.entry(dest).or_default().insert(validator.clone());
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![dest],
@@ -760,14 +755,14 @@ async fn query_basics<Context>(
// query validators
ctx.send_message(RuntimeApiMessage::Request(
relay_parent.clone(),
relay_parent,
RuntimeApiRequest::Validators(validators_tx),
))
.await;
// query signing context
ctx.send_message(RuntimeApiMessage::Request(
relay_parent.clone(),
relay_parent,
RuntimeApiRequest::SessionIndexForChild(session_tx),
))
.await;
@@ -174,7 +174,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
Ok(v) => v,
Err(_) => continue,
};
NetworkService::add_known_address(&*self, peer_id.clone(), addr);
NetworkService::add_known_address(self, peer_id, addr);
found_peer_id = Some(peer_id);
}
found_peer_id
@@ -197,7 +197,7 @@ impl Network for Arc<NetworkService<Block, Hash>> {
};
NetworkService::start_request(
&*self,
self,
peer_id,
req_protocol_names.get_name(protocol),
payload,
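
In the two hunks above, &*self re-borrows through the Arc where deref coercion already turns &Arc<NetworkService<...>> into &NetworkService, so passing self is enough (the needless-borrow / explicit-auto-deref family of clippy lints). Sketch with placeholder types, not the real NetworkService API:

use std::sync::Arc;

struct Service;

impl Service {
    fn ping(&self) {}
}

fn call(handle: &Arc<Service>) {
    // Before: Service::ping(&*handle)
    // After: &Arc<Service> deref-coerces to &Service at the call site
    Service::ping(handle);
}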
@@ -213,7 +213,7 @@ where
PeerSet::Collation => &mut shared.collation_peers,
};
match peer_map.entry(peer.clone()) {
match peer_map.entry(peer) {
hash_map::Entry::Occupied(_) => continue,
hash_map::Entry::Vacant(vacant) => {
vacant.insert(PeerData { view: View::default(), version });
@@ -234,12 +234,12 @@ where
dispatch_validation_events_to_all(
vec![
NetworkBridgeEvent::PeerConnected(
peer.clone(),
peer,
role,
version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
NetworkBridgeEvent::PeerViewChange(peer, View::default()),
],
&mut sender,
)
@@ -259,12 +259,12 @@ where
dispatch_collation_events_to_all(
vec![
NetworkBridgeEvent::PeerConnected(
peer.clone(),
peer,
role,
version,
maybe_authority,
),
NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()),
NetworkBridgeEvent::PeerViewChange(peer, View::default()),
],
&mut sender,
)
@@ -421,7 +421,7 @@ where
Some(ValidationVersion::V1.into())
{
handle_v1_peer_messages::<protocol_v1::ValidationProtocol, _>(
remote.clone(),
remote,
PeerSet::Validation,
&mut shared.0.lock().validation_peers,
v_messages,
@@ -442,7 +442,7 @@ where
};
for report in reports {
network_service.report_peer(remote.clone(), report);
network_service.report_peer(remote, report);
}
dispatch_validation_events_to_all(events, &mut sender).await;
@@ -454,7 +454,7 @@ where
Some(CollationVersion::V1.into())
{
handle_v1_peer_messages::<protocol_v1::CollationProtocol, _>(
remote.clone(),
remote,
PeerSet::Collation,
&mut shared.0.lock().collation_peers,
c_messages,
@@ -475,7 +475,7 @@ where
};
for report in reports {
network_service.report_peer(remote.clone(), report);
network_service.report_peer(remote, report);
}
dispatch_collation_events_to_all(events, &mut sender).await;
@@ -795,11 +795,11 @@ fn handle_v1_peer_messages<RawMessage: Decode, OutMessage: From<RawMessage>>(
} else {
peer_data.view = new_view;
NetworkBridgeEvent::PeerViewChange(peer.clone(), peer_data.view.clone())
NetworkBridgeEvent::PeerViewChange(peer, peer_data.view.clone())
}
},
WireMessage::ProtocolMessage(message) =>
NetworkBridgeEvent::PeerMessage(peer.clone(), message.into()),
NetworkBridgeEvent::PeerMessage(peer, message.into()),
})
}
@@ -561,7 +561,7 @@ async fn advertise_collation<Context>(
let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent);
ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(
vec![peer.clone()],
vec![peer],
Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
))
.await;
@@ -707,11 +707,8 @@ async fn handle_incoming_peer_message<Context>(
"AdvertiseCollation message is not expected on the collator side of the protocol",
);
ctx.send_message(NetworkBridgeTxMessage::ReportPeer(
origin.clone(),
COST_UNEXPECTED_MESSAGE,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::ReportPeer(origin, COST_UNEXPECTED_MESSAGE))
.await;
// If we are advertised to, this is another collator, and we should disconnect.
ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation))
@@ -838,14 +835,14 @@ async fn handle_peer_view_change<Context>(
peer_id: PeerId,
view: View,
) {
let current = state.peer_views.entry(peer_id.clone()).or_default();
let current = state.peer_views.entry(peer_id).or_default();
let added: Vec<Hash> = view.difference(&*current).cloned().collect();
*current = view;
for added in added.into_iter() {
advertise_collation(ctx, state, added, peer_id.clone()).await;
advertise_collation(ctx, state, added, peer_id).await;
}
}
@@ -287,7 +287,7 @@ impl PeerData {
PeerState::Collating(ref mut state) =>
if state.advertisements.insert(on_relay_parent) {
state.last_active = Instant::now();
Ok((state.collator_id.clone(), state.para_id.clone()))
Ok((state.collator_id.clone(), state.para_id))
} else {
Err(AdvertisementError::Duplicate)
},
@@ -375,22 +375,19 @@ impl ActiveParas {
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let mg = polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender)
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let mc = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender)
.await
.await
.ok()
.map(|x| x.ok())
.flatten();
.and_then(|x| x.ok());
let (validators, groups, rotation_info, cores) = match (mv, mg, mc) {
(Some(v), Some((g, r)), Some(c)) => (v, g, r, c),
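
The three request_* calls above replace .map(|x| x.ok()).flatten() on an Option with the equivalent .and_then(|x| x.ok()), per clippy's map_flatten lint. Stand-alone sketch, names assumed:

fn first_ok(maybe_result: Option<Result<u32, ()>>) -> Option<u32> {
    // Before (flagged by clippy::map_flatten): maybe_result.map(|x| x.ok()).flatten()
    maybe_result.and_then(|x| x.ok())
}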
@@ -486,12 +483,7 @@ struct PendingCollation {
impl PendingCollation {
fn new(relay_parent: Hash, para_id: &ParaId, peer_id: &PeerId) -> Self {
Self {
relay_parent,
para_id: para_id.clone(),
peer_id: peer_id.clone(),
commitments_hash: None,
}
Self { relay_parent, para_id: *para_id, peer_id: *peer_id, commitments_hash: None }
}
}
@@ -629,9 +621,9 @@ fn collator_peer_id(
peer_data: &HashMap<PeerId, PeerData>,
collator_id: &CollatorId,
) -> Option<PeerId> {
peer_data.iter().find_map(|(peer, data)| {
data.collator_id().filter(|c| c == &collator_id).map(|_| peer.clone())
})
peer_data
.iter()
.find_map(|(peer, data)| data.collator_id().filter(|c| c == &collator_id).map(|_| *peer))
}
async fn disconnect_peer(sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId) {
@@ -655,9 +647,7 @@ async fn fetch_collation(
Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await;
(collator_id, relay_parent)
};
state
.collation_fetch_timeouts
.push(timeout(id.clone(), relay_parent.clone()).boxed());
state.collation_fetch_timeouts.push(timeout(id.clone(), relay_parent).boxed());
if let Some(peer_data) = state.peer_data.get(&peer_id) {
if peer_data.has_advertised(&relay_parent) {
@@ -729,7 +719,7 @@ async fn notify_collation_seconded(
/// - Ongoing collation requests have to be canceled.
/// - Advertisements by this peer that are no longer relevant have to be removed.
async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> {
let peer_data = state.peer_data.entry(peer_id.clone()).or_default();
let peer_data = state.peer_data.entry(peer_id).or_default();
peer_data.update_view(view);
state
@@ -883,7 +873,7 @@ async fn process_incoming_peer_message<Context>(
"Declared as collator for unneeded para",
);
modify_reputation(ctx.sender(), origin.clone(), COST_UNNEEDED_COLLATOR).await;
modify_reputation(ctx.sender(), origin, COST_UNNEEDED_COLLATOR).await;
gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
disconnect_peer(ctx.sender(), origin).await;
}
@@ -1013,7 +1003,7 @@ async fn handle_our_view_change<Context>(
.span_per_head()
.iter()
.filter(|v| !old_view.contains(&v.0))
.map(|v| (v.0.clone(), v.1.clone()))
.map(|v| (*v.0, v.1.clone()))
.collect();
added.into_iter().for_each(|(h, s)| {
@@ -1046,7 +1036,7 @@ async fn handle_our_view_change<Context>(
?para_id,
"Disconnecting peer on view change (not current parachain id)"
);
disconnect_peer(ctx.sender(), peer_id.clone()).await;
disconnect_peer(ctx.sender(), *peer_id).await;
}
}
}
@@ -1254,7 +1244,7 @@ async fn poll_requests(
retained_requested.insert(pending_collation.clone());
}
if let CollationFetchResult::Error(Some(rep)) = result {
reputation_changes.push((pending_collation.peer_id.clone(), rep));
reputation_changes.push((pending_collation.peer_id, rep));
}
}
requested_collations.retain(|k, _| retained_requested.contains(k));
@@ -1337,11 +1327,7 @@ async fn handle_collation_fetched_result<Context>(
if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) {
collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash);
ctx.sender()
.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
pov,
))
.send_message(CandidateBackingMessage::Second(relay_parent, candidate_receipt, pov))
.await;
entry.insert(collation_event);
@@ -1366,7 +1352,7 @@ async fn disconnect_inactive_peers(
for (peer, peer_data) in peers {
if peer_data.is_inactive(&eviction_policy) {
gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
disconnect_peer(sender, peer.clone()).await;
disconnect_peer(sender, *peer).await;
}
}
}
@@ -430,7 +430,7 @@ where
);
return
},
Some(vote) => (vote.0.session_index(), vote.0.candidate_hash().clone()),
Some(vote) => (vote.0.session_index(), *vote.0.candidate_hash()),
};
let (pending_confirmation, confirmation_rx) = oneshot::channel();
@@ -304,7 +304,7 @@ impl DisputeSender {
.get(*valid_index)
.ok_or(JfyiError::InvalidStatementFromCoordinator)?;
let valid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Valid(kind.clone()),
DisputeStatement::Valid(*kind),
candidate_hash,
session_index,
valid_public.clone(),
@@ -319,7 +319,7 @@ impl DisputeSender {
.get(*invalid_index)
.ok_or(JfyiError::InvalidValidatorIndexFromCoordinator)?;
let invalid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Invalid(kind.clone()),
DisputeStatement::Invalid(*kind),
candidate_hash,
session_index,
invalid_public.clone(),
@@ -94,7 +94,7 @@ impl SessionGridTopology {
let n = &self.canonical_shuffling[r_n];
grid_subset.validator_indices_x.insert(n.validator_index);
for p in &n.peer_ids {
grid_subset.peers_x.insert(p.clone());
grid_subset.peers_x.insert(*p);
}
}
@@ -102,7 +102,7 @@ impl SessionGridTopology {
let n = &self.canonical_shuffling[c_n];
grid_subset.validator_indices_y.insert(n.validator_index);
for p in &n.peer_ids {
grid_subset.peers_y.insert(p.clone());
grid_subset.peers_y.insert(*p);
}
}
@@ -207,7 +207,7 @@ impl View {
}
/// Obtain an iterator over all heads.
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Hash> {
pub fn iter(&self) -> impl Iterator<Item = &Hash> {
self.heads.iter()
}
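
The iter signature above drops an explicit 'a that lifetime elision already supplies; clippy reports this as needless_lifetimes. Self-contained sketch mirroring the change, with Heads standing in for the real View type:

struct Heads {
    heads: Vec<u64>,
}

impl Heads {
    // Before: pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a u64>
    // After: elision ties the returned references to &self without naming 'a
    pub fn iter(&self) -> impl Iterator<Item = &u64> {
        self.heads.iter()
    }
}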
@@ -278,10 +278,10 @@ impl PeerRelayParentKnowledge {
let new_known = match fingerprint.0 {
CompactStatement::Seconded(ref h) => {
self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone());
self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h);
let was_known = self.is_known_candidate(h);
self.sent_candidates.insert(h.clone());
self.sent_candidates.insert(*h);
!was_known
},
CompactStatement::Valid(_) => false,
@@ -345,7 +345,7 @@ impl PeerRelayParentKnowledge {
.seconded_counts
.entry(fingerprint.1)
.or_insert_with(Default::default)
.note_remote(h.clone());
.note_remote(*h);
if !allowed_remote {
return Err(COST_UNEXPECTED_STATEMENT_REMOTE)
@@ -374,7 +374,7 @@ impl PeerRelayParentKnowledge {
}
self.received_statements.insert(fingerprint.clone());
self.received_candidates.insert(candidate_hash.clone());
self.received_candidates.insert(*candidate_hash);
Ok(fresh)
}
@@ -1025,13 +1025,15 @@ async fn circulate_statement<'a, Context>(
let mut peers_to_send: Vec<PeerId> = peers
.iter()
.filter_map(|(peer, data)| {
if data.can_send(&relay_parent, &fingerprint) {
Some(peer.clone())
} else {
None
}
})
.filter_map(
|(peer, data)| {
if data.can_send(&relay_parent, &fingerprint) {
Some(*peer)
} else {
None
}
},
)
.collect();
let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect();
@@ -1087,7 +1089,7 @@ async fn circulate_statement<'a, Context>(
"Sending statement",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
peers_to_send.iter().map(|(p, _)| p.clone()).collect(),
peers_to_send.iter().map(|(p, _)| *p).collect(),
payload,
))
.await;
@@ -1126,11 +1128,8 @@ async fn send_statements_about<Context>(
statement = ?statement.statement,
"Sending statement",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer.clone()],
payload,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload))
.await;
metrics.on_statement_distributed();
}
@@ -1161,11 +1160,8 @@ async fn send_statements<Context>(
statement = ?statement.statement,
"Sending statement"
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
vec![peer.clone()],
payload,
))
.await;
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload))
.await;
metrics.on_statement_distributed();
}
@@ -1431,7 +1427,7 @@ async fn handle_incoming_message<'a, Context>(
}
let fingerprint = message.get_fingerprint();
let candidate_hash = fingerprint.0.candidate_hash().clone();
let candidate_hash = *fingerprint.0.candidate_hash();
let handle_incoming_span = active_head
.span
.child("handle-incoming")
@@ -1551,7 +1547,7 @@ async fn handle_incoming_message<'a, Context>(
// Send the peer all statements concerning the candidate that we have,
// since it appears to have just learned about the candidate.
send_statements_about(
peer.clone(),
peer,
peer_data,
ctx,
relay_parent,
@@ -1627,7 +1623,7 @@ async fn update_peer_view_and_maybe_send_unlocked<Context, R>(
continue
}
if let Some(active_head) = active_heads.get(&new) {
send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await;
send_statements(peer, peer_data, ctx, new, active_head, metrics).await;
}
}
}
@@ -1710,7 +1706,7 @@ async fn handle_network_update<Context, R>(
topology_storage,
peers,
active_heads,
&*recent_outdated_heads,
recent_outdated_heads,
ctx,
message,
req_sender,