diff --git a/polkadot/node/collation-generation/src/lib.rs b/polkadot/node/collation-generation/src/lib.rs
index d1adcc32dd..466cb2bed7 100644
--- a/polkadot/node/collation-generation/src/lib.rs
+++ b/polkadot/node/collation-generation/src/lib.rs
@@ -77,7 +77,6 @@ impl CollationGenerationSubsystem {
 ///
 /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
 /// Otherwise, most are logged and then discarded.
- #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(mut self, mut ctx: Context)
 where
 Context: SubsystemContext,
@@ -110,7 +109,6 @@ impl CollationGenerationSubsystem {
 // note: this doesn't strictly need to be a separate function; it's more an administrative function
 // so that we don't clutter the run loop. It could in principle be inlined directly into there.
 // it should hopefully therefore be ok that it's an async function mutably borrowing self.
- #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))]
 async fn handle_incoming(
 &mut self,
 incoming: SubsystemResult>,
@@ -184,7 +182,6 @@ where
 }
 }

-#[tracing::instrument(level = "trace", skip(ctx, metrics, sender, activated), fields(subsystem = LOG_TARGET))]
 async fn handle_new_activations(
 config: Arc,
 activated: impl IntoIterator,
@@ -419,7 +416,6 @@ async fn handle_new_activations(
 Ok(())
 }

-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn erasure_root(
 n_validators: usize,
 persisted_validation: PersistedValidationData,
diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs
index efe0379a4a..1c5e8708e5 100644
--- a/polkadot/node/core/av-store/src/lib.rs
+++ b/polkadot/node/core/av-store/src/lib.rs
@@ -498,7 +498,6 @@ where
 }
 }

-#[tracing::instrument(skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context)
 where
 Context: SubsystemContext,
@@ -524,7 +523,6 @@ where
 }
 }

-#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))]
 async fn run_iteration(
 ctx: &mut Context,
 subsystem: &mut AvailabilityStoreSubsystem,
diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs
index 3d5ac78873..12a4240ce6 100644
--- a/polkadot/node/core/backing/src/lib.rs
+++ b/polkadot/node/core/backing/src/lib.rs
@@ -241,7 +241,6 @@ fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement
 }
 }

-#[tracing::instrument(level = "trace", skip(attested, table_context), fields(subsystem = LOG_TARGET))]
 fn table_attested_to_backed(
 attested: TableAttestedCandidate<
 ParaId,
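Every hunk in this patch deletes a `#[tracing::instrument]` attribute while leaving the function itself untouched. For readers unfamiliar with the macro: it wrapped each call in a tracing span carrying the `subsystem = LOG_TARGET` field, with `skip(...)` excluding the named arguments from being recorded. A minimal sketch of the explicit equivalent, assuming only the standard `tracing` crate API (the `LOG_TARGET` value here is a placeholder):

    use tracing::{info_span, Instrument};

    const LOG_TARGET: &str = "subsystem-placeholder"; // hypothetical value

    async fn run() {
        // What the removed attribute generated, written out by hand: create a
        // span with the `subsystem` field and instrument the async body with it.
        let span = info_span!("run", subsystem = LOG_TARGET);
        async move {
            // ... original function body ...
        }
        .instrument(span)
        .await;
    }

Removing the attribute therefore drops the per-call span entirely rather than replacing it with anything.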
@@ -317,7 +316,6 @@ async fn store_available_data(
 //
 // This will compute the erasure root internally and compare it to the expected erasure root.
 // This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
-#[tracing::instrument(level = "trace", skip(sender, pov, span), fields(subsystem = LOG_TARGET))]
 async fn make_pov_available(
 sender: &mut JobSender,
 validator_index: Option,
@@ -570,7 +568,6 @@ impl CandidateBackingJob {
 Ok(())
 }

- #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
 async fn handle_validated_candidate_command(
 &mut self,
 root_span: &jaeger::Span,
@@ -647,7 +644,6 @@ impl CandidateBackingJob {
 Ok(())
 }

- #[tracing::instrument(level = "trace", skip(self, sender, params), fields(subsystem = LOG_TARGET))]
 async fn background_validate_and_make_available(
 &mut self,
 sender: &mut JobSender,
@@ -671,7 +667,6 @@ impl CandidateBackingJob {
 }

 /// Kick off background validation with intent to second.
- #[tracing::instrument(level = "trace", skip(self, parent_span, sender, pov), fields(subsystem = LOG_TARGET))]
 async fn validate_and_second(
 &mut self,
 parent_span: &jaeger::Span,
@@ -743,7 +738,6 @@ impl CandidateBackingJob {
 }

 /// Check if there have happened any new misbehaviors and issue necessary messages.
- #[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
 async fn issue_new_misbehaviors(&mut self, sender: &mut JobSender) {
 // collect the misbehaviors to avoid double mutable self borrow issues
 let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
@@ -758,7 +752,6 @@ impl CandidateBackingJob {
 }

 /// Import a statement into the statement table and return the summary of the import.
- #[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
 async fn import_statement(
 &mut self,
 sender: &mut JobSender,
@@ -828,7 +821,6 @@ impl CandidateBackingJob {
 Ok(summary)
 }

- #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
 async fn process_msg(
 &mut self,
 root_span: &jaeger::Span,
@@ -895,7 +887,6 @@ impl CandidateBackingJob {
 }

 /// Kick off validation work and distribute the result as a signed statement.
- #[tracing::instrument(level = "trace", skip(self, sender, attesting, span), fields(subsystem = LOG_TARGET))]
 async fn kick_off_validation_work(
 &mut self,
 sender: &mut JobSender,
@@ -951,7 +942,6 @@ impl CandidateBackingJob {
 }

 /// Import the statement and kick off validation work if it is a part of our assignment.
- #[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
 async fn maybe_validate_and_import(
 &mut self,
 root_span: &jaeger::Span,
@@ -1014,7 +1004,6 @@ impl CandidateBackingJob {
 Ok(())
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 async fn sign_statement(&self, statement: Statement) -> Option {
 let signed = self.table_context
 .validator
@@ -1090,7 +1079,6 @@ impl util::JobTrait for CandidateBackingJob {

 const NAME: &'static str = "CandidateBackingJob";

- #[tracing::instrument(skip(span, keystore, metrics, rx_to, sender), fields(subsystem = LOG_TARGET))]
 fn run(
 parent: Hash,
 span: Arc,
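The `make_pov_available` comment above distinguishes internal errors from validation failures via a nested result. A tiny illustration of that convention, with placeholder error types standing in for the node's real ones:

    // Outer Err: an internal error (e.g. a dropped channel). Inner Err: the
    // candidate failed the check. Ok(Ok(())): the data was stored and valid.
    type InternalError = String;       // placeholder
    type InvalidErasureRoot = String;  // placeholder

    fn erasure_check(matches: bool) -> Result<Result<(), InvalidErasureRoot>, InternalError> {
        if matches {
            Ok(Ok(()))
        } else {
            Ok(Err("erasure root mismatch".to_string()))
        }
    }

The split lets callers retry or abort on internal failures while treating the inner error as an ordinary negative result.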
diff --git a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs
index 39ef5622a5..a36b2d8baa 100644
--- a/polkadot/node/core/bitfield-signing/src/lib.rs
+++ b/polkadot/node/core/bitfield-signing/src/lib.rs
@@ -70,7 +70,6 @@ pub enum Error {

 /// If there is a candidate pending availability, query the Availability Store
 /// for whether we have the availability chunk for our validator index.
-#[tracing::instrument(level = "trace", skip(sender, span), fields(subsystem = LOG_TARGET))]
 async fn get_core_availability(
 core: &CoreState,
 validator_idx: ValidatorIndex,
@@ -132,7 +131,6 @@ async fn get_availability_cores(
 /// - for each core, concurrently determine chunk availability (see `get_core_availability`)
 /// - return the bitfield if there were no errors at any point in this process
 /// (otherwise, it's prone to false negatives)
-#[tracing::instrument(level = "trace", skip(sender, span), fields(subsystem = LOG_TARGET))]
 async fn construct_availability_bitfield(
 relay_parent: Hash,
 span: &jaeger::Span,
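The `construct_availability_bitfield` doc comment above describes querying every core concurrently and aborting on any error. A sketch of that shape, assuming the `futures` crate and a hypothetical per-core query closure:

    use futures::future::{join_all, BoxFuture};

    // Query each core's chunk availability in parallel; any error cancels the
    // whole bitfield, since a partial answer would be prone to false negatives.
    async fn availability_bits(
        n_cores: usize,
        query: impl Fn(usize) -> BoxFuture<'static, Result<bool, ()>>,
    ) -> Result<Vec<bool>, ()> {
        join_all((0..n_cores).map(|core| query(core)))
            .await
            .into_iter()
            .collect()
    }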
@@ -226,7 +224,6 @@ impl JobTrait for BitfieldSigningJob {
 const NAME: &'static str = "BitfieldSigningJob";

 /// Run a job for the parent block indicated
- #[tracing::instrument(skip(span, keystore, metrics, _receiver, sender), fields(subsystem = LOG_TARGET))]
 fn run(
 relay_parent: Hash,
 span: Arc,
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 1f27e1c55e..4efec16db1 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -94,7 +94,6 @@ impl Subsystem for CandidateValidationSubsystem where
 }
 }

-#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn run(
 mut ctx: impl SubsystemContext,
 metrics: Metrics,
@@ -194,7 +193,6 @@ enum AssumptionCheckOutcome {
 BadRequest,
 }

-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn check_assumption_validation_data(
 ctx: &mut impl SubsystemContext,
 descriptor: &CandidateDescriptor,
@@ -245,7 +243,6 @@ async fn check_assumption_validation_data(
 })
 }

-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn find_assumed_validation_data(
 ctx: &mut impl SubsystemContext,
 descriptor: &CandidateDescriptor,
@@ -277,11 +274,6 @@ async fn find_assumed_validation_data(
 Ok(AssumptionCheckOutcome::DoesNotMatch)
 }

-#[tracing::instrument(
- level = "trace",
- skip(ctx, validation_host, pov, metrics),
- fields(subsystem = LOG_TARGET),
-)]
 async fn spawn_validate_from_chain_state(
 ctx: &mut impl SubsystemContext,
 validation_host: &mut ValidationHost,
@@ -340,11 +332,6 @@ async fn spawn_validate_from_chain_state(
 validation_result
 }

-#[tracing::instrument(
- level = "trace",
- skip(validation_backend, validation_code, pov, metrics),
- fields(subsystem = LOG_TARGET),
-)]
 async fn validate_candidate_exhaustive(
 mut validation_backend: impl ValidationBackend,
 persisted_validation_data: PersistedValidationData,
@@ -478,7 +465,6 @@ impl ValidationBackend for &'_ mut ValidationHost {

 /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks
 /// are passed, `Err` otherwise.
-#[tracing::instrument(level = "trace", skip(pov, validation_code), fields(subsystem = LOG_TARGET))]
 fn perform_basic_checks(
 candidate: &CandidateDescriptor,
 max_pov_size: u32,
diff --git a/polkadot/node/core/chain-api/src/lib.rs b/polkadot/node/core/chain-api/src/lib.rs
index 782fcb19ed..0e6d3623c3 100644
--- a/polkadot/node/core/chain-api/src/lib.rs
+++ b/polkadot/node/core/chain-api/src/lib.rs
@@ -77,7 +77,6 @@ impl Subsystem for ChainApiSubsystem where
 }
 }

-#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
 async fn run(
 mut ctx: impl SubsystemContext,
 subsystem: ChainApiSubsystem,
diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs
index 5320fb931d..dd13621597 100644
--- a/polkadot/node/core/provisioner/src/lib.rs
+++ b/polkadot/node/core/provisioner/src/lib.rs
@@ -140,7 +140,6 @@ impl JobTrait for ProvisioningJob {
 /// Run a job for the parent block indicated
 //
 // this function is in charge of creating and executing the job's main loop
- #[tracing::instrument(skip(span, _run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))]
 fn run(
 relay_parent: Hash,
 span: Arc,
@@ -242,7 +241,6 @@ impl ProvisioningJob {
 }
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn note_provisionable_data(&mut self, span: &jaeger::Span, provisionable_data: ProvisionableData) {
 match provisionable_data {
 ProvisionableData::Bitfield(_, signed_bitfield) => {
@@ -277,7 +275,6 @@ type CoreAvailability = BitVec;
 /// When we're choosing bitfields to include, the rule should be simple:
 /// maximize availability. So basically, include all bitfields. And then
 /// choose a coherent set of candidates along with that.
-#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))]
 async fn send_inherent_data(
 relay_parent: Hash,
 bitfields: &[SignedAvailabilityBitfield],
@@ -321,7 +318,6 @@
 ///
 /// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated
 /// to the sorting of the input.
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn select_availability_bitfields(
 cores: &[CoreState],
 bitfields: &[SignedAvailabilityBitfield],
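The provisioner comments above, together with the `bitfields_indicate_availability` doc further down, describe a simple policy: include one bitfield per validator, then count a core as available once roughly two thirds of bits are set. A simplified sketch over plain `Vec<bool>` bitfields (the real code operates on signed bitfields and checks signatures; the exact strictness of the 2/3 comparison is this sketch's assumption):

    use std::collections::HashMap;

    // Keep at most one bitfield per validator index; later entries win here.
    fn select_bitfields(bitfields: &[(usize, Vec<bool>)]) -> Vec<(usize, Vec<bool>)> {
        let mut picked: HashMap<usize, Vec<bool>> = HashMap::new();
        for (validator, bits) in bitfields {
            picked.insert(*validator, bits.clone());
        }
        picked.into_iter().collect()
    }

    // A core counts as available when at least 2/3 of validators voted for it.
    fn core_available(core_idx: usize, bitfields: &[(usize, Vec<bool>)]) -> bool {
        let votes = bitfields
            .iter()
            .filter(|(_, bits)| bits.get(core_idx).copied().unwrap_or(false))
            .count();
        3 * votes >= 2 * bitfields.len()
    }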
@@ -353,7 +349,6 @@
 }

 /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core.
-#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
 async fn select_candidates(
 availability_cores: &[CoreState],
 bitfields: &[SignedAvailabilityBitfield],
@@ -475,7 +470,6 @@
 /// Produces a block number 1 higher than that of the relay parent
 /// in the event of an invalid `relay_parent`, returns `Ok(0)`
-#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))]
 async fn get_block_number_under_construction(
 relay_parent: Hash,
 sender: &mut impl SubsystemSender,
@@ -501,7 +495,6 @@
 /// - construct a transverse slice along `core_idx`
 /// - bitwise-or it with the availability slice
 /// - count the 1 bits, compare to the total length; true on 2/3+
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn bitfields_indicate_availability(
 core_idx: usize,
 bitfields: &[SignedAvailabilityBitfield],
diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs
index a2cd09a8f3..b8927564f9 100644
--- a/polkadot/node/core/runtime-api/src/lib.rs
+++ b/polkadot/node/core/runtime-api/src/lib.rs
@@ -262,7 +262,6 @@ impl RuntimeApiSubsystem where
 }
 }

-#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))]
 async fn run(
 mut ctx: impl SubsystemContext,
 mut subsystem: RuntimeApiSubsystem,
@@ -287,7 +286,6 @@ async fn run(
 }
 }

-#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))]
 fn make_runtime_api_request(
 client: Arc,
 metrics: Metrics,
diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs
index 29c6504c08..6492caa2b1 100644
--- a/polkadot/node/network/approval-distribution/src/lib.rs
+++ b/polkadot/node/network/approval-distribution/src/lib.rs
@@ -1102,7 +1102,6 @@ impl State {

 /// Modify the reputation of a peer based on its behavior.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn modify_reputation(
 ctx: &mut impl SubsystemContext,
 peer_id: PeerId,
@@ -1126,7 +1125,6 @@ impl ApprovalDistribution {
 Self { metrics }
 }

- #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(self, ctx: Context)
 where
 Context: SubsystemContext,
@@ -1136,7 +1134,6 @@ impl ApprovalDistribution {
 }

 /// Used for testing.
- #[tracing::instrument(skip(self, ctx, state), fields(subsystem = LOG_TARGET))]
 async fn run_inner(self, mut ctx: Context, state: &mut State)
 where
 Context: SubsystemContext,
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
index 42f5554ae8..1a4d6574e1 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/mod.rs
@@ -176,7 +176,6 @@ impl FetchTask {
 /// Start fetching a chunk.
 ///
 /// A task handling the fetching of the configured chunk will be spawned.
- #[tracing::instrument(level = "trace", skip(config, ctx), fields(subsystem = LOG_TARGET))]
 pub async fn start(config: FetchTaskConfig, ctx: &mut Context) -> Result
 where
 Context: SubsystemContext,
@@ -249,7 +248,6 @@ enum TaskError {
 }

 impl RunningTask {
- #[tracing::instrument(level = "trace", skip(self, kill), fields(subsystem = LOG_TARGET))]
 async fn run(self, kill: oneshot::Receiver<()>) {
 // Wait for completion/or cancel.
 let run_it = self.run_inner();
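`RunningTask::run` above races the fetch against a kill signal. A sketch of that completion-or-cancel pattern with `futures::select!`, assuming only the `futures` crate:

    use futures::{channel::oneshot, future::FutureExt, pin_mut, select};

    async fn run_with_kill(kill: oneshot::Receiver<()>) {
        // Wait for completion or cancellation, whichever happens first.
        let run_it = async { /* ... fetch the chunk ... */ }.fuse();
        let kill = kill.fuse();
        pin_mut!(run_it, kill);
        select! {
            _ = run_it => { /* finished normally */ }
            _ = kill => { /* cancelled by the requester */ }
        }
    }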
diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs
index a13140f701..8e6f1451c6 100644
--- a/polkadot/node/network/availability-distribution/src/requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs
@@ -78,7 +78,6 @@ impl Requester {
 ///
 /// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress
 /// by advancing the stream.
- #[tracing::instrument(level = "trace", skip(metrics), fields(subsystem = LOG_TARGET))]
 pub fn new(metrics: Metrics) -> Self {
 let (tx, rx) = mpsc::channel(1);
 Requester {
@@ -92,7 +91,6 @@ impl Requester {
 /// Update heads that need availability distribution.
 ///
 /// For all active heads we will be fetching our chunks for availability distribution.
- #[tracing::instrument(level = "trace", skip(self, ctx, runtime, update), fields(subsystem = LOG_TARGET))]
 pub async fn update_fetching_heads(
 &mut self,
 ctx: &mut Context,
diff --git a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
index 380471566f..a7e1d69d78 100644
--- a/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/session_cache.rs
@@ -95,7 +95,6 @@ impl SessionCache {
 ///
 /// Use this function over any `fetch_session_info` if all you need is a reference to
 /// `SessionInfo`, as it avoids an expensive clone.
- #[tracing::instrument(level = "trace", skip(self, ctx, runtime, with_info), fields(subsystem = LOG_TARGET))]
 pub async fn with_session_info(
 &mut self,
 ctx: &mut Context,
@@ -146,7 +145,6 @@ impl SessionCache {
 ///
 /// We assume validators in a group are tried in reverse order, so the reported bad validators
 /// will be put at the beginning of the group.
- #[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))]
 pub fn report_bad(&mut self, report: BadValidators) -> crate::Result<()> {
 let session = self
 .session_info_cache
diff --git a/polkadot/node/network/availability-distribution/src/responder.rs b/polkadot/node/network/availability-distribution/src/responder.rs
index a811607d0f..9a20510eb3 100644
--- a/polkadot/node/network/availability-distribution/src/responder.rs
+++ b/polkadot/node/network/availability-distribution/src/responder.rs
@@ -149,7 +149,6 @@ where
 }

 /// Query chunk from the availability store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_chunk(
 ctx: &mut Context,
 candidate_hash: CandidateHash,
@@ -178,7 +177,6 @@ where
 }

 /// Query PoV from the availability store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_available_data(
 ctx: &mut Context,
 candidate_hash: CandidateHash,
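The `report_bad` doc above keeps the invariant that validators in a group are tried from the back, so misbehaving ones belong at the front. A sketch of that reordering with plain indices, using a stable sort keyed on membership:

    // Move the known-bad validators to the beginning, preserving the relative
    // order of everything else (sort_by_key is stable; false sorts before true).
    fn move_bad_to_front(group: &mut Vec<u32>, bad: &[u32]) {
        group.sort_by_key(|v| !bad.contains(v));
    }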
diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs
index f8eab8689a..4a4efc66bf 100644
--- a/polkadot/node/network/availability-recovery/src/lib.rs
+++ b/polkadot/node/network/availability-recovery/src/lib.rs
@@ -608,11 +608,9 @@ async fn handle_signal(
 }

 /// Machinery around launching interactions into the background.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn launch_interaction(
 state: &mut State,
 ctx: &mut impl SubsystemContext,
- session_index: SessionIndex,
 session_info: SessionInfo,
 receipt: CandidateReceipt,
 backing_group: Option,
@@ -663,7 +661,6 @@
 }

 /// Handles an availability recovery request.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn handle_recover(
 state: &mut State,
 ctx: &mut impl SubsystemContext,
@@ -706,7 +703,6 @@
 launch_interaction(
 state,
 ctx,
- session_index,
 session_info,
 receipt,
 backing_group,
@@ -727,7 +723,6 @@
 }

 /// Queries a chunk from av-store.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_full_data(
 ctx: &mut impl SubsystemContext,
 candidate_hash: CandidateHash,
diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs
index 848eeb02bb..67298ca750 100644
--- a/polkadot/node/network/bitfield-distribution/src/lib.rs
+++ b/polkadot/node/network/bitfield-distribution/src/lib.rs
@@ -152,7 +152,6 @@ impl BitfieldDistribution {
 }

 /// Start processing work as passed on from the Overseer.
- #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(self, mut ctx: Context)
 where
 Context: SubsystemContext,
@@ -234,7 +233,6 @@ impl BitfieldDistribution {
 }

 /// Modify the reputation of a peer based on its behavior.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn modify_reputation(
 ctx: &mut Context,
 peer: PeerId,
@@ -254,7 +252,6 @@ where
 /// Distribute a given valid and signature checked bitfield message.
 ///
 /// For this variant the source is this node.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn handle_bitfield_distribution(
 ctx: &mut Context,
 state: &mut ProtocolState,
@@ -308,7 +305,6 @@ where
 /// Distribute a given valid and signature checked bitfield message.
 ///
 /// Can be originated by another subsystem or received via network from another peer.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn relay_message(
 ctx: &mut Context,
 job_data: &mut PerRelayParentData,
@@ -385,7 +381,6 @@ where
 }

 /// Handle an incoming message from a peer.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn process_incoming_peer_message(
 ctx: &mut Context,
 state: &mut ProtocolState,
@@ -506,7 +501,6 @@ where
 /// Deal with network bridge updates and track what needs to be tracked
 /// which depends on the message type received.
-#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))]
 async fn handle_network_msg(
 ctx: &mut Context,
 state: &mut ProtocolState,
@@ -561,7 +555,6 @@ where
 }

 /// Handle the changes necessary when our view changes.
-#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))]
 fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
 let old_view = std::mem::replace(&mut (state.view), view);
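`handle_our_view_change` above swaps the stored view with `std::mem::replace`, and the comment that follows notes that only the difference between two views needs to be sent to a peer. A sketch of both steps over plain hash sets (real views carry heads plus a finalized number):

    use std::collections::HashSet;

    // Replace the stored view and return the heads that are new in it; only
    // those need catching up, everything else was already sent.
    fn added_heads(
        current: &mut HashSet<[u8; 32]>,
        new_view: HashSet<[u8; 32]>,
    ) -> Vec<[u8; 32]> {
        let old_view = std::mem::replace(current, new_view);
        current.difference(&old_view).copied().collect()
    }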
@@ -584,7 +577,6 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {

 // Send the difference between two views which were not sent
 // to that particular peer.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn handle_peer_view_change(
 ctx: &mut Context,
 state: &mut ProtocolState,
@@ -637,7 +629,6 @@ where
 }

 /// Send a gossip message and track it in the per relay parent data.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn send_tracked_gossip_message(
 ctx: &mut Context,
 state: &mut ProtocolState,
@@ -693,7 +684,6 @@ where
 }

 /// Query our validator set and signing context for a particular relay parent.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn query_basics(
 ctx: &mut Context,
 relay_parent: Hash,
diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs
index 1c348adc28..a1a78d1f64 100644
--- a/polkadot/node/network/bridge/src/lib.rs
+++ b/polkadot/node/network/bridge/src/lib.rs
@@ -827,7 +827,6 @@ async fn handle_network_messages(
 /// #fn is_send();
 /// #is_send::();
 /// ```
-#[tracing::instrument(skip(bridge, ctx, network_stream), fields(subsystem = LOG_TARGET))]
 async fn run_network(
 bridge: NetworkBridge,
 mut ctx: impl SubsystemContext,
@@ -924,7 +923,6 @@ fn construct_view(live_heads: impl DoubleEndedIterator, finalized_n
 )
 }

-#[tracing::instrument(level = "trace", skip(net, ctx, shared, metrics), fields(subsystem = LOG_TARGET))]
 async fn update_our_view(
 net: &mut impl Network,
 ctx: &mut impl SubsystemContext,
@@ -997,7 +995,6 @@ async fn update_our_view(
 // Handle messages on a specific peer-set. The peer is expected to be connected on that
 // peer-set.
-#[tracing::instrument(level = "trace", skip(peers, messages, metrics), fields(subsystem = LOG_TARGET))]
 fn handle_peer_messages(
 peer: PeerId,
 peer_set: PeerSet,
@@ -1048,7 +1045,6 @@ fn handle_peer_messages(
 (outgoing_messages, reports)
 }

-#[tracing::instrument(level = "trace", skip(net, peers, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_validation_message(
 net: &mut impl Network,
 peers: I,
@@ -1062,7 +1058,6 @@ async fn send_validation_message(
 send_message(net, peers, PeerSet::Validation, message, metrics).await
 }

-#[tracing::instrument(level = "trace", skip(net, peers, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_collation_message(
 net: &mut impl Network,
 peers: I,
@@ -1109,7 +1104,6 @@ fn dispatch_collation_event_to_all_unbounded(
 }
 }

-#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
 async fn dispatch_validation_events_to_all(
 events: I,
 ctx: &mut impl SubsystemSender
@@ -1121,7 +1115,6 @@ async fn dispatch_validation_events_to_all(
 ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await
 }

-#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))]
 async fn dispatch_collation_events_to_all(
 events: I,
 ctx: &mut impl SubsystemSender
diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs
index e219f932e8..9c2366b853 100644
--- a/polkadot/node/network/bridge/src/network.rs
+++ b/polkadot/node/network/bridge/src/network.rs
@@ -192,7 +192,6 @@ impl Network for Arc> {
 sc_network::NetworkService::remove_from_peers_set(&**self, protocol, multiaddresses)
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn action_sink<'a>(
 &'a mut self,
 ) -> Pin + Send + 'a>> {
diff --git a/polkadot/node/network/collator-protocol/src/collator_side.rs b/polkadot/node/network/collator-protocol/src/collator_side.rs
index a73490989c..04abc51029 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side.rs
@@ -258,7 +258,6 @@ impl State {
 /// or the relay-parent isn't in the active-leaves set, we ignore the message
 /// as it must be invalid in that case - although this indicates a logic error
 /// elsewhere in the node.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state, pov), fields(subsystem = LOG_TARGET))]
 async fn distribute_collation(
 ctx: &mut impl SubsystemContext,
 runtime: &mut RuntimeInfo,
@@ -357,7 +356,6 @@ async fn distribute_collation(
 /// Get the Id of the Core that is assigned to the para being collated on if any
 /// and the total number of cores.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn determine_core(
 ctx: &mut impl SubsystemContext,
 para_id: ParaId,
@@ -387,7 +385,6 @@ struct GroupValidators {
 /// Figure out current and next group of validators assigned to the para being collated on.
 ///
 /// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`.
-#[tracing::instrument(level = "trace", skip(ctx, runtime), fields(subsystem = LOG_TARGET))]
 async fn determine_our_validators(
 ctx: &mut impl SubsystemContext,
 runtime: &mut RuntimeInfo,
@@ -424,7 +421,6 @@ async fn determine_our_validators(
 }

 /// Issue a `Declare` collation message to the given `peer`.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn declare(
 ctx: &mut impl SubsystemContext,
 state: &mut State,
@@ -450,7 +446,6 @@ async fn declare(
 /// Issue a connection request to a set of validators and
 /// revoke the previous connection request.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn connect_to_validators(
 ctx: &mut impl SubsystemContext,
 validator_ids: Vec,
@@ -467,7 +462,6 @@ async fn connect_to_validators(
 ///
 /// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
 /// set as validator for our para at the given `relay_parent`.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn advertise_collation(
 ctx: &mut impl SubsystemContext,
 state: &mut State,
@@ -528,7 +522,6 @@ async fn advertise_collation(
 }

 /// The main incoming message dispatching switch.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state), fields(subsystem = LOG_TARGET))]
 async fn process_msg(
 ctx: &mut impl SubsystemContext,
 runtime: &mut RuntimeInfo,
@@ -635,7 +628,6 @@ async fn process_msg(
 }

 /// Issue a response to a previously requested collation.
-#[tracing::instrument(level = "trace", skip(state, pov), fields(subsystem = LOG_TARGET))]
 async fn send_collation(
 state: &mut State,
 request: IncomingRequest,
@@ -652,7 +644,6 @@ async fn send_collation(
 }

 /// A networking messages switch.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state), fields(subsystem = LOG_TARGET))]
 async fn handle_incoming_peer_message(
 ctx: &mut impl SubsystemContext,
 runtime: &mut RuntimeInfo,
@@ -724,7 +715,6 @@ async fn handle_incoming_peer_message(
 }

 /// Our view has changed.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn handle_peer_view_change(
 ctx: &mut impl SubsystemContext,
 state: &mut State,
@@ -743,7 +733,6 @@ async fn handle_peer_view_change(
 }

 /// Bridge messages switch.
-#[tracing::instrument(level = "trace", skip(ctx, runtime, state), fields(subsystem = LOG_TARGET))]
 async fn handle_network_msg(
 ctx: &mut impl SubsystemContext,
 runtime: &mut RuntimeInfo,
@@ -809,7 +798,6 @@ async fn handle_network_msg(
 }

 /// Handles our view changes.
-#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))]
 async fn handle_our_view_change(
 state: &mut State,
 view: OurView,
@@ -851,7 +839,6 @@ async fn handle_our_view_change(
 }

 /// The collator protocol collator side main loop.
-#[tracing::instrument(skip(ctx, collator_pair, metrics), fields(subsystem = LOG_TARGET))]
 pub(crate) async fn run(
 mut ctx: impl SubsystemContext,
 local_peer_id: PeerId,
diff --git a/polkadot/node/network/collator-protocol/src/lib.rs b/polkadot/node/network/collator-protocol/src/lib.rs
index 24ae2407d1..c958289133 100644
--- a/polkadot/node/network/collator-protocol/src/lib.rs
+++ b/polkadot/node/network/collator-protocol/src/lib.rs
@@ -90,7 +90,6 @@ impl CollatorProtocolSubsystem {
 }
 }

- #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(self, ctx: Context) -> Result<()>
 where
 Context: SubsystemContext,
@@ -130,7 +129,6 @@ where
 }

 /// Modify the reputation of a peer based on its behavior.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep)
 where
 Context: SubsystemContext,
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 4e0e9266ab..e9c9ececf5 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -513,7 +513,6 @@ async fn disconnect_peer(ctx: &mut impl SubsystemContext, peer_id: PeerId) {
 }

 /// Another subsystem has requested to fetch collations on a particular leaf for some para.
-#[tracing::instrument(level = "trace", skip(ctx, state, tx, pc), fields(subsystem = LOG_TARGET))]
 async fn fetch_collation(
 ctx: &mut Context,
 state: &mut State,
@@ -530,7 +529,6 @@ where
 }

 /// Report a collator for some malicious actions.
-#[tracing::instrument(level = "trace", skip(ctx, peer_data), fields(subsystem = LOG_TARGET))]
 async fn report_collator(
 ctx: &mut Context,
 peer_data: &HashMap,
@@ -545,7 +543,6 @@ where
 }

 /// Some other subsystem has reported a collator as a good one, bump reputation.
-#[tracing::instrument(level = "trace", skip(ctx, peer_data), fields(subsystem = LOG_TARGET))]
 async fn note_good_collation(
 ctx: &mut Context,
 peer_data: &HashMap,
@@ -560,7 +557,6 @@ where
 }

 /// Notify a collator that its collation got seconded.
-#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
 async fn notify_collation_seconded(
 ctx: &mut impl SubsystemContext,
 peer_id: PeerId,
@@ -581,7 +577,6 @@ async fn notify_collation_seconded(
 /// A peer's view has changed. A number of things should be done:
 /// - Ongoing collation requests have to be cancelled.
 /// - Advertisements by this peer that are no longer relevant have to be removed.
-#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))]
 async fn handle_peer_view_change(
 state: &mut State,
 peer_id: PeerId,
@@ -602,7 +597,6 @@ async fn handle_peer_view_change(
 /// - Check if the requested collation is in our view.
 /// - Update PerRequest records with the `result` field if necessary.
 /// And as such invocations of this function may rely on that.
-#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(subsystem = LOG_TARGET))]
 async fn request_collation(
 ctx: &mut Context,
 state: &mut State,
@@ -671,7 +665,6 @@ where
 }

 /// Networking message has been received.
-#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))]
 async fn process_incoming_peer_message(
 ctx: &mut Context,
 state: &mut State,
@@ -806,7 +799,6 @@ where
 /// A leaf has become inactive so we want to
 /// - Cancel all ongoing collation requests that are on top of that leaf.
 /// - Remove all stored collations relevant to that leaf.
-#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))]
 async fn remove_relay_parent(
 state: &mut State,
 relay_parent: Hash,
@@ -822,7 +814,6 @@ async fn remove_relay_parent(
 }

 /// Our view has changed.
-#[tracing::instrument(level = "trace", skip(ctx, state, keystore), fields(subsystem = LOG_TARGET))]
 async fn handle_our_view_change(
 ctx: &mut impl SubsystemContext,
 state: &mut State,
@@ -874,7 +865,6 @@ async fn handle_our_view_change(
 }

 /// Bridge event switch.
-#[tracing::instrument(level = "trace", skip(ctx, state, keystore), fields(subsystem = LOG_TARGET))]
 async fn handle_network_msg(
 ctx: &mut Context,
 state: &mut State,
@@ -910,7 +900,6 @@ where
 }

 /// The main message receiver switch.
-#[tracing::instrument(level = "trace", skip(ctx, keystore, state), fields(subsystem = LOG_TARGET))]
 async fn process_msg(
 ctx: &mut Context,
 keystore: &SyncCryptoStorePtr,
@@ -1003,7 +992,6 @@ async fn wait_until_next_check(last_poll: Instant) -> Instant {
 }

 /// The main run loop.
-#[tracing::instrument(skip(ctx, keystore, metrics), fields(subsystem = LOG_TARGET))]
 pub(crate) async fn run(
 mut ctx: Context,
 keystore: SyncCryptoStorePtr,
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 6b1324efac..e5e36c144d 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -232,7 +232,6 @@ impl PeerRelayParentKnowledge {
 ///
 /// This returns `true` if this is the first time the peer has become aware of a
 /// candidate with the given hash.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> bool {
 debug_assert!(
 self.can_send(fingerprint),
@@ -295,7 +294,6 @@ impl PeerRelayParentKnowledge {
 ///
 /// This returns `Ok(true)` if this is the first time the peer has become aware of a
 /// candidate with given hash.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn receive(
 &mut self,
 fingerprint: &(CompactStatement, ValidatorIndex),
@@ -422,7 +420,6 @@ impl PeerData {
 ///
 /// This returns `true` if this is the first time the peer has become aware of a
 /// candidate with the given hash.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn send(
 &mut self,
 relay_parent: &Hash,
@@ -466,7 +463,6 @@ impl PeerData {
 ///
 /// This returns `Ok(true)` if this is the first time the peer has become aware of a
 /// candidate with given hash.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn receive(
 &mut self,
 relay_parent: &Hash,
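The `send`/`receive` pairs above both report whether a peer just learned of a candidate for the first time. A sketch of that bookkeeping, reduced to a set of candidate hashes:

    use std::collections::HashSet;

    #[derive(Default)]
    struct PeerKnowledge {
        known_candidates: HashSet<[u8; 32]>,
    }

    impl PeerKnowledge {
        // Returns true only the first time the peer becomes aware of the
        // candidate, mirroring the documented contract of `send`/`receive`.
        fn note(&mut self, candidate_hash: [u8; 32]) -> bool {
            self.known_candidates.insert(candidate_hash)
        }
    }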
@@ -664,7 +660,6 @@ impl ActiveHeadData {
 ///
 /// Any other statements or those that reference a candidate we are not aware of cannot be accepted
 /// and will return `NotedStatement::NotUseful`.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement {
 let validator_index = statement.validator_index();
 let comparator = StoredStatementComparator {
@@ -844,7 +839,6 @@ fn check_statement_signature(
 /// circulates the statement to all peers who have not seen it yet, and
 /// sends all statements dependent on that statement to peers who could previously not receive
 /// them but now can.
-#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))]
 async fn circulate_statement_and_dependents(
 peers: &mut HashMap,
 active_heads: &mut HashMap,
@@ -945,7 +939,6 @@ fn is_statement_large(statement: &SignedFullStatement) -> bool {
 /// Circulates a statement to all peers who have not seen it yet, and returns
 /// an iterator over peers who need to have dependent statements sent.
-#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))]
 async fn circulate_statement<'a>(
 peers: &mut HashMap,
 ctx: &mut impl SubsystemContext,
@@ -1022,7 +1015,6 @@ async fn circulate_statement<'a>(
 }

 /// Send all statements about a given candidate hash to a peer.
-#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_statements_about(
 peer: PeerId,
 peer_data: &mut PeerData,
@@ -1060,7 +1052,6 @@ async fn send_statements_about(
 }

 /// Send all statements at a given relay-parent to a peer.
-#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))]
 async fn send_statements(
 peer: PeerId,
 peer_data: &mut PeerData,
@@ -1450,7 +1441,6 @@ async fn handle_incoming_message<'a>(
 }

 /// Update a peer's view. Sends all newly unlocked statements based on the previous
-#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))]
 async fn update_peer_view_and_send_unlocked(
 peer: PeerId,
 peer_data: &mut PeerData,
@@ -1560,7 +1550,6 @@ async fn handle_network_update(
 }

 impl StatementDistribution {
- #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))]
 async fn run(
 self,
 mut ctx: impl SubsystemContext,
diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs
index b5c2523aef..1e731283db 100644
--- a/polkadot/node/overseer/src/lib.rs
+++ b/polkadot/node/overseer/src/lib.rs
@@ -421,19 +421,16 @@ pub struct OverseerHandler {
 impl OverseerHandler {
 /// Inform the `Overseer` that that some block was imported.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 pub async fn block_imported(&mut self, block: BlockInfo) {
 self.send_and_log_error(Event::BlockImported(block)).await
 }

 /// Send some message to one of the `Subsystem`s.
- #[tracing::instrument(level = "trace", skip(self, msg), fields(subsystem = LOG_TARGET))]
 pub async fn send_msg(&mut self, msg: impl Into) {
 self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await
 }

 /// Inform the `Overseer` that some block was finalized.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 pub async fn block_finalized(&mut self, block: BlockInfo) {
 self.send_and_log_error(Event::BlockFinalized(block)).await
 }
@@ -444,7 +441,6 @@ impl OverseerHandler {
 /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas,
 /// the response channel may never return if the hash was deactivated before this call.
 /// In this case, it's the caller's responsibility to ensure a timeout is set.
- #[tracing::instrument(level = "trace", skip(self, response_channel), fields(subsystem = LOG_TARGET))]
 pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) {
 self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation {
 hash,
@@ -453,7 +449,6 @@ impl OverseerHandler {
 }

 /// Tell `Overseer` to shutdown.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 pub async fn stop(&mut self) {
 self.send_and_log_error(Event::Stop).await
 }
@@ -1845,7 +1840,6 @@ where
 }

 /// Run the `Overseer`.
- #[tracing::instrument(skip(self), fields(subsystem = LOG_TARGET))]
 pub async fn run(mut self) -> SubsystemResult<()> {
 let mut update = ActiveLeavesUpdate::default();
@@ -1927,7 +1921,6 @@ where
 }
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 match self.active_leaves.entry(block.hash) {
 hash_map::Entry::Vacant(entry) => entry.insert(block.number),
@@ -1962,7 +1955,6 @@ where
 }
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> {
 let mut update = ActiveLeavesUpdate::default();
@@ -1991,7 +1983,6 @@ where
 Ok(())
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> {
 self.subsystems.candidate_validation.send_signal(signal.clone()).await?;
 self.subsystems.candidate_backing.send_signal(signal.clone()).await?;
@@ -2014,7 +2005,6 @@ where
 Ok(())
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 async fn route_message(&mut self, msg: AllMessages) -> SubsystemResult<()> {
 self.metrics.on_message_relayed();
 match msg {
@@ -2076,7 +2066,6 @@ where
 /// Handles a header activation. If the header's state doesn't support the parachains API,
 /// this returns `None`.
- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn on_head_activated(&mut self, hash: &Hash, parent_hash: Option)
 -> Option<(Arc, LeafStatus)>
 {
@@ -2110,14 +2099,12 @@ where
 Some((span, status))
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn on_head_deactivated(&mut self, hash: &Hash) {
 self.metrics.on_head_deactivated();
 self.activation_external_listeners.remove(hash);
 self.span_per_active_leaf.remove(hash);
 }

- #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
 fn clean_up_external_listeners(&mut self) {
 self.activation_external_listeners.retain(|_, v| {
 // remove dead listeners
 })
 }

- #[tracing::instrument(level = "trace", skip(self, request), fields(subsystem = LOG_TARGET))]
 fn handle_external_request(&mut self, request: ExternalRequest) {
 match request {
 ExternalRequest::WaitForActivation { hash, response_channel } => {
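The `wait_for_activation` note above warns that the response channel may never resolve, leaving timeouts to the caller. A caller-side sketch, assuming `tokio` for the timer and a `futures` oneshot channel (both are assumptions of this example, not part of the patch):

    use futures::channel::oneshot;
    use std::time::Duration;

    // Wrap the activation receiver in a timeout so a leaf that was deactivated
    // before the call cannot hang us forever. `None` means we timed out or the
    // sender was dropped.
    async fn wait_with_timeout<T>(rx: oneshot::Receiver<T>) -> Option<T> {
        tokio::time::timeout(Duration::from_secs(5), rx).await.ok()?.ok()
    }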