observability: tracing gum, automatically cross ref traceID (#5079)

* add some gum

* bump expander

* gum

* fix all remaining issues

* last fixup

* Update node/gum/proc-macro/src/lib.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* change

* network

* fixins

* chore

* allow optional fmt str + args, prep for expr as kv field

* tracing -> gum rename fallout

* restrict further

* allow multiple levels of field accesses

* another round of docs and a slip of the pen

* update ADR

* fixup lock file

* use target: instead of target=

* minors

* fix

* chore

* Update node/gum/README.md

Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com>

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com>
This commit is contained in:
Bernhard Schuster
2022-03-15 12:05:16 +01:00
committed by GitHub
parent fa359fd1f7
commit d631f1dea8
130 changed files with 1708 additions and 808 deletions
+55 -31
View File
@@ -1877,6 +1877,7 @@ dependencies = [
"fs-err",
"proc-macro2",
"quote",
"syn",
]
[[package]]
@@ -4464,6 +4465,7 @@ dependencies = [
"log",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6271,7 +6273,7 @@ dependencies = [
"rand_core 0.5.1",
"schnorrkel",
"sp-core",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6292,7 +6294,7 @@ dependencies = [
"sp-application-crypto",
"sp-core",
"sp-keystore",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6321,7 +6323,7 @@ dependencies = [
"sp-keystore",
"sp-tracing",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6350,7 +6352,7 @@ dependencies = [
"sp-core",
"sp-keyring",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6433,7 +6435,7 @@ dependencies = [
"sp-keystore",
"sp-runtime",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6476,7 +6478,7 @@ dependencies = [
"sp-keystore",
"sp-tracing",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6515,7 +6517,7 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"sp-tracing",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6539,7 +6541,7 @@ dependencies = [
"sp-consensus",
"sp-core",
"sp-keyring",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6558,7 +6560,7 @@ dependencies = [
"sp-core",
"sp-maybe-compressed-blob",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6595,7 +6597,7 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"sp-runtime",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6623,7 +6625,7 @@ dependencies = [
"sp-core",
"sp-keyring",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6648,7 +6650,7 @@ dependencies = [
"sp-keystore",
"sp-tracing",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6663,7 +6665,7 @@ dependencies = [
"polkadot-primitives-test-helpers",
"sp-keystore",
"thiserror",
"tracing",
"tracing-gum",
"wasm-timer",
]
@@ -6686,7 +6688,7 @@ dependencies = [
"sp-core",
"sp-keyring",
"sp-maybe-compressed-blob",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6705,7 +6707,7 @@ dependencies = [
"sc-consensus-babe",
"sp-blockchain",
"sp-core",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6726,7 +6728,7 @@ dependencies = [
"polkadot-primitives",
"sp-core",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6751,7 +6753,7 @@ dependencies = [
"sp-keyring",
"sp-keystore",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6767,7 +6769,7 @@ dependencies = [
"sp-inherents",
"sp-runtime",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6787,7 +6789,7 @@ dependencies = [
"sp-application-crypto",
"sp-keystore",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6820,7 +6822,7 @@ dependencies = [
"tempfile",
"test-parachain-adder",
"test-parachain-halt",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6843,7 +6845,7 @@ dependencies = [
"sp-keystore",
"sp-runtime",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6864,7 +6866,7 @@ dependencies = [
"sp-consensus-babe",
"sp-core",
"sp-keyring",
"tracing",
"tracing-gum",
]
[[package]]
@@ -6909,7 +6911,7 @@ dependencies = [
"substrate-test-utils",
"tempfile",
"tokio",
"tracing",
"tracing-gum",
]
[[package]]
@@ -7033,7 +7035,7 @@ dependencies = [
"sp-keystore",
"tempfile",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -7058,7 +7060,7 @@ dependencies = [
"sc-client-api",
"sp-api",
"sp-core",
"tracing",
"tracing-gum",
]
[[package]]
@@ -7074,7 +7076,7 @@ dependencies = [
"polkadot-node-primitives",
"polkadot-overseer-gen-proc-macro",
"thiserror",
"tracing",
"tracing-gum",
"trybuild",
]
@@ -7505,7 +7507,7 @@ dependencies = [
"sp-trie",
"substrate-prometheus-endpoint",
"thiserror",
"tracing",
"tracing-gum",
"westend-runtime",
"westend-runtime-constants",
]
@@ -7537,7 +7539,7 @@ dependencies = [
"sp-staking",
"sp-tracing",
"thiserror",
"tracing",
"tracing-gum",
]
[[package]]
@@ -7599,7 +7601,7 @@ dependencies = [
"polkadot-primitives",
"sp-core",
"sp-keystore",
"tracing",
"tracing-gum",
]
[[package]]
@@ -7720,7 +7722,7 @@ dependencies = [
"tempfile",
"test-runtime-constants",
"tokio",
"tracing",
"tracing-gum",
]
[[package]]
@@ -11502,6 +11504,28 @@ dependencies = [
"tracing",
]
[[package]]
name = "tracing-gum"
version = "0.9.17"
dependencies = [
"polkadot-node-jaeger",
"polkadot-primitives",
"tracing",
"tracing-gum-proc-macro",
]
[[package]]
name = "tracing-gum-proc-macro"
version = "0.9.17"
dependencies = [
"assert_matches",
"expander 0.0.6",
"proc-macro-crate 1.1.3",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-log"
version = "0.1.2"
@@ -12721,7 +12745,7 @@ dependencies = [
"thiserror",
"tokio",
"tokio-tungstenite",
"tracing",
"tracing-gum",
"url 2.2.2",
]
+2
View File
@@ -93,6 +93,8 @@ members = [
"node/subsystem-test-helpers",
"node/subsystem-util",
"node/jaeger",
"node/gum",
"node/gum/proc-macro",
"node/metrics",
"node/metered-channel",
"node/test/client",
+8
View File
@@ -69,6 +69,14 @@ pub type Hash = sp_core::H256;
#[cfg_attr(feature = "std", derive(MallocSizeOf))]
pub struct CandidateHash(pub Hash);
#[cfg(feature = "std")]
impl std::ops::Deref for CandidateHash {
type Target = Hash;
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[cfg(feature = "std")]
impl std::fmt::Display for CandidateHash {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../gum" }
polkadot-erasure-coding = { path = "../../erasure-coding" }
polkadot-node-primitives = { path = "../primitives" }
polkadot-node-subsystem = { path = "../subsystem" }
+13 -13
View File
@@ -129,7 +129,7 @@ impl CollationGenerationSubsystem {
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
}
}
@@ -140,7 +140,7 @@ impl CollationGenerationSubsystem {
msg: CollationGenerationMessage::Initialize(config),
}) => {
if self.config.is_some() {
tracing::error!(target: LOG_TARGET, "double initialization");
gum::error!(target: LOG_TARGET, "double initialization");
} else {
self.config = Some(Arc::new(config));
}
@@ -148,7 +148,7 @@ impl CollationGenerationSubsystem {
},
Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(..))) => false,
Err(err) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
err = ?err,
"error receiving message from subsystem context: {:?}",
@@ -207,7 +207,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
(scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(_occupied_core) => {
// TODO: https://github.com/paritytech/polkadot/issues/1573
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
@@ -216,7 +216,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
continue
},
CoreState::Free => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
"core is free. Keep going.",
@@ -226,7 +226,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
};
if scheduled_core.para_id != config.para_id {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
@@ -252,7 +252,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
{
Some(v) => v,
None => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
@@ -274,7 +274,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
{
Some(v) => v,
None => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
@@ -298,7 +298,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
match (task_config.collator)(relay_parent, &validation_data).await {
Some(collation) => collation.into_inner(),
None => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
para_id = %scheduled_core.para_id,
"collator returned no collation on collate",
@@ -318,7 +318,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
// As such, honest collators never produce an uncompressed PoV which starts with
// a compression magic number, which would lead validators to reject the collation.
if encoded_size > validation_data.max_pov_size as usize {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
para_id = %scheduled_core.para_id,
size = encoded_size,
@@ -346,7 +346,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
match erasure_root(n_validators, validation_data, pov.clone()) {
Ok(erasure_root) => erasure_root,
Err(err) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
para_id = %scheduled_core.para_id,
err = ?err,
@@ -380,7 +380,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
},
};
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?ccr.hash(),
?pov_hash,
@@ -396,7 +396,7 @@ async fn handle_new_activations<Context: SubsystemContext>(
))
.await
{
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
para_id = %scheduled_core.para_id,
err = ?err,
@@ -8,7 +8,7 @@ edition = "2021"
futures = "0.3.21"
futures-timer = "3.0.2"
parity-scale-codec = { version = "3.1.0", default-features = false, features = ["bit-vec", "derive"] }
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
lru = "0.7"
merlin = "2.0"
@@ -236,7 +236,7 @@ pub(crate) fn compute_assignments(
config.assignment_keys.is_empty() ||
config.validator_groups.is_empty()
{
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
n_cores = config.n_cores,
has_assignment_keys = !config.assignment_keys.is_empty(),
@@ -255,7 +255,7 @@ pub(crate) fn compute_assignments(
Err(sc_keystore::Error::Unavailable) => None,
Err(sc_keystore::Error::Io(e)) if e.kind() == std::io::ErrorKind::NotFound => None,
Err(e) => {
tracing::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
gum::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
None
},
}
@@ -263,7 +263,7 @@ pub(crate) fn compute_assignments(
match key {
None => {
tracing::trace!(target: LOG_TARGET, "No assignment key");
gum::trace!(target: LOG_TARGET, "No assignment key");
return HashMap::new()
},
Some(k) => k,
@@ -277,7 +277,7 @@ pub(crate) fn compute_assignments(
.map(|(c_hash, core, _)| (c_hash, core))
.collect::<Vec<_>>();
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
assignable_cores = leaving_cores.len(),
"Assigning to candidates from different backing groups"
@@ -333,7 +333,7 @@ fn compute_relay_vrf_modulo_assignments(
if let Some((candidate_hash, _)) =
leaving_cores.clone().into_iter().find(|(_, c)| c == core)
{
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
@@ -415,7 +415,7 @@ fn compute_relay_vrf_delay_assignments(
};
if used {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
@@ -138,7 +138,7 @@ async fn imported_block_info(
.as_ref()
.map_or(true, |s| session_index < s.earliest_session())
{
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Block {} is from ancient session {}. Skipping",
block_hash,
@@ -188,11 +188,7 @@ async fn imported_block_info(
{
Some(s) => s,
None => {
tracing::debug!(
target: LOG_TARGET,
"Session info unavailable for block {}",
block_hash,
);
gum::debug!(target: LOG_TARGET, "Session info unavailable for block {}", block_hash,);
return Ok(None)
},
@@ -227,7 +223,7 @@ async fn imported_block_info(
}
},
None => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"BABE VRF info unavailable for block {}",
block_hash,
@@ -238,12 +234,12 @@ async fn imported_block_info(
}
};
tracing::trace!(target: LOG_TARGET, n_assignments = assignments.len(), "Produced assignments");
gum::trace!(target: LOG_TARGET, n_assignments = assignments.len(), "Produced assignments");
let force_approve =
block_header.digest.convert_first(|l| match ConsensusLog::from_digest_item(l) {
Ok(Some(ConsensusLog::ForceApprove(num))) if num < block_header.number => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block_hash,
current_number = block_header.number,
@@ -256,7 +252,7 @@ async fn imported_block_info(
Ok(Some(_)) => None,
Ok(None) => None,
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?err,
?block_hash,
@@ -313,7 +309,7 @@ pub(crate) async fn handle_new_head(
match h_rx.await? {
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Chain API subsystem temporarily unreachable {}",
e,
@@ -322,7 +318,7 @@ pub(crate) async fn handle_new_head(
return Ok(Vec::new())
},
Ok(None) => {
tracing::warn!(target: LOG_TARGET, "Missing header for new head {}", head);
gum::warn!(target: LOG_TARGET, "Missing header for new head {}", head);
return Ok(Vec::new())
},
Ok(Some(h)) => h,
@@ -332,7 +328,7 @@ pub(crate) async fn handle_new_head(
// Update session info based on most recent head.
match state.cache_session_info_for_head(ctx, head).await {
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?head,
?e,
@@ -342,7 +338,7 @@ pub(crate) async fn handle_new_head(
return Ok(Vec::new())
},
Ok(Some(a @ SessionWindowUpdate::Advanced { .. })) => {
tracing::info!(
gum::info!(
target: LOG_TARGET,
update = ?a,
"Advanced session window for approvals",
@@ -404,7 +400,7 @@ pub(crate) async fn handle_new_head(
if !lost_to_finality {
// Such errors are likely spurious, but this prevents us from getting gaps
// in the approval-db.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Unable to gather info about imported block {:?}. Skipping chain.",
(block_hash, block_header.number),
@@ -419,7 +415,7 @@ pub(crate) async fn handle_new_head(
imported_blocks_and_info
};
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
imported_blocks = imported_blocks_and_info.len(),
"Inserting imported blocks into database"
@@ -458,7 +454,7 @@ pub(crate) async fn handle_new_head(
let num_candidates = included_candidates.len();
let approved_bitfield = {
if needed_approvals == 0 {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
block_hash = ?block_hash,
"Insta-approving all candidates",
@@ -476,7 +472,7 @@ pub(crate) async fn handle_new_head(
}
}
if result.any() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
block_hash = ?block_hash,
"Insta-approving {}/{} candidates as the number of validators is too low",
@@ -510,7 +506,7 @@ pub(crate) async fn handle_new_head(
};
if let Some(up_to) = force_approve {
tracing::debug!(target: LOG_TARGET, ?block_hash, up_to, "Enacting force-approve");
gum::debug!(target: LOG_TARGET, ?block_hash, up_to, "Enacting force-approve");
let approved_hashes = crate::ops::force_approve(db, block_hash, up_to)
.map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
@@ -521,7 +517,7 @@ pub(crate) async fn handle_new_head(
}
}
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block_hash,
block_number = block_header.number,
@@ -561,7 +557,7 @@ pub(crate) async fn handle_new_head(
});
}
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
head = ?head,
chain_length = approval_meta.len(),
+41 -43
View File
@@ -621,7 +621,7 @@ impl State {
let session_info = match self.session_info(block_entry.session()) {
Some(s) => s,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Unknown session info for {}",
block_entry.session()
@@ -718,7 +718,7 @@ where
match rx.await? {
Ok(number) => Some(number),
Err(err) => {
tracing::warn!(target: LOG_TARGET, ?err, "Failed fetching finalized number");
gum::warn!(target: LOG_TARGET, ?err, "Failed fetching finalized number");
None
},
}
@@ -962,13 +962,11 @@ async fn handle_actions(
match confirmation_rx.await {
Err(oneshot::Canceled) => {
tracing::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
gum::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
},
Ok(ImportStatementsResult::ValidImport) => {},
Ok(ImportStatementsResult::InvalidImport) => tracing::warn!(
target: LOG_TARGET,
"Failed to import statements of validity",
),
Ok(ImportStatementsResult::InvalidImport) =>
gum::warn!(target: LOG_TARGET, "Failed to import statements of validity",),
}
},
Action::NoteApprovedInChainSelection(block_hash) => {
@@ -1004,7 +1002,7 @@ fn distribution_messages_for_activation(
let block_entry = match db.load_block_entry(&block_hash)? {
Some(b) => b,
None => {
tracing::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry");
gum::warn!(target: LOG_TARGET, ?block_hash, "Missing block entry");
continue
},
@@ -1021,7 +1019,7 @@ fn distribution_messages_for_activation(
let candidate_entry = match db.load_candidate_entry(&candidate_hash)? {
Some(c) => c,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?block_hash,
?candidate_hash,
@@ -1068,7 +1066,7 @@ fn distribution_messages_for_activation(
}
},
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?block_hash,
?candidate_hash,
@@ -1105,7 +1103,7 @@ async fn handle_from_overseer(
Ok(block_imported_candidates) => {
// Schedule wakeups for all imported candidates.
for block_batch in block_imported_candidates {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
block_number = ?block_batch.block_number,
block_hash = ?block_batch.block_hash,
@@ -1122,7 +1120,7 @@ async fn handle_from_overseer(
if let Some(our_tranche) = our_tranche {
let tick = our_tranche as Tick + block_batch.block_tick;
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
tranche = our_tranche,
candidate_hash = ?c_hash,
@@ -1150,7 +1148,7 @@ async fn handle_from_overseer(
actions
},
FromOverseer::Signal(OverseerSignal::BlockFinalized(block_hash, block_number)) => {
tracing::debug!(target: LOG_TARGET, ?block_hash, ?block_number, "Block finalized");
gum::debug!(target: LOG_TARGET, ?block_hash, ?block_number, "Block finalized");
*last_finalized_height = Some(block_number);
crate::ops::canonicalize(db, block_number, block_hash)
@@ -1262,7 +1260,7 @@ async fn handle_approved_ancestor(
let entry = match db.load_block_entry(&block_hash)? {
None => {
let block_number = target_number.saturating_sub(i as u32);
tracing::info!(
gum::info!(
target: LOG_TARGET,
unknown_number = ?block_number,
unknown_hash = ?block_hash,
@@ -1303,7 +1301,7 @@ async fn handle_approved_ancestor(
block_descriptions.clear();
let unapproved: Vec<_> = entry.unapproved_candidates().collect();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Block {} is {} blocks deep and has {}/{} candidates unapproved",
block_hash,
@@ -1315,7 +1313,7 @@ async fn handle_approved_ancestor(
for candidate_hash in unapproved {
match db.load_candidate_entry(&candidate_hash)? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?candidate_hash,
"Missing expected candidate in DB",
@@ -1325,7 +1323,7 @@ async fn handle_approved_ancestor(
},
Some(c_entry) => match c_entry.approval_entry(&block_hash) {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -1357,7 +1355,7 @@ async fn handle_approved_ancestor(
};
match a_entry.our_assignment() {
None => tracing::debug!(
None => gum::debug!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -1374,7 +1372,7 @@ async fn handle_approved_ancestor(
let approved =
triggered && { a_entry.local_statements().1.is_some() };
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -1394,7 +1392,7 @@ async fn handle_approved_ancestor(
}
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"approved blocks {}-[{}]-{}",
target_number,
@@ -1507,7 +1505,7 @@ fn schedule_wakeup_action(
};
match maybe_action {
Some(Action::ScheduleWakeup { ref tick, .. }) => tracing::trace!(
Some(Action::ScheduleWakeup { ref tick, .. }) => gum::trace!(
target: LOG_TARGET,
tick,
?candidate_hash,
@@ -1515,7 +1513,7 @@ fn schedule_wakeup_action(
block_tick,
"Scheduling next wakeup.",
),
None => tracing::trace!(
None => gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -1633,7 +1631,7 @@ fn check_and_import_assignment(
if is_duplicate {
AssignmentCheckResult::AcceptedDuplicate
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
validator = assignment.validator.0,
candidate_hash = ?assigned_candidate_hash,
@@ -1756,7 +1754,7 @@ fn check_and_import_approval<T>(
// importing the approval can be heavy as it may trigger acceptance for a series of blocks.
let t = with_response(ApprovalCheckResult::Accepted);
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
validator_index = approval.validator.0,
validator = ?pubkey,
@@ -1877,7 +1875,7 @@ fn advance_approval_state(
let is_approved = check.is_approved(tick_now.saturating_sub(APPROVAL_DELAY));
if is_approved {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -1906,7 +1904,7 @@ fn advance_approval_state(
(is_approved, status)
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -2021,7 +2019,7 @@ fn process_wakeup(
let session_info = match state.session_info(block_entry.session()) {
Some(i) => i,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Missing session info for live block {} in session {}",
relay_block,
@@ -2040,7 +2038,7 @@ fn process_wakeup(
let tranche_now = state.clock.tranche_now(state.slot_duration_millis, block_entry.slot());
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
tranche = tranche_now,
?candidate_hash,
@@ -2100,7 +2098,7 @@ fn process_wakeup(
block_entry.candidates().iter().position(|(_, h)| &candidate_hash == h);
if let Some(i) = index_in_candidate {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
para_id = ?candidate_receipt.descriptor.para_id,
@@ -2185,7 +2183,7 @@ async fn launch_approval(
let candidate_hash = candidate.hash();
let para_id = candidate.descriptor.para_id;
tracing::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data.");
gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Recovering data.");
let timer = metrics.time_recover_and_approve();
ctx.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
@@ -2219,7 +2217,7 @@ async fn launch_approval(
Ok(Err(e)) => {
match &e {
&RecoveryError::Unavailable => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?para_id,
?candidate_hash,
@@ -2230,7 +2228,7 @@ async fn launch_approval(
metrics_guard.take().on_approval_unavailable();
},
&RecoveryError::Invalid => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?para_id,
?candidate_hash,
@@ -2261,7 +2259,7 @@ async fn launch_approval(
Ok(Err(_)) => return ApprovalState::failed(validator_index, candidate_hash),
Ok(Ok(Some(code))) => code,
Ok(Ok(None)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Validation code unavailable for block {:?} in the state of block {:?} (a recent descendant)",
candidate.descriptor.relay_parent,
@@ -2297,7 +2295,7 @@ async fn launch_approval(
// Validation checked out. Issue an approval command. If the underlying service is unreachable,
// then there isn't anything we can do.
tracing::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");
gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");
let expected_commitments_hash = candidate.commitments_hash;
if commitments.hash() == expected_commitments_hash {
@@ -2322,7 +2320,7 @@ async fn launch_approval(
}
},
Ok(Ok(ValidationResult::Invalid(reason))) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?reason,
?candidate_hash,
@@ -2346,7 +2344,7 @@ async fn launch_approval(
return ApprovalState::failed(validator_index, candidate_hash)
},
Ok(Err(e)) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
err = ?e,
?candidate_hash,
@@ -2385,7 +2383,7 @@ async fn issue_approval(
let candidate_index = match block_entry.candidates().iter().position(|e| e.1 == candidate_hash)
{
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Candidate hash {} is not present in the block entry's candidates for relay block {}",
candidate_hash,
@@ -2401,7 +2399,7 @@ async fn issue_approval(
let session_info = match state.session_info(block_entry.session()) {
Some(s) => s,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Missing session info for live block {} in session {}",
block_hash,
@@ -2416,7 +2414,7 @@ async fn issue_approval(
let candidate_hash = match block_entry.candidate(candidate_index as usize) {
Some((_, h)) => h.clone(),
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Received malformed request to approve out-of-bounds candidate index {} included at block {:?}",
candidate_index,
@@ -2431,7 +2429,7 @@ async fn issue_approval(
let candidate_entry = match db.load_candidate_entry(&candidate_hash)? {
Some(c) => c,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Missing entry for candidate index {} included at block {:?}",
candidate_index,
@@ -2446,7 +2444,7 @@ async fn issue_approval(
let validator_pubkey = match session_info.validators.get(validator_index.0 as usize) {
Some(p) => p,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Validator index {} out of bounds in session {}",
validator_index.0,
@@ -2462,7 +2460,7 @@ async fn issue_approval(
let sig = match sign_approval(&state.keystore, &validator_pubkey, candidate_hash, session) {
Some(sig) => sig,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
validator_index = ?validator_index,
session,
@@ -2485,7 +2483,7 @@ async fn issue_approval(
)
.expect("Statement just signed; should pass checks; qed");
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?block_hash,
@@ -512,7 +512,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
}
async fn overseer_send(overseer: &mut VirtualOverseer, msg: FromOverseer<ApprovalVotingMessage>) {
tracing::trace!("Sending message:\n{:?}", &msg);
gum::trace!("Sending message:\n{:?}", &msg);
overseer
.send(msg)
.timeout(TIMEOUT)
@@ -525,7 +525,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
.await
.expect(&format!("{:?} is enough to receive messages.", TIMEOUT));
tracing::trace!("Received message:\n{:?}", &msg);
gum::trace!("Received message:\n{:?}", &msg);
msg
}
@@ -534,7 +534,7 @@ async fn overseer_recv_with_timeout(
overseer: &mut VirtualOverseer,
timeout: Duration,
) -> Option<AllMessages> {
tracing::trace!("Waiting for message...");
gum::trace!("Waiting for message...");
overseer.recv().timeout(timeout).await
}
+1 -1
View File
@@ -9,7 +9,7 @@ futures = "0.3.21"
futures-timer = "3.0.2"
kvdb = "0.11.0"
thiserror = "1.0.30"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
bitvec = "1.0.0"
parity-scale-codec = { version = "3.1.0", features = ["derive"] }
+13 -13
View File
@@ -163,7 +163,7 @@ fn query_inner<D: Decode>(
},
Ok(None) => Ok(None),
Err(err) => {
tracing::warn!(target: LOG_TARGET, ?err, "Error reading from the availability store");
gum::warn!(target: LOG_TARGET, ?err, "Error reading from the availability store");
Err(err.into())
},
}
@@ -384,10 +384,10 @@ impl Error {
match self {
// don't spam the log with spurious errors
Self::RuntimeApi(_) | Self::Oneshot(_) => {
tracing::debug!(target: LOG_TARGET, err = ?self)
gum::debug!(target: LOG_TARGET, err = ?self)
},
// it's worth reporting otherwise
_ => tracing::warn!(target: LOG_TARGET, err = ?self),
_ => gum::warn!(target: LOG_TARGET, err = ?self),
}
}
}
@@ -544,7 +544,7 @@ where
}
},
Ok(true) => {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
break
},
Ok(false) => continue,
@@ -721,7 +721,7 @@ fn note_block_backed(
) -> Result<(), Error> {
let candidate_hash = candidate.hash();
tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate backed");
gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate backed");
if load_meta(db, config, &candidate_hash)?.is_none() {
let meta = CandidateMeta {
@@ -753,7 +753,7 @@ fn note_block_included(
None => {
// This is alarming. We've observed a block being included without ever seeing it backed.
// Warn and ignore.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?candidate_hash,
"Candidate included without being backed?",
@@ -762,7 +762,7 @@ fn note_block_included(
Some(mut meta) => {
let be_block = (BEBlockNumber(block.0), block.1);
tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate included");
gum::debug!(target: LOG_TARGET, ?candidate_hash, "Candidate included");
meta.state = match meta.state {
State::Unavailable(at) => {
@@ -856,7 +856,7 @@ where
match rx.await? {
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
batch_num,
?err,
@@ -866,7 +866,7 @@ where
break
},
Ok(None) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Availability store was informed that block #{} is finalized, \
but chain API has no finalized hash.",
@@ -944,7 +944,7 @@ fn update_blocks_at_finalized_height(
for (candidate_hash, is_finalized) in candidates {
let mut meta = match load_meta(&subsystem.db, &subsystem.config, &candidate_hash)? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Dangling candidate metadata for {}",
candidate_hash,
@@ -1061,7 +1061,7 @@ fn process_message(
)? {
Some(c) => chunks.push(c),
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?candidate,
index,
@@ -1151,7 +1151,7 @@ fn store_chunk(
None => return Ok(false), // out of bounds.
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?candidate_hash,
chunk_index = %chunk.index.0,
@@ -1217,7 +1217,7 @@ fn store_available_data(
subsystem.db.write(tx)?;
tracing::debug!(target: LOG_TARGET, ?candidate_hash, "Stored data and chunks");
gum::debug!(target: LOG_TARGET, ?candidate_hash, "Stored data and chunks");
Ok(())
}
+3 -3
View File
@@ -146,7 +146,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
const TIMEOUT: Duration = Duration::from_millis(100);
async fn overseer_send(overseer: &mut VirtualOverseer, msg: AvailabilityStoreMessage) {
tracing::trace!(meg = ?msg, "sending message");
gum::trace!(meg = ?msg, "sending message");
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
@@ -159,7 +159,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
.await
.expect(&format!("{:?} is more than enough to receive messages", TIMEOUT));
tracing::trace!(msg = ?msg, "received message");
gum::trace!(msg = ?msg, "received message");
msg
}
@@ -168,7 +168,7 @@ async fn overseer_recv_with_timeout(
overseer: &mut VirtualOverseer,
timeout: Duration,
) -> Option<AllMessages> {
tracing::trace!("waiting for message...");
gum::trace!("waiting for message...");
overseer.recv().timeout(timeout).await
}
+1 -1
View File
@@ -14,7 +14,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
thiserror = "1.0.30"
[dev-dependencies]
+18 -18
View File
@@ -276,7 +276,7 @@ fn table_attested_to_backed(
validator_indices.set(position, true);
vote_positions.push((orig_idx, position));
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Logic error: Validity vote from table does not correspond to group",
);
@@ -463,7 +463,7 @@ async fn validate_and_make_available(
let res = match v {
ValidationResult::Valid(commitments, validation_data) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate.hash(),
"Validation successful",
@@ -471,7 +471,7 @@ async fn validate_and_make_available(
// If validation produces a new set of commitments, we vote the candidate as invalid.
if commitments.hash() != expected_commitments_hash {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate.hash(),
actual_commitments = ?commitments,
@@ -493,7 +493,7 @@ async fn validate_and_make_available(
match erasure_valid {
Ok(()) => Ok((candidate, commitments, pov.clone())),
Err(InvalidErasureRoot) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate.hash(),
actual_commitments = ?commitments,
@@ -505,7 +505,7 @@ async fn validate_and_make_available(
}
},
ValidationResult::Invalid(reason) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate.hash(),
reason = ?reason,
@@ -621,7 +621,7 @@ impl CandidateBackingJob {
self.kick_off_validation_work(sender, attesting, c_span).await?
}
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"AttestNoPoV was triggered without fallback being available."
);
@@ -647,13 +647,13 @@ impl CandidateBackingJob {
let bg = async move {
if let Err(e) = validate_and_make_available(params).await {
if let Error::BackgroundValidationMpsc(error) = e {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?error,
"Mpsc background validation mpsc died during validation- leaf no longer active?"
);
} else {
tracing::error!(
gum::error!(
target: LOG_TARGET,
"Failed to validate and make available: {:?}",
e
@@ -699,7 +699,7 @@ impl CandidateBackingJob {
span.as_mut().map(|span| span.add_follows_from(parent_span));
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate_hash,
candidate_receipt = ?candidate,
@@ -763,7 +763,7 @@ impl CandidateBackingJob {
statement: &SignedFullStatement,
root_span: &jaeger::Span,
) -> Result<Option<TableSummary>, Error> {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
statement = ?statement.payload().to_compact(),
validator_index = statement.validator_index().0,
@@ -784,7 +784,7 @@ impl CandidateBackingJob {
.dispatch_new_statement_to_dispute_coordinator(sender, candidate_hash, &statement)
.await
{
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
session_index = ?self.session_index,
relay_parent = ?self.parent,
@@ -809,7 +809,7 @@ impl CandidateBackingJob {
let span = self.remove_unbacked_span(&candidate_hash);
if let Some(backed) = table_attested_to_backed(attested, &self.table_context) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate_hash,
relay_parent = ?self.parent,
@@ -904,11 +904,11 @@ impl CandidateBackingJob {
match confirmation_rx.await {
Err(oneshot::Canceled) => {
tracing::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
gum::debug!(target: LOG_TARGET, "Dispute coordinator confirmation lost",)
},
Ok(ImportStatementsResult::ValidImport) => {},
Ok(ImportStatementsResult::InvalidImport) => {
tracing::warn!(target: LOG_TARGET, "Failed to import statements of validity",)
gum::warn!(target: LOG_TARGET, "Failed to import statements of validity",)
},
}
}
@@ -935,7 +935,7 @@ impl CandidateBackingJob {
// Sanity check that candidate is from our assignment.
if Some(candidate.descriptor().para_id) != self.assignment {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
our_assignment = ?self.assignment,
collation = ?candidate.descriptor().para_id,
@@ -1006,7 +1006,7 @@ impl CandidateBackingJob {
let descriptor = attesting.candidate.descriptor().clone();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?candidate_hash,
candidate_receipt = ?attesting.candidate,
@@ -1196,7 +1196,7 @@ impl util::JobTrait for CandidateBackingJob {
match $x {
Ok(x) => x,
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Failed to fetch runtime API data for job",
@@ -1241,7 +1241,7 @@ impl util::JobTrait for CandidateBackingJob {
Ok(v) => Some(v),
Err(util::Error::NotAValidator) => None,
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Cannot participate in candidate backing",
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
@@ -106,7 +106,7 @@ async fn get_core_availability(
let res = rx.await.map_err(Into::into);
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
para_id = %core.para_id(),
availability = ?res,
@@ -172,7 +172,7 @@ async fn construct_availability_bitfield(
.await?;
let core_bits = FromIterator::from_iter(results.into_iter());
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
"Signing Bitfield for {core_count} cores: {core_bits}",
@@ -247,7 +247,7 @@ impl JobTrait for BitfieldSigningJob {
let metrics = metrics.clone();
async move {
if let LeafStatus::Stale = leaf.status {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
hash = ?leaf.hash,
block_number = ?leaf.number,
@@ -288,7 +288,7 @@ impl JobTrait for BitfieldSigningJob {
{
Err(Error::Runtime(runtime_err)) => {
// Don't take down the node on runtime API errors.
tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
gum::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
return Ok(())
},
Err(err) => return Err(err),
@@ -305,7 +305,7 @@ impl JobTrait for BitfieldSigningJob {
{
Some(b) => b,
None => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
"Key was found at construction, but while signing it could not be found.",
);
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
async-trait = "0.1.52"
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
sp-maybe-compressed-blob = { package = "sp-maybe-compressed-blob", git = "https://github.com/paritytech/substrate", branch = "master" }
parity-scale-codec = { version = "3.1.0", default-features = false, features = ["bit-vec", "derive"] }
@@ -242,13 +242,13 @@ where
receiver
.await
.map_err(|_| {
tracing::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
gum::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
RuntimeRequestFailed
})
.and_then(|res| {
res.map_err(|e| {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
err = ?e,
@@ -295,7 +295,7 @@ where
// during pre-checking voting the relay-chain will pin the code. In case the code
// actually is not there, we issue failed since this looks more like a bug. This
// leads to us abstaining.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?relay_parent,
?validation_code_hash,
@@ -311,7 +311,7 @@ where
) {
Ok(code) => Pvf::from_code(code.into_owned()),
Err(e) => {
tracing::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code");
gum::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code");
return PreCheckOutcome::Invalid
},
};
@@ -481,7 +481,7 @@ async fn validate_candidate_exhaustive(
let _timer = metrics.time_validate_candidate_exhaustive();
let validation_code_hash = validation_code.hash();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
para_id = ?descriptor.para_id,
@@ -503,7 +503,7 @@ async fn validate_candidate_exhaustive(
) {
Ok(code) => code,
Err(e) => {
tracing::debug!(target: LOG_TARGET, err=?e, "Invalid validation code");
gum::debug!(target: LOG_TARGET, err=?e, "Invalid validation code");
// If the validation code is invalid, the candidate certainly is.
return Ok(ValidationResult::Invalid(InvalidCandidate::CodeDecompressionFailure))
@@ -514,7 +514,7 @@ async fn validate_candidate_exhaustive(
match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) {
Ok(block_data) => BlockData(block_data.to_vec()),
Err(e) => {
tracing::debug!(target: LOG_TARGET, err=?e, "Invalid PoV code");
gum::debug!(target: LOG_TARGET, err=?e, "Invalid PoV code");
// If the PoV is invalid, the candidate certainly is.
return Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))
@@ -533,7 +533,7 @@ async fn validate_candidate_exhaustive(
.await;
if let Err(ref e) = result {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?e,
"Failed to validate candidate",
+1 -1
View File
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
+1 -1
View File
@@ -129,7 +129,7 @@ where
},
ChainApiMessage::Ancestors { hash, k, response_channel } => {
let _timer = subsystem.metrics.time_ancestors();
tracing::span!(tracing::Level::TRACE, "ChainApiMessage::Ancestors", subsystem=LOG_TARGET, hash=%hash, k=k);
gum::trace!(target: LOG_TARGET, hash=%hash, k=k, "ChainApiMessage::Ancestors");
let mut hash = hash;
@@ -8,7 +8,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
futures-timer = "3"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
+13 -18
View File
@@ -207,9 +207,9 @@ impl Error {
fn trace(&self) {
match self {
// don't spam the log with spurious errors
Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self),
Self::Oneshot(_) => gum::debug!(target: LOG_TARGET, err = ?self),
// it's worth reporting otherwise
_ => tracing::warn!(target: LOG_TARGET, err = ?self),
_ => gum::warn!(target: LOG_TARGET, err = ?self),
}
}
}
@@ -235,7 +235,7 @@ impl Clock for SystemClock {
match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(d) => d.as_secs(),
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Current time is before unix epoch. Validation will not work correctly."
@@ -356,7 +356,7 @@ async fn run<Context, B>(
break
},
Ok(()) => {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
break
},
}
@@ -444,7 +444,7 @@ async fn fetch_finalized(
let number = match number_rx.await? {
Ok(number) => number,
Err(err) => {
tracing::warn!(target: LOG_TARGET, ?err, "Fetching finalized number failed");
gum::warn!(target: LOG_TARGET, ?err, "Fetching finalized number failed");
return Ok(None)
},
};
@@ -455,16 +455,11 @@ async fn fetch_finalized(
match hash_rx.await? {
Err(err) => {
tracing::warn!(
target: LOG_TARGET,
number,
?err,
"Fetching finalized block number failed"
);
gum::warn!(target: LOG_TARGET, number, ?err, "Fetching finalized block number failed");
Ok(None)
},
Ok(None) => {
tracing::warn!(target: LOG_TARGET, number, "Missing hash for finalized block number");
gum::warn!(target: LOG_TARGET, number, "Missing hash for finalized block number");
Ok(None)
},
Ok(Some(h)) => Ok(Some((h, number))),
@@ -479,7 +474,7 @@ async fn fetch_header(
ctx.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
Ok(rx.await?.unwrap_or_else(|err| {
tracing::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
None
}))
}
@@ -494,7 +489,7 @@ async fn fetch_block_weight(
let res = rx.await?;
Ok(res.unwrap_or_else(|err| {
tracing::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
None
}))
}
@@ -518,7 +513,7 @@ async fn handle_active_leaf(
let header = match fetch_header(ctx, hash).await? {
None => {
tracing::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
gum::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
return Ok(Vec::new())
},
Some(h) => h,
@@ -540,7 +535,7 @@ async fn handle_active_leaf(
for (hash, header) in new_blocks.into_iter().rev() {
let weight = match fetch_block_weight(ctx, hash).await? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?hash,
"Missing block weight for new head. Skipping chain.",
@@ -580,7 +575,7 @@ fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
.enumerate()
.filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) {
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
index = i,
@@ -592,7 +587,7 @@ fn extract_reversion_logs(header: &Header) -> Vec<BlockNumber> {
},
Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b),
Ok(Some(ConsensusLog::Revert(b))) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
revert_target = b,
block_number = number,
@@ -140,7 +140,7 @@ fn propagate_viability_update(
BlockEntryRef::Explicit(entry) => entry,
BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
block_hash = ?hash,
"Missing expected block entry"
@@ -360,7 +360,7 @@ fn apply_reversions(
let mut ancestor_entry =
match load_ancestor(backend, block_hash, block_number, revert_number)? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?block_hash,
block_number,
@@ -373,7 +373,7 @@ fn apply_reversions(
continue
},
Some(ancestor_entry) => {
tracing::info!(
gum::info!(
target: LOG_TARGET,
?block_hash,
block_number,
@@ -480,7 +480,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>(
propagate_viability_update(&mut backend, child)?;
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?finalized_hash,
finalized_number,
@@ -515,7 +515,7 @@ pub(super) fn approve_block(
backend.write_block_entry(entry);
}
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
block_hash = ?approved_hash,
"Missing entry for freshly-approved block. Ignoring"
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
parity-scale-codec = "3.1.0"
kvdb = "0.11.0"
thiserror = "1.0.30"
@@ -65,14 +65,14 @@ where
let res = run_until_error(&mut ctx, &subsystem).await;
match res.into_nested() {
Err(fatal) => {
tracing::error!(target: LOG_TARGET, "Observed fatal issue: {:?}", fatal);
gum::error!(target: LOG_TARGET, "Observed fatal issue: {:?}", fatal);
break
},
Ok(Err(jfyi)) => {
tracing::debug!(target: LOG_TARGET, "Observed issue: {:?}", jfyi);
gum::debug!(target: LOG_TARGET, "Observed issue: {:?}", jfyi);
},
Ok(Ok(())) => {
tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
break
},
}
@@ -119,10 +119,10 @@ impl JfyiError {
match self {
// don't spam the log with spurious errors
Self::Runtime(_) | Self::Oneshot(_) => {
tracing::debug!(target: LOG_TARGET, error = ?self)
gum::debug!(target: LOG_TARGET, error = ?self)
},
// it's worth reporting otherwise
_ => tracing::warn!(target: LOG_TARGET, error = ?self),
_ => gum::warn!(target: LOG_TARGET, error = ?self),
}
}
}
@@ -149,7 +149,7 @@ impl Initialized {
)
.await;
if let Ok(()) = res {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
return Ok(())
}
log_error(res)?;
@@ -268,7 +268,7 @@ impl Initialized {
.await
{
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Failed to update session cache for disputes",
@@ -283,11 +283,7 @@ impl Initialized {
self.error = None;
let session = window_end;
if self.highest_session < session {
tracing::trace!(
target: LOG_TARGET,
session,
"Observed new session. Pruning"
);
gum::trace!(target: LOG_TARGET, session, "Observed new session. Pruning");
self.highest_session = session;
@@ -304,7 +300,7 @@ impl Initialized {
.scrape_on_chain_votes(ctx, overlay_db, new_leaf.hash, now)
.await
.map_err(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Skipping scraping block #{}({}) due to error: {}",
new_leaf.number,
@@ -335,7 +331,7 @@ impl Initialized {
)
.await
.unwrap_or_else(|err| {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
activated_leaf = ?new_leaf,
error = ?err,
@@ -347,7 +343,7 @@ impl Initialized {
})
},
Err(err) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
activated_leaf = ?new_leaf,
error = ?err,
@@ -364,7 +360,7 @@ impl Initialized {
for ancestor in ancestors {
let _ = self.scrape_on_chain_votes(ctx, overlay_db, ancestor, now).await.map_err(
|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?ancestor,
error = ?err,
@@ -405,14 +401,14 @@ impl Initialized {
match rx.await {
Ok(Ok(Some(val))) => val,
Ok(Ok(None)) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
"No on chain votes stored for relay chain leaf");
return Ok(())
},
Ok(Err(e)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
error = ?e,
@@ -420,7 +416,7 @@ impl Initialized {
return Ok(())
},
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
error = ?e,
@@ -446,7 +442,7 @@ impl Initialized {
if let Some(session_info) = self.rolling_session_window.session_info(session) {
session_info.clone()
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
?session,
@@ -466,7 +462,7 @@ impl Initialized {
.validators
.get(validator_index.0 as usize)
.or_else(|| {
tracing::error!(
gum::error!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
"Missing public key for validator {:?}",
@@ -506,11 +502,11 @@ impl Initialized {
)
.await?;
match import_result {
ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
ImportStatementsResult::ValidImport => gum::trace!(target: LOG_TARGET,
relay_parent = ?new_leaf,
?session,
"Imported backing vote from on-chain"),
ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
ImportStatementsResult::InvalidImport => gum::warn!(target: LOG_TARGET,
relay_parent = ?new_leaf,
?session,
"Attempted import of on-chain backing votes failed"),
@@ -537,7 +533,7 @@ impl Initialized {
{
session_info.clone()
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
?session,
@@ -549,7 +545,7 @@ impl Initialized {
.validators
.get(validator_index.0 as usize)
.or_else(|| {
tracing::error!(
gum::error!(
target: LOG_TARGET,
relay_parent = ?new_leaf,
?session,
@@ -584,12 +580,12 @@ impl Initialized {
)
.await?;
match import_result {
ImportStatementsResult::ValidImport => tracing::trace!(target: LOG_TARGET,
ImportStatementsResult::ValidImport => gum::trace!(target: LOG_TARGET,
relay_parent = ?new_leaf,
?candidate_hash,
?session,
"Imported statement of concluded dispute from on-chain"),
ImportStatementsResult::InvalidImport => tracing::warn!(target: LOG_TARGET,
ImportStatementsResult::InvalidImport => gum::warn!(target: LOG_TARGET,
relay_parent = ?new_leaf,
?candidate_hash,
?session,
@@ -679,7 +675,7 @@ impl Initialized {
{
query_output.push((session_index, candidate_hash, v.into()));
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
session_index,
"No votes found for candidate",
@@ -753,7 +749,7 @@ impl Initialized {
let session_info = match self.rolling_session_window.session_info(session) {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
session,
"Importing statement lacks info for session which has an active dispute",
@@ -794,7 +790,7 @@ impl Initialized {
true,
)
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
session,
"Not seen backing vote for candidate which has an active dispute",
@@ -837,7 +833,7 @@ impl Initialized {
.get(val_index.0 as usize)
.map_or(true, |v| v != statement.validator_public())
{
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?val_index,
session,
@@ -907,7 +903,7 @@ impl Initialized {
}
// Only validity stating votes or validator had free spam slot?
if !free_spam_slots_available {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?candidate_hash,
?session,
@@ -931,7 +927,7 @@ impl Initialized {
// Participate in dispute if the imported vote was not local, we did not vote before either
// and we actually have keys to issue a local vote.
if !is_local && !voted_already && is_disputed && !controlled_indices.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?candidate_receipt.hash(),
priority = ?comparator.is_some(),
@@ -959,7 +955,7 @@ impl Initialized {
let status = if is_disputed {
let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| {
tracing::info!(
gum::info!(
target: LOG_TARGET,
?candidate_hash,
session,
@@ -994,7 +990,7 @@ impl Initialized {
}
if !was_concluded_valid && concluded_valid {
tracing::info!(
gum::info!(
target: LOG_TARGET,
?candidate_hash,
session,
@@ -1004,7 +1000,7 @@ impl Initialized {
}
if !was_concluded_invalid && concluded_invalid {
tracing::info!(
gum::info!(
target: LOG_TARGET,
?candidate_hash,
session,
@@ -1038,7 +1034,7 @@ impl Initialized {
// Load session info.
let info = match self.rolling_session_window.session_info(session) {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
session,
"Missing info for session which has an active dispute",
@@ -1088,7 +1084,7 @@ impl Initialized {
},
Ok(None) => {},
Err(e) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
err = ?e,
"Encountered keystore error while signing dispute statement",
@@ -1102,11 +1098,7 @@ impl Initialized {
let dispute_message =
match make_dispute_message(info, &votes, statement.clone(), *index) {
Err(err) => {
tracing::debug!(
target: LOG_TARGET,
?err,
"Creating dispute message failed."
);
gum::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed.");
continue
},
Ok(dispute_message) => dispute_message,
@@ -1130,7 +1122,7 @@ impl Initialized {
.await?
{
ImportStatementsResult::InvalidImport => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
?candidate_hash,
?session,
@@ -1138,7 +1130,7 @@ impl Initialized {
);
},
ImportStatementsResult::ValidImport => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?session,
@@ -194,7 +194,7 @@ impl DisputeCoordinatorSubsystem {
let (first_leaf, rolling_session_window) = match get_rolling_session_window(ctx).await {
Ok(Some(update)) => update,
Ok(None) => {
tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
return Ok(None)
},
Err(e) => {
@@ -267,11 +267,7 @@ impl DisputeCoordinatorSubsystem {
get_active_with_status(disputes.into_iter(), clock.now()).collect(),
Ok(None) => Vec::new(),
Err(e) => {
tracing::error!(
target: LOG_TARGET,
"Failed initial load of recent disputes: {:?}",
e
);
gum::error!(target: LOG_TARGET, "Failed initial load of recent disputes: {:?}", e);
return Err(e.into())
},
};
@@ -285,7 +281,7 @@ impl DisputeCoordinatorSubsystem {
Ok(Some(votes)) => votes.into(),
Ok(None) => continue,
Err(e) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
"Failed initial load of candidate votes: {:?}",
e
@@ -296,7 +292,7 @@ impl DisputeCoordinatorSubsystem {
let validators = match rolling_session_window.session_info(session) {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
session,
"Missing info for session which has an active dispute",
@@ -401,7 +397,7 @@ where
// hour old database state, we should rather cancel contained oneshots and delay
// finality until we are fully functional.
{
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?msg,
"Received msg before first active leaves update. This is not expected - message will be dropped."
@@ -162,7 +162,7 @@ impl OrderingProvider {
}
let n = match get_block_number(sender, candidate.descriptor().relay_parent).await? {
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
candidate_hash = ?candidate_hash,
"Candidate's relay_parent could not be found via chain API, but we saw candidate included?!"
@@ -197,7 +197,7 @@ impl OrderingProvider {
)
.await
.unwrap_or_else(|err| {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
activated_leaf = ?activated,
error = ?err,
@@ -209,7 +209,7 @@ impl OrderingProvider {
})
},
Err(err) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
activated_leaf = ?activated,
error = ?err,
@@ -309,7 +309,7 @@ impl OrderingProvider {
None => {
// It's assumed that it's impossible to retrieve
// more than N ancestors for block number N.
tracing::error!(
gum::error!(
target: LOG_TARGET,
"Received {} ancestors for block number {} from Chain API",
hashes.len(),
@@ -266,7 +266,7 @@ async fn participate(
let available_data = match recover_available_data_rx.await {
Err(oneshot::Canceled) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"`Oneshot` got cancelled when recovering available data {:?}",
req.candidate_hash(),
@@ -298,7 +298,7 @@ async fn participate(
{
Ok(Some(code)) => code,
Ok(None) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Validation code unavailable for code hash {:?} in the state of block {:?}",
req.candidate_receipt().descriptor.validation_code_hash,
@@ -309,7 +309,7 @@ async fn participate(
return
},
Err(err) => {
tracing::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code.");
gum::warn!(target: LOG_TARGET, ?err, "Error when fetching validation code.");
send_result(&mut result_sender, req, ParticipationOutcome::Error).await;
return
},
@@ -333,14 +333,14 @@ async fn participate(
match store_available_data_rx.await {
Err(oneshot::Canceled) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"`Oneshot` got cancelled when storing available data {:?}",
req.candidate_hash(),
);
},
Ok(Err(err)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?err,
"Failed to store available data for candidate {:?}",
@@ -375,7 +375,7 @@ async fn participate(
// the validation and if valid, whether the commitments hash matches
match validation_rx.await {
Err(oneshot::Canceled) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"`Oneshot` got cancelled when validating candidate {:?}",
req.candidate_hash(),
@@ -384,7 +384,7 @@ async fn participate(
return
},
Ok(Err(err)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Candidate {:?} validation failed with: {:?}",
req.candidate_hash(),
@@ -394,7 +394,7 @@ async fn participate(
send_result(&mut result_sender, req, ParticipationOutcome::Invalid).await;
},
Ok(Ok(ValidationResult::Invalid(invalid))) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Candidate {:?} considered invalid: {:?}",
req.candidate_hash(),
@@ -405,7 +405,7 @@ async fn participate(
},
Ok(Ok(ValidationResult::Valid(commitments, _))) => {
if commitments.hash() != req.candidate_receipt().commitments_hash {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
expected = ?req.candidate_receipt().commitments_hash,
got = ?commitments.hash(),
@@ -427,7 +427,7 @@ async fn send_result(
outcome: ParticipationOutcome,
) {
if let Err(err) = sender.feed(WorkerMessage::from_request(req, outcome)).await {
tracing::error!(
gum::error!(
target: LOG_TARGET,
?err,
"Sending back participation result failed. Dispute coordinator not working properly!"
@@ -67,7 +67,7 @@ impl SpamSlots {
let spam_vote_count = slots.entry((*session, *validator)).or_default();
*spam_vote_count += 1;
if *spam_vote_count > MAX_SPAM_VOTES {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?session,
?validator,
@@ -152,7 +152,7 @@ impl Clock for SystemClock {
match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(d) => d.as_secs(),
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Current time is before unix epoch. Validation will not work correctly."
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
futures-timer = "3.0.2"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
thiserror = "1.0.30"
async-trait = "0.1.52"
polkadot-node-subsystem = { path = "../../subsystem" }
@@ -88,7 +88,7 @@ impl ParachainsInherentDataProvider {
parent_header,
},
Err(err) => {
tracing::debug!(
gum::debug!(
?err,
"Could not get provisioner inherent data; injecting default data",
);
+1 -1
View File
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
thiserror = "1.0.30"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
+7 -7
View File
@@ -265,7 +265,7 @@ impl ProvisionerJob {
)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
gum::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data");
self.metrics.on_inherent_data_request(Err(()));
} else {
self.metrics.on_inherent_data_request(Ok(()));
@@ -451,7 +451,7 @@ async fn select_candidates(
descriptor.persisted_validation_data_hash == computed_validation_data_hash
}) {
let candidate_hash = candidate.hash();
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
"Selecting candidate {}. para_id={} core={}",
candidate_hash,
@@ -509,7 +509,7 @@ async fn select_candidates(
true
});
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
n_candidates = candidates.len(),
n_cores = availability_cores.len(),
@@ -557,7 +557,7 @@ fn bitfields_indicate_availability(
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
// however, in practice, that would just push off an error-handling routine which would look a whole lot like this one.
// simpler to just handle the error internally here.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
validator_idx = %validator_idx,
availability_len = %availability_len,
@@ -598,7 +598,7 @@ async fn request_disputes(
let recent_disputes = match rx.await {
Ok(r) => r,
Err(oneshot::Canceled) => {
tracing::warn!(target: LOG_TARGET, "Unable to gather {:?} disputes", active_or_recent);
gum::warn!(target: LOG_TARGET, "Unable to gather {:?} disputes", active_or_recent);
Vec::new()
},
};
@@ -618,7 +618,7 @@ async fn request_votes(
match rx.await {
Ok(v) => v,
Err(oneshot::Canceled) => {
tracing::warn!(target: LOG_TARGET, "Unable to query candidate votes");
gum::warn!(target: LOG_TARGET, "Unable to query candidate votes");
Vec::new()
},
}
@@ -666,7 +666,7 @@ async fn select_disputes(
// If the active ones are already exceeding the bounds, randomly select a subset.
let recent = request_disputes(sender, RequestType::Recent).await;
let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"Recent disputes are excessive ({} > {}), reduce to active ones, and selected",
recent.len(),
+1 -1
View File
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
thiserror = "1.0.30"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
+15 -20
View File
@@ -185,7 +185,7 @@ async fn handle_pvf_check(
outcome: PreCheckOutcome,
validation_code_hash: ValidationCodeHash,
) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
"Received pre-check result: {:?}",
@@ -200,7 +200,7 @@ async fn handle_pvf_check(
//
// Returning here will leave the PVF in the view dangling. Since it is there, no new
// pre-checking request will be sent.
tracing::info!(
gum::info!(
target: LOG_TARGET,
?validation_code_hash,
"Pre-check failed, abstaining from voting",
@@ -212,7 +212,7 @@ async fn handle_pvf_check(
match state.view.on_judgement(validation_code_hash, judgement) {
Ok(()) => (),
Err(()) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
"received judgement for an unknown (or removed) PVF hash",
@@ -254,7 +254,7 @@ async fn handle_from_overseer(
) -> Option<Conclude> {
match from_overseer {
FromOverseer::Signal(OverseerSignal::Conclude) => {
tracing::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
gum::info!(target: LOG_TARGET, "Received `Conclude` signal, exiting");
Some(Conclude)
},
FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => {
@@ -360,7 +360,7 @@ async fn examine_activation(
leaf_hash: Hash,
leaf_number: BlockNumber,
) -> Option<ActivationEffect> {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Examining activation of leaf {:?} ({})",
leaf_hash,
@@ -370,7 +370,7 @@ async fn examine_activation(
let pending_pvfs = match runtime_api::pvfs_require_precheck(sender, leaf_hash).await {
Err(runtime_api::RuntimeRequestError::NotSupported) => return None,
Err(_) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
relay_parent = ?leaf_hash,
"cannot fetch PVFs that require pre-checking from runtime API",
@@ -398,7 +398,7 @@ async fn examine_activation(
None
},
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
relay_parent = ?leaf_hash,
"cannot fetch session index from runtime API: {:?}",
@@ -421,7 +421,7 @@ async fn check_signing_credentials(
let validators = match runtime_api::validators(sender, leaf).await {
Ok(v) => v,
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
relay_parent = ?leaf,
"error occured during requesting validators: {:?}",
@@ -453,7 +453,7 @@ async fn sign_and_submit_pvf_check_statement(
judgement: Judgement,
validation_code_hash: ValidationCodeHash,
) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
?relay_parent,
@@ -464,7 +464,7 @@ async fn sign_and_submit_pvf_check_statement(
metrics.on_vote_submission_started();
if voted.contains(&validation_code_hash) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
relay_parent = ?relay_parent,
?validation_code_hash,
@@ -491,7 +491,7 @@ async fn sign_and_submit_pvf_check_statement(
{
Ok(Some(signature)) => signature,
Ok(None) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?relay_parent,
validator_index = ?credentials.validator_index,
@@ -501,7 +501,7 @@ async fn sign_and_submit_pvf_check_statement(
return
},
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?relay_parent,
validator_index = ?credentials.validator_index,
@@ -518,7 +518,7 @@ async fn sign_and_submit_pvf_check_statement(
metrics.on_vote_submitted();
},
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?relay_parent,
?validation_code_hash,
@@ -540,12 +540,7 @@ async fn initiate_precheck(
validation_code_hash: ValidationCodeHash,
metrics: &Metrics,
) {
tracing::debug!(
target: LOG_TARGET,
?validation_code_hash,
?relay_parent,
"initiating a precheck",
);
gum::debug!(target: LOG_TARGET, ?validation_code_hash, ?relay_parent, "initiating a precheck",);
let (tx, rx) = oneshot::channel();
sender
@@ -563,7 +558,7 @@ async fn initiate_precheck(
// Pre-checking request dropped before replying. That can happen in case the
// overseer is shutting down. Our part of shutdown will be handled by the
// overseer conclude signal. Log it here just in case.
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
?relay_parent,
@@ -85,7 +85,7 @@ pub(crate) async fn runtime_api_request<T>(
receiver
.await
.map_err(|_| {
tracing::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
gum::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
RuntimeRequestError::CommunicationError
})
.and_then(|res| {
@@ -93,7 +93,7 @@ pub(crate) async fn runtime_api_request<T>(
use RuntimeApiSubsystemError::*;
match e {
Execution { .. } => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
err = ?e,
+1 -1
View File
@@ -16,7 +16,7 @@ assert_matches = "1.4.0"
futures = "0.3.21"
futures-timer = "3.0.2"
slotmap = "1.0"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
pin-project = "1.0.9"
rand = "0.8.5"
parity-scale-codec = { version = "3.1.0", default-features = false, features = ["derive"] }
+6 -6
View File
@@ -174,7 +174,7 @@ async fn purge_dead(metrics: &Metrics, workers: &mut Workers) {
fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) {
let ToQueue::Enqueue { artifact, execution_timeout, params, result_tx } = to_queue;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?artifact.id.code_hash,
"enqueueing an artifact for execution",
@@ -208,7 +208,7 @@ fn handle_worker_spawned(queue: &mut Queue, idle: IdleWorker, handle: WorkerHand
queue.workers.spawn_inflight -= 1;
let worker = queue.workers.running.insert(WorkerData { idle: Some(idle), handle });
tracing::debug!(target: LOG_TARGET, ?worker, "execute worker spawned");
gum::debug!(target: LOG_TARGET, ?worker, "execute worker spawned");
if let Some(job) = queue.queue.pop_front() {
assign(queue, worker, job);
@@ -244,7 +244,7 @@ fn handle_job_finish(
};
queue.metrics.execute_finished();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?artifact_id.code_hash,
worker_rip = idle_worker.is_none(),
@@ -288,7 +288,7 @@ fn handle_job_finish(
fn spawn_extra_worker(queue: &mut Queue) {
queue.metrics.execute_worker().on_begin_spawn();
tracing::debug!(target: LOG_TARGET, "spawning an extra worker");
gum::debug!(target: LOG_TARGET, "spawning an extra worker");
queue
.mux
@@ -303,7 +303,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Qu
match super::worker::spawn(&program_path, spawn_timeout).await {
Ok((idle, handle)) => break QueueEvent::Spawn(idle, handle),
Err(err) => {
tracing::warn!(target: LOG_TARGET, "failed to spawn an execute worker: {:?}", err);
gum::warn!(target: LOG_TARGET, "failed to spawn an execute worker: {:?}", err);
// Assume that the failure is intermittent and retry after a delay.
Delay::new(Duration::from_secs(3)).await;
@@ -316,7 +316,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Qu
///
/// The worker must be running and idle.
fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?job.artifact.id,
?worker,
+5 -5
View File
@@ -72,7 +72,7 @@ pub async fn start_work(
) -> Outcome {
let IdleWorker { mut stream, pid } = worker;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %pid,
validation_code_hash = ?artifact.id.code_hash,
@@ -81,7 +81,7 @@ pub async fn start_work(
);
if let Err(error) = send_request(&mut stream, &artifact.path, &validation_params).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
validation_code_hash = ?artifact.id.code_hash,
@@ -95,7 +95,7 @@ pub async fn start_work(
response = recv_response(&mut stream).fuse() => {
match response {
Err(error) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
validation_code_hash = ?artifact.id.code_hash,
@@ -108,7 +108,7 @@ pub async fn start_work(
}
},
_ = Delay::new(execution_timeout).fuse() => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
validation_code_hash = ?artifact.id.code_hash,
@@ -189,7 +189,7 @@ pub fn worker_entrypoint(socket_path: &str) {
})?;
loop {
let (artifact_path, params) = recv_request(&mut stream).await?;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %std::process::id(),
"worker: validating artifact {}",
+3 -3
View File
@@ -639,13 +639,13 @@ async fn handle_cleanup_pulse(
artifact_ttl: Duration,
) -> Result<(), Fatal> {
let to_remove = artifacts.prune(artifact_ttl);
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"PVF pruning: {} artifacts reached their end of life",
to_remove.len(),
);
for artifact_id in to_remove {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?artifact_id.code_hash,
"pruning artifact",
@@ -664,7 +664,7 @@ async fn sweeper_task(mut sweeper_rx: mpsc::Receiver<PathBuf>) {
None => break,
Some(condemned) => {
let result = async_std::fs::remove_file(&condemned).await;
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?result,
"Sweeping the artifact file {}",
+3 -3
View File
@@ -201,7 +201,7 @@ fn handle_to_pool(
) {
match to_pool {
ToPool::Spawn => {
tracing::debug!(target: LOG_TARGET, "spawning a new prepare worker");
gum::debug!(target: LOG_TARGET, "spawning a new prepare worker");
metrics.prepare_worker().on_begin_spawn();
mux.push(spawn_worker_task(program_path.to_owned(), spawn_timeout).boxed());
},
@@ -234,7 +234,7 @@ fn handle_to_pool(
}
},
ToPool::Kill(worker) => {
tracing::debug!(target: LOG_TARGET, ?worker, "killing prepare worker");
gum::debug!(target: LOG_TARGET, ?worker, "killing prepare worker");
// It may be absent if it were previously already removed by `purge_dead`.
let _ = attempt_retire(metrics, spawned, worker);
},
@@ -248,7 +248,7 @@ async fn spawn_worker_task(program_path: PathBuf, spawn_timeout: Duration) -> Po
match worker::spawn(&program_path, spawn_timeout).await {
Ok((idle, handle)) => break PoolEvent::Spawn(idle, handle),
Err(err) => {
tracing::warn!(target: LOG_TARGET, "failed to spawn a prepare worker: {:?}", err);
gum::warn!(target: LOG_TARGET, "failed to spawn a prepare worker: {:?}", err);
// Assume that the failure is intermittent and retry after a delay.
Delay::new(Duration::from_secs(3)).await;
+4 -4
View File
@@ -211,7 +211,7 @@ async fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) -> Result<(), Fat
}
async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Result<(), Fatal> {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?pvf.code_hash,
?priority,
@@ -228,7 +228,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu
// Precondition for `Enqueue` is that it is sent only once for a PVF;
// Thus this should always be `false`;
// qed.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"duplicate `enqueue` command received for {:?}",
artifact_id,
@@ -331,7 +331,7 @@ async fn handle_worker_concluded(
queue.artifact_id_to_job.remove(&artifact_id);
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validation_code_hash = ?artifact_id.code_hash,
?worker,
@@ -370,7 +370,7 @@ async fn handle_worker_concluded(
}
async fn handle_worker_rip(queue: &mut Queue, worker: Worker) -> Result<(), Fatal> {
tracing::debug!(target: LOG_TARGET, ?worker, "prepare worker ripped");
gum::debug!(target: LOG_TARGET, ?worker, "prepare worker ripped");
let worker_data = queue.workers.remove(worker);
if let Some(WorkerData { job: Some(job), .. }) = worker_data {
+10 -10
View File
@@ -72,7 +72,7 @@ pub async fn start_work(
) -> Outcome {
let IdleWorker { mut stream, pid } = worker;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %pid,
"starting prepare for {}",
@@ -81,7 +81,7 @@ pub async fn start_work(
with_tmp_file(pid, cache_path, |tmp_file| async move {
if let Err(err) = send_request(&mut stream, code, &tmp_file).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"failed to send a prepare request: {:?}",
@@ -109,7 +109,7 @@ pub async fn start_work(
// By convention we expect encoded `PrepareResult`.
if let Ok(result) = PrepareResult::decode(&mut response_bytes.as_slice()) {
if result.is_ok() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %pid,
"promoting WIP artifact {} to {}",
@@ -121,7 +121,7 @@ pub async fn start_work(
.await
.map(|_| Selected::Done(result))
.unwrap_or_else(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"failed to rename the artifact from {} to {}: {:?}",
@@ -137,7 +137,7 @@ pub async fn start_work(
} else {
// We received invalid bytes from the worker.
let bound_bytes = &response_bytes[..response_bytes.len().min(4)];
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"received unexpected response from the prepare worker: {}",
@@ -148,7 +148,7 @@ pub async fn start_work(
},
Ok(Err(err)) => {
// Communication error within the time limit.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"failed to recv a prepare response: {:?}",
@@ -184,7 +184,7 @@ where
let tmp_file = match tmpfile_in("prepare-artifact-", cache_path).await {
Ok(f) => f,
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"failed to create a temp file for the artifact: {:?}",
@@ -205,7 +205,7 @@ where
Ok(()) => (),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
worker_pid = %pid,
"failed to remove the tmp file: {:?}",
@@ -246,7 +246,7 @@ pub fn worker_entrypoint(socket_path: &str) {
loop {
let (code, dest) = recv_request(&mut stream).await?;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %std::process::id(),
"worker: preparing artifact",
@@ -267,7 +267,7 @@ pub fn worker_entrypoint(socket_path: &str) {
let artifact_bytes = compiled_artifact.encode();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %std::process::id(),
"worker: writing artifact to {}",
+4 -4
View File
@@ -48,7 +48,7 @@ pub async fn spawn_with_program_path(
let socket_path = socket_path.to_owned();
async move {
let listener = UnixListener::bind(&socket_path).await.map_err(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
%debug_id,
"cannot bind unix socket: {:?}",
@@ -59,7 +59,7 @@ pub async fn spawn_with_program_path(
let handle =
WorkerHandle::spawn(program_path, extra_args, socket_path).map_err(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
%debug_id,
"cannot spawn a worker: {:?}",
@@ -71,7 +71,7 @@ pub async fn spawn_with_program_path(
futures::select! {
accept_result = listener.accept().fuse() => {
let (stream, _) = accept_result.map_err(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
%debug_id,
"cannot accept a worker: {:?}",
@@ -160,7 +160,7 @@ where
})
.unwrap_err(); // it's never `Ok` because it's `Ok(Never)`
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
worker_pid = %std::process::id(),
"pvf worker ({}): {:?}",
+1 -1
View File
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
memory-lru = "0.1.0"
parity-util-mem = { version = "0.11.0", default-features = false }
+3 -3
View File
@@ -291,7 +291,7 @@ where
self.waiting_requests.push_back((request, receiver));
if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"{} runtime API requests waiting to be executed.",
self.waiting_requests.len(),
@@ -372,7 +372,7 @@ where
let runtime_version = api.api_version::<dyn ParachainHost<Block>>(&BlockId::Hash(relay_parent))
.unwrap_or_else(|e| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"cannot query the runtime API version: {}",
e,
@@ -380,7 +380,7 @@ where
Some(0)
})
.unwrap_or_else(|| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"no runtime version is reported"
);
+12
View File
@@ -0,0 +1,12 @@
[package]
name = "tracing-gum"
version = "0.9.17"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
description = "Stick logs together with the TraceID as provided by tempo"
[dependencies]
tracing = "0.1.31"
jaeger = { path = "../jaeger", package = "polkadot-node-jaeger" }
gum-proc-macro = { path = "./proc-macro", package = "tracing-gum-proc-macro" }
polkadot-primitives = { path = "../../primitives", features = ["std"] }
+53
View File
@@ -0,0 +1,53 @@
# tracing-gum
"gum" to make `tracing::{warn,info,..}` and `mick-jaeger` stick together, to be
cross referenced in grafana with zero additional loc in the source code.
## Architecture Decision Record (ADR)
### Context
For cross referencing spans and logs in grafana loki and tempo, a shared
`traceID` or `TraceIdentifier` is required. All logs must be annotated with such
meta information.
In most cases `CandidateHash` is the primary identifier of the `jaeger::Span`
and hence the source from which the `traceID` is derived. For cases where it is
_not_ the primary identifier, a helper tag named `traceID` is added to those
spans (out of scope, this is already present as a convenience measure).
Log lines, on the other hand, use the `warn!,info!,debug!,trace!,..` API
provided by the `tracing` crate. Many of these, contain a `candidate_hash`,
which is _not_ equivalent to the `traceID` (256bits vs 128bits), and hence must
be derived.
To achieve the cross ref, either all instances of `candidate_hash` could be
added or this could be approached more systematically by providing a `macro` to
automatically do so.
Related issues:
* <https://github.com/paritytech/polkadot/issues/5045>
### Decision
Adding approx. 2 lines per tracing line including a `candidate_hash` reference,
to derive the `TraceIdentifier` from that, and printing that as part of the
key-value section in the `tracing::*` macros. The visual overhead, friction,
and required diligence to keep the 100s of `tracing::{warn!,info!,debug!,..}`
calls up to date is unreasonably high in the mid/long run. This is especially
true in the
context of more people joining the team. Hence a proc-macro is introduced
which abstracts this away, and does so automagically at the cost of
one-more-proc-macro in the codebase.
### Consequences
Minimal training/impact is required to name `CandidateHash` as `candidate_hash`
when providing to any of the log macros (`warn!`, `info!`, etc.).
The crate has to be used throughout the entire codebase to work consistently.
To disambiguate, the prefix `gum::` is used.
Feature parity with `tracing::{warn!,..}` is not desired. We want consistency
more than anything. All currently used features _are_ supported with _gum_ as
well.
+29
View File
@@ -0,0 +1,29 @@
[package]
name = "tracing-gum-proc-macro"
version = "0.9.17"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
description = "Proc-macro implementation for `tracing-gum`: tracing macros that automatically cross-reference the `traceID`."
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[lib]
proc-macro = true
[dependencies]
syn = { version = "1.0.86", features = ["full", "extra-traits"] }
quote = "1.0.15"
proc-macro2 = "1.0.36"
proc-macro-crate = "1.1.3"
expander = "0.0.6"
[dev-dependencies]
assert_matches = "1.5.0"
[features]
default = []
# write the expanded version to a `gum.[a-f0-9]{10}.rs`
# in the `OUT_DIR` as defined by `cargo` for the `expander` crate.
expand = []
+171
View File
@@ -0,0 +1,171 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![deny(unused_crate_dependencies)]
#![deny(missing_docs)]
#![deny(clippy::dbg_macro)]
//! Generative part of `tracing-gum`. See `tracing-gum` for usage documentation.
use proc_macro2::{Ident, Span, TokenStream};
use quote::{quote, ToTokens};
use syn::{parse2, parse_quote, punctuated::Punctuated, Result, Token};
mod types;
use self::types::*;
#[cfg(test)]
mod tests;
/// Print an error message.
///
/// Delegates to the shared [`gum`] expansion with [`Level::Error`].
#[proc_macro]
pub fn error(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
	gum(item, Level::Error)
}
/// Print a warning level message.
///
/// Delegates to the shared [`gum`] expansion with [`Level::Warn`].
#[proc_macro]
pub fn warn(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
	gum(item, Level::Warn)
}
/// Print an info level message.
///
/// Delegates to the shared [`gum`] expansion with [`Level::Info`].
#[proc_macro]
pub fn info(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
	gum(item, Level::Info)
}
/// Print a debug level message.
///
/// Delegates to the shared [`gum`] expansion with [`Level::Debug`].
#[proc_macro]
pub fn debug(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
	gum(item, Level::Debug)
}
/// Print a trace level message.
///
/// Delegates to the shared [`gum`] expansion with [`Level::Trace`].
#[proc_macro]
pub fn trace(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
	gum(item, Level::Trace)
}
/// One-size-fits-all internal implementation that produces the actual code.
///
/// Wraps [`impl_gum2`] (the `proc_macro2` based implementation, which is also
/// unit-testable) in an `expander::Expander` so that, with the `expand` feature
/// enabled, the generated code is additionally written to `OUT_DIR` for
/// inspection. Parse errors surface as compile errors.
pub(crate) fn gum(item: proc_macro::TokenStream, level: Level) -> proc_macro::TokenStream {
	let item: TokenStream = item.into();
	let res = expander::Expander::new("gum")
		// This header ends up in the expanded `gum.*.rs` files; it must name
		// this crate, not the overseer proc-macro it was originally copied from.
		.add_comment("Generated tracing-gum code by `gum::warn!(..)` and friends".to_owned())
		// Only write the expansion to disk when the `expand` feature is on.
		.dry(!cfg!(feature = "expand"))
		.verbose(false)
		.fmt(expander::Edition::_2021)
		.maybe_write_to_out_dir(impl_gum2(item, level))
		.expect("Expander does not fail due to IO in OUT_DIR. qed");
	res.unwrap_or_else(|err| err.to_compile_error()).into()
}
/// Does the actual parsing and token generation based on `proc_macro2` types.
///
/// If a key-value named `candidate_hash` is present among the arguments, the
/// expansion derives a `trace_id` from it and injects an additional
/// `traceID = % trace_id` key-value; otherwise the arguments are forwarded to
/// `event!` unchanged.
///
/// Required for unit tests.
pub(crate) fn impl_gum2(orig: TokenStream, level: Level) -> Result<TokenStream> {
	let args: Args = parse2(orig)?;

	let krate = support_crate();
	let span = Span::call_site();

	let Args { target, comma, mut values, fmt } = args;

	// find a value or alias called `candidate_hash`.
	let maybe_candidate_hash = values.iter_mut().find(|value| value.as_ident() == "candidate_hash");

	if let Some(kv) = maybe_candidate_hash {
		// `ident` is the key name, `rhs_expr` the expression the trace id is
		// derived from, `replace_with` an optional simplified replacement for
		// the original key-value (only needed for the aliased form).
		let (ident, rhs_expr, replace_with) = match kv {
			Value::Alias(alias) => {
				let ValueWithAliasIdent { alias, marker, expr, .. } = alias.clone();
				(
					alias.clone(),
					expr.to_token_stream(),
					Some(Value::Value(ValueWithFormatMarker {
						marker,
						ident: alias,
						dot: None,
						inner: Punctuated::new(),
					})),
				)
			},
			Value::Value(value) => (value.ident.clone(), value.ident.to_token_stream(), None),
		};

		// we generate a local value with the same alias name
		// so replace the expr with just a value
		if let Some(replace_with) = replace_with {
			let _old = std::mem::replace(kv, replace_with);
		};

		// Inject the addition `traceID = % trace_id` identifier
		// while maintaining trailing comma semantics.
		let had_trailing_comma = values.trailing_punct();
		if !had_trailing_comma {
			values.push_punct(Token![,](span));
		}
		values.push_value(parse_quote! {
			traceID = % trace_id
		});
		if had_trailing_comma {
			values.push_punct(Token![,](span));
		}

		Ok(quote! {
			// Only pay for the trace id derivation when the event is enabled.
			if #krate :: enabled!(#target #comma #level) {
				use ::std::ops::Deref;

				// create a scoped let binding of something that `deref`s to
				// `Hash`.
				let value = #rhs_expr;
				let value = &value;
				let value: & #krate:: Hash = value.deref();

				// Do the `deref` to `Hash` and convert to a `TraceIdentifier`.
				let #ident: #krate:: Hash = * value;
				let trace_id = #krate:: hash_to_trace_identifier ( #ident );
				#krate :: event!(
					#target #comma #level, #values #fmt
				)
			}
		})
	} else {
		// No `candidate_hash` present: emit a plain event, nothing to derive.
		Ok(quote! {
			#krate :: event!(
				#target #comma #level, #values #fmt
			)
		})
	}
}
/// Extract the support crate path.
///
/// Within this crate's own unit tests the macros resolve against `crate`;
/// otherwise the name is looked up via `proc-macro-crate` from the user's
/// `Cargo.toml`.
fn support_crate() -> TokenStream {
	if cfg!(test) {
		return quote! {crate}
	}
	use proc_macro_crate::{crate_name, FoundCrate};
	match crate_name("tracing-gum")
		.expect("Support crate `tracing-gum` is present in `Cargo.toml`. qed")
	{
		FoundCrate::Itself => quote! {crate},
		FoundCrate::Name(name) => Ident::new(&name, Span::call_site()).to_token_stream(),
	}
}
+207
View File
@@ -0,0 +1,207 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use assert_matches::assert_matches;
use quote::quote;
// Basic sanity check: a full-featured invocation (target, aliased and
// format-marked key-values, a format string plus trailing args) must
// parse and expand without error.
#[test]
fn smoke() {
	assert_matches!(
		impl_gum2(
			quote! {
				target: "xyz",
				x = Foo::default(),
				z = ?Game::new(),
				"Foo {p} x {q}",
				p,
				q,
			},
			Level::Warn
		),
		Ok(_)
	);
}
mod roundtrip {
use super::*;
macro_rules! roundtrip {
($whatty:ty | $ts:expr) => {
let input = $ts;
assert_matches!(
::syn::parse2::<$whatty>(input),
Ok(typed) => {
let downgraded = dbg!(typed.to_token_stream());
assert_matches!(::syn::parse2::<$whatty>(downgraded),
Ok(reparsed) => {
assert_eq!(
dbg!(typed.into_token_stream().to_string()),
reparsed.into_token_stream().to_string(),
)
});
}
);
}
}
#[test]
fn u_target() {
roundtrip! {Target | quote! {target: "foo" } };
}
#[test]
fn u_format_marker() {
roundtrip! {FormatMarker | quote! {?} };
roundtrip! {FormatMarker | quote! {%} };
roundtrip! {FormatMarker | quote! {} };
}
#[test]
fn u_value_w_alias() {
roundtrip! {Value | quote! {x = y} };
roundtrip! {Value | quote! {f = f} };
roundtrip! {Value | quote! {ff = ?ff} };
roundtrip! {Value | quote! {fff = %fff} };
}
#[test]
fn u_value_bare_w_format_marker() {
roundtrip! {Value | quote! {?q} };
roundtrip! {Value | quote! {%etcpp} };
roundtrip! {ValueWithFormatMarker | quote! {?q} };
roundtrip! {ValueWithFormatMarker | quote! {%etcpp} };
}
#[test]
fn u_value_bare_w_field_access() {
roundtrip! {ValueWithFormatMarker | quote! {a.b} };
roundtrip! {ValueWithFormatMarker | quote! {a.b.cdef.ghij} };
roundtrip! {ValueWithFormatMarker | quote! {?a.b.c} };
}
#[test]
fn u_args() {
roundtrip! {Args | quote! {target: "yes", k=?v, candidate_hash, "But why? {a}", a} };
roundtrip! {Args | quote! {target: "also", candidate_hash = ?c_hash, "But why?"} };
roundtrip! {Args | quote! {"Nope? {}", candidate_hash} };
}
#[test]
fn e2e() {
roundtrip! {Args | quote! {target: "yes", k=?v, candidate_hash, "But why? {a}", a} };
roundtrip! {Args | quote! {target: "also", candidate_hash = ?c_hash, "But why?"} };
roundtrip! {Args | quote! { "Nope? But yes {}", candidate_hash} };
}
#[test]
fn sample_w_candidate_hash_aliased() {
dbg!(impl_gum2(
quote! {
target: "bar",
a = a,
candidate_hash = %Hash::repeat_byte(0xF0),
b = ?Y::default(),
c = ?a,
"xxx"
},
Level::Info
)
.unwrap()
.to_string());
}
#[test]
fn sample_w_candidate_hash_aliased_unnecessary() {
assert_matches!(impl_gum2(
quote! {
"bar",
a = a,
candidate_hash = ?candidate_hash,
b = ?Y::default(),
c = ?a,
"xxx {} {}",
a,
a,
},
Level::Info
), Ok(x) => {
dbg!(x.to_string())
});
}
#[test]
fn no_fmt_str_args() {
assert_matches!(impl_gum2(
quote! {
target: "bar",
a = a,
candidate_hash = ?candidate_hash,
b = ?Y::default(),
c = a,
"xxx",
},
Level::Trace
), Ok(x) => {
dbg!(x.to_string())
});
}
#[test]
fn no_fmt_str() {
assert_matches!(impl_gum2(
quote! {
target: "bar",
a = a,
candidate_hash = ?candidate_hash,
b = ?Y::default(),
c = a,
},
Level::Trace
), Ok(x) => {
dbg!(x.to_string())
});
}
#[test]
fn field_member_as_kv() {
assert_matches!(impl_gum2(
quote! {
target: "z",
?y.x,
},
Level::Info
), Ok(x) => {
dbg!(x.to_string())
});
}
#[test]
fn nested_field_member_as_kv() {
assert_matches!(impl_gum2(
quote! {
target: "z",
?a.b.c.d.e.f.g,
},
Level::Info
), Ok(x) => {
dbg!(x.to_string())
});
}
}
+336
View File
@@ -0,0 +1,336 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use syn::{
parse::{Parse, ParseStream},
Token,
};
/// Custom keywords used while parsing the macro arguments.
pub(crate) mod kw {
	// `target` as in `gum::warn!(target: "...", ..)`.
	syn::custom_keyword!(target);
}
/// The leading `target: <expr>` argument, mirroring `tracing`'s syntax.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct Target {
	// The `target` keyword itself.
	kw: kw::target,
	colon: Token![:],
	// The target expression; in practice a string constant (see tests).
	expr: syn::Expr,
}
impl Parse for Target {
	/// Parse the three components `target`, `:`, `<expr>` strictly in order.
	fn parse(input: ParseStream) -> Result<Self> {
		let kw = input.parse()?;
		let colon = input.parse()?;
		let expr = input.parse()?;
		Ok(Self { kw, colon, expr })
	}
}
impl ToTokens for Target {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Round-trip: re-emit exactly what was parsed, `target : <expr>`.
		let Self { kw, colon, expr } = self;
		tokens.extend(quote! {
			#kw #colon #expr
		})
	}
}
/// Optional sigil in front of a value, following the `tracing` convention:
/// `?` (Debug), `%` (Display), or no marker at all.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum FormatMarker {
	Questionmark(Token![?]),
	Percentage(Token![%]),
	None,
}
impl Parse for FormatMarker {
	/// Never fails: absence of both sigils parses as `Self::None`.
	fn parse(input: ParseStream) -> Result<Self> {
		if input.peek(Token![?]) {
			Ok(Self::Questionmark(input.parse()?))
		} else if input.peek(Token![%]) {
			Ok(Self::Percentage(input.parse()?))
		} else {
			Ok(Self::None)
		}
	}
}
impl ToTokens for FormatMarker {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// `None` contributes no tokens at all.
		match self {
			Self::Questionmark(q) => q.to_tokens(tokens),
			Self::Percentage(p) => p.to_tokens(tokens),
			Self::None => {},
		}
	}
}
/// A key-value pair of the form `alias = <marker><expr>`,
/// e.g. `candidate_hash = ?hash`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct ValueWithAliasIdent {
	// The key under which the value is recorded.
	pub alias: Ident,
	pub eq: Token![=],
	// Optional `?`/`%` capture marker.
	pub marker: FormatMarker,
	// Arbitrary right-hand-side expression.
	pub expr: syn::Expr,
}
impl Parse for ValueWithAliasIdent {
	/// Parse `alias = <marker><expr>` strictly left to right.
	fn parse(input: ParseStream) -> Result<Self> {
		let alias = input.parse()?;
		let eq = input.parse()?;
		let marker = input.parse()?;
		let expr = input.parse()?;
		Ok(Self { alias, eq, marker, expr })
	}
}
impl ToTokens for ValueWithAliasIdent {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Round-trip back to `alias = <marker><expr>`.
		let Self { alias, eq, marker, expr } = self;
		tokens.extend(quote! {
			#alias #eq #marker #expr
		})
	}
}
/// A bare value of the form `<marker>ident(.member)*`,
/// e.g. `candidate_hash` or `?y.x`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct ValueWithFormatMarker {
	// Optional `?`/`%` capture marker.
	pub marker: FormatMarker,
	// Leading identifier; doubles as the key name.
	pub ident: Ident,
	// `Some` iff a field-access chain follows the identifier.
	pub dot: Option<Token![.]>,
	// The `.`-separated field accesses after the leading identifier.
	pub inner: Punctuated<syn::Member, Token![.]>,
}
impl Parse for ValueWithFormatMarker {
	fn parse(input: ParseStream) -> Result<Self> {
		let marker = input.parse::<FormatMarker>()?;
		let ident = input.parse::<syn::Ident>()?;

		// Collect an optional chain of `.member` accesses, e.g. `a.b.c`,
		// supporting multiple levels of field accesses.
		let mut inner = Punctuated::<syn::Member, Token![.]>::new();
		let lookahead = input.lookahead1();
		let dot = if lookahead.peek(Token![.]) {
			let dot = Some(input.parse::<Token![.]>()?);
			loop {
				let member = input.parse::<syn::Member>()?;
				inner.push_value(member);

				// Stop as soon as no further `.` follows.
				let lookahead = input.lookahead1();
				if !lookahead.peek(Token![.]) {
					break
				}
				let token = input.parse::<Token![.]>()?;
				inner.push_punct(token);
			}
			dot
		} else {
			None
		};
		Ok(Self { marker, ident, dot, inner })
	}
}
impl ToTokens for ValueWithFormatMarker {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Round-trip back to `<marker>ident(.member)*`.
		let Self { marker, ident, dot, inner } = self;
		tokens.extend(quote! {
			#marker #ident #dot #inner
		})
	}
}
/// A value as passed to the macro, appearing _before_ the format string.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum Value {
	// The `key = <expr>` form.
	Alias(ValueWithAliasIdent),
	// The bare `<marker>ident(.member)*` form.
	Value(ValueWithFormatMarker),
}
impl Value {
	/// The identifier under which this value is recorded:
	/// the alias if one was given, the bare ident otherwise.
	pub fn as_ident(&self) -> &Ident {
		match self {
			Self::Value(value) => &value.ident,
			Self::Alias(alias) => &alias.alias,
		}
	}
}
impl Parse for Value {
	/// Probe both forms on a fork first, so the real stream is only
	/// consumed by the variant that actually parses.
	fn parse(input: ParseStream) -> Result<Self> {
		if input.fork().parse::<ValueWithAliasIdent>().is_ok() {
			return input.parse().map(Self::Alias)
		}
		if input.fork().parse::<ValueWithFormatMarker>().is_ok() {
			return input.parse().map(Self::Value)
		}
		Err(syn::Error::new(Span::call_site(), "Neither value nor aliased value."))
	}
}
impl ToTokens for Value {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Delegate to whichever representation was parsed.
		match self {
			Self::Alias(alias) => alias.to_tokens(tokens),
			Self::Value(value) => value.to_tokens(tokens),
		}
	}
}
/// Defines the token stream consisting of a format string and its arguments.
///
/// Attention: Currently the correctness of the arguments is not checked as part
/// of the parsing logic.
/// It would be possible to use `parse_fmt_str:2.0.0`
/// to do so and possibly improve the error message here - for the time being
/// it's not clear if this yields any practical benefits, and is hence
/// left for future consideration.
#[derive(Debug, Clone)]
pub(crate) struct FmtGroup {
	// The literal format string, e.g. `"foo {}"`.
	pub format_str: syn::LitStr,
	// Present iff format arguments follow the format string.
	pub maybe_comma: Option<Token![,]>,
	// The (unvalidated) format arguments, kept as raw tokens.
	pub rest: TokenStream,
}
impl Parse for FmtGroup {
	fn parse(input: ParseStream) -> Result<Self> {
		// The format string literal itself is mandatory.
		let format_str = input
			.parse()
			.map_err(|e| syn::Error::new(e.span(), "Expected format specifier"))?;

		// Optionally a comma followed by the (unchecked) format arguments.
		// Note: parsing a `TokenStream` consumes the remainder of the input.
		let mut maybe_comma = None;
		let mut rest = TokenStream::new();
		if input.peek(Token![,]) {
			maybe_comma = Some(input.parse::<Token![,]>()?);
			rest = input.parse()?;
		}

		// Anything left over at this point cannot be valid.
		if !input.is_empty() {
			return Err(syn::Error::new(input.span(), "Unexpected data, expected closing `)`."))
		}

		Ok(Self { format_str, maybe_comma, rest })
	}
}
impl ToTokens for FmtGroup {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Emit the pieces back in parse order; a `None` comma and an empty
		// `rest` stream contribute nothing.
		self.format_str.to_tokens(tokens);
		self.maybe_comma.to_tokens(tokens);
		self.rest.to_tokens(tokens);
	}
}
/// Full set of arguments as provided to the `gum::warn!` call.
#[derive(Debug, Clone)]
pub(crate) struct Args {
	/// The optional `target: ..` specifier, which must come first.
	pub target: Option<Target>,
	/// The comma following the target; present iff `target` is.
	pub comma: Option<Token![,]>,
	/// The structured key/value fields given before the format string.
	pub values: Punctuated<Value, Token![,]>,
	/// The trailing format string and its arguments, if any.
	pub fmt: Option<FmtGroup>,
}
impl Parse for Args {
	/// Parses the full macro input: an optional `target: ..` prefix, a
	/// (possibly empty) comma-separated list of values, and an optional
	/// trailing format group.
	fn parse(input: ParseStream) -> Result<Self> {
		let lookahead = input.lookahead1();
		// A leading `target` keyword must be followed by a comma, since
		// further arguments (values or a format string) are expected.
		let (target, comma) = if lookahead.peek(kw::target) {
			let target = input.parse()?;
			let comma = input.parse::<Token![,]>()?;
			(Some(target), Some(comma))
		} else {
			(None, None)
		};
		let mut values = Punctuated::new();
		loop {
			// Speculatively parse on a fork first, so that a failed value
			// parse does not consume tokens that may instead belong to the
			// format group below.
			if input.fork().parse::<Value>().is_ok() {
				values.push_value(input.parse::<Value>()?);
			} else {
				break
			}
			if input.peek(Token![,]) {
				values.push_punct(input.parse::<Token![,]>()?);
			} else {
				break
			}
		}
		// A format group may only follow when the value list is empty or
		// comma-terminated, and there is input left to consume.
		let fmt = if values.empty_or_trailing() && !input.is_empty() {
			let fmt = input.parse::<FmtGroup>()?;
			Some(fmt)
		} else {
			None
		};
		Ok(Self { target, comma, values, fmt })
	}
}
impl ToTokens for Args {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Re-emit everything in the original argument order; `None`
		// options and empty lists contribute no tokens.
		self.target.to_tokens(tokens);
		self.comma.to_tokens(tokens);
		self.values.to_tokens(tokens);
		self.fmt.to_tokens(tokens);
	}
}
/// Supported tracing levels, passed to `tracing::event!`
///
/// Note: Not parsed from the input stream, but implicitly defined
/// by the macro name, i.e. the `debug!` macro maps to `Level::Debug`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum Level {
	Error,
	Warn,
	Info,
	Debug,
	Trace,
}
impl ToTokens for Level {
	fn to_tokens(&self, tokens: &mut TokenStream) {
		// Map the level onto the name of the corresponding `Level` constant
		// and emit a fully qualified path to it in the support crate.
		let name = match self {
			Self::Error => "ERROR",
			Self::Warn => "WARN",
			Self::Info => "INFO",
			Self::Debug => "DEBUG",
			Self::Trace => "TRACE",
		};
		let variant = Ident::new(name, Span::call_site());
		let krate = support_crate();
		tokens.extend(quote! {
			#krate :: Level :: #variant
		})
	}
}
+35
View File
@@ -0,0 +1,35 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![deny(unused_crate_dependencies)]
#![deny(missing_docs)]
#![deny(clippy::dbg_macro)]
//! A wrapper around `tracing` macros, to provide semi automatic
//! `traceID` annotation without codebase turnover.
pub use tracing::{enabled, event, Level};
#[doc(hidden)]
pub use jaeger::hash_to_trace_identifier;
#[doc(hidden)]
pub use polkadot_primitives::v2::{CandidateHash, Hash};
pub use gum_proc_macro::{debug, error, info, trace, warn};
#[cfg(test)]
mod tests;
+104
View File
@@ -0,0 +1,104 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
pub use polkadot_primitives::v2::{CandidateHash, Hash};
// Dummy type used to exercise `Debug` (`?`) field formatting in the tests.
#[derive(Default, Debug)]
struct Y {
	#[allow(dead_code)]
	x: u8,
}
#[test]
fn plain() {
	// A bare message: no target, no fields, no format arguments.
	error!("plain");
}
#[test]
fn wo_alias() {
	// Format-string arguments only (some of them named), without any
	// structured key/value fields before the format string.
	let a: i32 = 7;
	error!(target: "foo",
		"Something something {}, {:?}, or maybe {}",
		a,
		b = Y::default(),
		c = a
	);
}
#[test]
fn wo_unnecessary() {
	// Structured fields (`a`, `b`) followed by a format string that has its
	// own named argument (`c`).
	let a: i32 = 7;
	warn!(target: "bar",
		a = a,
		b = ?Y::default(),
		"fff {c}",
		c = a,
	);
}
#[test]
fn w_candidate_hash_value_assignment() {
	// `candidate_hash` supplied as an ad-hoc expression value, from which
	// the macro derives the trace annotation.
	let a: i32 = 7;
	info!(target: "bar",
		a = a,
		// ad-hoc value
		candidate_hash = %CandidateHash(Hash::repeat_byte(0xF0)),
		b = ?Y::default(),
		c = ?a,
		"xxx",
	);
}
#[test]
fn w_candidate_hash_from_scope() {
	// `candidate_hash` picked up from a local binding via field shorthand.
	let a: i32 = 7;
	let candidate_hash = CandidateHash(Hash::repeat_byte(0xF1));
	debug!(target: "bar",
		a = a,
		?candidate_hash,
		b = ?Y::default(),
		c = ?a,
		"xxx",
	);
}
#[test]
fn w_candidate_hash_aliased() {
	// A plain `Hash` binding aliased to the `candidate_hash` field name.
	let a: i32 = 7;
	let c_hash = Hash::repeat_byte(0xFA);
	trace!(target: "bar",
		a = a,
		candidate_hash = ?c_hash,
		b = ?Y::default(),
		c = a,
		"xxx",
	);
}
#[test]
fn w_candidate_hash_aliased_unnecessary() {
	// Aliasing `candidate_hash` to a binding of the very same name is
	// redundant, but must still be accepted by the macro.
	let a: i32 = 7;
	let candidate_hash = CandidateHash(Hash::repeat_byte(0xFA));
	info!(
		target: "bar",
		a = a,
		candidate_hash = ?candidate_hash,
		b = ?Y::default(),
		c = a,
		"xxx",
	);
}
+1 -1
View File
@@ -3,7 +3,7 @@ name = "polkadot-node-jaeger"
version = "0.9.18"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
description = "Polkadot Jaeger primitives"
description = "Polkadot Jaeger primitives, but equally useful for Grafana/Tempo"
[dependencies]
async-std = "1.8.0"
+1 -1
View File
@@ -53,7 +53,7 @@ mod spans;
pub use self::{
config::{JaegerConfig, JaegerConfigBuilder},
errors::JaegerError,
spans::{PerLeafSpan, Span, Stage},
spans::{hash_to_trace_identifier, PerLeafSpan, Span, Stage},
};
use self::spans::TraceIdentifier;
+8 -8
View File
@@ -174,7 +174,7 @@ pub(crate) type TraceIdentifier = u128;
/// A helper to convert the hash to the fixed size representation
/// needed for jaeger.
#[inline]
fn hash_to_identifier(hash: Hash) -> TraceIdentifier {
pub fn hash_to_trace_identifier(hash: Hash) -> TraceIdentifier {
let mut buf = [0u8; 16];
buf.copy_from_slice(&hash.as_ref()[0..16]);
// The slice bytes are copied in reading order, so if interpreted
@@ -197,13 +197,13 @@ pub trait LazyIdent {
impl<'a> LazyIdent for &'a [u8] {
fn eval(&self) -> TraceIdentifier {
hash_to_identifier(BlakeTwo256::hash_of(self))
hash_to_trace_identifier(BlakeTwo256::hash_of(self))
}
}
impl LazyIdent for &PoV {
fn eval(&self) -> TraceIdentifier {
hash_to_identifier(self.hash())
hash_to_trace_identifier(self.hash())
}
fn extra_tags(&self, span: &mut Span) {
@@ -213,7 +213,7 @@ impl LazyIdent for &PoV {
impl LazyIdent for Hash {
fn eval(&self) -> TraceIdentifier {
hash_to_identifier(*self)
hash_to_trace_identifier(*self)
}
fn extra_tags(&self, span: &mut Span) {
@@ -223,7 +223,7 @@ impl LazyIdent for Hash {
impl LazyIdent for &Hash {
fn eval(&self) -> TraceIdentifier {
hash_to_identifier(**self)
hash_to_trace_identifier(**self)
}
fn extra_tags(&self, span: &mut Span) {
@@ -233,7 +233,7 @@ impl LazyIdent for &Hash {
impl LazyIdent for CandidateHash {
fn eval(&self) -> TraceIdentifier {
hash_to_identifier(self.0)
hash_to_trace_identifier(self.0)
}
fn extra_tags(&self, span: &mut Span) {
@@ -472,7 +472,7 @@ mod tests {
#[test]
fn hash_derived_identifier_is_leading_16bytes() {
let candidate_hash = dbg!(Hash::from(&RAW));
let trace_id = dbg!(hash_to_identifier(candidate_hash));
let trace_id = dbg!(hash_to_trace_identifier(candidate_hash));
for (idx, (a, b)) in candidate_hash
.as_bytes()
.iter()
@@ -488,7 +488,7 @@ mod tests {
fn extra_tags_do_not_change_trace_id() {
Jaeger::test_setup();
let candidate_hash = dbg!(Hash::from(&RAW));
let trace_id = hash_to_identifier(candidate_hash);
let trace_id = hash_to_trace_identifier(candidate_hash);
let span = Span::new(candidate_hash, "foo");
+1 -1
View File
@@ -31,7 +31,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste
clap = { version = "3.1", features = ["derive"] }
futures = "0.3.21"
futures-timer = "3.0.2"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../gum/" }
[features]
default = []
@@ -170,15 +170,15 @@ where
response_sender,
);
} else {
tracing::info!(
target = MALUS,
gum::info!(
target: MALUS,
"Could not get availability data, can't back"
);
}
}),
);
} else {
tracing::info!(target = MALUS, "No CandidateReceipt available to work with");
gum::info!(target: MALUS, "No CandidateReceipt available to work with");
}
None
},
@@ -132,8 +132,8 @@ impl OverseerGen for SuggestGarbageCandidate {
move |(mut subsystem_sender, hash, candidate_receipt): (_, Hash, CandidateReceipt)| {
let keystore = keystore.clone();
async move {
tracing::info!(
target = MALUS,
gum::info!(
target: MALUS,
"Replacing seconded candidate pov with something else"
);
@@ -160,7 +160,7 @@ impl OverseerGen for SuggestGarbageCandidate {
))
.await;
} else {
tracing::info!("We are not a validator. Not siging anything.");
gum::info!("We are not a validator. Not siging anything.");
}
}
},
+1 -1
View File
@@ -9,7 +9,7 @@ description = "Channels with attached Meters"
futures = "0.3.21"
futures-timer = "3.0.2"
derive_more = "0.99"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../gum" }
thiserror = "1.0.30"
[dev-dependencies]
+1 -1
View File
@@ -224,7 +224,7 @@ impl<T> Future for MeteredReceiver<T> {
.get_or_insert_with(move || Delay::new(soft_timeout).fuse());
if Pin::new(soft_timeout).poll(ctx).is_ready() {
tracing::warn!("Oneshot `{name}` exceeded the soft threshold", name = &self.name);
gum::warn!("Oneshot `{name}` exceeded the soft threshold", name = &self.name);
}
let hard_timeout = self.hard_timeout.clone();
+1 -1
View File
@@ -8,7 +8,7 @@ description = "Subsystem metric helpers"
[dependencies]
futures = "0.3.21"
futures-timer = "3.0.2"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../gum" }
metered-channel = { path = "../metered-channel" }
+4 -8
View File
@@ -106,11 +106,7 @@ impl RuntimeMetricsProvider {
F: FnOnce(MutexGuard<'_, HashMap<String, Counter<U64>>>) -> Result<(), PrometheusError>,
{
let _ = self.1.counters.lock().map(do_something).or_else(|error| {
tracing::error!(
target: LOG_TARGET,
"Cannot acquire the counter hashmap lock: {:?}",
error
);
gum::error!(target: LOG_TARGET, "Cannot acquire the counter hashmap lock: {:?}", error);
Err(error)
});
}
@@ -120,7 +116,7 @@ impl RuntimeMetricsProvider {
F: FnOnce(MutexGuard<'_, HashMap<String, CounterVec<U64>>>) -> Result<(), PrometheusError>,
{
let _ = self.1.counter_vecs.lock().map(do_something).or_else(|error| {
tracing::error!(
gum::error!(
target: LOG_TARGET,
"Cannot acquire the countervec hashmap lock: {:?}",
error
@@ -154,7 +150,7 @@ impl sc_tracing::TraceHandler for RuntimeMetricsProvider {
self.parse_metric_update(update_op);
},
Err(e) => {
tracing::error!(target: LOG_TARGET, "TraceEvent decode failed: {:?}", e);
gum::error!(target: LOG_TARGET, "TraceEvent decode failed: {:?}", e);
},
}
}
@@ -195,7 +191,7 @@ impl RuntimeMetricsProvider {
pub fn logger_hook() -> impl FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration) -> () {
|logger_builder, config| {
if config.prometheus_registry().is_none() {
tracing::debug!(target: LOG_TARGET, "Prometheus registry is not configured.",);
gum::debug!(target: LOG_TARGET, "Prometheus registry is not configured.",);
return
}
let registry = config.prometheus_registry().cloned().unwrap();
@@ -12,7 +12,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-primitives = { path = "../../../primitives" }
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
[dev-dependencies]
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
@@ -206,11 +206,11 @@ impl State {
match event {
NetworkBridgeEvent::PeerConnected(peer_id, role, _) => {
// insert a blank view if none already present
tracing::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected");
gum::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected");
self.peer_views.entry(peer_id).or_default();
},
NetworkBridgeEvent::PeerDisconnected(peer_id) => {
tracing::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
self.peer_views.remove(&peer_id);
self.blocks.iter_mut().for_each(|(_hash, entry)| {
entry.known_by.remove(&peer_id);
@@ -230,7 +230,7 @@ impl State {
self.handle_peer_view_change(ctx, metrics, peer_id, view).await;
},
NetworkBridgeEvent::OurViewChange(view) => {
tracing::trace!(target: LOG_TARGET, ?view, "Own view change");
gum::trace!(target: LOG_TARGET, ?view, "Own view change");
for head in view.iter() {
if !self.blocks.contains_key(head) {
self.pending_known.entry(*head).or_default();
@@ -240,7 +240,7 @@ impl State {
self.pending_known.retain(|h, _| {
let live = view.contains(h);
if !live {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
block_hash = ?h,
"Cleaning up stale pending messages",
@@ -287,7 +287,7 @@ impl State {
}
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Got new blocks {:?}",
metas.iter().map(|m| (m.hash, m.number)).collect::<Vec<_>>(),
@@ -318,7 +318,7 @@ impl State {
let to_import = pending_now_known
.into_iter()
.inspect(|h| {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
block_hash = ?h,
"Extracting pending messages for new block"
@@ -329,7 +329,7 @@ impl State {
.collect::<Vec<_>>();
if !to_import.is_empty() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
num = to_import.len(),
"Processing pending assignment/approvals",
@@ -374,7 +374,7 @@ impl State {
) {
match msg {
protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
peer_id = %peer_id,
num = assignments.len(),
@@ -388,7 +388,7 @@ impl State {
assignment.validator,
);
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
%peer_id,
?fingerprint,
@@ -414,7 +414,7 @@ impl State {
}
},
protocol_v1::ApprovalDistributionMessage::Approvals(approvals) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
peer_id = %peer_id,
num = approvals.len(),
@@ -428,7 +428,7 @@ impl State {
approval_vote.validator,
);
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
%peer_id,
?fingerprint,
@@ -460,7 +460,7 @@ impl State {
peer_id: PeerId,
view: View,
) {
tracing::trace!(target: LOG_TARGET, ?view, "Peer view change");
gum::trace!(target: LOG_TARGET, ?view, "Peer view change");
let finalized_number = view.finalized_number;
let old_view = self.peer_views.insert(peer_id.clone(), view.clone());
let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0);
@@ -525,7 +525,7 @@ impl State {
Some(entry) => entry,
None => {
if let Some(peer_id) = source.peer_id() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?peer_id,
?block_hash,
@@ -549,7 +549,7 @@ impl State {
let peer_knowledge = peer_knowledge.get_mut();
if peer_knowledge.contains(&fingerprint) {
if peer_knowledge.received.contains(&fingerprint) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
@@ -562,7 +562,7 @@ impl State {
}
},
hash_map::Entry::Vacant(_) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
@@ -576,7 +576,7 @@ impl State {
if entry.knowledge.known_messages.contains(&fingerprint) {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
tracing::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known assignment");
gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known assignment");
peer_knowledge.received.insert(fingerprint.clone());
}
return
@@ -595,19 +595,13 @@ impl State {
let result = match rx.await {
Ok(result) => result,
Err(_) => {
tracing::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
gum::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
return
},
};
drop(timer);
tracing::trace!(
target: LOG_TARGET,
?source,
?fingerprint,
?result,
"Checked assignment",
);
gum::trace!(target: LOG_TARGET, ?source, ?fingerprint, ?result, "Checked assignment",);
match result {
AssignmentCheckResult::Accepted => {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
@@ -623,7 +617,7 @@ impl State {
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(fingerprint);
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
"Got an `AcceptedDuplicate` assignment",
@@ -631,7 +625,7 @@ impl State {
return
},
AssignmentCheckResult::TooFarInFuture => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
"Got an assignment too far in the future",
@@ -640,7 +634,7 @@ impl State {
return
},
AssignmentCheckResult::Bad(error) => {
tracing::info!(
gum::info!(
target: LOG_TARGET,
?peer_id,
%error,
@@ -653,18 +647,14 @@ impl State {
} else {
if !entry.knowledge.known_messages.insert(fingerprint.clone()) {
// if we already imported an assignment, there is no need to distribute it again
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?fingerprint,
"Importing locally an already known assignment",
);
return
} else {
tracing::debug!(
target: LOG_TARGET,
?fingerprint,
"Importing locally a new assignment",
);
gum::debug!(target: LOG_TARGET, ?fingerprint, "Importing locally a new assignment",);
}
}
@@ -682,7 +672,7 @@ impl State {
});
},
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?block_hash,
?claimed_candidate_index,
@@ -716,7 +706,7 @@ impl State {
}
if !peers.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block_hash,
?claimed_candidate_index,
@@ -769,7 +759,7 @@ impl State {
);
if !entry.knowledge.known_messages.contains(&assignment_fingerprint) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
@@ -785,7 +775,7 @@ impl State {
let peer_knowledge = knowledge.get_mut();
if peer_knowledge.contains(&fingerprint) {
if peer_knowledge.received.contains(&fingerprint) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
@@ -799,7 +789,7 @@ impl State {
}
},
hash_map::Entry::Vacant(_) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
@@ -811,7 +801,7 @@ impl State {
// if the approval is known to be valid, reward the peer
if entry.knowledge.contains(&fingerprint) {
tracing::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known approval");
gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, "Known approval");
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(fingerprint.clone());
@@ -828,19 +818,13 @@ impl State {
let result = match rx.await {
Ok(result) => result,
Err(_) => {
tracing::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
gum::debug!(target: LOG_TARGET, "The approval voting subsystem is down");
return
},
};
drop(timer);
tracing::trace!(
target: LOG_TARGET,
?peer_id,
?fingerprint,
?result,
"Checked approval",
);
gum::trace!(target: LOG_TARGET, ?peer_id, ?fingerprint, ?result, "Checked approval",);
match result {
ApprovalCheckResult::Accepted => {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
@@ -852,7 +836,7 @@ impl State {
},
ApprovalCheckResult::Bad(error) => {
modify_reputation(ctx, peer_id, COST_INVALID_MESSAGE).await;
tracing::info!(
gum::info!(
target: LOG_TARGET,
?peer_id,
%error,
@@ -864,18 +848,14 @@ impl State {
} else {
if !entry.knowledge.insert(fingerprint.clone()) {
// if we already imported an approval, there is no need to distribute it again
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?fingerprint,
"Importing locally an already known approval",
);
return
} else {
tracing::debug!(
target: LOG_TARGET,
?fingerprint,
"Importing locally a new approval",
);
gum::debug!(target: LOG_TARGET, ?fingerprint, "Importing locally a new approval",);
}
}
@@ -902,7 +882,7 @@ impl State {
},
None => {
// this would indicate a bug in approval-voting
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?block_hash,
?candidate_index,
@@ -913,7 +893,7 @@ impl State {
}
},
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?block_hash,
?candidate_index,
@@ -948,7 +928,7 @@ impl State {
let approvals = vec![vote];
if !peers.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block_hash,
?candidate_index,
@@ -1004,7 +984,7 @@ impl State {
// This safeguard is needed primarily in case of long finality stalls
// so we don't waste time in a loop for every peer.
if missing.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block,
?peer_id,
@@ -1036,7 +1016,7 @@ impl State {
util::MIN_GOSSIP_PEERS,
);
if !lucky {
tracing::trace!(target: LOG_TARGET, ?peer_id, "Unlucky peer");
gum::trace!(target: LOG_TARGET, ?peer_id, "Unlucky peer");
return
}
@@ -1062,7 +1042,7 @@ impl State {
None => continue, // should be unreachable
};
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
"Sending all assignments and approvals in block {} to peer {}",
block,
@@ -1083,7 +1063,7 @@ impl State {
match approval_state {
ApprovalState::Assigned(cert) => {
if !missing.contains(&assignment_fingerprint) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block,
?validator_index,
@@ -1123,7 +1103,7 @@ impl State {
candidate_index.clone(),
));
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block,
?validator_index,
@@ -1142,7 +1122,7 @@ impl State {
signature: signature.clone(),
});
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?block,
?validator_index,
@@ -1157,7 +1137,7 @@ impl State {
}
if !assignments.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
num = assignments.len(),
?num_blocks,
@@ -1175,7 +1155,7 @@ impl State {
}
if !approvals.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
num = approvals.len(),
?num_blocks,
@@ -1201,7 +1181,7 @@ async fn modify_reputation(
peer_id: PeerId,
rep: Rep,
) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
reputation = ?rep,
?peer_id,
@@ -1236,7 +1216,7 @@ impl ApprovalDistribution {
let message = match ctx.recv().await {
Ok(message) => message,
Err(e) => {
tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
gum::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
return
},
};
@@ -1249,13 +1229,13 @@ impl ApprovalDistribution {
FromOverseer::Communication {
msg: ApprovalDistributionMessage::NewBlocks(metas),
} => {
tracing::debug!(target: LOG_TARGET, "Processing NewBlocks");
gum::debug!(target: LOG_TARGET, "Processing NewBlocks");
state.handle_new_blocks(&mut ctx, &self.metrics, metas).await;
},
FromOverseer::Communication {
msg: ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index),
} => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Distributing our assignment on candidate (block={}, index={})",
cert.block_hash,
@@ -1275,7 +1255,7 @@ impl ApprovalDistribution {
FromOverseer::Communication {
msg: ApprovalDistributionMessage::DistributeApproval(vote),
} => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Distributing our approval vote on candidate (block={}, index={})",
vote.block_hash,
@@ -1294,11 +1274,11 @@ impl ApprovalDistribution {
FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
..
})) => {
tracing::trace!(target: LOG_TARGET, "active leaves signal (ignored)");
gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)");
// handled by NewBlocks
},
FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number)) => {
tracing::trace!(target: LOG_TARGET, number = %number, "finalized signal");
gum::trace!(target: LOG_TARGET, number = %number, "finalized signal");
state.handle_block_finalized(number);
},
FromOverseer::Signal(OverseerSignal::Conclude) => return,
@@ -72,7 +72,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
const TIMEOUT: Duration = Duration::from_millis(100);
async fn overseer_send(overseer: &mut VirtualOverseer, msg: ApprovalDistributionMessage) {
tracing::trace!(msg = ?msg, "Sending message");
gum::trace!(msg = ?msg, "Sending message");
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
@@ -81,7 +81,7 @@ async fn overseer_send(overseer: &mut VirtualOverseer, msg: ApprovalDistribution
}
async fn overseer_signal_block_finalized(overseer: &mut VirtualOverseer, number: BlockNumber) {
tracing::trace!(?number, "Sending a finalized signal");
gum::trace!(?number, "Sending a finalized signal");
// we don't care about the block hash
overseer
.send(FromOverseer::Signal(OverseerSignal::BlockFinalized(Hash::zero(), number)))
@@ -91,10 +91,10 @@ async fn overseer_signal_block_finalized(overseer: &mut VirtualOverseer, number:
}
async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
tracing::trace!("Waiting for a message");
gum::trace!("Waiting for a message");
let msg = overseer.recv().timeout(TIMEOUT).await.expect("msg recv timeout");
tracing::trace!(msg = ?msg, "Received message");
gum::trace!(msg = ?msg, "Received message");
msg
}
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
parity-scale-codec = { version = "3.1.0", features = ["std"] }
polkadot-primitives = { path = "../../../primitives" }
polkadot-erasure-coding = { path = "../../../erasure-coding" }
@@ -100,12 +100,11 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(
JfyiError::InvalidValidatorIndex |
JfyiError::NoSuchCachedSession { .. } |
JfyiError::QueryAvailableDataResponseChannel(_) |
JfyiError::QueryChunkResponseChannel(_) =>
tracing::warn!(target: LOG_TARGET, error = %jfyi, ctx),
JfyiError::QueryChunkResponseChannel(_) => gum::warn!(target: LOG_TARGET, error = %jfyi, ctx),
JfyiError::FetchPoV(_) |
JfyiError::SendResponse |
JfyiError::NoSuchPoV |
JfyiError::Runtime(_) => tracing::debug!(target: LOG_TARGET, error = ?jfyi, ctx),
JfyiError::Runtime(_) => gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx),
}
Ok(())
},
@@ -91,7 +91,7 @@ async fn fetch_pov_job(
metrics: Metrics,
) {
if let Err(err) = do_fetch_pov(pov_hash, pending_response, span, tx, metrics).await {
tracing::warn!(target: LOG_TARGET, ?err, ?pov_hash, ?authority_id, "fetch_pov_job");
gum::warn!(target: LOG_TARGET, ?err, ?pov_hash, ?authority_id, "fetch_pov_job");
}
}
@@ -207,7 +207,7 @@ mod tests {
.unwrap();
break
},
msg => tracing::debug!(target: LOG_TARGET, msg = ?msg, "Received msg"),
msg => gum::debug!(target: LOG_TARGET, msg = ?msg, "Received msg"),
}
}
if pov.hash() == pov_hash {
@@ -271,7 +271,7 @@ impl RunningTask {
let resp = match self.do_request(&validator).await {
Ok(resp) => resp,
Err(TaskError::ShuttingDown) => {
tracing::info!(
gum::info!(
target: LOG_TARGET,
"Node seems to be shutting down, canceling fetch task"
);
@@ -286,7 +286,7 @@ impl RunningTask {
let chunk = match resp {
ChunkFetchingResponse::Chunk(resp) => resp.recombine_into_chunk(&self.request),
ChunkFetchingResponse::NoSuchChunk => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
validator = ?validator,
"Validator did not have our chunk"
@@ -337,7 +337,7 @@ impl RunningTask {
match response_recv.await {
Ok(resp) => Ok(resp),
Err(RequestError::InvalidResponse(err)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
origin= ?validator,
err= ?err,
@@ -346,7 +346,7 @@ impl RunningTask {
Err(TaskError::PeerError)
},
Err(RequestError::NetworkError(err)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
origin= ?validator,
err= ?err,
@@ -355,7 +355,7 @@ impl RunningTask {
Err(TaskError::PeerError)
},
Err(RequestError::Canceled(oneshot::Canceled)) => {
tracing::debug!(target: LOG_TARGET,
gum::debug!(target: LOG_TARGET,
origin= ?validator,
"Erasure chunk request got canceled");
Err(TaskError::PeerError)
@@ -368,7 +368,7 @@ impl RunningTask {
match branch_hash(&self.erasure_root, chunk.proof(), chunk.index.0 as usize) {
Ok(hash) => hash,
Err(e) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
candidate_hash = ?self.request.candidate_hash,
origin = ?validator,
@@ -380,7 +380,7 @@ impl RunningTask {
};
let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
if anticipated_hash != erasure_chunk_hash {
tracing::warn!(target: LOG_TARGET, origin = ?validator, "Received chunk does not match merkle tree");
gum::warn!(target: LOG_TARGET, origin = ?validator, "Received chunk does not match merkle tree");
return false
}
true
@@ -400,11 +400,11 @@ impl RunningTask {
)))
.await;
if let Err(err) = r {
tracing::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");
gum::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");
}
if let Err(oneshot::Canceled) = rx.await {
tracing::error!(target: LOG_TARGET, "Storing erasure chunk failed");
gum::error!(target: LOG_TARGET, "Storing erasure chunk failed");
}
}
@@ -420,7 +420,7 @@ impl RunningTask {
})
};
if let Err(err) = self.sender.send(FromFetchTask::Concluded(payload)).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err= ?err,
"Sending concluded message for task failed"
@@ -431,7 +431,7 @@ impl RunningTask {
async fn conclude_fail(&mut self) {
if let Err(err) = self.sender.send(FromFetchTask::Failed(self.request.candidate_hash)).await
{
tracing::warn!(target: LOG_TARGET, ?err, "Sending `Failed` message for task failed");
gum::warn!(target: LOG_TARGET, ?err, "Sending `Failed` message for task failed");
}
}
}
@@ -262,7 +262,7 @@ impl TestRun {
return true
},
_ => {
tracing::debug!(target: LOG_TARGET, "Unexpected message");
gum::debug!(target: LOG_TARGET, "Unexpected message");
return false
},
}
@@ -103,7 +103,7 @@ impl Requester {
where
Context: SubsystemContext,
{
tracing::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
let ActiveLeavesUpdate { activated, deactivated } = update;
// Stale leaves happen after a reversion - we don't want to re-run availability there.
if let Some(leaf) = activated.filter(|leaf| leaf.status == LeafStatus::Fresh) {
@@ -140,7 +140,7 @@ impl Requester {
// Also spawn or bump tasks for candidates in ancestry in the same session.
for hash in std::iter::once(leaf).chain(ancestors_in_session) {
let cores = get_occupied_cores(ctx, hash).await?;
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
occupied_cores = ?cores,
"Query occupied core"
@@ -212,7 +212,7 @@ impl Requester {
)
.await
.map_err(|err| {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
error = ?err,
"Failed to spawn a fetch task"
@@ -107,16 +107,16 @@ impl SessionCache {
F: FnOnce(&SessionInfo) -> R,
{
if let Some(o_info) = self.session_info_cache.get(&session_index) {
tracing::trace!(target: LOG_TARGET, session_index, "Got session from lru");
gum::trace!(target: LOG_TARGET, session_index, "Got session from lru");
return Ok(Some(with_info(o_info)))
}
if let Some(info) =
self.query_info_from_runtime(ctx, runtime, parent, session_index).await?
{
tracing::trace!(target: LOG_TARGET, session_index, "Calling `with_info`");
gum::trace!(target: LOG_TARGET, session_index, "Calling `with_info`");
let r = with_info(&info);
tracing::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!");
gum::trace!(target: LOG_TARGET, session_index, "Storing session info in lru!");
self.session_info_cache.put(session_index, info);
Ok(Some(r))
} else {
@@ -130,7 +130,7 @@ impl SessionCache {
/// subsystem on this.
pub fn report_bad_log(&mut self, report: BadValidators) {
if let Err(err) = self.report_bad(report) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?err,
"Reporting bad validators failed with error"
@@ -51,7 +51,7 @@ pub async fn run_pov_receiver<Sender>(
answer_pov_request_log(&mut sender, msg, &metrics).await;
},
Err(fatal) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?fatal,
"Shutting down POV receiver."
@@ -59,7 +59,7 @@ pub async fn run_pov_receiver<Sender>(
return
},
Ok(Err(jfyi)) => {
tracing::debug!(target: LOG_TARGET, error = ?jfyi, "Error decoding incoming PoV request.");
gum::debug!(target: LOG_TARGET, error = ?jfyi, "Error decoding incoming PoV request.");
},
}
}
@@ -79,7 +79,7 @@ pub async fn run_chunk_receiver<Sender>(
answer_chunk_request_log(&mut sender, msg, &metrics).await;
},
Err(fatal) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?fatal,
"Shutting down chunk receiver."
@@ -87,7 +87,7 @@ pub async fn run_chunk_receiver<Sender>(
return
},
Ok(Err(jfyi)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?jfyi,
"Error decoding incoming chunk request."
@@ -111,7 +111,7 @@ pub async fn answer_pov_request_log<Sender>(
match res {
Ok(result) => metrics.on_served_pov(if result { SUCCEEDED } else { NOT_FOUND }),
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err= ?err,
"Serving PoV failed with error"
@@ -136,7 +136,7 @@ where
match res {
Ok(result) => metrics.on_served_chunk(if result { SUCCEEDED } else { NOT_FOUND }),
Err(err) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err= ?err,
"Serving chunk failed with error"
@@ -192,7 +192,7 @@ where
let result = chunk.is_some();
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
hash = ?req.payload.candidate_hash,
index = ?req.payload.index,
@@ -227,7 +227,7 @@ where
.await;
let result = rx.await.map_err(|e| {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?validator_index,
?candidate_hash,
@@ -211,7 +211,7 @@ impl TestState {
);
while remaining_stores > 0 {
tracing::trace!(target: LOG_TARGET, remaining_stores, "Stores left to go");
gum::trace!(target: LOG_TARGET, remaining_stores, "Stores left to go");
let msg = overseer_recv(&mut rx).await;
match msg {
AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
@@ -255,7 +255,7 @@ impl TestState {
"Only valid chunks should ever get stored."
);
tx.send(Ok(())).expect("Receiver is expected to be alive");
tracing::trace!(target: LOG_TARGET, "'Stored' fetched chunk.");
gum::trace!(target: LOG_TARGET, "'Stored' fetched chunk.");
remaining_stores -= 1;
},
AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, req)) => {
@@ -269,7 +269,7 @@ impl TestState {
.expect("Receiver should be alive.");
},
RuntimeApiRequest::AvailabilityCores(tx) => {
tracing::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash");
gum::trace!(target: LOG_TARGET, cores= ?self.cores[&hash], hash = ?hash, "Sending out cores for hash");
tx.send(Ok(self.cores[&hash].clone()))
.expect("Receiver should still be alive");
},
@@ -299,12 +299,12 @@ async fn overseer_signal(
msg: impl Into<OverseerSignal>,
) {
let msg = msg.into();
tracing::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
gum::trace!(target: LOG_TARGET, msg = ?msg, "sending message");
tx.send(FromOverseer::Signal(msg)).await.expect("Test subsystem no longer live");
}
async fn overseer_recv(rx: &mut mpsc::UnboundedReceiver<AllMessages>) -> AllMessages {
tracing::trace!(target: LOG_TARGET, "waiting for message ...");
gum::trace!(target: LOG_TARGET, "waiting for message ...");
rx.next().await.expect("Test subsystem no longer live")
}
@@ -10,7 +10,7 @@ lru = "0.7.3"
rand = "0.8.5"
fatality = "0.0.6"
thiserror = "1.0.30"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-erasure-coding = { path = "../../../erasure-coding" }
polkadot-primitives = { path = "../../../primitives" }
@@ -180,7 +180,7 @@ impl RequestFromBackers {
params: &RecoveryParams,
sender: &mut impl SubsystemSender,
) -> Result<AvailableData, RecoveryError> {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
erasure_root = ?params.erasure_root,
@@ -216,7 +216,7 @@ impl RequestFromBackers {
&params.erasure_root,
&data,
) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
"Received full data",
@@ -224,7 +224,7 @@ impl RequestFromBackers {
return Ok(data)
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
?validator_index,
@@ -235,7 +235,7 @@ impl RequestFromBackers {
}
},
Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {},
Err(e) => tracing::debug!(
Err(e) => gum::debug!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
?validator_index,
@@ -310,7 +310,7 @@ impl RequestChunksFromValidators {
while self.requesting_chunks.len() < num_requests {
if let Some(validator_index) = self.shuffling.pop_back() {
let validator = params.validator_authority_keys[validator_index.0 as usize].clone();
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?validator,
?validator_index,
@@ -379,7 +379,7 @@ impl RequestChunksFromValidators {
metrics.on_chunk_request_invalid();
self.error_count += 1;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
?validator_index,
@@ -388,7 +388,7 @@ impl RequestChunksFromValidators {
} else {
metrics.on_chunk_request_succeeded();
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
?validator_index,
@@ -400,7 +400,7 @@ impl RequestChunksFromValidators {
metrics.on_chunk_request_invalid();
self.error_count += 1;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
?validator_index,
@@ -415,7 +415,7 @@ impl RequestChunksFromValidators {
Err((validator_index, e)) => {
self.error_count += 1;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash= ?params.candidate_hash,
err = ?e,
@@ -479,7 +479,7 @@ impl RequestChunksFromValidators {
}
},
Err(oneshot::Canceled) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
"Failed to reach the availability store"
@@ -490,7 +490,7 @@ impl RequestChunksFromValidators {
loop {
if self.is_unavailable(&params) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
erasure_root = ?params.erasure_root,
@@ -521,7 +521,7 @@ impl RequestChunksFromValidators {
&params.erasure_root,
&data,
) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
erasure_root = ?params.erasure_root,
@@ -530,7 +530,7 @@ impl RequestChunksFromValidators {
Ok(data)
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
erasure_root = ?params.erasure_root,
@@ -541,7 +541,7 @@ impl RequestChunksFromValidators {
}
},
Err(err) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?params.candidate_hash,
erasure_root = ?params.erasure_root,
@@ -589,7 +589,7 @@ fn reconstructed_data_matches_root(
let chunks = match obtain_chunks_v1(n_validators, data) {
Ok(chunks) => chunks,
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
err = ?e,
"Failed to obtain chunks",
@@ -619,7 +619,7 @@ impl<S: SubsystemSender> RecoveryTask<S> {
Ok(Some(data)) => return Ok(data),
Ok(None) => {},
Err(oneshot::Canceled) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
candidate_hash = ?self.params.candidate_hash,
"Failed to reach the availability store",
@@ -669,7 +669,7 @@ impl Future for RecoveryHandle {
// these are reverse order, so remove is fine.
for index in indices_to_remove {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?self.candidate_hash,
"Receiver for available data dropped.",
@@ -679,7 +679,7 @@ impl Future for RecoveryHandle {
}
if self.awaiting.is_empty() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?self.candidate_hash,
"All receivers for available data dropped.",
@@ -829,7 +829,7 @@ where
});
if let Err(e) = ctx.spawn("recovery-task", Box::pin(remote)) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Failed to spawn a recovery task",
@@ -862,7 +862,7 @@ where
state.availability_lru.get(&candidate_hash).cloned().map(|v| v.into_result())
{
if let Err(e) = response_sender.send(result) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Error responding with an availability recovery result",
@@ -898,7 +898,7 @@ where
)
.await,
None => {
tracing::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block);
gum::warn!(target: LOG_TARGET, "SessionInfo is `None` at {:?}", state.live_block);
response_sender
.send(Err(RecoveryError::Unavailable))
.map_err(|_| error::Error::CanceledResponseSender)?;
@@ -978,7 +978,7 @@ impl AvailabilityRecoverySubsystem {
response_sender,
&metrics,
).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Error handling a recovery request",
@@ -997,7 +997,7 @@ impl AvailabilityRecoverySubsystem {
let _ = req.send_response(res.into());
}
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
err = ?e,
"Failed to query available data.",
@@ -1008,7 +1008,7 @@ impl AvailabilityRecoverySubsystem {
}
}
Err(jfyi) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?jfyi,
"Decoding incoming request failed"
@@ -134,7 +134,7 @@ async fn overseer_send(
overseer: &mut TestSubsystemContextHandle<AvailabilityRecoveryMessage>,
msg: AvailabilityRecoveryMessage,
) {
tracing::trace!(msg = ?msg, "sending message");
gum::trace!(msg = ?msg, "sending message");
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
@@ -145,9 +145,9 @@ async fn overseer_send(
async fn overseer_recv(
overseer: &mut TestSubsystemContextHandle<AvailabilityRecoveryMessage>,
) -> AllMessages {
tracing::trace!("waiting for message ...");
gum::trace!("waiting for message ...");
let msg = overseer.recv().timeout(TIMEOUT).await.expect("TIMEOUT is enough to recv.");
tracing::trace!(msg = ?msg, "received message");
gum::trace!(msg = ?msg, "received message");
msg
}
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
@@ -179,7 +179,7 @@ impl BitfieldDistribution {
let message = match ctx.recv().await {
Ok(message) => message,
Err(e) => {
tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
gum::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting");
return
},
};
@@ -187,7 +187,7 @@ impl BitfieldDistribution {
FromOverseer::Communication {
msg: BitfieldDistributionMessage::DistributeBitfield(hash, signed_availability),
} => {
tracing::trace!(target: LOG_TARGET, ?hash, "Processing DistributeBitfield");
gum::trace!(target: LOG_TARGET, ?hash, "Processing DistributeBitfield");
handle_bitfield_distribution(
&mut ctx,
&mut state,
@@ -200,7 +200,7 @@ impl BitfieldDistribution {
FromOverseer::Communication {
msg: BitfieldDistributionMessage::NetworkBridgeUpdateV1(event),
} => {
tracing::trace!(target: LOG_TARGET, "Processing NetworkMessage");
gum::trace!(target: LOG_TARGET, "Processing NetworkMessage");
// a network message was received
handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await;
},
@@ -213,7 +213,7 @@ impl BitfieldDistribution {
for activated in activated {
let relay_parent = activated.hash;
tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated");
gum::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated");
let span = PerLeafSpan::new(activated.span, "bitfield-distribution");
let _span = span.child("query-basics");
@@ -231,17 +231,17 @@ impl BitfieldDistribution {
);
},
Err(e) => {
tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed");
gum::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed");
},
_ => {},
}
}
},
FromOverseer::Signal(OverseerSignal::BlockFinalized(hash, number)) => {
tracing::trace!(target: LOG_TARGET, hash = %hash, number = %number, "block finalized");
gum::trace!(target: LOG_TARGET, hash = %hash, number = %number, "block finalized");
},
FromOverseer::Signal(OverseerSignal::Conclude) => {
tracing::trace!(target: LOG_TARGET, "Conclude");
gum::trace!(target: LOG_TARGET, "Conclude");
return
},
}
@@ -254,7 +254,7 @@ async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep)
where
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
{
tracing::trace!(target: LOG_TARGET, ?rep, peer_id = %peer, "reputation change");
gum::trace!(target: LOG_TARGET, ?rep, peer_id = %peer, "reputation change");
ctx.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await
}
@@ -278,7 +278,7 @@ async fn handle_bitfield_distribution<Context>(
let job_data: &mut _ = if let Some(ref mut job_data) = job_data {
job_data
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
relay_parent = %relay_parent,
"Not supposed to work on relay parent related data",
@@ -288,7 +288,7 @@ async fn handle_bitfield_distribution<Context>(
};
let validator_set = &job_data.validator_set;
if validator_set.is_empty() {
tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty");
gum::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty");
return
}
@@ -296,11 +296,7 @@ async fn handle_bitfield_distribution<Context>(
let validator = if let Some(validator) = validator_set.get(validator_index) {
validator.clone()
} else {
tracing::trace!(
target: LOG_TARGET,
"Could not find a validator for index {}",
validator_index
);
gum::trace!(target: LOG_TARGET, "Could not find a validator for index {}", validator_index);
return
};
@@ -374,7 +370,7 @@ async fn relay_message<Context>(
drop(_span);
if interested_peers.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
relay_parent = %message.relay_parent,
"no peers are interested in gossip for relay parent",
@@ -400,7 +396,7 @@ async fn process_incoming_peer_message<Context>(
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
{
let protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) = message;
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
peer_id = %origin,
?relay_parent,
@@ -432,7 +428,7 @@ async fn process_incoming_peer_message<Context>(
let validator_set = &job_data.validator_set;
if validator_set.is_empty() {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
relay_parent = %relay_parent,
?origin,
@@ -460,7 +456,7 @@ async fn process_incoming_peer_message<Context>(
if !received_set.contains(&validator) {
received_set.insert(validator.clone());
} else {
tracing::trace!(target: LOG_TARGET, ?validator_index, ?origin, "Duplicate message");
gum::trace!(target: LOG_TARGET, ?validator_index, ?origin, "Duplicate message");
modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await;
return
};
@@ -469,7 +465,7 @@ async fn process_incoming_peer_message<Context>(
// only relay_message a message of a validator once
if let Some(old_message) = one_per_validator.get(&validator) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?validator_index,
"already received a message for validator",
@@ -512,12 +508,12 @@ async fn handle_network_msg<Context>(
match bridge_message {
NetworkBridgeEvent::PeerConnected(peerid, role, _) => {
tracing::trace!(target: LOG_TARGET, ?peerid, ?role, "Peer connected");
gum::trace!(target: LOG_TARGET, ?peerid, ?role, "Peer connected");
// insert if none already present
state.peer_views.entry(peerid).or_default();
},
NetworkBridgeEvent::PeerDisconnected(peerid) => {
tracing::trace!(target: LOG_TARGET, ?peerid, "Peer disconnected");
gum::trace!(target: LOG_TARGET, ?peerid, "Peer disconnected");
// get rid of superfluous data
state.peer_views.remove(&peerid);
},
@@ -531,11 +527,11 @@ async fn handle_network_msg<Context>(
}
},
NetworkBridgeEvent::PeerViewChange(peerid, view) => {
tracing::trace!(target: LOG_TARGET, ?peerid, ?view, "Peer view change");
gum::trace!(target: LOG_TARGET, ?peerid, ?view, "Peer view change");
handle_peer_view_change(ctx, state, peerid, view).await;
},
NetworkBridgeEvent::OurViewChange(view) => {
tracing::trace!(target: LOG_TARGET, ?view, "Our view change");
gum::trace!(target: LOG_TARGET, ?view, "Our view change");
handle_our_view_change(state, view);
},
NetworkBridgeEvent::PeerMessage(remote, message) =>
@@ -549,7 +545,7 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
for added in state.view.difference(&old_view) {
if !state.per_relay_parent.contains_key(&added) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
added = %added,
"Our view contains {} but the overseer never told use we should work on this",
@@ -589,7 +585,7 @@ async fn handle_peer_view_change<Context>(
);
if !lucky {
tracing::trace!(target: LOG_TARGET, ?origin, "Peer view change is ignored");
gum::trace!(target: LOG_TARGET, ?origin, "Peer view change is ignored");
return
}
@@ -637,7 +633,7 @@ async fn send_tracked_gossip_message<Context>(
};
let _span = job_data.span.child("gossip");
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?dest,
?validator,
@@ -699,7 +695,7 @@ where
(Ok(v), Ok(s)) =>
Ok(Some((v, SigningContext { parent_hash: relay_parent, session_index: s }))),
(Err(e), _) | (_, Err(e)) => {
tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API");
gum::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API");
Ok(None)
},
}
+1 -1
View File
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
async-trait = "0.1.52"
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
parity-scale-codec = { version = "3.1.0", default-features = false, features = ["derive"] }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+19 -19
View File
@@ -374,7 +374,7 @@ where
msg = ctx.recv().fuse() => match msg {
Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(active_leaves))) => {
let ActiveLeavesUpdate { activated, deactivated } = active_leaves;
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "ActiveLeaves",
has_activated = activated.is_some(),
@@ -413,7 +413,7 @@ where
}
}
Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_hash, number))) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "BlockFinalized"
);
@@ -431,7 +431,7 @@ where
Ok(FromOverseer::Communication { msg }) => match msg {
NetworkBridgeMessage::ReportPeer(peer, rep) => {
if !rep.is_benefit() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer,
?rep,
@@ -441,7 +441,7 @@ where
network_service.report_peer(peer, rep);
}
NetworkBridgeMessage::DisconnectPeer(peer, peer_set) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "DisconnectPeer",
?peer,
@@ -450,7 +450,7 @@ where
network_service.disconnect_peer(peer, peer_set);
}
NetworkBridgeMessage::SendValidationMessage(peers, msg) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "SendValidationMessages",
num_messages = 1,
@@ -465,7 +465,7 @@ where
);
}
NetworkBridgeMessage::SendValidationMessages(msgs) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "SendValidationMessages",
num_messages = %msgs.len(),
@@ -482,7 +482,7 @@ where
}
}
NetworkBridgeMessage::SendCollationMessage(peers, msg) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "SendCollationMessages",
num_messages = 1,
@@ -497,7 +497,7 @@ where
);
}
NetworkBridgeMessage::SendCollationMessages(msgs) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "SendCollationMessages",
num_messages = %msgs.len(),
@@ -514,7 +514,7 @@ where
}
}
NetworkBridgeMessage::SendRequests(reqs, if_disconnected) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "SendRequests",
num_requests = %reqs.len(),
@@ -531,7 +531,7 @@ where
peer_set,
failed,
} => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "ConnectToValidators",
peer_set = ?peer_set,
@@ -556,7 +556,7 @@ where
validator_addrs,
peer_set,
} => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "ConnectToPeers",
peer_set = ?peer_set,
@@ -576,7 +576,7 @@ where
NetworkBridgeMessage::NewGossipTopology {
our_neighbors,
} => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
action = "NewGossipTopology",
neighbors = our_neighbors.len(),
@@ -632,7 +632,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
Some(peer_set) => peer_set,
};
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
action = "PeerConnected",
peer_set = ?peer_set,
@@ -716,7 +716,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
Some(peer_set) => peer_set,
};
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
action = "PeerDisconnected",
peer_set = ?peer_set,
@@ -766,7 +766,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
let v_messages = match v_messages {
Err(_) => {
tracing::debug!(target: LOG_TARGET, action = "ReportPeer");
gum::debug!(target: LOG_TARGET, action = "ReportPeer");
network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
continue
@@ -784,7 +784,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
match c_messages {
Err(_) => {
tracing::debug!(target: LOG_TARGET, action = "ReportPeer");
gum::debug!(target: LOG_TARGET, action = "ReportPeer");
network_service.report_peer(remote, MALFORMED_MESSAGE_COST);
continue
@@ -793,7 +793,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
if v_messages.is_empty() && c_messages.is_empty() {
continue
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
action = "PeerMessages",
peer = ?remote,
@@ -896,7 +896,7 @@ where
{
Ok(()) => Ok(()),
Err(UnexpectedAbort::SubsystemError(err)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?err,
"Shutting down Network Bridge due to error"
@@ -908,7 +908,7 @@ where
)))
},
Err(UnexpectedAbort::EventStreamConcluded) => {
tracing::info!(
gum::info!(
target: LOG_TARGET,
"Shutting down Network Bridge: underlying request stream concluded"
);
+3 -5
View File
@@ -176,14 +176,12 @@ impl Network for Arc<NetworkService<Block, Hash>> {
let peer_id = match peer_id {
None => {
tracing::debug!(target: LOG_TARGET, "Discovering authority failed");
gum::debug!(target: LOG_TARGET, "Discovering authority failed");
match pending_response
.send(Err(RequestFailure::Network(OutboundFailure::DialFailure)))
{
Err(_) => tracing::debug!(
target: LOG_TARGET,
"Sending failed request response failed."
),
Err(_) =>
gum::debug!(target: LOG_TARGET, "Sending failed request response failed."),
Ok(_) => {},
}
return
@@ -66,7 +66,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
let removed = peers_to_remove.len();
state.previously_requested = new_peer_ids;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_set,
?num_peers,
@@ -79,7 +79,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
.set_reserved_peers(peer_set.into_protocol_name(), newly_requested)
.await
{
tracing::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress");
gum::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress");
}
// the addresses are known to be valid
let _ = network_service
@@ -116,7 +116,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
newly_requested.extend(addresses);
} else {
failed_to_resolve += 1;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Authority Discovery couldn't resolve {:?}",
authority
@@ -124,7 +124,7 @@ impl<N: Network, AD: AuthorityDiscovery> Service<N, AD> {
}
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer_set,
?requested,
@@ -8,7 +8,7 @@ edition = "2021"
always-assert = "0.1.2"
futures = "0.3.21"
futures-timer = "3"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -345,7 +345,7 @@ where
// This collation is not in the active-leaves set.
if !state.view.contains(&relay_parent) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?relay_parent,
"distribute collation message parent is outside of our view",
@@ -364,7 +364,7 @@ where
let (our_core, num_cores) = match determine_core(ctx, id, relay_parent).await? {
Some(core) => core,
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
para_id = %id,
?relay_parent,
@@ -380,7 +380,7 @@ where
determine_our_validators(ctx, runtime, our_core, num_cores, relay_parent).await?;
if current_validators.validators.is_empty() {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
core = ?our_core,
"there are no validators assigned to core",
@@ -389,7 +389,7 @@ where
return Ok(())
}
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
para_id = %id,
relay_parent = %relay_parent,
@@ -472,7 +472,7 @@ where
.get_session_info_by_index(ctx.sender(), relay_parent, session_index)
.await?
.session_info;
tracing::debug!(target: LOG_TARGET, ?session_index, "Received session info");
gum::debug!(target: LOG_TARGET, ?session_index, "Received session info");
let groups = &info.validator_groups;
let rotation_info = get_group_rotation_info(ctx, relay_parent).await?;
@@ -554,7 +554,7 @@ async fn advertise_collation<Context>(
match (state.collations.get_mut(&relay_parent), should_advertise) {
(None, _) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?relay_parent,
peer_id = %peer,
@@ -563,7 +563,7 @@ async fn advertise_collation<Context>(
return
},
(_, false) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
peer_id = %peer,
@@ -572,7 +572,7 @@ async fn advertise_collation<Context>(
return
},
(Some(collation), true) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
peer_id = %peer,
@@ -626,7 +626,7 @@ where
Some(id) if receipt.descriptor.para_id != id => {
// If the ParaId of a collation requested to be distributed does not match
// the one we expect, we ignore the message.
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
para_id = %receipt.descriptor.para_id,
collating_on = %id,
@@ -638,7 +638,7 @@ where
.await?;
},
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
para_id = %receipt.descriptor.para_id,
"DistributeCollation message while not collating on any",
@@ -647,14 +647,14 @@ where
}
},
ReportCollator(_) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"ReportCollator message is not expected on the collator side of the protocol",
);
},
NetworkBridgeUpdateV1(event) => {
if let Err(e) = handle_network_msg(ctx, runtime, state, event).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Failed to handle incoming network message",
@@ -686,14 +686,14 @@ async fn send_collation(
};
if let Err(_) = request.send_outgoing_response(response) {
tracing::warn!(target: LOG_TARGET, "Sending collation response failed");
gum::warn!(target: LOG_TARGET, "Sending collation response failed");
}
state.active_collation_fetches.push(
async move {
let r = rx.timeout(MAX_UNSHARED_UPLOAD_TIME).await;
if r.is_none() {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
?peer_id,
@@ -724,7 +724,7 @@ where
match msg {
Declare(_, _, _) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?origin,
"Declare message is not expected on the collator side of the protocol",
@@ -735,7 +735,7 @@ where
.await;
},
AdvertiseCollation(_) => {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?origin,
"AdvertiseCollation message is not expected on the collator side of the protocol",
@@ -753,7 +753,7 @@ where
},
CollationSeconded(relay_parent, statement) => {
if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
?statement,
?origin,
@@ -769,7 +769,7 @@ where
state.collation_result_senders.remove(&statement.payload().candidate_hash());
if let Some(sender) = removed {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?statement,
?origin,
@@ -806,7 +806,7 @@ where
collation.status.advance_to_requested();
(collation.receipt.clone(), collation.pov.clone())
} else {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
relay_parent = %req.payload.relay_parent,
"received a `RequestCollation` for a relay parent we don't have collation stored.",
@@ -823,7 +823,7 @@ where
state.waiting_collation_fetches.entry(req.payload.relay_parent).or_default();
if !waiting.waiting_peers.insert(req.peer) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
"Dropping incoming request as peer has a request in flight already."
);
@@ -840,7 +840,7 @@ where
}
},
Some(our_para_id) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
for_para_id = %req.payload.para_id,
our_para_id = %our_para_id,
@@ -848,7 +848,7 @@ where
);
},
None => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
for_para_id = %req.payload.para_id,
"received a `RequestCollation` while not collating on any para",
@@ -896,9 +896,9 @@ where
PeerConnected(peer_id, observed_role, maybe_authority) => {
// If it is possible that a disconnected validator would attempt a reconnect
// it should be handled here.
tracing::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected");
gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected");
if let Some(authority_ids) = maybe_authority {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?authority_ids,
?peer_id,
@@ -910,16 +910,16 @@ where
}
},
PeerViewChange(peer_id, view) => {
tracing::trace!(target: LOG_TARGET, ?peer_id, ?view, "Peer view change");
gum::trace!(target: LOG_TARGET, ?peer_id, ?view, "Peer view change");
handle_peer_view_change(ctx, state, peer_id, view).await;
},
PeerDisconnected(peer_id) => {
tracing::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected");
state.peer_views.remove(&peer_id);
state.peer_ids.remove(&peer_id);
},
OurViewChange(view) => {
tracing::trace!(target: LOG_TARGET, ?view, "Own view change");
gum::trace!(target: LOG_TARGET, ?view, "Own view change");
handle_our_view_change(state, view).await?;
},
PeerMessage(remote, msg) => {
@@ -936,25 +936,25 @@ where
/// Handles our view changes.
async fn handle_our_view_change(state: &mut State, view: OurView) -> Result<()> {
for removed in state.view.difference(&view) {
tracing::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed.");
gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed.");
if let Some(collation) = state.collations.remove(removed) {
state.collation_result_senders.remove(&collation.receipt.hash());
match collation.status {
CollationStatus::Created => tracing::warn!(
CollationStatus::Created => gum::warn!(
target: LOG_TARGET,
candidate_hash = ?collation.receipt.hash(),
pov_hash = ?collation.pov.hash(),
"Collation wasn't advertised to any validator.",
),
CollationStatus::Advertised => tracing::debug!(
CollationStatus::Advertised => gum::debug!(
target: LOG_TARGET,
candidate_hash = ?collation.receipt.hash(),
pov_hash = ?collation.pov.hash(),
"Collation was advertised but not requested by any validator.",
),
CollationStatus::Requested => tracing::debug!(
CollationStatus::Requested => gum::debug!(
target: LOG_TARGET,
candidate_hash = ?collation.receipt.hash(),
pov_hash = ?collation.pov.hash(),
@@ -1035,7 +1035,7 @@ where
}
Err(error) => {
let jfyi = error.split().map_err(incoming::Error::from)?;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?jfyi,
"Decoding incoming request failed"
@@ -226,7 +226,7 @@ fn test_harness<T: Future<Output = TestHarness>>(
const TIMEOUT: Duration = Duration::from_millis(100);
async fn overseer_send(overseer: &mut VirtualOverseer, msg: CollatorProtocolMessage) {
tracing::trace!(?msg, "sending message");
gum::trace!(?msg, "sending message");
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
@@ -239,7 +239,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
.await
.expect(&format!("{:?} is more than enough to receive messages", TIMEOUT));
tracing::trace!(?msg, "received message");
gum::trace!(?msg, "received message");
msg
}
@@ -248,7 +248,7 @@ async fn overseer_recv_with_timeout(
overseer: &mut VirtualOverseer,
timeout: Duration,
) -> Option<AllMessages> {
tracing::trace!("waiting for message...");
gum::trace!("waiting for message...");
overseer.recv().timeout(timeout).await
}
@@ -56,7 +56,7 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(
match result.into_nested()? {
Ok(()) => Ok(()),
Err(jfyi) => {
tracing::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
gum::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
Ok(())
},
}
@@ -133,7 +133,7 @@ async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep)
where
Context: SubsystemContext,
{
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
rep = ?rep,
peer_id = %peer,
@@ -371,7 +371,7 @@ impl ActiveParas {
let (validators, groups, rotation_info, cores) = match (mv, mg, mc) {
(Some(v), Some((g, r)), Some(c)) => (v, g, r, c),
_ => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
"Failed to query runtime API for relay-parent",
@@ -393,7 +393,7 @@ impl ActiveParas {
cores.get(core_now.0 as usize).and_then(|c| c.para_id())
},
None => {
tracing::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
continue
},
@@ -411,7 +411,7 @@ impl ActiveParas {
let entry = self.current_assignments.entry(para_now).or_default();
*entry += 1;
if *entry == 1 {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
para_id = ?para_now,
@@ -435,7 +435,7 @@ impl ActiveParas {
*occupied.get_mut() -= 1;
if *occupied.get() == 0 {
occupied.remove_entry();
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
para_id = ?cur,
"Unassigned from a parachain",
@@ -533,7 +533,7 @@ impl CollationsPerRelayParent {
// If finished one does not match waiting_collation, then we already dequeued another fetch
// to replace it.
if self.waiting_collation != finished_one {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
waiting_collation = ?self.waiting_collation,
?finished_one,
@@ -728,7 +728,7 @@ async fn request_collation<Context>(
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
if !state.view.contains(&relay_parent) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = %peer_id,
para_id = %para_id,
@@ -739,7 +739,7 @@ async fn request_collation<Context>(
}
let pending_collation = PendingCollation::new(relay_parent, &para_id, &peer_id);
if state.requested_collations.contains_key(&pending_collation) {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
peer_id = %pending_collation.peer_id,
%pending_collation.para_id,
@@ -768,7 +768,7 @@ async fn request_collation<Context>(
.requested_collations
.insert(PendingCollation::new(relay_parent, &para_id, &peer_id), per_request);
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = %peer_id,
%para_id,
@@ -821,7 +821,7 @@ async fn process_incoming_peer_message<Context>(
}
if state.active_paras.is_current(&para_id) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = ?origin,
?collator_id,
@@ -831,7 +831,7 @@ async fn process_incoming_peer_message<Context>(
peer_data.set_collating(collator_id, para_id);
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = ?origin,
?collator_id,
@@ -840,7 +840,7 @@ async fn process_incoming_peer_message<Context>(
);
modify_reputation(ctx, origin.clone(), COST_UNNEEDED_COLLATOR).await;
tracing::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
disconnect_peer(ctx, origin).await;
}
},
@@ -850,7 +850,7 @@ async fn process_incoming_peer_message<Context>(
.get(&relay_parent)
.map(|s| s.child("advertise-collation"));
if !state.view.contains(&relay_parent) {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = ?origin,
?relay_parent,
@@ -871,7 +871,7 @@ async fn process_incoming_peer_message<Context>(
match peer_data.insert_advertisement(relay_parent, &state.view) {
Ok((id, para_id)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = ?origin,
%para_id,
@@ -897,7 +897,7 @@ async fn process_incoming_peer_message<Context>(
}
},
Err(error) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
peer_id = ?origin,
?relay_parent,
@@ -910,7 +910,7 @@ async fn process_incoming_peer_message<Context>(
}
},
CollationSeconded(_, _) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
peer_id = ?origin,
"Unexpected `CollationSeconded` message, decreasing reputation",
@@ -976,7 +976,7 @@ where
// declare.
if let Some(para_id) = peer_data.collating_para() {
if !state.active_paras.is_current(&para_id) {
tracing::trace!(target: LOG_TARGET, "Disconnecting peer on view change");
gum::trace!(target: LOG_TARGET, "Disconnecting peer on view change");
disconnect_peer(ctx, peer_id.clone()).await;
}
}
@@ -1040,14 +1040,14 @@ async fn process_msg<Context>(
match msg {
CollateOn(id) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
para_id = %id,
"CollateOn message is not expected on the validator side of the protocol",
);
},
DistributeCollation(_, _, _) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
"DistributeCollation message is not expected on the validator side of the protocol",
);
@@ -1057,7 +1057,7 @@ async fn process_msg<Context>(
},
NetworkBridgeUpdateV1(event) => {
if let Err(e) = handle_network_msg(ctx, state, keystore, event).await {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
err = ?e,
"Failed to handle incoming network message",
@@ -1075,7 +1075,7 @@ async fn process_msg<Context>(
collations.status = CollationStatus::Seconded;
}
} else {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
relay_parent = ?parent,
"Collation has been seconded, but the relay parent is deactivated",
@@ -1089,7 +1089,7 @@ async fn process_msg<Context>(
Some(candidate_receipt.commitments_hash) =>
entry.remove().0,
Entry::Occupied(_) => {
tracing::error!(
gum::error!(
target: LOG_TARGET,
relay_parent = ?parent,
candidate = ?candidate_receipt.hash(),
@@ -1150,7 +1150,7 @@ where
res = ctx.recv().fuse() => {
match res {
Ok(FromOverseer::Communication { msg }) => {
tracing::trace!(target: LOG_TARGET, msg = ?msg, "received a message");
gum::trace!(target: LOG_TARGET, msg = ?msg, "received a message");
process_msg(
&mut ctx,
&keystore,
@@ -1170,7 +1170,7 @@ where
}
res = state.collation_fetch_timeouts.select_next_some() => {
let (collator_id, relay_parent) = res;
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
?collator_id,
@@ -1233,7 +1233,7 @@ async fn dequeue_next_collation_and_fetch(
.get_mut(&relay_parent)
.and_then(|c| c.get_next_collation_to_fetch(Some(previous_fetch)))
{
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
?id,
@@ -1260,7 +1260,7 @@ async fn handle_collation_fetched_result<Context>(
let (candidate_receipt, pov) = match res {
Ok(res) => res,
Err(e) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
relay_parent = ?collation_event.1.relay_parent,
para_id = ?collation_event.1.para_id,
@@ -1277,7 +1277,7 @@ async fn handle_collation_fetched_result<Context>(
if let Some(collations) = state.collations_per_relay_parent.get_mut(&relay_parent) {
if let CollationStatus::Seconded = collations.status {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?relay_parent,
"Already seconded - no longer interested in collation fetch result."
@@ -1298,7 +1298,7 @@ async fn handle_collation_fetched_result<Context>(
entry.insert(collation_event);
} else {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?relay_parent,
candidate = ?candidate_receipt.hash(),
@@ -1320,7 +1320,7 @@ async fn disconnect_inactive_peers<Context>(
{
for (peer, peer_data) in peers {
if peer_data.is_inactive(&eviction_policy) {
tracing::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
disconnect_peer(ctx, peer.clone()).await;
}
}
@@ -1353,7 +1353,7 @@ async fn poll_collation_response(
per_req: &mut PerRequest,
) -> CollationFetchResult {
if never!(per_req.from_collator.is_terminated()) {
tracing::error!(
gum::error!(
target: LOG_TARGET,
"We remove pending responses once received, this should not happen."
);
@@ -1371,7 +1371,7 @@ async fn poll_collation_response(
let result = match response {
Err(RequestError::InvalidResponse(err)) => {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?pending_collation.relay_parent,
para_id = ?pending_collation.para_id,
@@ -1382,7 +1382,7 @@ async fn poll_collation_response(
CollationFetchResult::Error(Some(COST_CORRUPTED_MESSAGE))
},
Err(err) if err.is_timed_out() => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
hash = ?pending_collation.relay_parent,
para_id = ?pending_collation.para_id,
@@ -1394,7 +1394,7 @@ async fn poll_collation_response(
CollationFetchResult::Error(None)
},
Err(RequestError::NetworkError(err)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
hash = ?pending_collation.relay_parent,
para_id = ?pending_collation.para_id,
@@ -1409,7 +1409,7 @@ async fn poll_collation_response(
CollationFetchResult::Error(Some(COST_NETWORK_ERROR))
},
Err(RequestError::Canceled(err)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
hash = ?pending_collation.relay_parent,
para_id = ?pending_collation.para_id,
@@ -1422,7 +1422,7 @@ async fn poll_collation_response(
Ok(CollationFetchingResponse::Collation(receipt, _))
if receipt.descriptor().para_id != pending_collation.para_id =>
{
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
expected_para_id = ?pending_collation.para_id,
got_para_id = ?receipt.descriptor().para_id,
@@ -1433,7 +1433,7 @@ async fn poll_collation_response(
CollationFetchResult::Error(Some(COST_WRONG_PARA))
},
Ok(CollationFetchingResponse::Collation(receipt, pov)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
para_id = %pending_collation.para_id,
hash = ?pending_collation.relay_parent,
@@ -1447,7 +1447,7 @@ async fn poll_collation_response(
let result = tx.send((receipt, pov));
if let Err(_) = result {
tracing::warn!(
gum::warn!(
target: LOG_TARGET,
hash = ?pending_collation.relay_parent,
para_id = ?pending_collation.para_id,
@@ -166,7 +166,7 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(TestHarne
const TIMEOUT: Duration = Duration::from_millis(200);
async fn overseer_send(overseer: &mut VirtualOverseer, msg: CollatorProtocolMessage) {
tracing::trace!("Sending message:\n{:?}", &msg);
gum::trace!("Sending message:\n{:?}", &msg);
overseer
.send(FromOverseer::Communication { msg })
.timeout(TIMEOUT)
@@ -179,7 +179,7 @@ async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
.await
.expect(&format!("{:?} is enough to receive messages.", TIMEOUT));
tracing::trace!("Received message:\n{:?}", &msg);
gum::trace!("Received message:\n{:?}", &msg);
msg
}
@@ -188,7 +188,7 @@ async fn overseer_recv_with_timeout(
overseer: &mut VirtualOverseer,
timeout: Duration,
) -> Option<AllMessages> {
tracing::trace!("Waiting for message...");
gum::trace!("Waiting for message...");
overseer.recv().timeout(timeout).await
}
@@ -350,7 +350,7 @@ fn act_on_advertisement() {
let TestHarness { mut virtual_overseer } = test_harness;
let pair = CollatorPair::generate().0;
tracing::trace!("activating");
gum::trace!("activating");
overseer_send(
&mut virtual_overseer,
@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
futures = "0.3.21"
tracing = "0.1.32"
gum = { package = "tracing-gum", path = "../../gum" }
derive_more = "0.99.17"
parity-scale-codec = { version = "3.1.0", features = ["std"] }
polkadot-primitives = { path = "../../../primitives" }
@@ -64,7 +64,7 @@ pub type FatalResult<T> = std::result::Result<T, FatalError>;
pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), FatalError> {
match result.into_nested()? {
Err(jfyi) => {
tracing::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
gum::warn!(target: LOG_TARGET, error = ?jfyi, ctx);
Ok(())
},
Ok(()) => Ok(()),
@@ -62,11 +62,11 @@ pub type JfyiErrorResult<T> = std::result::Result<T, JfyiError>;
pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
match result.into_nested()? {
Err(error @ JfyiError::ImportCanceled(_)) => {
tracing::debug!(target: LOG_TARGET, error = ?error);
gum::debug!(target: LOG_TARGET, error = ?error);
Ok(())
},
Err(JfyiError::NotAValidator(peer)) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
?peer,
"Dropping message from peer (unknown authority id)"
@@ -74,7 +74,7 @@ pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
Ok(())
},
Err(error) => {
tracing::warn!(target: LOG_TARGET, error = ?error);
gum::warn!(target: LOG_TARGET, error = ?error);
Ok(())
},
Ok(()) => Ok(()),
@@ -168,7 +168,7 @@ where
match log_error(self.run_inner().await) {
Ok(()) => {},
Err(fatal) => {
tracing::debug!(
gum::debug!(
target: LOG_TARGET,
error = ?fatal,
"Shutting down"
@@ -212,7 +212,7 @@ where
// Immediately drop requests from peers that already have requests in flight or have
// been banned recently (flood protection):
if self.pending_imports.peer_is_pending(&peer) || self.banned_peers.contains(&peer) {
tracing::trace!(
gum::trace!(
target: LOG_TARGET,
?peer,
"Dropping message from peer (banned/pending import)"

Some files were not shown because too many files have changed in this diff Show More