mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-07 18:58:01 +00:00
Add node name to the log lines (#7328)
* Initial commit Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * CLEANUP Forked at:601e2fa139Parent branch: origin/master * Add notes to original source code * CLEANUP Forked at:601e2fa139Parent branch: origin/master * CLEANUP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * CLEANUP Forked at:601e2fa139Parent branch: origin/master * WIP Forked at:601e2fa139Parent branch: origin/master * Some doc * Test with trybuild * Revert "Test with trybuild" (issue with trybuild atm) This reverts commit 9055ec2206808ba3ddce6e3d87eb358907fa5e42. https://github.com/dtolnay/trybuild/issues/53 * Apply suggestions * Rename derive to proc-macro * Remove "prefix" feature from informant * Blocking task should use SpawnHandle::spawn_blocking * Improve doc as suggested * Fixes Forked at:601e2fa139Parent branch: origin/master * Apply suggestion * Update client/cli/proc-macro/src/lib.rs Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com> * More suggestions * CLEANUP Forked at:601e2fa139Parent branch: origin/master * Improve error message * CLEANUP Forked at:601e2fa139Parent branch: origin/master * Fix async issue * CLEANUP Forked at:601e2fa139Parent branch: origin/master * CLEANUP Forked at:601e2fa139Parent branch: origin/master * Add test * fix doc test * Update client/cli/src/logging.rs Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com> * Update client/basic-authorship/src/basic_authorship.rs Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com> * Update client/basic-authorship/src/basic_authorship.rs Co-authored-by: 
Bastian Köcher <bkchr@users.noreply.github.com> * Apply suggestions * Suggestions * Clarify doc * WIP Forked at:601e2fa139Parent branch: origin/master Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
This commit is contained in:
Generated
+22
@@ -6433,6 +6433,7 @@ dependencies = [
|
||||
"rand 0.7.3",
|
||||
"regex",
|
||||
"rpassword",
|
||||
"sc-cli-proc-macro",
|
||||
"sc-client-api",
|
||||
"sc-consensus-babe",
|
||||
"sc-consensus-epochs",
|
||||
@@ -6466,6 +6467,16 @@ dependencies = [
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sc-cli-proc-macro"
|
||||
version = "2.0.0"
|
||||
dependencies = [
|
||||
"proc-macro-crate",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sc-client-api"
|
||||
version = "2.0.0"
|
||||
@@ -7313,6 +7324,7 @@ dependencies = [
|
||||
"tempfile",
|
||||
"tokio 0.2.22",
|
||||
"tracing",
|
||||
"tracing-futures",
|
||||
"wasm-timer",
|
||||
]
|
||||
|
||||
@@ -9543,6 +9555,16 @@ dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-futures"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c"
|
||||
dependencies = [
|
||||
"pin-project",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-log"
|
||||
version = "0.1.1"
|
||||
|
||||
@@ -21,6 +21,7 @@ members = [
|
||||
"client/chain-spec",
|
||||
"client/chain-spec/derive",
|
||||
"client/cli",
|
||||
"client/cli/proc-macro",
|
||||
"client/consensus/aura",
|
||||
"client/consensus/babe",
|
||||
"client/consensus/babe/rpc",
|
||||
|
||||
@@ -146,6 +146,7 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
|
||||
|
||||
if role.is_authority() {
|
||||
let proposer = sc_basic_authorship::ProposerFactory::new(
|
||||
task_manager.spawn_handle(),
|
||||
client.clone(),
|
||||
transaction_pool,
|
||||
prometheus_registry.as_ref(),
|
||||
|
||||
@@ -147,6 +147,7 @@ impl core::Benchmark for ConstructionBenchmark {
|
||||
}
|
||||
|
||||
let mut proposer_factory = sc_basic_authorship::ProposerFactory::new(
|
||||
context.spawn_handle.clone(),
|
||||
context.client.clone(),
|
||||
self.transactions.clone().into(),
|
||||
None,
|
||||
@@ -293,4 +294,4 @@ impl sp_transaction_pool::TransactionPool for Transactions {
|
||||
fn ready_transaction(&self, _hash: &TxHash<Self>) -> Option<Arc<Self::InPoolTransaction>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -231,6 +231,7 @@ pub fn new_full_base(
|
||||
|
||||
if let sc_service::config::Role::Authority { .. } = &role {
|
||||
let proposer = sc_basic_authorship::ProposerFactory::new(
|
||||
task_manager.spawn_handle(),
|
||||
client.clone(),
|
||||
transaction_pool.clone(),
|
||||
prometheus_registry.as_ref(),
|
||||
@@ -557,6 +558,7 @@ mod tests {
|
||||
);
|
||||
|
||||
let mut proposer_factory = sc_basic_authorship::ProposerFactory::new(
|
||||
service.spawn_handle(),
|
||||
service.client(),
|
||||
service.transaction_pool(),
|
||||
None,
|
||||
|
||||
@@ -373,7 +373,12 @@ impl BenchDb {
|
||||
"Created seed db at {}",
|
||||
dir.path().to_string_lossy(),
|
||||
);
|
||||
let (_client, _backend) = Self::bench_client(database_type, dir.path(), Profile::Native, &keyring);
|
||||
let (_client, _backend, _task_executor) = Self::bench_client(
|
||||
database_type,
|
||||
dir.path(),
|
||||
Profile::Native,
|
||||
&keyring,
|
||||
);
|
||||
let directory_guard = Guard(dir);
|
||||
|
||||
BenchDb { keyring, directory_guard, database_type }
|
||||
@@ -401,13 +406,14 @@ impl BenchDb {
|
||||
dir: &std::path::Path,
|
||||
profile: Profile,
|
||||
keyring: &BenchKeyring,
|
||||
) -> (Client, std::sync::Arc<Backend>) {
|
||||
) -> (Client, std::sync::Arc<Backend>, TaskExecutor) {
|
||||
let db_config = sc_client_db::DatabaseSettings {
|
||||
state_cache_size: 16*1024*1024,
|
||||
state_cache_child_ratio: Some((0, 100)),
|
||||
pruning: PruningMode::ArchiveAll,
|
||||
source: database_type.into_settings(dir.into()),
|
||||
};
|
||||
let task_executor = TaskExecutor::new();
|
||||
|
||||
let (client, backend) = sc_service::new_client(
|
||||
db_config,
|
||||
@@ -416,12 +422,12 @@ impl BenchDb {
|
||||
None,
|
||||
None,
|
||||
ExecutionExtensions::new(profile.into_execution_strategies(), None),
|
||||
Box::new(TaskExecutor::new()),
|
||||
Box::new(task_executor.clone()),
|
||||
None,
|
||||
Default::default(),
|
||||
).expect("Should not fail");
|
||||
|
||||
(client, backend)
|
||||
(client, backend, task_executor)
|
||||
}
|
||||
|
||||
/// Generate list of required inherents.
|
||||
@@ -450,7 +456,7 @@ impl BenchDb {
|
||||
|
||||
/// Get cliet for this database operations.
|
||||
pub fn client(&mut self) -> Client {
|
||||
let (client, _backend) = Self::bench_client(
|
||||
let (client, _backend, _task_executor) = Self::bench_client(
|
||||
self.database_type,
|
||||
self.directory_guard.path(),
|
||||
Profile::Wasm,
|
||||
@@ -504,7 +510,7 @@ impl BenchDb {
|
||||
/// Clone this database and create context for testing/benchmarking.
|
||||
pub fn create_context(&self, profile: Profile) -> BenchContext {
|
||||
let BenchDb { directory_guard, keyring, database_type } = self.clone();
|
||||
let (client, backend) = Self::bench_client(
|
||||
let (client, backend, task_executor) = Self::bench_client(
|
||||
database_type,
|
||||
directory_guard.path(),
|
||||
profile,
|
||||
@@ -515,6 +521,7 @@ impl BenchDb {
|
||||
client: Arc::new(client),
|
||||
db_guard: directory_guard,
|
||||
backend,
|
||||
spawn_handle: Box::new(task_executor),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -649,6 +656,8 @@ pub struct BenchContext {
|
||||
pub client: Arc<Client>,
|
||||
/// Node backend.
|
||||
pub backend: Arc<Backend>,
|
||||
/// Spawn handle.
|
||||
pub spawn_handle: Box<dyn SpawnNamed>,
|
||||
|
||||
db_guard: Guard,
|
||||
}
|
||||
|
||||
@@ -20,10 +20,11 @@
|
||||
|
||||
// FIXME #1021 move this into sp-consensus
|
||||
|
||||
use std::{time, sync::Arc};
|
||||
use std::{pin::Pin, time, sync::Arc};
|
||||
use sc_client_api::backend;
|
||||
use codec::Decode;
|
||||
use sp_consensus::{evaluation, Proposal, RecordProof};
|
||||
use sp_core::traits::SpawnNamed;
|
||||
use sp_inherents::InherentData;
|
||||
use log::{error, info, debug, trace, warn};
|
||||
use sp_runtime::{
|
||||
@@ -34,7 +35,7 @@ use sp_transaction_pool::{TransactionPool, InPoolTransaction};
|
||||
use sc_telemetry::{telemetry, CONSENSUS_INFO};
|
||||
use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider};
|
||||
use sp_api::{ProvideRuntimeApi, ApiExt};
|
||||
use futures::{executor, future, future::Either};
|
||||
use futures::{future, future::{Future, FutureExt}, channel::oneshot, select};
|
||||
use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed};
|
||||
use std::marker::PhantomData;
|
||||
|
||||
@@ -43,6 +44,7 @@ use sc_proposer_metrics::MetricsLink as PrometheusMetrics;
|
||||
|
||||
/// Proposer factory.
|
||||
pub struct ProposerFactory<A, B, C> {
|
||||
spawn_handle: Box<dyn SpawnNamed>,
|
||||
/// The client instance.
|
||||
client: Arc<C>,
|
||||
/// The transaction pool.
|
||||
@@ -55,11 +57,13 @@ pub struct ProposerFactory<A, B, C> {
|
||||
|
||||
impl<A, B, C> ProposerFactory<A, B, C> {
|
||||
pub fn new(
|
||||
spawn_handle: impl SpawnNamed + 'static,
|
||||
client: Arc<C>,
|
||||
transaction_pool: Arc<A>,
|
||||
prometheus: Option<&PrometheusRegistry>,
|
||||
) -> Self {
|
||||
ProposerFactory {
|
||||
spawn_handle: Box::new(spawn_handle),
|
||||
client,
|
||||
transaction_pool,
|
||||
metrics: PrometheusMetrics::new(prometheus),
|
||||
@@ -90,6 +94,7 @@ impl<B, Block, C, A> ProposerFactory<A, B, C>
|
||||
info!("🙌 Starting consensus session on top of parent {:?}", parent_hash);
|
||||
|
||||
let proposer = Proposer {
|
||||
spawn_handle: self.spawn_handle.clone(),
|
||||
client: self.client.clone(),
|
||||
parent_hash,
|
||||
parent_id: id,
|
||||
@@ -129,6 +134,7 @@ impl<A, B, Block, C> sp_consensus::Environment<Block> for
|
||||
|
||||
/// The proposer logic.
|
||||
pub struct Proposer<B, Block: BlockT, C, A: TransactionPool> {
|
||||
spawn_handle: Box<dyn SpawnNamed>,
|
||||
client: Arc<C>,
|
||||
parent_hash: <Block as BlockT>::Hash,
|
||||
parent_id: BlockId<Block>,
|
||||
@@ -151,9 +157,9 @@ impl<A, B, Block, C> sp_consensus::Proposer<Block> for
|
||||
+ BlockBuilderApi<Block, Error = sp_blockchain::Error>,
|
||||
{
|
||||
type Transaction = backend::TransactionFor<B, Block>;
|
||||
type Proposal = tokio_executor::blocking::Blocking<
|
||||
Result<Proposal<Block, Self::Transaction>, Self::Error>
|
||||
>;
|
||||
type Proposal = Pin<Box<dyn Future<
|
||||
Output = Result<Proposal<Block, Self::Transaction>, Self::Error>
|
||||
> + Send>>;
|
||||
type Error = sp_blockchain::Error;
|
||||
|
||||
fn propose(
|
||||
@@ -163,11 +169,29 @@ impl<A, B, Block, C> sp_consensus::Proposer<Block> for
|
||||
max_duration: time::Duration,
|
||||
record_proof: RecordProof,
|
||||
) -> Self::Proposal {
|
||||
tokio_executor::blocking::run(move || {
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let spawn_handle = self.spawn_handle.clone();
|
||||
|
||||
spawn_handle.spawn_blocking("basic-authorship-proposer", Box::pin(async move {
|
||||
// leave some time for evaluation and block finalization (33%)
|
||||
let deadline = (self.now)() + max_duration - max_duration / 3;
|
||||
self.propose_with(inherent_data, inherent_digests, deadline, record_proof)
|
||||
})
|
||||
let res = self.propose_with(
|
||||
inherent_data,
|
||||
inherent_digests,
|
||||
deadline,
|
||||
record_proof,
|
||||
).await;
|
||||
if tx.send(res).is_err() {
|
||||
trace!("Could not send block production result to proposer!");
|
||||
}
|
||||
}));
|
||||
|
||||
async move {
|
||||
match rx.await {
|
||||
Ok(x) => x,
|
||||
Err(err) => Err(sp_blockchain::Error::Msg(err.to_string()))
|
||||
}
|
||||
}.boxed()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,7 +205,7 @@ impl<A, B, Block, C> Proposer<B, Block, C, A>
|
||||
C::Api: ApiExt<Block, StateBackend = backend::StateBackendFor<B, Block>>
|
||||
+ BlockBuilderApi<Block, Error = sp_blockchain::Error>,
|
||||
{
|
||||
fn propose_with(
|
||||
async fn propose_with(
|
||||
self,
|
||||
inherent_data: InherentData,
|
||||
inherent_digests: DigestFor<Block>,
|
||||
@@ -218,18 +242,20 @@ impl<A, B, Block, C> Proposer<B, Block, C, A>
|
||||
let block_timer = time::Instant::now();
|
||||
let mut skipped = 0;
|
||||
let mut unqueue_invalid = Vec::new();
|
||||
let pending_iterator = match executor::block_on(future::select(
|
||||
self.transaction_pool.ready_at(self.parent_number),
|
||||
futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8),
|
||||
)) {
|
||||
Either::Left((iterator, _)) => iterator,
|
||||
Either::Right(_) => {
|
||||
|
||||
let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse();
|
||||
let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse();
|
||||
|
||||
let pending_iterator = select! {
|
||||
res = t1 => res,
|
||||
_ = t2 => {
|
||||
log::warn!(
|
||||
"Timeout fired waiting for transaction pool at block #{}. Proceeding with production.",
|
||||
"Timeout fired waiting for transaction pool at block #{}. \
|
||||
Proceeding with production.",
|
||||
self.parent_number,
|
||||
);
|
||||
self.transaction_pool.ready()
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
debug!("Attempting to push transactions from the pool.");
|
||||
@@ -360,7 +386,7 @@ mod tests {
|
||||
let txpool = BasicPool::new_full(
|
||||
Default::default(),
|
||||
None,
|
||||
spawner,
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
);
|
||||
|
||||
@@ -376,7 +402,12 @@ mod tests {
|
||||
))
|
||||
);
|
||||
|
||||
let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None);
|
||||
let mut proposer_factory = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
txpool.clone(),
|
||||
None,
|
||||
);
|
||||
|
||||
let cell = Mutex::new((false, time::Instant::now()));
|
||||
let proposer = proposer_factory.init_with_now(
|
||||
@@ -413,11 +444,16 @@ mod tests {
|
||||
let txpool = BasicPool::new_full(
|
||||
Default::default(),
|
||||
None,
|
||||
spawner,
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
);
|
||||
|
||||
let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None);
|
||||
let mut proposer_factory = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
txpool.clone(),
|
||||
None,
|
||||
);
|
||||
|
||||
let cell = Mutex::new((false, time::Instant::now()));
|
||||
let proposer = proposer_factory.init_with_now(
|
||||
@@ -448,7 +484,7 @@ mod tests {
|
||||
let txpool = BasicPool::new_full(
|
||||
Default::default(),
|
||||
None,
|
||||
spawner,
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
);
|
||||
|
||||
@@ -467,7 +503,12 @@ mod tests {
|
||||
))
|
||||
);
|
||||
|
||||
let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None);
|
||||
let mut proposer_factory = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
txpool.clone(),
|
||||
None,
|
||||
);
|
||||
|
||||
let proposer = proposer_factory.init_with_now(
|
||||
&client.header(&block_id).unwrap().unwrap(),
|
||||
@@ -510,7 +551,7 @@ mod tests {
|
||||
let txpool = BasicPool::new_full(
|
||||
Default::default(),
|
||||
None,
|
||||
spawner,
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
);
|
||||
|
||||
@@ -536,7 +577,12 @@ mod tests {
|
||||
])
|
||||
).unwrap();
|
||||
|
||||
let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None);
|
||||
let mut proposer_factory = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
txpool.clone(),
|
||||
None,
|
||||
);
|
||||
let mut propose_block = |
|
||||
client: &TestClient,
|
||||
number,
|
||||
|
||||
@@ -35,11 +35,16 @@
|
||||
//! # let txpool = BasicPool::new_full(
|
||||
//! # Default::default(),
|
||||
//! # None,
|
||||
//! # spawner,
|
||||
//! # spawner.clone(),
|
||||
//! # client.clone(),
|
||||
//! # );
|
||||
//! // The first step is to create a `ProposerFactory`.
|
||||
//! let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), None);
|
||||
//! let mut proposer_factory = ProposerFactory::new(
|
||||
//! spawner,
|
||||
//! client.clone(),
|
||||
//! txpool.clone(),
|
||||
//! None,
|
||||
//! );
|
||||
//!
|
||||
//! // From this factory, we create a `Proposer`.
|
||||
//! let proposer = proposer_factory.init(
|
||||
|
||||
@@ -57,6 +57,7 @@ serde = "1.0.111"
|
||||
tracing = "0.1.10"
|
||||
tracing-log = "0.1.1"
|
||||
tracing-subscriber = "0.2.10"
|
||||
sc-cli-proc-macro = { version = "2.0.0", path = "./proc-macro" }
|
||||
|
||||
[target.'cfg(not(target_os = "unknown"))'.dependencies]
|
||||
rpassword = "4.0.1"
|
||||
|
||||
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "sc-cli-proc-macro"
|
||||
version = "2.0.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
edition = "2018"
|
||||
license = "Apache-2.0"
|
||||
homepage = "https://substrate.dev"
|
||||
repository = "https://github.com/paritytech/substrate/"
|
||||
description = "Helper macros for Substrate's client CLI"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
proc-macro-crate = "0.1.4"
|
||||
proc-macro2 = "1.0.6"
|
||||
quote = { version = "1.0.3", features = ["proc-macro"] }
|
||||
syn = { version = "1.0.7", features = ["proc-macro", "full", "extra-traits", "parsing"] }
|
||||
@@ -0,0 +1,155 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use proc_macro::TokenStream;
|
||||
use proc_macro2::Span;
|
||||
use proc_macro_crate::crate_name;
|
||||
use quote::quote;
|
||||
use syn::{Error, Expr, Ident, ItemFn};
|
||||
|
||||
/// Add a log prefix to the function.
|
||||
///
|
||||
/// This prefixes all the log lines with `[<name>]` (after the timestamp). It works by making a
|
||||
/// tracing's span that is propagated to all the child calls and child tasks (futures) if they are
|
||||
/// spawned properly with the `SpawnHandle` (see `TaskManager` in sc-cli) or if the futures use
|
||||
/// `.in_current_span()` (see tracing-futures).
|
||||
///
|
||||
/// See Tokio's [tracing documentation](https://docs.rs/tracing-core/) and
|
||||
/// [tracing-futures documentation](https://docs.rs/tracing-futures/) for more details.
|
||||
///
|
||||
/// # Implementation notes
|
||||
///
|
||||
/// If there are multiple spans with a log prefix, only the latest will be shown.
|
||||
///
|
||||
/// # Example with a literal
|
||||
///
|
||||
/// ```ignore
|
||||
/// Builds a new service for a light client.
|
||||
/// #[sc_cli::prefix_logs_with("light")]
|
||||
/// pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
|
||||
/// let (client, backend, keystore, mut task_manager, on_demand) =
|
||||
/// sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;
|
||||
///
|
||||
/// ...
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Will produce logs that look like this:
|
||||
///
|
||||
/// ```text
|
||||
/// 2020-10-16 08:03:14 Substrate Node
|
||||
/// 2020-10-16 08:03:14 ✌️ version 2.0.0-47f7d3f2e-x86_64-linux-gnu
|
||||
/// 2020-10-16 08:03:14 ❤️ by Anonymous, 2017-2020
|
||||
/// 2020-10-16 08:03:14 📋 Chain specification: Local Testnet
|
||||
/// 2020-10-16 08:03:14 🏷 Node name: nice-glove-1401
|
||||
/// 2020-10-16 08:03:14 👤 Role: LIGHT
|
||||
/// 2020-10-16 08:03:14 💾 Database: RocksDb at /tmp/substrate95w2Dk/chains/local_testnet/db
|
||||
/// 2020-10-16 08:03:14 ⛓ Native runtime: node-template-1 (node-template-1.tx1.au1)
|
||||
/// 2020-10-16 08:03:14 [light] 🔨 Initializing Genesis block/state (state: 0x121d…8e36, header-hash: 0x24ef…8ff6)
|
||||
/// 2020-10-16 08:03:14 [light] Loading GRANDPA authorities from genesis on what appears to be first startup.
|
||||
/// 2020-10-16 08:03:15 [light] ⏱ Loaded block-time = 6000 milliseconds from genesis on first-launch
|
||||
/// 2020-10-16 08:03:15 [light] Using default protocol ID "sup" because none is configured in the chain specs
|
||||
/// 2020-10-16 08:03:15 [light] 🏷 Local node identity is: 12D3KooWHX4rkWT6a6N55Km7ZnvenGdShSKPkzJ3yj9DU5nqDtWR
|
||||
/// 2020-10-16 08:03:15 [light] 📦 Highest known block at #0
|
||||
/// 2020-10-16 08:03:15 [light] 〽️ Prometheus server started at 127.0.0.1:9615
|
||||
/// 2020-10-16 08:03:15 [light] Listening for new connections on 127.0.0.1:9944.
|
||||
/// ```
|
||||
///
|
||||
/// # Example using the actual node name
|
||||
///
|
||||
/// ```ignore
|
||||
/// Builds a new service for a light client.
|
||||
/// #[sc_cli::prefix_logs_with(config.network.node_name.as_str())]
|
||||
/// pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
|
||||
/// let (client, backend, keystore, mut task_manager, on_demand) =
|
||||
/// sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;
|
||||
///
|
||||
/// ...
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Will produce logs that look like this:
|
||||
///
|
||||
/// ```text
|
||||
/// 2020-10-16 08:12:57 Substrate Node
|
||||
/// 2020-10-16 08:12:57 ✌️ version 2.0.0-efb9b822a-x86_64-linux-gnu
|
||||
/// 2020-10-16 08:12:57 ❤️ by Anonymous, 2017-2020
|
||||
/// 2020-10-16 08:12:57 📋 Chain specification: Local Testnet
|
||||
/// 2020-10-16 08:12:57 🏷 Node name: open-harbor-1619
|
||||
/// 2020-10-16 08:12:57 👤 Role: LIGHT
|
||||
/// 2020-10-16 08:12:57 💾 Database: RocksDb at /tmp/substrate9T9Mtb/chains/local_testnet/db
|
||||
/// 2020-10-16 08:12:57 ⛓ Native runtime: node-template-1 (node-template-1.tx1.au1)
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] 🔨 Initializing Genesis block/state (state: 0x121d…8e36, header-hash: 0x24ef…8ff6)
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] Loading GRANDPA authorities from genesis on what appears to be first startup.
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] ⏱ Loaded block-time = 6000 milliseconds from genesis on first-launch
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] Using default protocol ID "sup" because none is configured in the chain specs
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] 🏷 Local node identity is: 12D3KooWRzmYC8QTK1Pm8Cfvid3skTS4Hn54jc4AUtje8Rqbfgtp
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] 📦 Highest known block at #0
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] 〽️ Prometheus server started at 127.0.0.1:9615
|
||||
/// 2020-10-16 08:12:58 [open-harbor-1619] Listening for new connections on 127.0.0.1:9944.
|
||||
/// ```
|
||||
#[proc_macro_attribute]
|
||||
pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream {
|
||||
let item_fn = syn::parse_macro_input!(item as ItemFn);
|
||||
|
||||
if arg.is_empty() {
|
||||
return Error::new(
|
||||
Span::call_site(),
|
||||
"missing argument: name of the node. Example: sc_cli::prefix_logs_with(<expr>)",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
|
||||
let name = syn::parse_macro_input!(arg as Expr);
|
||||
|
||||
let crate_name = if std::env::var("CARGO_PKG_NAME")
|
||||
.expect("cargo env var always there when compiling; qed")
|
||||
== "sc-cli"
|
||||
{
|
||||
Ident::new("sc_cli", Span::call_site().into())
|
||||
} else {
|
||||
let crate_name = match crate_name("sc-cli") {
|
||||
Ok(x) => x,
|
||||
Err(err) => return Error::new(Span::call_site(), err).to_compile_error().into(),
|
||||
};
|
||||
|
||||
Ident::new(&crate_name, Span::call_site().into())
|
||||
};
|
||||
|
||||
let ItemFn {
|
||||
attrs,
|
||||
vis,
|
||||
sig,
|
||||
block,
|
||||
} = item_fn;
|
||||
|
||||
(quote! {
|
||||
#(#attrs)*
|
||||
#vis #sig {
|
||||
let span = #crate_name::tracing::info_span!(
|
||||
#crate_name::PREFIX_LOG_SPAN,
|
||||
name = #name,
|
||||
);
|
||||
let _enter = span.enter();
|
||||
|
||||
#block
|
||||
}
|
||||
})
|
||||
.into()
|
||||
}
|
||||
@@ -25,6 +25,7 @@ pub mod arg_enums;
|
||||
mod commands;
|
||||
mod config;
|
||||
mod error;
|
||||
mod logging;
|
||||
mod params;
|
||||
mod runner;
|
||||
|
||||
@@ -34,8 +35,9 @@ pub use config::*;
|
||||
pub use error::*;
|
||||
pub use params::*;
|
||||
pub use runner::*;
|
||||
use sc_service::{Configuration, TaskExecutor};
|
||||
pub use sc_cli_proc_macro::*;
|
||||
pub use sc_service::{ChainSpec, Role};
|
||||
use sc_service::{Configuration, TaskExecutor};
|
||||
pub use sp_version::RuntimeVersion;
|
||||
use std::io::Write;
|
||||
pub use structopt;
|
||||
@@ -43,10 +45,14 @@ use structopt::{
|
||||
clap::{self, AppSettings},
|
||||
StructOpt,
|
||||
};
|
||||
#[doc(hidden)]
|
||||
pub use tracing;
|
||||
use tracing_subscriber::{
|
||||
filter::Directive, fmt::time::ChronoLocal, layer::SubscriberExt, FmtSubscriber, Layer,
|
||||
};
|
||||
|
||||
pub use logging::PREFIX_LOG_SPAN;
|
||||
|
||||
/// Substrate client CLI
|
||||
///
|
||||
/// This trait needs to be defined on the root structopt of the application. It will provide the
|
||||
@@ -310,13 +316,15 @@ pub fn init_logger(
|
||||
|
||||
let subscriber = FmtSubscriber::builder()
|
||||
.with_env_filter(env_filter)
|
||||
.with_ansi(enable_color)
|
||||
.with_target(!simple)
|
||||
.with_level(!simple)
|
||||
.with_thread_names(!simple)
|
||||
.with_timer(timer)
|
||||
.with_writer(std::io::stderr)
|
||||
.finish();
|
||||
.event_format(logging::EventFormat {
|
||||
timer,
|
||||
ansi: enable_color,
|
||||
display_target: !simple,
|
||||
display_level: !simple,
|
||||
display_thread_name: !simple,
|
||||
})
|
||||
.finish().with(logging::NodeNameLayer);
|
||||
|
||||
if let Some(profiling_targets) = profiling_targets {
|
||||
let profiling = sc_tracing::ProfilingLayer::new(tracing_receiver, &profiling_targets);
|
||||
@@ -339,8 +347,9 @@ pub fn init_logger(
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate as sc_cli;
|
||||
use std::{env, process::Command};
|
||||
use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata};
|
||||
use std::{process::Command, env};
|
||||
|
||||
#[test]
|
||||
fn test_logger_filters() {
|
||||
@@ -409,4 +418,36 @@ mod tests {
|
||||
log::info!(target: "test-target", "{}", EXPECTED_LOG_MESSAGE);
|
||||
}
|
||||
}
|
||||
|
||||
const EXPECTED_NODE_NAME: &'static str = "THE_NODE";
|
||||
|
||||
#[test]
|
||||
fn prefix_in_log_lines() {
|
||||
let executable = env::current_exe().unwrap();
|
||||
let output = Command::new(executable)
|
||||
.env("ENABLE_LOGGING", "1")
|
||||
.args(&["--nocapture", "prefix_in_log_lines_entrypoint"])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
let output = String::from_utf8(output.stderr).unwrap();
|
||||
assert!(output.contains(&format!(" [{}] ", EXPECTED_NODE_NAME)));
|
||||
}
|
||||
|
||||
/// This is no actual test, it will be used by the `prefix_in_log_lines` test.
|
||||
/// The given test will call the test executable to only execute this test that
|
||||
/// will only print a log line prefixed by the node name `EXPECTED_NODE_NAME`.
|
||||
#[test]
|
||||
fn prefix_in_log_lines_entrypoint() {
|
||||
if env::var("ENABLE_LOGGING").is_ok() {
|
||||
let test_pattern = "test-target=info";
|
||||
init_logger(&test_pattern, Default::default(), Default::default()).unwrap();
|
||||
prefix_in_log_lines_process();
|
||||
}
|
||||
}
|
||||
|
||||
#[crate::prefix_logs_with(EXPECTED_NODE_NAME)]
|
||||
fn prefix_in_log_lines_process() {
|
||||
log::info!("Hello World!");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,337 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) 2020 Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use ansi_term::{Colour, Style};
|
||||
use std::{fmt::{self, Write as _}, iter};
|
||||
use tracing::{
|
||||
span::{self, Attributes},
|
||||
Event, Id, Level, Subscriber,
|
||||
};
|
||||
use tracing_log::NormalizeEvent;
|
||||
use tracing_subscriber::{
|
||||
fmt::{
|
||||
time::{FormatTime, SystemTime},
|
||||
FmtContext, FormatEvent, FormatFields,
|
||||
},
|
||||
layer::Context,
|
||||
registry::LookupSpan,
|
||||
Layer,
|
||||
};
|
||||
|
||||
/// Span name used for the logging prefix. See macro `sc_cli::prefix_logs_with!`
|
||||
pub const PREFIX_LOG_SPAN: &str = "substrate-log-prefix";
|
||||
|
||||
pub(crate) struct EventFormat<T = SystemTime> {
|
||||
pub(crate) timer: T,
|
||||
pub(crate) ansi: bool,
|
||||
pub(crate) display_target: bool,
|
||||
pub(crate) display_level: bool,
|
||||
pub(crate) display_thread_name: bool,
|
||||
}
|
||||
|
||||
// NOTE: the following code took inspiration from tracing-subscriber
//
// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L449
impl<S, N, T> FormatEvent<S, N> for EventFormat<T>
where
	S: Subscriber + for<'a> LookupSpan<'a>,
	N: for<'a> FormatFields<'a> + 'static,
	T: FormatTime,
{
	// Writes one event as a log line:
	// `<time> <level> <thread> <node name> <span context> <target>:<fields>\n`
	// where level / thread / target are controlled by the `display_*` flags.
	fn format_event(
		&self,
		ctx: &FmtContext<S, N>,
		writer: &mut dyn fmt::Write,
		event: &Event,
	) -> fmt::Result {
		// Prefer the normalized metadata when present; fall back to the
		// event's raw metadata.
		let normalized_meta = event.normalized_metadata();
		let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata());
		// Timestamp first (dimmed when ANSI is enabled — see `time::write`).
		time::write(&self.timer, writer, self.ansi)?;

		if self.display_level {
			let fmt_level = { FmtLevel::new(meta.level(), self.ansi) };
			write!(writer, "{} ", fmt_level)?;
		}

		if self.display_thread_name {
			let current_thread = std::thread::current();
			match current_thread.name() {
				Some(name) => {
					write!(writer, "{} ", FmtThreadName::new(name))?;
				}
				// fall-back to thread id when name is absent and ids are not enabled
				None => {
					write!(writer, "{:0>2?} ", current_thread.id())?;
				}
			}
		}

		// Custom code to display node name: walk from the current span up
		// through its parents and print the first `NodeName` extension found
		// (it is stored there by `NodeNameLayer::new_span`).
		if let Some(span) = ctx.lookup_current() {
			let parents = span.parents();
			for span in std::iter::once(span).chain(parents) {
				let exts = span.extensions();
				if let Some(node_name) = exts.get::<NodeName>() {
					write!(writer, "{}", node_name.as_str())?;
					break;
				}
			}
		}

		// Span context (`span1:span2: `), bold when ANSI is enabled.
		let fmt_ctx = { FmtCtx::new(&ctx, event.parent(), self.ansi) };
		write!(writer, "{}", fmt_ctx)?;
		if self.display_target {
			write!(writer, "{}:", meta.target())?;
		}
		ctx.format_fields(writer, event)?;
		writeln!(writer)
	}
}
|
||||
|
||||
/// A `tracing` layer that extracts the node name from `PREFIX_LOG_SPAN`
/// spans and stores it on the span (as a `NodeName` extension) for the
/// log formatter to print.
pub(crate) struct NodeNameLayer;
|
||||
|
||||
impl<S> Layer<S> for NodeNameLayer
|
||||
where
|
||||
S: Subscriber + for<'a> LookupSpan<'a>,
|
||||
{
|
||||
fn new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
|
||||
let span = ctx
|
||||
.span(id)
|
||||
.expect("new_span has been called for this span; qed");
|
||||
|
||||
if span.name() != PREFIX_LOG_SPAN {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut extensions = span.extensions_mut();
|
||||
|
||||
if extensions.get_mut::<NodeName>().is_none() {
|
||||
let mut s = String::new();
|
||||
let mut v = NodeNameVisitor(&mut s);
|
||||
attrs.record(&mut v);
|
||||
|
||||
if !s.is_empty() {
|
||||
let fmt_fields = NodeName(s);
|
||||
extensions.insert(fmt_fields);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Field visitor that writes the value of the span's `name` field into the
/// wrapped writer, formatted as a `[name] ` prefix; all other fields are
/// ignored.
struct NodeNameVisitor<'a, W: std::fmt::Write>(&'a mut W);
|
||||
|
||||
/// Generates one `tracing::field::Visit` method that writes the field's
/// value (with `$format`) when the field is named `name`, and ignores every
/// other field.
macro_rules! write_node_name {
	($method:ident, $type:ty, $format:expr) => {
		fn $method(&mut self, field: &tracing::field::Field, value: $type) {
			if field.name() == "name" {
				// `Visit` methods cannot return an error, hence the expect.
				write!(self.0, $format, value).expect("no way to return the err; qed");
			}
		}
	};
}
|
||||
|
||||
impl<'a, W: std::fmt::Write> tracing::field::Visit for NodeNameVisitor<'a, W> {
	// Capture the `name` field whichever primitive type it was recorded
	// with; every variant wraps the value as `[value] `.
	write_node_name!(record_debug, &dyn std::fmt::Debug, "[{:?}] ");
	write_node_name!(record_str, &str, "[{}] ");
	write_node_name!(record_i64, i64, "[{}] ");
	write_node_name!(record_u64, u64, "[{}] ");
	write_node_name!(record_bool, bool, "[{}] ");
}
|
||||
|
||||
/// Span extension holding the node name (already formatted as `[name] ` by
/// `NodeNameVisitor`).
#[derive(Debug)]
struct NodeName(String);

impl NodeName {
	/// Borrows the stored node-name string.
	fn as_str(&self) -> &str {
		&self.0
	}
}
|
||||
|
||||
/// Displays a log level as a fixed-width label, optionally colorized
/// (see the `Display` impl below).
struct FmtLevel<'a> {
	/// Level of the event being formatted.
	level: &'a Level,
	/// Whether to emit ANSI color escape codes.
	ansi: bool,
}

impl<'a> FmtLevel<'a> {
	pub(crate) fn new(level: &'a Level, ansi: bool) -> Self {
		Self { level, ansi }
	}
}
|
||||
|
||||
// Level labels, all padded to 5 characters so that log lines line up.
const TRACE_STR: &str = "TRACE";
const DEBUG_STR: &str = "DEBUG";
const INFO_STR: &str = " INFO";
const WARN_STR: &str = " WARN";
const ERROR_STR: &str = "ERROR";
|
||||
|
||||
impl<'a> fmt::Display for FmtLevel<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.ansi {
|
||||
match *self.level {
|
||||
Level::TRACE => write!(f, "{}", Colour::Purple.paint(TRACE_STR)),
|
||||
Level::DEBUG => write!(f, "{}", Colour::Blue.paint(DEBUG_STR)),
|
||||
Level::INFO => write!(f, "{}", Colour::Green.paint(INFO_STR)),
|
||||
Level::WARN => write!(f, "{}", Colour::Yellow.paint(WARN_STR)),
|
||||
Level::ERROR => write!(f, "{}", Colour::Red.paint(ERROR_STR)),
|
||||
}
|
||||
} else {
|
||||
match *self.level {
|
||||
Level::TRACE => f.pad(TRACE_STR),
|
||||
Level::DEBUG => f.pad(DEBUG_STR),
|
||||
Level::INFO => f.pad(INFO_STR),
|
||||
Level::WARN => f.pad(WARN_STR),
|
||||
Level::ERROR => f.pad(ERROR_STR),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Displays a thread name, right-aligned to the longest thread name seen so
/// far so that subsequent parts of the log lines stay visually aligned.
struct FmtThreadName<'a> {
	name: &'a str,
}

impl<'a> FmtThreadName<'a> {
	pub(crate) fn new(name: &'a str) -> Self {
		Self { name }
	}
}

// NOTE: the following code has been duplicated from tracing-subscriber
//
// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L845
impl<'a> fmt::Display for FmtThreadName<'a> {
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		use std::sync::atomic::{
			AtomicUsize,
			Ordering::{AcqRel, Acquire, Relaxed},
		};

		// Longest thread-name length observed so far, shared across all
		// threads via an atomic.
		static MAX_LEN: AtomicUsize = AtomicUsize::new(0);
		let len = self.name.len();
		// Snapshot of the current maximum.
		let mut observed = MAX_LEN.load(Relaxed);

		loop {
			if len <= observed {
				break;
			}
			// Our name is the longest seen so far: publish its length, but
			// only if no other thread raced us since the snapshot.
			match MAX_LEN.compare_exchange(observed, len, AcqRel, Acquire) {
				Ok(_) => break,
				// Another thread stored a new maximum in the meantime; it may
				// even exceed our length, so retry against that value.
				Err(current) => observed = current,
			}
		}

		// Right-align the name within the observed maximum width.
		write!(f, "{:>width$}", self.name, width = observed)
	}
}
|
||||
|
||||
/// Displays the span context of an event (the names of the spans the event
/// occurred in) as a prefix of the log line.
struct FmtCtx<'a, S, N> {
	/// Formatting context, giving access to the span registry.
	ctx: &'a FmtContext<'a, S, N>,
	/// The event's explicit parent span, if any; when `None`, the current
	/// span is looked up at display time.
	span: Option<&'a span::Id>,
	/// Whether to emit ANSI bold styling around the span names.
	ansi: bool,
}
|
||||
|
||||
impl<'a, S, N: 'a> FmtCtx<'a, S, N>
|
||||
where
|
||||
S: Subscriber + for<'lookup> LookupSpan<'lookup>,
|
||||
N: for<'writer> FormatFields<'writer> + 'static,
|
||||
{
|
||||
pub(crate) fn new(
|
||||
ctx: &'a FmtContext<'_, S, N>,
|
||||
span: Option<&'a span::Id>,
|
||||
ansi: bool,
|
||||
) -> Self {
|
||||
Self { ctx, ansi, span }
|
||||
}
|
||||
|
||||
fn bold(&self) -> Style {
|
||||
if self.ansi {
|
||||
return Style::new().bold();
|
||||
}
|
||||
|
||||
Style::new()
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the following code took inspiration from tracing-subscriber
|
||||
//
|
||||
// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/format/mod.rs#L711
|
||||
impl<'a, S, N: 'a> fmt::Display for FmtCtx<'a, S, N>
|
||||
where
|
||||
S: Subscriber + for<'lookup> LookupSpan<'lookup>,
|
||||
N: for<'writer> FormatFields<'writer> + 'static,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let bold = self.bold();
|
||||
let mut seen = false;
|
||||
|
||||
let span = self
|
||||
.span
|
||||
.and_then(|id| self.ctx.span(&id))
|
||||
.or_else(|| self.ctx.lookup_current());
|
||||
|
||||
let scope = span
|
||||
.into_iter()
|
||||
.flat_map(|span| span.from_root().chain(iter::once(span)));
|
||||
|
||||
for name in scope
|
||||
.map(|span| span.metadata().name())
|
||||
.filter(|&x| x != "substrate-node")
|
||||
{
|
||||
seen = true;
|
||||
write!(f, "{}:", bold.paint(name))?;
|
||||
}
|
||||
|
||||
if seen {
|
||||
f.write_char(' ')?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: the following code has been duplicated from tracing-subscriber
|
||||
//
|
||||
// https://github.com/tokio-rs/tracing/blob/2f59b32/tracing-subscriber/src/fmt/time/mod.rs#L252
|
||||
mod time {
|
||||
use ansi_term::Style;
|
||||
use std::fmt;
|
||||
use tracing_subscriber::fmt::time::FormatTime;
|
||||
|
||||
pub(crate) fn write<T>(timer: T, writer: &mut dyn fmt::Write, with_ansi: bool) -> fmt::Result
|
||||
where
|
||||
T: FormatTime,
|
||||
{
|
||||
if with_ansi {
|
||||
let style = Style::new().dimmed();
|
||||
write!(writer, "{}", style.prefix())?;
|
||||
timer.format_time(writer)?;
|
||||
write!(writer, "{}", style.suffix())?;
|
||||
} else {
|
||||
timer.format_time(writer)?;
|
||||
}
|
||||
writer.write_char(' ')?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -294,9 +294,10 @@ mod tests {
|
||||
let inherent_data_providers = InherentDataProviders::new();
|
||||
let spawner = sp_core::testing::TaskExecutor::new();
|
||||
let pool = Arc::new(BasicPool::with_revalidation_type(
|
||||
Options::default(), api(), None, RevalidationType::Full, spawner,
|
||||
Options::default(), api(), None, RevalidationType::Full, spawner.clone(),
|
||||
));
|
||||
let env = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
pool.clone(),
|
||||
None,
|
||||
@@ -365,9 +366,10 @@ mod tests {
|
||||
let inherent_data_providers = InherentDataProviders::new();
|
||||
let spawner = sp_core::testing::TaskExecutor::new();
|
||||
let pool = Arc::new(BasicPool::with_revalidation_type(
|
||||
Options::default(), api(), None, RevalidationType::Full, spawner,
|
||||
Options::default(), api(), None, RevalidationType::Full, spawner.clone(),
|
||||
));
|
||||
let env = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
pool.clone(),
|
||||
None,
|
||||
@@ -440,9 +442,10 @@ mod tests {
|
||||
let pool_api = api();
|
||||
let spawner = sp_core::testing::TaskExecutor::new();
|
||||
let pool = Arc::new(BasicPool::with_revalidation_type(
|
||||
Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner,
|
||||
Options::default(), pool_api.clone(), None, RevalidationType::Full, spawner.clone(),
|
||||
));
|
||||
let env = ProposerFactory::new(
|
||||
spawner.clone(),
|
||||
client.clone(),
|
||||
pool.clone(),
|
||||
None,
|
||||
|
||||
@@ -104,9 +104,8 @@ impl<B: BlockT> InformantDisplay<B> {
|
||||
if self.format.enable_color {
|
||||
info!(
|
||||
target: "substrate",
|
||||
"{} {}{}{} ({} peers), best: #{} ({}), finalized #{} ({}), {} {}",
|
||||
"{} {}{} ({} peers), best: #{} ({}), finalized #{} ({}), {} {}",
|
||||
level,
|
||||
self.format.prefix,
|
||||
Colour::White.bold().paint(&status),
|
||||
target,
|
||||
Colour::White.bold().paint(format!("{}", num_connected_peers)),
|
||||
@@ -120,9 +119,8 @@ impl<B: BlockT> InformantDisplay<B> {
|
||||
} else {
|
||||
info!(
|
||||
target: "substrate",
|
||||
"{} {}{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}",
|
||||
"{} {}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}",
|
||||
level,
|
||||
self.format.prefix,
|
||||
status,
|
||||
target,
|
||||
num_connected_peers,
|
||||
|
||||
@@ -37,29 +37,12 @@ mod display;
|
||||
pub struct OutputFormat {
|
||||
/// Enable color output in logs. True by default.
|
||||
pub enable_color: bool,
|
||||
/// Defines the informant's prefix for the logs. An empty string by default.
|
||||
///
|
||||
/// By default substrate will show logs without a prefix. Example:
|
||||
///
|
||||
/// ```text
|
||||
/// 2020-05-28 15:11:06 ✨ Imported #2 (0xc21c…2ca8)
|
||||
/// 2020-05-28 15:11:07 💤 Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0
|
||||
/// ```
|
||||
///
|
||||
/// But you can define a prefix by setting this string. This will output:
|
||||
///
|
||||
/// ```text
|
||||
/// 2020-05-28 15:11:06 ✨ [Prefix] Imported #2 (0xc21c…2ca8)
|
||||
/// 2020-05-28 15:11:07 💤 [Prefix] Idle (0 peers), best: #2 (0xc21c…2ca8), finalized #0 (0x7299…e6df), ⬇ 0 ⬆ 0
|
||||
/// ```
|
||||
pub prefix: String,
|
||||
}
|
||||
|
||||
impl Default for OutputFormat {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enable_color: true,
|
||||
prefix: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -118,14 +101,11 @@ where
|
||||
|
||||
future::join(
|
||||
display_notifications,
|
||||
display_block_import(client, format.prefix),
|
||||
display_block_import(client),
|
||||
).map(|_| ())
|
||||
}
|
||||
|
||||
fn display_block_import<B: BlockT, C>(
|
||||
client: Arc<C>,
|
||||
prefix: String,
|
||||
) -> impl Future<Output = ()>
|
||||
fn display_block_import<B: BlockT, C>(client: Arc<C>) -> impl Future<Output = ()>
|
||||
where
|
||||
C: UsageProvider<B> + HeaderMetadata<B> + BlockchainEvents<B>,
|
||||
<C as HeaderMetadata<B>>::Error: Display,
|
||||
@@ -151,8 +131,7 @@ where
|
||||
|
||||
match maybe_ancestor {
|
||||
Ok(ref ancestor) if ancestor.hash != *last_hash => info!(
|
||||
"♻️ {}Reorg on #{},{} to #{},{}, common ancestor #{},{}",
|
||||
prefix,
|
||||
"♻️ Reorg on #{},{} to #{},{}, common ancestor #{},{}",
|
||||
Colour::Red.bold().paint(format!("{}", last_num)), last_hash,
|
||||
Colour::Green.bold().paint(format!("{}", n.header.number())), n.hash,
|
||||
Colour::White.bold().paint(format!("{}", ancestor.number)), ancestor.hash,
|
||||
@@ -179,8 +158,7 @@ where
|
||||
|
||||
info!(
|
||||
target: "substrate",
|
||||
"✨ {}Imported #{} ({})",
|
||||
prefix,
|
||||
"✨ Imported #{} ({})",
|
||||
Colour::White.bold().paint(format!("{}", n.header.number())),
|
||||
n.hash,
|
||||
);
|
||||
|
||||
@@ -77,6 +77,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../..
|
||||
sc-tracing = { version = "2.0.0", path = "../tracing" }
|
||||
sp-tracing = { version = "2.0.0", path = "../../primitives/tracing" }
|
||||
tracing = "0.1.19"
|
||||
tracing-futures = { version = "0.2.4" }
|
||||
parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] }
|
||||
|
||||
[target.'cfg(not(target_os = "unknown"))'.dependencies]
|
||||
|
||||
@@ -27,6 +27,7 @@ use prometheus_endpoint::{
|
||||
CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64
|
||||
};
|
||||
use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded};
|
||||
use tracing_futures::Instrument;
|
||||
use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error};
|
||||
|
||||
mod prometheus_future;
|
||||
@@ -116,7 +117,7 @@ impl SpawnTaskHandle {
|
||||
}
|
||||
};
|
||||
|
||||
let join_handle = self.executor.spawn(Box::pin(future), task_type);
|
||||
let join_handle = self.executor.spawn(Box::pin(future.in_current_span()), task_type);
|
||||
let mut task_notifier = self.task_notifier.clone();
|
||||
self.executor.spawn(
|
||||
Box::pin(async move {
|
||||
|
||||
@@ -31,6 +31,7 @@ use tokio::{runtime::Runtime, prelude::FutureExt};
|
||||
use tokio::timer::Interval;
|
||||
use sc_service::{
|
||||
TaskManager,
|
||||
SpawnTaskHandle,
|
||||
GenericChainSpec,
|
||||
ChainSpecExtension,
|
||||
Configuration,
|
||||
@@ -75,6 +76,7 @@ pub trait TestNetNode: Clone + Future<Item = (), Error = sc_service::Error> + Se
|
||||
fn client(&self) -> Arc<Client<Self::Backend, Self::Executor, Self::Block, Self::RuntimeApi>>;
|
||||
fn transaction_pool(&self) -> Arc<Self::TransactionPool>;
|
||||
fn network(&self) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>>;
|
||||
fn spawn_handle(&self) -> SpawnTaskHandle;
|
||||
}
|
||||
|
||||
pub struct TestNetComponents<TBl: BlockT, TBackend, TExec, TRtApi, TExPool> {
|
||||
@@ -147,6 +149,9 @@ TestNetComponents<TBl, TBackend, TExec, TRtApi, TExPool>
|
||||
fn network(&self) -> Arc<sc_network::NetworkService<Self::Block, <Self::Block as BlockT>::Hash>> {
|
||||
self.network.clone()
|
||||
}
|
||||
fn spawn_handle(&self) -> SpawnTaskHandle {
|
||||
self.task_manager.lock().spawn_handle()
|
||||
}
|
||||
}
|
||||
|
||||
impl<G, E, F, L, U> TestNet<G, E, F, L, U>
|
||||
|
||||
@@ -103,7 +103,6 @@ where
|
||||
base_path: None,
|
||||
informant_output_format: sc_informant::OutputFormat {
|
||||
enable_color: false,
|
||||
prefix: String::new(),
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
Reference in New Issue
Block a user