mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git, synced 2026-04-26 19:17:58 +00:00
Run cargo fmt on the whole code base (#9394)
* Run cargo fmt on the whole code base
* Second run
* Add CI check
* Fix compilation
* More unnecessary braces
* Handle weights
* Use --all
* Use correct attributes...
* Fix UI tests
* AHHHHHHHHH
* 🤦
* Docs
* Fix compilation
* 🤷
* Please stop
* 🤦 x 2
* More
* make rustfmt.toml consistent with polkadot

Co-authored-by: André Silva <andrerfosilva@gmail.com>
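The commit message mentions making rustfmt.toml consistent with polkadot's, but the file itself is not part of this diff. As a hypothetical sketch, options like the ones below would produce the layout visible in the hunks that follow (hard tabs, lines wrapped near 100 columns, short blocks collapsed onto one line, imports sorted alphabetically); the exact values are assumptions, not taken from this commit:

# rustfmt.toml — hypothetical sketch, not the file from this commit
hard_tabs = true                  # indent with tabs
max_width = 100                   # wrap lines longer than 100 columns
use_small_heuristics = "Max"      # collapse short blocks and struct literals onto one line
reorder_imports = true            # sort `use` statements alphabetically (see the first hunk)
match_block_trailing_comma = true # keep the trailing comma after match-arm blocks
trailing_comma = "Vertical"       # trailing commas only in vertically laid-out lists

Formatting the whole workspace with such a file generally requires a nightly toolchain, since several of these options are unstable.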
@@ -1,17 +1,17 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

-use std::{sync::Arc, time::Duration};
-use sc_client_api::{ExecutorProvider, RemoteBackend};
 use node_template_runtime::{self, opaque::Block, RuntimeApi};
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_client_api::{ExecutorProvider, RemoteBackend};
+use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_executor::native_executor_instance;
 pub use sc_executor::NativeExecutor;
-use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
-use sc_consensus_aura::{ImportQueueParams, StartAuraParams, SlotProportion};
 use sc_finality_grandpa::SharedVoterState;
 use sc_keystore::LocalKeystore;
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_consensus::SlotData;
+use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
+use std::{sync::Arc, time::Duration};

 // Our native executor instance.
 native_executor_instance!(
@@ -25,22 +25,35 @@ type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;

-pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponents<
-	FullClient, FullBackend, FullSelectChain,
-	sp_consensus::DefaultImportQueue<Block, FullClient>,
-	sc_transaction_pool::FullPool<Block, FullClient>,
-	(
-		sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
-		sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
-		Option<Telemetry>,
-	)
->, ServiceError> {
+pub fn new_partial(
+	config: &Configuration,
+) -> Result<
+	sc_service::PartialComponents<
+		FullClient,
+		FullBackend,
+		FullSelectChain,
+		sp_consensus::DefaultImportQueue<Block, FullClient>,
+		sc_transaction_pool::FullPool<Block, FullClient>,
+		(
+			sc_finality_grandpa::GrandpaBlockImport<
+				FullBackend,
+				Block,
+				FullClient,
+				FullSelectChain,
+			>,
+			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+			Option<Telemetry>,
+		),
+	>,
+	ServiceError,
+> {
 	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other(
-			format!("Remote Keystores are not supported.")))
+		return Err(ServiceError::Other(format!("Remote Keystores are not supported.")))
 	}

-	let telemetry = config.telemetry_endpoints.clone()
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			let worker = TelemetryWorker::new(16)?;
@@ -56,11 +69,10 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
 	)?;
 	let client = Arc::new(client);

-	let telemetry = telemetry
-		.map(|(worker, telemetry)| {
-			task_manager.spawn_handle().spawn("telemetry", worker.run());
-			telemetry
-		});
+	let telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});

 	let select_chain = sc_consensus::LongestChain::new(backend.clone());

@@ -81,8 +93,8 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen

 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();

-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(
-		ImportQueueParams {
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
 			block_import: grandpa_block_import.clone(),
 			justification_import: Some(Box::new(grandpa_block_import.clone())),
 			client: client.clone(),
@@ -98,12 +110,13 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponen
 				Ok((timestamp, slot))
 			},
 			spawner: &task_manager.spawn_essential_handle(),
-			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
+			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
+				client.executor().clone(),
+			),
 			registry: config.prometheus_registry(),
 			check_for_equivocation: Default::default(),
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		},
-	)?;
+		})?;

 	Ok(sc_service::PartialComponents {
 		client,
@@ -140,10 +153,11 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 	if let Some(url) = &config.keystore_remote {
 		match remote_keystore(url) {
 			Ok(k) => keystore_container.set_remote_keystore(k),
-			Err(e) => {
-				return Err(ServiceError::Other(
-					format!("Error hooking up remote keystore for {}: {}", url, e)))
-			}
+			Err(e) =>
+				return Err(ServiceError::Other(format!(
+					"Error hooking up remote keystore for {}: {}",
+					url, e
+				))),
 		};
 	}

@@ -162,7 +176,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>

 	if config.offchain_worker.enabled {
 		sc_service::build_offchain_workers(
-			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
 		);
 	}

@@ -178,32 +195,27 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		let pool = transaction_pool.clone();

 		Box::new(move |deny_unsafe, _| {
-			let deps = crate::rpc::FullDeps {
-				client: client.clone(),
-				pool: pool.clone(),
-				deny_unsafe,
-			};
+			let deps =
+				crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };

 			crate::rpc::create_full(deps)
 		})
 	};

-	let _rpc_handlers = sc_service::spawn_tasks(
-		sc_service::SpawnTasksParams {
-			network: network.clone(),
-			client: client.clone(),
-			keystore: keystore_container.sync_keystore(),
-			task_manager: &mut task_manager,
-			transaction_pool: transaction_pool.clone(),
-			rpc_extensions_builder,
-			on_demand: None,
-			remote_blockchain: None,
-			backend,
-			system_rpc_tx,
-			config,
-			telemetry: telemetry.as_mut(),
-		},
-	)?;
+	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		network: network.clone(),
+		client: client.clone(),
+		keystore: keystore_container.sync_keystore(),
+		task_manager: &mut task_manager,
+		transaction_pool: transaction_pool.clone(),
+		rpc_extensions_builder,
+		on_demand: None,
+		remote_blockchain: None,
+		backend,
+		system_rpc_tx,
+		config,
+		telemetry: telemetry.as_mut(),
+	})?;

 	if role.is_authority() {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
@@ -257,11 +269,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>

 		// if the node isn't actively participating in consensus then it doesn't
 		// need a keystore, regardless of which protocol we use below.
-		let keystore = if role.is_authority() {
-			Some(keystore_container.sync_keystore())
-		} else {
-			None
-		};
+		let keystore =
+			if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };

 		let grandpa_config = sc_finality_grandpa::Config {
 			// FIXME #1578 make this available through chainspec
@@ -295,7 +304,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		// if it fails we take down the service with it.
 		task_manager.spawn_essential_handle().spawn_blocking(
 			"grandpa-voter",
-			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
 		);
 	}

@@ -305,7 +314,9 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>

 /// Builds a new service for a light client.
 pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
-	let telemetry = config.telemetry_endpoints.clone()
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			let worker = TelemetryWorker::new(16)?;
@@ -320,11 +331,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
 	)?;

-	let mut telemetry = telemetry
-		.map(|(worker, telemetry)| {
-			task_manager.spawn_handle().spawn("telemetry", worker.run());
-			telemetry
-		});
+	let mut telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});

 	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());

@@ -347,8 +357,8 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>

 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();

-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(
-		ImportQueueParams {
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
 			block_import: grandpa_block_import.clone(),
 			justification_import: Some(Box::new(grandpa_block_import.clone())),
 			client: client.clone(),
@@ -368,8 +378,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			registry: config.prometheus_registry(),
 			check_for_equivocation: Default::default(),
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		},
-	)?;
+		})?;

 	let (network, system_rpc_tx, network_starter) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
@@ -384,7 +393,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>

 	if config.offchain_worker.enabled {
 		sc_service::build_offchain_workers(
-			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
 		);
 	}

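The "Add CI check" step from the commit message is not shown in this diff. A minimal sketch of what such a check could look like as a shell step (the repository's actual script is an assumption):

#!/bin/sh
# Fail the build if any file deviates from rustfmt.toml.
# --all covers every crate in the workspace; --check makes rustfmt
# report differences instead of rewriting files.
cargo +nightly fmt --all -- --check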