Mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git, synced 2026-05-01 04:17:57 +00:00
Bump Substrate (#871)
* Bump Substrate
* Change usage of "Module" to "Pallet"
  Related Substrate PR: https://github.com/paritytech/substrate/pull/8372
* Add `OnSetCode` config param
  Related Substrate PR: https://github.com/paritytech/substrate/pull/8496
* Update Aura Slot duration time type
  Related Substrate PR: https://github.com/paritytech/substrate/pull/8386
* Add `OnSetCode` to mock runtimes
* Add support for multiple justifications
  Related Substrate PR: https://github.com/paritytech/substrate/pull/7640
* Use updated justification type in more places
* Make GenesisConfig type non-optional
  Related Substrate PR: https://github.com/paritytech/substrate/pull/8275
* Update service to use updated telemetry
  Related Substrate PR: https://github.com/paritytech/substrate/pull/8143
* Appease Clippy
Committed by Bastian Köcher
Parent: d9c553374c
Commit: c6ae74725b
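The service-side hunks below thread an optional `Telemetry` instance through `new_partial`, `new_full`, and `new_light`. As a consolidated illustration of that pattern, here is a minimal sketch assuming the `sc_service`/`sc_telemetry` versions targeted by this commit; the `init_telemetry` helper name is hypothetical and not part of the diff, and in the actual code the telemetry handle is passed to `new_full_parts`/`new_light_parts` before the task manager exists.

use sc_service::{Configuration, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker};

/// Hypothetical helper (not in the diff): build the optional telemetry
/// instance from the node `Configuration` and spawn its background worker.
fn init_telemetry(
    config: &Configuration,
    task_manager: &TaskManager,
) -> Result<Option<Telemetry>, sc_telemetry::Error> {
    // Only start telemetry when endpoints are configured and non-empty.
    let worker_and_telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|endpoints| !endpoints.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    // The worker future must be spawned, otherwise no telemetry is sent.
    Ok(worker_and_telemetry.map(|(worker, telemetry)| {
        task_manager.spawn_handle().spawn("telemetry", worker.run());
        telemetry
    }))
}

The resulting handle is then passed on to the block import, import queue, AURA, and GRANDPA parameters via `telemetry.as_ref().map(|x| x.handle())`, as the hunks below show.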
@@ -122,7 +122,7 @@ impl Alternative {
 get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
 get_account_id_from_seed::<sr25519::Public>("George//stash"),
 get_account_id_from_seed::<sr25519::Public>("Harry//stash"),
-pallet_bridge_messages::Module::<
+pallet_bridge_messages::Pallet::<
 rialto_runtime::Runtime,
 pallet_bridge_messages::DefaultInstance,
 >::relayer_fund_account_id(),
@@ -154,28 +154,28 @@ fn testnet_genesis(
 _enable_println: bool,
 ) -> GenesisConfig {
 GenesisConfig {
-frame_system: Some(SystemConfig {
+frame_system: SystemConfig {
 code: WASM_BINARY.to_vec(),
 changes_trie_config: Default::default(),
-}),
-pallet_balances: Some(BalancesConfig {
+},
+pallet_balances: BalancesConfig {
 balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
-}),
-pallet_aura: Some(AuraConfig {
+},
+pallet_aura: AuraConfig {
 authorities: Vec::new(),
-}),
-pallet_bridge_eth_poa_Instance1: Some(load_rialto_poa_bridge_config()),
-pallet_bridge_eth_poa_Instance2: Some(load_kovan_bridge_config()),
-pallet_grandpa: Some(GrandpaConfig {
+},
+pallet_bridge_eth_poa_Instance1: load_rialto_poa_bridge_config(),
+pallet_bridge_eth_poa_Instance2: load_kovan_bridge_config(),
+pallet_grandpa: GrandpaConfig {
 authorities: Vec::new(),
-}),
-pallet_sudo: Some(SudoConfig { key: root_key }),
-pallet_session: Some(SessionConfig {
+},
+pallet_sudo: SudoConfig { key: root_key },
+pallet_session: SessionConfig {
 keys: initial_authorities
 .iter()
 .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone())))
 .collect::<Vec<_>>(),
-}),
+},
 }
 }
@@ -30,11 +30,13 @@
 use rialto_runtime::{self, opaque::Block, RuntimeApi};
 use sc_client_api::{ExecutorProvider, RemoteBackend};
+use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_executor::native_executor_instance;
 pub use sc_executor::NativeExecutor;
 use sc_finality_grandpa::SharedVoterState;
 use sc_keystore::LocalKeystore;
 use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
 use sp_inherents::InherentDataProviders;
 use std::sync::Arc;
@@ -70,6 +72,7 @@ pub fn new_partial(
 AuraPair,
 >,
 sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+Option<Telemetry>,
 ),
 >,
 ServiceError,
@@ -77,12 +80,30 @@ pub fn new_partial(
 if config.keystore_remote.is_some() {
 return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
 }
-let inherent_data_providers = sp_inherents::InherentDataProviders::new();
+let inherent_data_providers = InherentDataProviders::new();
-let (client, backend, keystore_container, task_manager) =
-sc_service::new_full_parts::<Block, RuntimeApi, Executor>(&config)?;
+let telemetry = config
+.telemetry_endpoints
+.clone()
+.filter(|x| !x.is_empty())
+.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+let worker = TelemetryWorker::new(16)?;
+let telemetry = worker.handle().new_telemetry(endpoints);
+Ok((worker, telemetry))
+})
+.transpose()?;
+let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+&config,
+telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+)?;
 let client = Arc::new(client);
+let telemetry = telemetry.map(|(worker, telemetry)| {
+task_manager.spawn_handle().spawn("telemetry", worker.run());
+telemetry
+});
 let select_chain = sc_consensus::LongestChain::new(backend.clone());
 let transaction_pool = sc_transaction_pool::BasicPool::new_full(
@@ -93,22 +114,28 @@ pub fn new_partial(
 client.clone(),
 );
-let (grandpa_block_import, grandpa_link) =
-sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?;
+let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
+client.clone(),
+&(client.clone() as Arc<_>),
+select_chain.clone(),
+telemetry.as_ref().map(|x| x.handle()),
+)?;
 let aura_block_import =
 sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
-let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>(
-sc_consensus_aura::slot_duration(&*client)?,
-aura_block_import.clone(),
-Some(Box::new(grandpa_block_import)),
-client.clone(),
-inherent_data_providers.clone(),
-&task_manager.spawn_essential_handle(),
-config.prometheus_registry(),
-sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-)?;
+let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
+block_import: aura_block_import.clone(),
+justification_import: Some(Box::new(grandpa_block_import)),
+client: client.clone(),
+inherent_data_providers: inherent_data_providers.clone(),
+spawner: &task_manager.spawn_essential_handle(),
+can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
+slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+registry: config.prometheus_registry(),
+check_for_equivocation: Default::default(),
+telemetry: telemetry.as_ref().map(|x| x.handle()),
+})?;
 Ok(sc_service::PartialComponents {
 client,
@@ -119,7 +146,7 @@ pub fn new_partial(
 select_chain,
 transaction_pool,
 inherent_data_providers,
-other: (aura_block_import, grandpa_link),
+other: (aura_block_import, grandpa_link, telemetry),
 })
 }
@@ -141,7 +168,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 select_chain,
 transaction_pool,
 inherent_data_providers,
-other: (block_import, grandpa_link),
+other: (block_import, grandpa_link, mut telemetry),
 } = new_partial(&config)?;
 if let Some(url) = &config.keystore_remote {
@@ -173,13 +200,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 })?;
 if config.offchain_worker.enabled {
-sc_service::build_offchain_workers(
-&config,
-backend.clone(),
-task_manager.spawn_handle(),
-client.clone(),
-network.clone(),
-);
+sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
 }
 let role = config.role.clone();
@@ -266,7 +287,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 })
 };
-let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
 network: network.clone(),
 client: client.clone(),
 keystore: keystore_container.sync_keystore(),
@@ -279,32 +300,35 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 network_status_sinks,
 system_rpc_tx,
 config,
-telemetry_span: None,
+telemetry: telemetry.as_mut(),
 })?;
 if role.is_authority() {
-let proposer = sc_basic_authorship::ProposerFactory::new(
+let proposer_factory = sc_basic_authorship::ProposerFactory::new(
 task_manager.spawn_handle(),
 client.clone(),
 transaction_pool,
 prometheus_registry.as_ref(),
+telemetry.as_ref().map(|x| x.handle()),
 );
 let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
-let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>(
-sc_consensus_aura::slot_duration(&*client)?,
-client.clone(),
+let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _>(StartAuraParams {
+slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+client: client.clone(),
 select_chain,
 block_import,
-proposer,
-network.clone(),
+proposer_factory,
 inherent_data_providers,
 force_authoring,
 backoff_authoring_blocks,
-keystore_container.sync_keystore(),
+keystore: keystore_container.sync_keystore(),
 can_author_with,
-)?;
+sync_oracle: network.clone(),
+block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
+telemetry: telemetry.as_ref().map(|x| x.handle()),
+})?;
 // the AURA authoring task is considered essential, i.e. if it
 // fails we take down the service with it.
@@ -327,6 +351,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 observer_enabled: false,
 keystore,
 is_authority: role.is_authority(),
+telemetry: telemetry.as_ref().map(|x| x.handle()),
 };
 if enable_grandpa {
@@ -340,10 +365,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 config: grandpa_config,
 link: grandpa_link,
 network,
-telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),
 voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
 prometheus_registry,
 shared_voter_state: SharedVoterState::empty(),
+telemetry: telemetry.as_ref().map(|x| x.handle()),
 };
 // the GRANDPA voter task is considered infallible, i.e.
@@ -359,8 +384,27 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 /// Builds a new service for a light client.
 pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
+let telemetry = config
+.telemetry_endpoints
+.clone()
+.filter(|x| !x.is_empty())
+.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+let worker = TelemetryWorker::new(16)?;
+let telemetry = worker.handle().new_telemetry(endpoints);
+Ok((worker, telemetry))
+})
+.transpose()?;
 let (client, backend, keystore_container, mut task_manager, on_demand) =
-sc_service::new_light_parts::<Block, RuntimeApi, Executor>(&config)?;
+sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
+&config,
+telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+)?;
+let mut telemetry = telemetry.map(|(worker, telemetry)| {
+task_manager.spawn_handle().spawn("telemetry", worker.run());
+telemetry
+});
 config
 .network
@@ -377,22 +421,28 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 on_demand.clone(),
 ));
-let (grandpa_block_import, _) =
-sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?;
+let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
+client.clone(),
+&(client.clone() as Arc<_>),
+select_chain,
+telemetry.as_ref().map(|x| x.handle()),
+)?;
 let aura_block_import =
 sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
-let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>(
-sc_consensus_aura::slot_duration(&*client)?,
-aura_block_import,
-Some(Box::new(grandpa_block_import)),
-client.clone(),
-InherentDataProviders::new(),
-&task_manager.spawn_essential_handle(),
-config.prometheus_registry(),
-sp_consensus::NeverCanAuthor,
-)?;
+let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
+block_import: aura_block_import,
+justification_import: Some(Box::new(grandpa_block_import)),
+client: client.clone(),
+inherent_data_providers: InherentDataProviders::new(),
+spawner: &task_manager.spawn_essential_handle(),
+can_author_with: sp_consensus::NeverCanAuthor,
+slot_duration: sc_consensus_aura::slot_duration(&*client)?,
+registry: config.prometheus_registry(),
+check_for_equivocation: Default::default(),
+telemetry: telemetry.as_ref().map(|x| x.handle()),
+})?;
 let (network, network_status_sinks, system_rpc_tx, network_starter) =
 sc_service::build_network(sc_service::BuildNetworkParams {
@@ -406,13 +456,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 })?;
 if config.offchain_worker.enabled {
-sc_service::build_offchain_workers(
-&config,
-backend.clone(),
-task_manager.spawn_handle(),
-client.clone(),
-network.clone(),
-);
+sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
 }
 sc_service::spawn_tasks(sc_service::SpawnTasksParams {
@@ -428,7 +472,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 network,
 network_status_sinks,
 system_rpc_tx,
-telemetry_span: None,
+telemetry: telemetry.as_mut(),
 })?;
 network_starter.start_network();