Service factory refactor (#3382)

* Move Service::new to a macro

* Move function calls to macros

* Extract offchain_workers and start_rpc in separate function

In follow-up commits, we want to be able to directly call maintain_transaction_pool, offchain_workers, and start_rpc, without having to implement the Components trait.
This commit is a preliminary step: we extract the code to freestanding functions.

* Introduce an AbstractService trait

* Introduce NewService as an implementation detail of Service

* Implement traits on NewService instead

Instead of implementing AbstractService, Future, and Executor on Service, we implement them on NewService instead.

The implementations of AbstractService, Future, and Executor on Service still exist, but they just wrap to the respective implementations for NewService.

* Move components creation back to macro invocation

Instead of having multiple $build_ parameters passed to the macro, let's group them all into one.

This change is necessary for the follow-up commits, because we are going to call new_impl! only after all the components have already been built.

* Add a $block parameter to new_impl

This makes it possible to be explicit as to what the generic parameter of the NewService is, without relying on type inference.

* Introduce the ServiceBuilder struct

Introduces a new builder-like ServiceBuilder struct that creates a NewService.

* Macro-ify import_blocks, export_blocks and revert_chain

Similar to the introduction of new_impl!, we extract the actual code into a macro, letting us get rid of the Components and Factory traits

* Add export_blocks, import_blocks and revert_chain methods on ServiceBuilder

Can be used as a replacement for the chain_ops::* methods

* Add run_with_builder

Instead of just run, adds run_with_builder to ParseAndPrepareExport/Import/Revert. This lets you run these operations with a ServiceBuilder instead of a ServiceFactory.

* Transition node and node-template to ServiceBuilder

* Transition transaction-factory to the new service factory

This is technically a breaking change, but the transaction-factory crate is only ever used from within substrate-node, which this commit updates as well.

* Remove old service factory

* Adjust the AbstractService trait to be more usable

We slightly change the trait bounds in order to make all the methods usable.

* Make substrate-service-test compile

* Fix the node-cli tests

* Remove the old API

* Remove the components module

* Fix indentation on chain_ops

* Line widths

* Fix bad line widths commit

* Line widths again 🤦

* Fix the sync test

* Apply suggestions from code review

Co-Authored-By: Gavin Wood <i@gavwood.com>

* Address some concerns

* Remove TelemetryOnConnect

* Remove informant::start

* Update jsonrpc

* Rename factory to builder

* Line widths 😩
This commit is contained in:
Pierre Krieger
2019-08-27 11:18:41 +02:00
committed by Bastian Köcher
parent 144bd228af
commit 5b8ebf7baf
26 changed files with 1960 additions and 2263 deletions
+1 -1
View File
@@ -11,7 +11,7 @@ log = "0.4"
tokio = "0.1.7"
futures = "0.1"
exit-future = "0.1"
jsonrpc-core = "13.0.0"
jsonrpc-core = "13.1.0"
cli = { package = "substrate-cli", path = "../../core/cli" }
codec = { package = "parity-scale-codec", version = "1.0.0" }
sr-io = { path = "../../core/sr-io" }
+6 -2
View File
@@ -350,8 +350,8 @@ pub fn local_testnet_config() -> ChainSpec {
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::service::{new_full, new_light};
use service_test;
use crate::service::Factory;
fn local_testnet_genesis_instant_single() -> GenesisConfig {
testnet_genesis(
@@ -395,6 +395,10 @@ pub(crate) mod tests {
#[test]
#[ignore]
fn test_connectivity() {
service_test::connectivity::<Factory>(integration_test_config_with_two_authorities());
service_test::connectivity(
integration_test_config_with_two_authorities(),
|config| new_full(config),
|config| new_light(config),
);
}
}
+23 -16
View File
@@ -21,14 +21,14 @@
pub use cli::error;
pub mod chain_spec;
#[macro_use]
mod service;
mod factory_impl;
use tokio::prelude::Future;
use tokio::runtime::{Builder as RuntimeBuilder, Runtime};
pub use cli::{VersionInfo, IntoExit, NoCustom, SharedParams, ExecutionStrategyParam};
use substrate_service::{ServiceFactory, Roles as ServiceRoles};
use std::ops::Deref;
use substrate_service::{AbstractService, Roles as ServiceRoles};
use log::info;
use structopt::{StructOpt, clap::App};
use cli::{AugmentClap, GetLogFilter, parse_and_prepare, ParseAndPrepare};
@@ -159,7 +159,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
E: IntoExit,
{
match parse_and_prepare::<CustomSubcommands, NoCustom, _>(&version, "substrate-node", args) {
ParseAndPrepare::Run(cmd) => cmd.run(load_spec, exit, |exit, _cli_args, _custom_args, config| {
ParseAndPrepare::Run(cmd) => cmd.run::<(), _, _, _, _>(load_spec, exit,
|exit, _cli_args, _custom_args, config| {
info!("{}", version.name);
info!(" version {}", config.full_version());
info!(" by Parity Technologies, 2017-2019");
@@ -171,23 +172,26 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
match config.roles {
ServiceRoles::LIGHT => run_until_exit(
runtime,
service::Factory::new_light(config).map_err(|e| format!("{:?}", e))?,
service::new_light(config).map_err(|e| format!("{:?}", e))?,
exit
),
_ => run_until_exit(
runtime,
service::Factory::new_full(config).map_err(|e| format!("{:?}", e))?,
service::new_full(config).map_err(|e| format!("{:?}", e))?,
exit
),
}.map_err(|e| format!("{:?}", e))
}),
ParseAndPrepare::BuildSpec(cmd) => cmd.run(load_spec),
ParseAndPrepare::ExportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit),
ParseAndPrepare::ImportBlocks(cmd) => cmd.run::<service::Factory, _, _>(load_spec, exit),
ParseAndPrepare::ExportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::ImportBlocks(cmd) => cmd.run_with_builder::<(), _, _, _, _, _>(|config|
Ok(new_full_start!(config).0), load_spec, exit),
ParseAndPrepare::PurgeChain(cmd) => cmd.run(load_spec),
ParseAndPrepare::RevertChain(cmd) => cmd.run::<service::Factory, _>(load_spec),
ParseAndPrepare::RevertChain(cmd) => cmd.run_with_builder::<(), _, _, _, _>(|config|
Ok(new_full_start!(config).0), load_spec),
ParseAndPrepare::CustomCommand(CustomSubcommands::Factory(cli_args)) => {
let mut config = cli::create_config_with_db_path(
let mut config = cli::create_config_with_db_path::<(), _, _>(
load_spec,
&cli_args.shared_params,
&version,
@@ -209,9 +213,13 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
cli_args.num,
cli_args.rounds,
);
transaction_factory::factory::<service::Factory, FactoryState<_>>(
let service_builder = new_full_start!(config).0;
transaction_factory::factory::<FactoryState<_>, _, _, _, _, _>(
factory_state,
config,
service_builder.client(),
service_builder.select_chain()
.expect("The select_chain is always initialized by new_full_start!; QED")
).map_err(|e| format!("Error in transaction factory: {}", e))?;
Ok(())
@@ -219,14 +227,13 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
}
}
fn run_until_exit<T, C, E>(
fn run_until_exit<T, E>(
mut runtime: Runtime,
service: T,
e: E,
) -> error::Result<()> where
T: Deref<Target=substrate_service::Service<C>>,
T: Future<Item = (), Error = substrate_service::error::Error> + Send + 'static,
C: substrate_service::Components,
) -> error::Result<()>
where
T: AbstractService,
E: IntoExit,
{
let (exit_send, exit) = exit_future::signal();
+309 -343
View File
@@ -16,271 +16,240 @@
#![warn(unused_extern_crates)]
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
//! Service implementation. Specialized wrapper over substrate service.
use std::sync::Arc;
use std::time::Duration;
use babe::{import_queue, start_babe, BabeImportQueue, Config};
use babe::{import_queue, Config};
use client::{self, LongestChain};
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
use node_executor;
use futures::prelude::*;
use node_primitives::Block;
use node_runtime::{GenesisConfig, RuntimeApi};
use substrate_service::{
FactoryFullConfiguration, LightComponents, FullComponents, FullBackend,
FullClient, LightClient, LightBackend, FullExecutor, LightExecutor,
error::{Error as ServiceError},
AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError},
};
use transaction_pool::{self, txpool::{Pool as TransactionPool}};
use inherents::InherentDataProviders;
use network::construct_simple_protocol;
use substrate_service::construct_service_factory;
use substrate_service::TelemetryOnConnect;
construct_simple_protocol! {
/// Demo protocol attachment for substrate.
pub struct NodeProtocol where Block = Block { }
}
type BabeBlockImportForService<F> = babe::BabeBlockImport<
FullBackend<F>,
FullExecutor<F>,
<F as crate::ServiceFactory>::Block,
grandpa::BlockImportForService<F>,
<F as crate::ServiceFactory>::RuntimeApi,
client::Client<
FullBackend<F>,
FullExecutor<F>,
<F as crate::ServiceFactory>::Block,
<F as crate::ServiceFactory>::RuntimeApi
>,
>;
/// Starts a `ServiceBuilder` for a full service.
///
/// Use this macro if you don't actually need the full service, but just the builder in order to
/// be able to perform chain operations.
macro_rules! new_full_start {
($config:expr) => {{
let mut import_setup = None;
let inherent_data_providers = inherents::InherentDataProviders::new();
let mut tasks_to_spawn = None;
/// Node specific configuration
pub struct NodeConfig<F: substrate_service::ServiceFactory> {
/// GRANDPA and BABE connection to import block.
// FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state
pub import_setup: Option<(
BabeBlockImportForService<F>,
grandpa::LinkHalfForService<F>,
babe::BabeLink,
)>,
/// Tasks that were created by previous setup steps and should be spawned.
pub tasks_to_spawn: Option<Vec<Box<dyn Future<Item = (), Error = ()> + Send>>>,
inherent_data_providers: InherentDataProviders,
}
impl<F> Default for NodeConfig<F> where F: substrate_service::ServiceFactory {
fn default() -> NodeConfig<F> {
NodeConfig {
import_setup: None,
inherent_data_providers: InherentDataProviders::new(),
tasks_to_spawn: None,
}
}
}
construct_service_factory! {
struct Factory {
Block = Block,
RuntimeApi = RuntimeApi,
NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) },
RuntimeDispatch = node_executor::Executor,
FullTransactionPoolApi =
transaction_pool::ChainApi<
client::Client<FullBackend<Self>, FullExecutor<Self>, Block, RuntimeApi>,
Block
> {
|config, client|
Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
},
LightTransactionPoolApi =
transaction_pool::ChainApi<
client::Client<LightBackend<Self>, LightExecutor<Self>, Block, RuntimeApi>,
Block
> {
|config, client|
Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
},
Genesis = GenesisConfig,
Configuration = NodeConfig<Self>,
FullService = FullComponents<Self> {
|config: FactoryFullConfiguration<Self>| FullComponents::<Factory>::new(config)
},
AuthoritySetup = {
|mut service: Self::FullService| {
let (block_import, link_half, babe_link) =
service.config_mut().custom.import_setup.take()
.expect("Link Half and Block Import are present for Full Services or setup failed before. qed");
// spawn any futures that were created in the previous setup steps
if let Some(tasks) = service.config_mut().custom.tasks_to_spawn.take() {
for task in tasks {
service.spawn_task(
task.select(service.on_exit())
.map(|_| ())
.map_err(|_| ())
);
}
}
if service.config().roles.is_authority() {
let proposer = substrate_basic_authorship::ProposerFactory {
client: service.client(),
transaction_pool: service.transaction_pool(),
};
let client = service.client();
let select_chain = service.select_chain()
.ok_or(ServiceError::SelectChainRequired)?;
let babe_config = babe::BabeParams {
config: Config::get_or_compute(&*client)?,
keystore: service.keystore(),
client,
select_chain,
block_import,
env: proposer,
sync_oracle: service.network(),
inherent_data_providers: service.config()
.custom.inherent_data_providers.clone(),
force_authoring: service.config().force_authoring,
time_source: babe_link,
};
let babe = start_babe(babe_config)?;
let select = babe.select(service.on_exit()).then(|_| Ok(()));
// the BABE authoring task is considered infallible, i.e. if it
// fails we take down the service with it.
service.spawn_essential_task(select);
}
let config = grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: Duration::from_millis(333),
justification_period: 4096,
name: Some(service.config().name.clone()),
keystore: Some(service.keystore()),
};
match (service.config().roles.is_authority(), service.config().disable_grandpa) {
(false, false) => {
// start the lightweight GRANDPA observer
service.spawn_task(Box::new(grandpa::run_grandpa_observer(
config,
link_half,
service.network(),
service.on_exit(),
)?));
},
(true, false) => {
// start the full GRANDPA voter
let telemetry_on_connect = TelemetryOnConnect {
telemetry_connection_sinks: service.telemetry_on_connect_stream(),
};
let grandpa_config = grandpa::GrandpaParams {
config: config,
link: link_half,
network: service.network(),
inherent_data_providers:
service.config().custom.inherent_data_providers.clone(),
on_exit: service.on_exit(),
telemetry_on_connect: Some(telemetry_on_connect),
};
// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
service.spawn_essential_task(grandpa::run_grandpa_voter(grandpa_config)?);
},
(_, true) => {
grandpa::setup_disabled_grandpa(
service.client(),
&service.config().custom.inherent_data_providers,
service.network(),
)?;
},
}
Ok(service)
}
},
LightService = LightComponents<Self>
{ |config| <LightComponents<Factory>>::new(config) },
FullImportQueue = BabeImportQueue<Self::Block>
{
|
config: &mut FactoryFullConfiguration<Self>,
client: Arc<FullClient<Self>>,
select_chain: Self::SelectChain,
transaction_pool: Option<Arc<TransactionPool<Self::FullTransactionPoolApi>>>,
|
{
let builder = substrate_service::ServiceBuilder::new_full::<
node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor
>($config)?
.with_select_chain(|_config, client| {
#[allow(deprecated)]
Ok(client::LongestChain::new(client.backend().clone()))
})?
.with_transaction_pool(|config, client|
Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client)))
)?
.with_import_queue(|_config, client, mut select_chain, transaction_pool| {
let select_chain = select_chain.take()
.ok_or_else(|| substrate_service::Error::SelectChainRequired)?;
let (block_import, link_half) =
grandpa::block_import::<_, _, _, RuntimeApi, FullClient<Self>, _>(
grandpa::block_import::<_, _, _, node_runtime::RuntimeApi, _, _>(
client.clone(), client.clone(), select_chain
)?;
let justification_import = block_import.clone();
let (import_queue, babe_link, babe_block_import, pruning_task) = import_queue(
Config::get_or_compute(&*client)?,
let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue(
babe::Config::get_or_compute(&*client)?,
block_import,
Some(Box::new(justification_import)),
None,
client.clone(),
client,
config.custom.inherent_data_providers.clone(),
transaction_pool,
inherent_data_providers.clone(),
Some(transaction_pool)
)?;
config.custom.import_setup = Some((babe_block_import.clone(), link_half, babe_link));
config.custom.tasks_to_spawn = Some(vec![Box::new(pruning_task)]);
import_setup = Some((babe_block_import.clone(), link_half, babe_link));
tasks_to_spawn = Some(vec![Box::new(pruning_task)]);
Ok(import_queue)
}},
LightImportQueue = BabeImportQueue<Self::Block>
{ |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| {
#[allow(deprecated)]
let fetch_checker = client.backend().blockchain().fetcher()
.upgrade()
.map(|fetcher| fetcher.checker().clone())
.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient<Self>>(
client.clone(), Arc::new(fetch_checker), client.clone()
)?;
})?
.with_rpc_extensions(|client, pool| {
use node_rpc::accounts::{Accounts, AccountsApi};
let finality_proof_import = block_import.clone();
let finality_proof_request_builder =
finality_proof_import.create_finality_proof_request_builder();
let mut io = jsonrpc_core::IoHandler::<substrate_service::RpcMetadata>::default();
io.extend_with(
AccountsApi::to_delegate(Accounts::new(client, pool))
);
io
})?;
(builder, import_setup, inherent_data_providers, tasks_to_spawn)
}}
}
// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
let (import_queue, ..) = import_queue::<_, _, _, _, _, _, TransactionPool<Self::FullTransactionPoolApi>>(
Config::get_or_compute(&*client)?,
block_import,
None,
Some(Box::new(finality_proof_import)),
client.clone(),
client,
config.custom.inherent_data_providers.clone(),
None,
)?;
/// Creates a full service from the configuration.
///
/// We need to use a macro because the test suite doesn't work with an opaque service. It expects
/// concrete types instead.
macro_rules! new_full {
($config:expr) => {{
use futures::Future;
Ok((import_queue, finality_proof_request_builder))
}},
SelectChain = LongestChain<FullBackend<Self>, Self::Block>
{ |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| {
#[allow(deprecated)]
Ok(LongestChain::new(client.backend().clone()))
let (builder, mut import_setup, inherent_data_providers, mut tasks_to_spawn) = new_full_start!($config);
let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))?
.with_finality_proof_provider(|client|
Ok(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)
)?
.build()?;
let (block_import, link_half, babe_link) = import_setup.take()
.expect("Link Half and Block Import are present for Full Services or setup failed before. qed");
// spawn any futures that were created in the previous setup steps
if let Some(tasks) = tasks_to_spawn.take() {
for task in tasks {
service.spawn_task(
task.select(service.on_exit())
.map(|_| ())
.map_err(|_| ())
);
}
},
FinalityProofProvider = { |client: Arc<FullClient<Self>>| {
Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _))
}},
}
RpcExtensions = jsonrpc_core::IoHandler<substrate_rpc::Metadata>
{ |client, pool| {
if service.config().roles.is_authority() {
let proposer = substrate_basic_authorship::ProposerFactory {
client: service.client(),
transaction_pool: service.transaction_pool(),
};
let client = service.client();
let select_chain = service.select_chain()
.ok_or(substrate_service::Error::SelectChainRequired)?;
let babe_config = babe::BabeParams {
config: babe::Config::get_or_compute(&*client)?,
keystore: service.keystore(),
client,
select_chain,
block_import,
env: proposer,
sync_oracle: service.network(),
inherent_data_providers: inherent_data_providers.clone(),
force_authoring: service.config().force_authoring,
time_source: babe_link,
};
let babe = babe::start_babe(babe_config)?;
let select = babe.select(service.on_exit()).then(|_| Ok(()));
service.spawn_task(Box::new(select));
}
let config = grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: std::time::Duration::from_millis(333),
justification_period: 4096,
name: Some(service.config().name.clone()),
keystore: Some(service.keystore()),
};
match (service.config().roles.is_authority(), service.config().disable_grandpa) {
(false, false) => {
// start the lightweight GRANDPA observer
service.spawn_task(Box::new(grandpa::run_grandpa_observer(
config,
link_half,
service.network(),
service.on_exit(),
)?));
},
(true, false) => {
// start the full GRANDPA voter
let grandpa_config = grandpa::GrandpaParams {
config: config,
link: link_half,
network: service.network(),
inherent_data_providers: inherent_data_providers.clone(),
on_exit: service.on_exit(),
telemetry_on_connect: Some(service.telemetry_on_connect_stream()),
};
service.spawn_task(Box::new(grandpa::run_grandpa_voter(grandpa_config)?));
},
(_, true) => {
grandpa::setup_disabled_grandpa(
service.client(),
&inherent_data_providers,
service.network(),
)?;
},
}
Ok((service, inherent_data_providers))
}}
}
/// Builds a new service for a full client.
pub fn new_full<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>)
-> Result<impl AbstractService, ServiceError> {
new_full!(config).map(|(service, _)| service)
}
/// Builds a new service for a light client.
pub fn new_light<C: Send + Default + 'static>(config: Configuration<C, GenesisConfig>)
-> Result<impl AbstractService, ServiceError> {
let inherent_data_providers = InherentDataProviders::new();
ServiceBuilder::new_light::<Block, RuntimeApi, node_executor::Executor>(config)?
.with_select_chain(|_config, client| {
#[allow(deprecated)]
Ok(LongestChain::new(client.backend().clone()))
})?
.with_transaction_pool(|config, client|
Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
)?
.with_import_queue_and_fprb(|_config, client, _select_chain, transaction_pool| {
#[allow(deprecated)]
let fetch_checker = client.backend().blockchain().fetcher()
.upgrade()
.map(|fetcher| fetcher.checker().clone())
.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>(
client.clone(), Arc::new(fetch_checker), client.clone()
)?;
let finality_proof_import = block_import.clone();
let finality_proof_request_builder =
finality_proof_import.create_finality_proof_request_builder();
// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
let (import_queue, ..) = import_queue(
Config::get_or_compute(&*client)?,
block_import,
None,
Some(Box::new(finality_proof_import)),
client.clone(),
client,
inherent_data_providers.clone(),
Some(transaction_pool)
)?;
Ok((import_queue, finality_proof_request_builder))
})?
.with_network_protocol(|_| Ok(NodeProtocol::new()))?
.with_finality_proof_provider(|client|
Ok(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _)
)?
.with_rpc_extensions(|client, pool| {
use node_rpc::accounts::{Accounts, AccountsApi};
let mut io = jsonrpc_core::IoHandler::default();
@@ -288,11 +257,10 @@ construct_service_factory! {
AccountsApi::to_delegate(Accounts::new(client, pool))
);
io
}},
}
})?
.build()
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
@@ -312,9 +280,8 @@ mod tests {
use timestamp;
use finality_tracker;
use keyring::AccountKeyring;
use substrate_service::ServiceFactory;
use service_test::SyncService;
use crate::service::Factory;
use substrate_service::AbstractService;
use crate::service::{new_full, new_light};
#[cfg(feature = "rhd")]
fn test_sync() {
@@ -369,8 +336,10 @@ mod tests {
let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap();
OpaqueExtrinsic(v)
};
service_test::sync::<Factory, _, _>(
service_test::sync(
chain_spec::integration_test_config(),
|config| new_full(config),
|config| new_light(config),
block_factory,
extrinsic_factory,
);
@@ -387,130 +356,127 @@ mod tests {
let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority();
// For the block factory
let mut slot_num = 1u64;
let block_factory = |service: &SyncService<<Factory as ServiceFactory>::FullService>| {
let service = service.get();
let mut inherent_data = service
.config()
.custom
.inherent_data_providers
.create_inherent_data()
.expect("Creates inherent data.");
inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64);
let parent_id = BlockId::number(service.client().info().chain.best_number);
let parent_header = service.client().header(&parent_id).unwrap().unwrap();
let mut proposer_factory = substrate_basic_authorship::ProposerFactory {
client: service.client(),
transaction_pool: service.transaction_pool(),
};
let mut digest = Digest::<H256>::default();
// even though there's only one authority some slots might be empty,
// so we must keep trying the next slots until we can claim one.
let babe_pre_digest = loop {
inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION));
if let Some(babe_pre_digest) = babe::test_helpers::claim_slot(
slot_num,
&parent_header,
&*service.client(),
(278, 1000),
&keystore,
) {
break babe_pre_digest;
}
slot_num += 1;
};
digest.push(<DigestItem as CompatibleDigestItem>::babe_pre_digest(babe_pre_digest));
let mut proposer = proposer_factory.init(&parent_header).unwrap();
let new_block = futures03::executor::block_on(proposer.propose(
inherent_data,
digest,
std::time::Duration::from_secs(1),
)).expect("Error making test block");
let (new_header, new_body) = new_block.deconstruct();
let pre_hash = new_header.hash();
// sign the pre-sealed hash of the block and then
// add it to a digest item.
let to_sign = pre_hash.encode();
let signature = alice.sign(&to_sign[..]);
let item = <DigestItem as CompatibleDigestItem>::babe_seal(
signature.into(),
);
slot_num += 1;
BlockImportParams {
origin: BlockOrigin::File,
header: new_header,
justification: None,
post_digests: vec![item],
body: Some(new_body),
finalized: true,
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
}
};
// For the extrinsics factory
let bob = Arc::new(AccountKeyring::Bob.pair());
let charlie = Arc::new(AccountKeyring::Charlie.pair());
let mut index = 0;
let extrinsic_factory = |service: &SyncService<<Factory as ServiceFactory>::FullService>| {
let amount = 5 * CENTS;
let to = AddressPublic::from_raw(bob.public().0);
let from = AddressPublic::from_raw(charlie.public().0);
let genesis_hash = service.get().client().block_hash(0).unwrap().unwrap();
let best_block_id = BlockId::number(service.get().client().info().chain.best_number);
let version = service.get().client().runtime_version_at(&best_block_id).unwrap().spec_version;
let signer = charlie.clone();
let function = Call::Balances(BalancesCall::transfer(to.into(), amount));
let check_version = system::CheckVersion::new();
let check_genesis = system::CheckGenesis::new();
let check_era = system::CheckEra::from(Era::Immortal);
let check_nonce = system::CheckNonce::from(index);
let check_weight = system::CheckWeight::new();
let take_fees = balances::TakeFees::from(0);
let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees);
let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash);
let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 {
signer.sign(&blake2_256(payload)[..])
} else {
signer.sign(payload)
});
let xt = UncheckedExtrinsic::new_signed(
raw_payload.0,
from.into(),
signature.into(),
extra,
).encode();
let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap();
index += 1;
OpaqueExtrinsic(v)
};
service_test::sync::<Factory, _, _>(
service_test::sync(
chain_spec,
block_factory,
extrinsic_factory,
|config| new_full!(config),
|config| new_light(config),
|service, inherent_data_providers| {
let mut inherent_data = inherent_data_providers
.create_inherent_data()
.expect("Creates inherent data.");
inherent_data.replace_data(finality_tracker::INHERENT_IDENTIFIER, &1u64);
let parent_id = BlockId::number(service.client().info().chain.best_number);
let parent_header = service.client().header(&parent_id).unwrap().unwrap();
let mut proposer_factory = substrate_basic_authorship::ProposerFactory {
client: service.client(),
transaction_pool: service.transaction_pool(),
};
let mut digest = Digest::<H256>::default();
// even though there's only one authority some slots might be empty,
// so we must keep trying the next slots until we can claim one.
let babe_pre_digest = loop {
inherent_data.replace_data(timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION));
if let Some(babe_pre_digest) = babe::test_helpers::claim_slot(
slot_num,
&parent_header,
&*service.client(),
(278, 1000),
&keystore,
) {
break babe_pre_digest;
}
slot_num += 1;
};
digest.push(<DigestItem as CompatibleDigestItem>::babe_pre_digest(babe_pre_digest));
let mut proposer = proposer_factory.init(&parent_header).unwrap();
let new_block = futures03::executor::block_on(proposer.propose(
inherent_data,
digest,
std::time::Duration::from_secs(1),
)).expect("Error making test block");
let (new_header, new_body) = new_block.deconstruct();
let pre_hash = new_header.hash();
// sign the pre-sealed hash of the block and then
// add it to a digest item.
let to_sign = pre_hash.encode();
let signature = alice.sign(&to_sign[..]);
let item = <DigestItem as CompatibleDigestItem>::babe_seal(
signature.into(),
);
slot_num += 1;
BlockImportParams {
origin: BlockOrigin::File,
header: new_header,
justification: None,
post_digests: vec![item],
body: Some(new_body),
finalized: true,
auxiliary: Vec::new(),
fork_choice: ForkChoiceStrategy::LongestChain,
}
},
|service, _| {
let amount = 5 * CENTS;
let to = AddressPublic::from_raw(bob.public().0);
let from = AddressPublic::from_raw(charlie.public().0);
let genesis_hash = service.client().block_hash(0).unwrap().unwrap();
let best_block_id = BlockId::number(service.client().info().chain.best_number);
let version = service.client().runtime_version_at(&best_block_id).unwrap().spec_version;
let signer = charlie.clone();
let function = Call::Balances(BalancesCall::transfer(to.into(), amount));
let check_version = system::CheckVersion::new();
let check_genesis = system::CheckGenesis::new();
let check_era = system::CheckEra::from(Era::Immortal);
let check_nonce = system::CheckNonce::from(index);
let check_weight = system::CheckWeight::new();
let take_fees = balances::TakeFees::from(0);
let extra = (check_version, check_genesis, check_era, check_nonce, check_weight, take_fees);
let raw_payload = (function, extra.clone(), version, genesis_hash, genesis_hash);
let signature = raw_payload.using_encoded(|payload| if payload.len() > 256 {
signer.sign(&blake2_256(payload)[..])
} else {
signer.sign(payload)
});
let xt = UncheckedExtrinsic::new_signed(
raw_payload.0,
from.into(),
signature.into(),
extra,
).encode();
let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap();
index += 1;
OpaqueExtrinsic(v)
},
);
}
#[test]
#[ignore]
fn test_consensus() {
use super::Factory;
service_test::consensus::<Factory>(
service_test::consensus(
crate::chain_spec::tests::integration_test_config_with_two_authorities(),
|config| new_full(config),
|config| new_light(config),
vec![
"//Alice".into(),
"//Bob".into(),
+1 -1
View File
@@ -8,7 +8,7 @@ edition = "2018"
env_logger = "0.6"
futures = "0.1.26"
hyper = "0.12"
jsonrpc-core-client = { version = "13.0.0", features = ["http", "ws"] }
jsonrpc-core-client = { version = "13.1.0", features = ["http", "ws"] }
log = "0.4"
node-primitives = { path = "../primitives" }
substrate-rpc = { path = "../../core/rpc", version = "2.0.0" }
+4 -4
View File
@@ -6,10 +6,10 @@ edition = "2018"
[dependencies]
client = { package = "substrate-client", path = "../../core/client" }
jsonrpc-core = "13.0.0"
jsonrpc-core-client = "13.0.0"
jsonrpc-derive = "13.0.0"
jsonrpc-pubsub = "13.0.0"
jsonrpc-core = "13.1.0"
jsonrpc-core-client = "13.1.0"
jsonrpc-derive = "13.1.0"
jsonrpc-pubsub = "13.1.0"
keyring = { package = "substrate-keyring", path = "../../core/keyring" }
log = "0.4"
node-primitives = { path = "../primitives" }