Integration tests (#805)

* Started substrate tests

* Sync test

* Test updates

* Improved tests

* Use on-chain block delay

* Parallel test execution

* Optimized tests

* Logging

* Fixed racing test

* Fixed compilation

* Fixed timestamp test

* Removed rlp dependency

* Minor fixes

* Fixed tests

* Removed best_block_id and resolved fdlimit issue

* Whitespace

* Use keyring

* Style

* Added API execution setting

* Removed stale import
This commit is contained in:
Arkadiy Paronyan
2018-09-28 11:37:55 +02:00
committed by Gav Wood
parent 955a5393d8
commit 9a660f82ed
30 changed files with 590 additions and 140 deletions
+23
View File
@@ -1701,14 +1701,20 @@ dependencies = [
"node-primitives 0.1.0",
"node-runtime 0.1.0",
"node-transaction-pool 0.1.0",
"parity-codec 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rhododendron 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"substrate-bft 0.1.0",
"substrate-client 0.1.0",
"substrate-keyring 0.1.0",
"substrate-network 0.1.0",
"substrate-primitives 0.1.0",
"substrate-service 0.3.0",
"substrate-service-test 0.3.0",
"substrate-telemetry 0.3.0",
"substrate-test-client 0.1.0",
"tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -3123,6 +3129,23 @@ dependencies = [
"tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "substrate-service-test"
version = "0.3.0"
dependencies = [
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"substrate-client 0.1.0",
"substrate-network 0.1.0",
"substrate-primitives 0.1.0",
"substrate-service 0.3.0",
"tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "substrate-state-db"
version = "0.1.0"
+1
View File
@@ -53,6 +53,7 @@ members = [
"srml/treasury",
"core/serializer",
"core/service",
"core/service/test",
"core/state-db",
"core/state-machine",
"core/test-runtime",
+4
View File
@@ -203,6 +203,10 @@ subcommands:
- execution:
long: execution
value_name: STRATEGY
help: The means of execution used when executing blocks. Can be either wasm, native or both.
- api-execution:
long: api-execution
value_name: STRATEGY
help: The means of execution used when calling into the runtime. Can be either wasm, native or both.
- max-heap-pages:
long: max-heap-pages
+15 -6
View File
@@ -261,18 +261,18 @@ where
let role =
if matches.is_present("light") {
config.execution_strategy = service::ExecutionStrategy::NativeWhenPossible;
config.block_execution_strategy = service::ExecutionStrategy::NativeWhenPossible;
service::Roles::LIGHT
} else if matches.is_present("validator") || matches.is_present("dev") {
config.execution_strategy = service::ExecutionStrategy::Both;
config.block_execution_strategy = service::ExecutionStrategy::Both;
service::Roles::AUTHORITY
} else {
config.execution_strategy = service::ExecutionStrategy::NativeWhenPossible;
config.block_execution_strategy = service::ExecutionStrategy::NativeWhenPossible;
service::Roles::FULL
};
if let Some(s) = matches.value_of("execution") {
config.execution_strategy = match s {
config.block_execution_strategy = match s {
"both" => service::ExecutionStrategy::Both,
"native" => service::ExecutionStrategy::NativeWhenPossible,
"wasm" => service::ExecutionStrategy::AlwaysWasm,
@@ -400,11 +400,20 @@ fn import_blocks<F, E>(matches: &clap::ArgMatches, spec: ChainSpec<FactoryGenesi
config.database_path = db_path(&base_path, config.chain_spec.id()).to_string_lossy().into();
if let Some(s) = matches.value_of("execution") {
config.execution_strategy = match s {
config.block_execution_strategy = match s {
"both" => service::ExecutionStrategy::Both,
"native" => service::ExecutionStrategy::NativeWhenPossible,
"wasm" => service::ExecutionStrategy::AlwaysWasm,
_ => return Err(error::ErrorKind::Input("Invalid execution mode specified".to_owned()).into()),
_ => return Err(error::ErrorKind::Input("Invalid block execution mode specified".to_owned()).into()),
};
}
if let Some(s) = matches.value_of("api-execution") {
config.api_execution_strategy = match s {
"both" => service::ExecutionStrategy::Both,
"native" => service::ExecutionStrategy::NativeWhenPossible,
"wasm" => service::ExecutionStrategy::AlwaysWasm,
_ => return Err(error::ErrorKind::Input("Invalid API execution mode specified".to_owned()).into()),
};
}
+4 -2
View File
@@ -100,7 +100,8 @@ pub fn new_client<E, S, Block>(
settings: DatabaseSettings,
executor: E,
genesis_storage: S,
execution_strategy: ExecutionStrategy,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
) -> Result<client::Client<Backend<Block>, client::LocalCallExecutor<Backend<Block>, E>, Block>, client::error::Error>
where
Block: BlockT,
@@ -109,7 +110,7 @@ pub fn new_client<E, S, Block>(
{
let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?);
let executor = client::LocalCallExecutor::new(backend.clone(), executor);
Ok(client::Client::new(backend, executor, genesis_storage, execution_strategy)?)
Ok(client::Client::new(backend, executor, genesis_storage, block_execution_strategy, api_execution_strategy)?)
}
mod columns {
@@ -196,6 +197,7 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
best_number: meta.best_number,
genesis_hash: meta.genesis_hash,
finalized_hash: meta.finalized_hash,
finalized_number: meta.finalized_number,
})
}
+1
View File
@@ -154,6 +154,7 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
best_number: meta.best_number,
genesis_hash: meta.genesis_hash,
finalized_hash: meta.finalized_hash,
finalized_number: meta.finalized_number,
})
}
+2
View File
@@ -89,6 +89,8 @@ pub struct Info<Block: BlockT> {
pub genesis_hash: Block::Hash,
/// The head of the finalized chain.
pub finalized_hash: Block::Hash,
/// Last finalized block number.
pub finalized_number: <<Block as BlockT>::Header as HeaderT>::Number,
}
/// Block status.
+19 -12
View File
@@ -54,7 +54,8 @@ pub struct Client<B, E, Block> where Block: BlockT {
finality_notification_sinks: Mutex<Vec<mpsc::UnboundedSender<FinalityNotification<Block>>>>,
import_lock: Mutex<()>,
importing_block: RwLock<Option<Block::Hash>>, // holds the block hash currently being imported. TODO: replace this with block queue
execution_strategy: ExecutionStrategy,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
}
/// A source of blockchain events.
@@ -208,7 +209,7 @@ pub fn new_with_backend<B, E, Block, S>(
B: backend::LocalBackend<Block, Blake2Hasher>
{
let call_executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible)
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
}
impl<B, E, Block> Client<B, E, Block> where
@@ -221,7 +222,8 @@ impl<B, E, Block> Client<B, E, Block> where
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
execution_strategy: ExecutionStrategy,
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
) -> error::Result<Self> {
if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() {
let genesis_storage = build_genesis_storage.build_storage()?;
@@ -245,7 +247,8 @@ impl<B, E, Block> Client<B, E, Block> where
finality_notification_sinks: Default::default(),
import_lock: Default::default(),
importing_block: Default::default(),
execution_strategy,
block_execution_strategy,
api_execution_strategy,
})
}
@@ -369,13 +372,17 @@ impl<B, E, Block> Client<B, E, Block> where
);
self.state_at(&parent).and_then(|state| {
let mut overlay = Default::default();
let execution_manager = || ExecutionManager::Both(|wasm_result, native_result| {
warn!("Consensus error between wasm and native runtime execution at block {:?}", at);
warn!(" Function {:?}", function);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
wasm_result
});
let execution_manager = || match self.api_execution_strategy {
ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible,
ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm,
ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| {
warn!("Consensus error between wasm and native runtime execution at block {:?}", at);
warn!(" Function {:?}", function);
warn!(" Native result {:?}", native_result);
warn!(" Wasm result {:?}", wasm_result);
wasm_result
}),
};
self.executor().call_at_state(
&state,
&mut overlay,
@@ -499,7 +506,7 @@ impl<B, E, Block> Client<B, E, Block> where
&mut overlay,
"execute_block",
&<Block as BlockT>::new(header.clone(), body.clone().unwrap_or_default()).encode(),
match (origin, self.execution_strategy) {
match (origin, self.block_execution_strategy) {
(BlockOrigin::NetworkInitialSync, _) | (_, ExecutionStrategy::NativeWhenPossible) =>
ExecutionManager::NativeWhenPossible,
(_, ExecutionStrategy::AlwaysWasm) => ExecutionManager::AlwaysWasm,
+4
View File
@@ -92,6 +92,7 @@ struct BlockchainStorage<Block: BlockT> {
best_hash: Block::Hash,
best_number: NumberFor<Block>,
finalized_hash: Block::Hash,
finalized_number: NumberFor<Block>,
genesis_hash: Block::Hash,
cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
leaves: LeafSet<Block::Hash, NumberFor<Block>>,
@@ -139,6 +140,7 @@ impl<Block: BlockT> Blockchain<Block> {
best_hash: Default::default(),
best_number: Zero::zero(),
finalized_hash: Default::default(),
finalized_number: Zero::zero(),
genesis_hash: Default::default(),
cht_roots: HashMap::new(),
leaves: LeafSet::new(),
@@ -206,6 +208,7 @@ impl<Block: BlockT> Blockchain<Block> {
if let NewBlockState::Final = new_state {
storage.finalized_hash = hash;
storage.finalized_number = number.clone();
}
if number == Zero::zero() {
@@ -260,6 +263,7 @@ impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
best_number: storage.best_number,
genesis_hash: storage.genesis_hash,
finalized_hash: storage.finalized_hash,
finalized_number: storage.finalized_number,
})
}
+1 -1
View File
@@ -60,7 +60,7 @@ pub fn new_light<B, S, F, GS>(
GS: BuildStorage,
{
let executor = RemoteCallExecutor::new(backend.blockchain().clone(), fetcher);
Client::new(backend, executor, genesis_storage, ExecutionStrategy::NativeWhenPossible)
Client::new(backend, executor, genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
}
/// Create an instance of fetch data checker.
+1 -1
View File
@@ -61,7 +61,7 @@ pub use service::{Service, FetchFuture, ConsensusService, BftMessageStream,
TransactionPool, Params, ManageNetwork, SyncProvider};
pub use protocol::{ProtocolStatus, PeerInfo, Context};
pub use sync::{Status as SyncStatus, SyncState};
pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeIndex, ProtocolId, Severity};
pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeIndex, ProtocolId, Severity, Protocol};
pub use message::{generic as generic_message, RequestId, BftMessage, LocalizedBftMessage, ConsensusVote, SignedConsensusVote, SignedConsensusMessage, SignedConsensusProposal, Status as StatusMessage};
pub use error::Error;
pub use config::{Roles, ProtocolConfig};
+20 -1
View File
@@ -30,6 +30,16 @@ enum GenesisSource<G> {
Factory(fn() -> G),
}
impl<G: RuntimeGenesis> Clone for GenesisSource<G> {
fn clone(&self) -> Self {
match *self {
GenesisSource::File(ref path) => GenesisSource::File(path.clone()),
GenesisSource::Embedded(d) => GenesisSource::Embedded(d),
GenesisSource::Factory(f) => GenesisSource::Factory(f),
}
}
}
impl<G: RuntimeGenesis> GenesisSource<G> {
fn resolve(&self) -> Result<Genesis<G>, String> {
#[derive(Serialize, Deserialize)]
@@ -69,7 +79,7 @@ enum Genesis<G> {
Raw(HashMap<StorageKey, StorageData>),
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct ChainSpecFile {
pub name: String,
@@ -85,6 +95,15 @@ pub struct ChainSpec<G: RuntimeGenesis> {
genesis: GenesisSource<G>,
}
impl<G: RuntimeGenesis> Clone for ChainSpec<G> {
fn clone(&self) -> Self {
ChainSpec {
spec: self.spec.clone(),
genesis: self.genesis.clone(),
}
}
}
impl<G: RuntimeGenesis> ChainSpec<G> {
pub fn boot_nodes(&self) -> &[String] {
&self.spec.boot_nodes
+31 -6
View File
@@ -19,11 +19,13 @@
use std::fmt;
use std::sync::Arc;
use std::marker::PhantomData;
use std::ops::Deref;
use serde::{Serialize, de::DeserializeOwned};
use tokio::runtime::TaskExecutor;
use chain_spec::ChainSpec;
use client_db;
use client::{self, Client};
use error;
use {error, Service};
use network::{self, OnDemand};
use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
use transaction_pool::{self, Options as TransactionPoolOptions, Pool as TransactionPool};
@@ -83,6 +85,9 @@ pub type FactoryGenesis<F> = <F as ServiceFactory>::Genesis;
/// `Block` type for a factory.
pub type FactoryBlock<F> = <F as ServiceFactory>::Block;
/// `Extrinsic` type for a factory.
pub type FactoryExtrinsic<F> = <<F as ServiceFactory>::Block as BlockT>::Extrinsic;
/// `Number` type for a factory.
pub type FactoryBlockNumber<F> = <<FactoryBlock<F> as BlockT>::Header as HeaderT>::Number;
@@ -113,7 +118,7 @@ pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {}
impl<T: Serialize + DeserializeOwned + BuildStorage> RuntimeGenesis for T {}
/// A collection of types and methods to build a service on top of the substrate service.
pub trait ServiceFactory: 'static {
pub trait ServiceFactory: 'static + Sized {
/// Block type.
type Block: BlockT;
/// Extrinsic hash type.
@@ -123,13 +128,17 @@ pub trait ServiceFactory: 'static {
/// Chain runtime.
type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static;
/// Extrinsic pool backend type for the full client.
type FullTransactionPoolApi: transaction_pool::ChainApi<Hash=Self::ExtrinsicHash, Block=Self::Block> + Send + 'static;
type FullTransactionPoolApi: transaction_pool::ChainApi<Hash = Self::ExtrinsicHash, Block = Self::Block> + Send + 'static;
/// Extrinsic pool backend type for the light client.
type LightTransactionPoolApi: transaction_pool::ChainApi<Hash=Self::ExtrinsicHash, Block=Self::Block> + 'static;
type LightTransactionPoolApi: transaction_pool::ChainApi<Hash = Self::ExtrinsicHash, Block = Self::Block> + 'static;
/// Genesis configuration for the runtime.
type Genesis: RuntimeGenesis;
/// Other configuration for service members.
type Configuration: Default;
/// Extended full service type.
type FullService: Deref<Target = Service<FullComponents<Self>>> + Send + Sync + 'static;
/// Extended light service type.
type LightService: Deref<Target = Service<LightComponents<Self>>> + Send + Sync + 'static;
//TODO: replace these with a constructor trait. that TransactionPool implements.
/// Extrinsic pool constructor for the full client.
@@ -142,6 +151,13 @@ pub trait ServiceFactory: 'static {
/// Build network protocol.
fn build_network_protocol(config: &FactoryFullConfiguration<Self>)
-> Result<Self::NetworkProtocol, error::Error>;
/// Build full service.
fn new_full(config: FactoryFullConfiguration<Self>, executor: TaskExecutor)
-> Result<Self::FullService, error::Error>;
/// Build light service.
fn new_light(config: FactoryFullConfiguration<Self>, executor: TaskExecutor)
-> Result<Self::LightService, error::Error>;
}
/// A collection of types and function to generalise over full / light client type.
@@ -153,7 +169,10 @@ pub trait Components: 'static {
/// Client executor.
type Executor: 'static + client::CallExecutor<FactoryBlock<Self::Factory>, Blake2Hasher> + Send + Sync;
/// Extrinsic pool type.
type TransactionPoolApi: 'static + transaction_pool::ChainApi<Hash=<Self::Factory as ServiceFactory>::ExtrinsicHash, Block=FactoryBlock<Self::Factory>>;
type TransactionPoolApi: 'static + transaction_pool::ChainApi<
Hash = <Self::Factory as ServiceFactory>::ExtrinsicHash,
Block = FactoryBlock<Self::Factory>
>;
/// Create client.
fn build_client(
@@ -195,7 +214,13 @@ impl<Factory: ServiceFactory> Components for FullComponents<Factory> {
path: config.database_path.as_str().into(),
pruning: config.pruning.clone(),
};
Ok((Arc::new(client_db::new_client(db_settings, executor, &config.chain_spec, config.execution_strategy)?), None))
Ok((Arc::new(client_db::new_client(
db_settings,
executor,
&config.chain_spec,
config.block_execution_strategy,
config.api_execution_strategy,
)?), None))
}
fn build_transaction_pool(config: TransactionPoolOptions, client: Arc<ComponentClient<Self>>)
+7 -6
View File
@@ -28,6 +28,7 @@ use serde::{Serialize, de::DeserializeOwned};
use target_info::Target;
/// Service configuration.
#[derive(Clone)]
pub struct Configuration<C, G: Serialize + DeserializeOwned + BuildStorage> {
/// Implementation name
pub impl_name: &'static str,
@@ -53,12 +54,12 @@ pub struct Configuration<C, G: Serialize + DeserializeOwned + BuildStorage> {
pub chain_spec: ChainSpec<G>,
/// Custom configuration.
pub custom: C,
/// Telemetry server URL, optional - only `Some` if telemetry reporting is enabled
pub telemetry: Option<String>,
/// Node name.
pub name: String,
/// Execution strategy.
pub execution_strategy: ExecutionStrategy,
/// Block execution strategy.
pub block_execution_strategy: ExecutionStrategy,
/// Runtime API execution strategy.
pub api_execution_strategy: ExecutionStrategy,
/// RPC over HTTP binding address. `None` if disabled.
pub rpc_http: Option<SocketAddr>,
/// RPC over Websockets binding address. `None` if disabled.
@@ -83,9 +84,9 @@ impl<C: Default, G: Serialize + DeserializeOwned + BuildStorage> Configuration<C
database_path: Default::default(),
keys: Default::default(),
custom: Default::default(),
telemetry: Default::default(),
pruning: PruningMode::default(),
execution_strategy: ExecutionStrategy::Both,
block_execution_strategy: ExecutionStrategy::Both,
api_execution_strategy: ExecutionStrategy::Both,
rpc_http: None,
rpc_ws: None,
telemetry_url: None,
+13 -7
View File
@@ -25,6 +25,7 @@ extern crate futures;
extern crate exit_future;
extern crate serde;
extern crate serde_json;
extern crate parking_lot;
extern crate substrate_keystore as keystore;
extern crate substrate_primitives as primitives;
extern crate sr_primitives as runtime_primitives;
@@ -61,6 +62,7 @@ use std::net::SocketAddr;
use std::sync::Arc;
use std::collections::HashMap;
use futures::prelude::*;
use parking_lot::Mutex;
use keystore::Store as Keystore;
use client::BlockchainEvents;
use runtime_primitives::traits::{Header, As};
@@ -81,7 +83,7 @@ pub use components::{ServiceFactory, FullBackend, FullExecutor, LightBackend,
ComponentBlock, FullClient, LightClient, FullComponents, LightComponents,
CodeExecutor, NetworkService, FactoryChainSpec, FactoryBlock,
FactoryFullConfiguration, RuntimeGenesis, FactoryGenesis,
ComponentExHash, ComponentExtrinsic,
ComponentExHash, ComponentExtrinsic, FactoryExtrinsic,
};
const DEFAULT_PROTOCOL_ID: &'static str = "sup";
@@ -95,7 +97,7 @@ pub struct Service<Components: components::Components> {
exit: ::exit_future::Exit,
signal: Option<Signal>,
_rpc_http: Option<rpc::HttpServer>,
_rpc_ws: Option<rpc::WsServer>,
_rpc_ws: Option<Mutex<rpc::WsServer>>, // WsServer is not `Sync`, but the service needs to be.
_telemetry: Option<tel::Telemetry>,
}
@@ -186,12 +188,14 @@ impl<Components> Service<Components>
{
// block notifications
let network = network.clone();
let network = Arc::downgrade(&network);
let txpool = transaction_pool.clone();
let events = client.import_notification_stream()
.for_each(move |notification| {
network.on_block_imported(notification.hash, &notification.header);
if let Some(network) = network.upgrade() {
network.on_block_imported(notification.hash, &notification.header);
}
txpool.cull(&BlockId::hash(notification.hash))
.map_err(|e| warn!("Error removing extrinsics: {:?}", e))?;
Ok(())
@@ -203,11 +207,13 @@ impl<Components> Service<Components>
{
// extrinsic notifications
let network = network.clone();
let network = Arc::downgrade(&network);
let events = transaction_pool.import_notification_stream()
// TODO [ToDr] Consider throttling?
.for_each(move |_| {
network.trigger_repropagate();
if let Some(network) = network.upgrade() {
network.trigger_repropagate();
}
Ok(())
})
.select(exit.clone())
@@ -277,7 +283,7 @@ impl<Components> Service<Components>
keystore: keystore,
exit,
_rpc_http: rpc_http,
_rpc_ws: rpc_ws,
_rpc_ws: rpc_ws.map(Mutex::new),
_telemetry: telemetry,
})
}
+17
View File
@@ -0,0 +1,17 @@
[package]
name = "substrate-service-test"
version = "0.3.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
tempdir = "0.3"
tokio = "0.1.7"
futures = "0.1"
log = "0.3"
env_logger = "0.4"
fdlimit = "0.1"
substrate-service = { path = "../../../core/service" }
substrate-network = { path = "../../../core/network" }
substrate-primitives = { path = "../../../core/primitives" }
substrate-client = { path = "../../../core/client" }
sr-primitives = { path = "../../../core/sr-primitives" }
+247
View File
@@ -0,0 +1,247 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Service integration test utils.
#[macro_use]
extern crate log;
extern crate tempdir;
extern crate tokio;
extern crate futures;
extern crate env_logger;
extern crate fdlimit;
extern crate substrate_service as service;
extern crate substrate_network as network;
extern crate substrate_primitives as primitives;
extern crate substrate_client as client;
extern crate sr_primitives;
use std::iter;
use std::sync::Arc;
use std::net::Ipv4Addr;
use std::time::Duration;
use futures::Stream;
use tempdir::TempDir;
use tokio::runtime::Runtime;
use tokio::timer::Interval;
use primitives::blake2_256;
use service::{
ServiceFactory,
ExecutionStrategy,
Configuration,
FactoryFullConfiguration,
FactoryChainSpec,
Roles,
FactoryExtrinsic,
};
use network::{NetworkConfiguration, NonReservedPeerMode, Protocol, SyncProvider, ManageNetwork};
use client::{BlockOrigin, JustifiedHeader};
use sr_primitives::traits::As;
/// A local multi-node test network built from a `ServiceFactory`.
///
/// Holds the shared tokio runtime plus the spawned services, each paired
/// with its numeric node index. Node lists are wrapped in `Arc` so they can
/// be moved into closures polled on the runtime (see `run_until_all_full`).
struct TestNet<F: ServiceFactory> {
	// Tokio runtime driving all node services and the polling intervals.
	runtime: Runtime,
	// Authority (block-authoring) full nodes, as (node index, service).
	authority_nodes: Arc<Vec<(u32, F::FullService)>>,
	// Non-authority full nodes, as (node index, service).
	full_nodes: Arc<Vec<(u32, F::FullService)>>,
	// Light-client nodes; kept alive but never inspected by the current tests.
	_light_nodes: Arc<Vec<(u32, F::LightService)>>,
}
impl<F: ServiceFactory> TestNet<F> {
	/// Blocks until `predicate(index, service)` returns true for *every* full
	/// node (authorities are not polled here).
	///
	/// Implementation note: polls every 100 ms on the shared runtime; the
	/// `for_each` future is terminated by returning `Err(())` once all nodes
	/// satisfy the predicate, so the final `.ok()` deliberately discards that
	/// sentinel error. NOTE(review): there is no timeout — a predicate that
	/// never holds will block forever.
	pub fn run_until_all_full<P: Send + Sync + Fn(u32, &F::FullService) -> bool + 'static>(&mut self, predicate: P) {
		// Clone the Arc so the node list can move into the 'static closure.
		let full_nodes = self.full_nodes.clone();
		let interval = Interval::new_interval(Duration::from_millis(100)).map_err(|_| ()).for_each(move |_| {
			if full_nodes.iter().all(|&(ref id, ref service)| predicate(*id, service)) {
				// All nodes satisfied the predicate: stop the interval stream.
				Err(())
			} else {
				// Keep polling.
				Ok(())
			}
		});
		self.runtime.block_on(interval).ok();
	}
}
/// Deterministic key-seed string for a test node: the letter `N` followed by
/// the node's index (e.g. index 3 -> "N3"). Hashed by `node_config` to derive
/// the node's network secret, so the same index always yields the same key.
fn node_private_key_string(index: u32) -> String {
	let mut seed = String::from("N");
	seed.push_str(&index.to_string());
	seed
}
/// Builds a full service `Configuration` for one test node.
///
/// * `index` – node number; used for the on-disk directory, the listen port
///   offset, the derived network key, and the node name.
/// * `spec` – chain spec shared by all nodes in the network (cloned here).
/// * `role` – FULL / LIGHT / AUTHORITY role flags.
/// * `key_seed` – optional authority key seed added to the keystore keys.
/// * `base_port` – first TCP port; node `index` listens on `base_port + index`.
/// * `root` – temp directory under which this node's state lives.
fn node_config<F: ServiceFactory> (
	index: u32,
	spec: &FactoryChainSpec<F>,
	role: Roles,
	key_seed: Option<String>,
	base_port: u16,
	root: &TempDir,
) -> FactoryFullConfiguration<F>
{
	// Each node gets its own subdirectory so parallel nodes never collide.
	let root = root.path().join(format!("node-{}", index));
	let mut keys = Vec::new();
	if let Some(seed) = key_seed {
		keys.push(seed);
	}
	let network_config = NetworkConfiguration {
		config_path: Some(root.join("network").to_str().unwrap().into()),
		net_config_path: Some(root.join("network").to_str().unwrap().into()),
		// Listen only on loopback, on a port unique to this node index.
		listen_addresses: vec! [
			iter::once(Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1)))
				.chain(iter::once(Protocol::Tcp(base_port + index as u16)))
				.collect()
		],
		public_addresses: vec![],
		boot_nodes: vec![],
		// Deterministic network identity derived from the node index.
		use_secret: Some(blake2_256(node_private_key_string(index).as_bytes())),
		min_peers: 25,
		max_peers: 500,
		reserved_nodes: vec![],
		non_reserved_mode: NonReservedPeerMode::Accept,
		client_version: "network/test/0.1".to_owned(),
	};
	Configuration {
		impl_name: "network-test-impl",
		impl_version: "0.1",
		impl_commit: "",
		roles: role,
		transaction_pool: Default::default(),
		network: network_config,
		keystore_path: root.join("key").to_str().unwrap().into(),
		database_path: root.join("db").to_str().unwrap().into(),
		pruning: Default::default(),
		keys: keys,
		chain_spec: (*spec).clone(),
		custom: Default::default(),
		name: format!("Node {}", index),
		// Tests run native-only in both strategies for speed and determinism.
		block_execution_strategy: ExecutionStrategy::NativeWhenPossible,
		api_execution_strategy: ExecutionStrategy::NativeWhenPossible,
		// RPC and telemetry are disabled for test nodes.
		rpc_http: None,
		rpc_ws: None,
		telemetry_url: None,
	}
}
impl<F: ServiceFactory> TestNet<F> {
	/// Spawns a fresh test network under `temp`.
	///
	/// Node indices are assigned contiguously: authorities first
	/// (one per entry of `authorities`, each seeded with its key), then
	/// `full` plain full nodes, then `light` light nodes. Each index maps to
	/// a distinct port via `base_port + index` (see `node_config`).
	fn new(temp: &TempDir, spec: FactoryChainSpec<F>, full: u32, light: u32, authorities: Vec<String>, base_port: u16) -> TestNet<F> {
		// Best-effort global setup; `init` may already have run in-process.
		::env_logger::init().ok();
		// Raise the fd limit — many nodes means many sockets/db handles.
		::fdlimit::raise_fd_limit();
		let runtime = Runtime::new().expect("Error creating tokio runtime");
		let authority_nodes = authorities.iter().enumerate().map(|(index, key)| (index as u32,
			F::new_full(node_config::<F>(index as u32, &spec, Roles::AUTHORITY, Some(key.clone()), base_port, &temp), runtime.executor())
				.expect("Error creating test node service"))
		).collect();
		// Shadowing: from here on, `authorities` is the count, not the key list.
		let authorities = authorities.len() as u32;
		let full_nodes = (authorities..full + authorities).map(|index| (index,
			F::new_full(node_config::<F>(index, &spec, Roles::FULL, None, base_port, &temp), runtime.executor())
				.expect("Error creating test node service"))
		).collect();
		let light_nodes = (full + authorities..full + authorities + light).map(|index| (index,
			F::new_light(node_config::<F>(index, &spec, Roles::LIGHT, None, base_port, &temp), runtime.executor())
				.expect("Error creating test node service"))
		).collect();
		TestNet {
			runtime,
			authority_nodes: Arc::new(authority_nodes),
			full_nodes: Arc::new(full_nodes),
			_light_nodes: Arc::new(light_nodes),
		}
	}
}
/// Integration check: peers discover each other through reserved-peer links.
///
/// Runs two scenarios with `NUM_NODES` full nodes each and waits until every
/// node reports `NUM_NODES - 1` peers:
/// 1. star topology — every node reserves the first node;
/// 2. linked (chain) topology — each node reserves its predecessor.
pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) {
	const NUM_NODES: u32 = 10;
	{
		let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir");
		{
			let mut network = TestNet::<F>::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30400);
			info!("Checking star topology");
			// Hub of the star: every other node connects to node 0.
			let first_address = network.full_nodes[0].1.network().node_id().unwrap();
			for (_, service) in network.full_nodes.iter().skip(1) {
				service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer");
			}
			// Full connectivity: each node should see all other nodes as peers.
			network.run_until_all_full(|_index, service|
				service.network().status().num_peers == NUM_NODES as usize - 1
			);
		}
		// Inner scope dropped the network first; now remove its state on disk.
		temp.close().expect("Error removing temp dir");
	}
	{
		let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir");
		{
			let mut network = TestNet::<F>::new(&temp, spec, NUM_NODES as u32, 0, vec![], 30400);
			info!("Checking linked topology");
			// Chain topology: node N reserves node N-1's address.
			let mut address = network.full_nodes[0].1.network().node_id().unwrap();
			for (_, service) in network.full_nodes.iter().skip(1) {
				service.network().add_reserved_peer(address.clone()).expect("Error adding reserved peer");
				address = service.network().node_id().unwrap();
			}
			network.run_until_all_full(|_index, service| {
				service.network().status().num_peers == NUM_NODES as usize - 1
			});
		}
		temp.close().expect("Error removing temp dir");
	}
}
/// Integration check: blocks imported on one node sync to all other nodes.
///
/// `block_factory` produces the next (header, optional body) to import on
/// the first node; `NUM_BLOCKS` blocks are imported there, then every other
/// node connects to it and the test waits until all nodes report that best
/// block number.
pub fn sync<F, B>(spec: FactoryChainSpec<F>, block_factory: B)
where
	F: ServiceFactory,
	B: Fn(&F::FullService) -> (JustifiedHeader<F::Block>, Option<Vec<FactoryExtrinsic<F>>>),
{
	const NUM_NODES: u32 = 10;
	const NUM_BLOCKS: usize = 512;
	let temp = TempDir::new("substrate-sync-test").expect("Error creating test dir");
	let mut network = TestNet::<F>::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30500);
	info!("Checking block sync");
	let first_address = {
		// Import the whole chain on node 0 only; the rest must sync it.
		let first_service = &network.full_nodes[0].1;
		for i in 0 .. NUM_BLOCKS {
			if i % 128 == 0 {
				// Progress logging only; importing 512 blocks takes a while.
				info!("Generating #{}", i);
			}
			let (header, body) = block_factory(&first_service);
			first_service.client().import_block(BlockOrigin::File, header, body, true).expect("Error importing test block");
		}
		first_service.network().node_id().unwrap()
	};
	info!("Running sync");
	// Star topology around the node that holds the chain.
	for (_, service) in network.full_nodes.iter().skip(1) {
		service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer");
	}
	// Done when every node's best block matches the number of imported blocks.
	network.run_until_all_full(|_index, service| {
		service.client().info().unwrap().chain.best_number == As::sa(NUM_BLOCKS as u64)
	});
}
pub fn consensus<F>(spec: FactoryChainSpec<F>, authorities: Vec<String>)
where
F: ServiceFactory,
{
const NUM_NODES: u32 = 10;
const NUM_BLOCKS: u64 = 200;
info!("Checking consensus");
let temp = TempDir::new("substrate-conensus-test").expect("Error creating test dir");
let mut network = TestNet::<F>::new(&temp, spec.clone(), NUM_NODES, 0, authorities, 30600);
let first_address = network.authority_nodes[0].1.network().node_id().unwrap();
for (_, service) in network.full_nodes.iter() {
service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer");
}
for (_, service) in network.authority_nodes.iter().skip(1) {
service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer");
}
network.run_until_all_full(|_index, service| {
service.client().info().unwrap().chain.finalized_number >= As::sa(NUM_BLOCKS)
});
}
+1 -1
View File
@@ -14,4 +14,4 @@ trie-db = { git = "https://github.com/paritytech/trie" }
trie-root = { git = "https://github.com/paritytech/trie" }
substrate-trie = { path = "../trie" }
substrate-primitives = { path = "../primitives" }
parity-codec = "2.0"
parity-codec = "2.0"
+12 -10
View File
@@ -18,6 +18,8 @@
use client::{self, Client};
use keyring::Keyring;
use primitives::ed25519;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT};
use runtime_primitives::generic::BlockId;
use primitives::Blake2Hasher;
use runtime;
@@ -41,7 +43,13 @@ impl<B, E> TestClient for Client<B, E, runtime::Block>
E: client::CallExecutor<runtime::Block, Blake2Hasher>
{
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> {
let justification = fake_justify(&block.header);
let authorities: [ed25519::Pair; 3] = [
Keyring::Alice.into(),
Keyring::Bob.into(),
Keyring::Charlie.into(),
];
let keys: Vec<&ed25519::Pair> = authorities.iter().collect();
let justification = fake_justify::<runtime::Block>(&block.header, &keys);
let justified = self.check_justification(block.header, justification)?;
self.import_block(origin, justified, Some(block.extrinsics), false)?;
@@ -63,21 +71,15 @@ impl<B, E> TestClient for Client<B, E, runtime::Block>
/// headers.
/// TODO: remove this in favor of custom verification pipelines for the
/// client
fn fake_justify(header: &runtime::Header) -> bft::UncheckedJustification<runtime::Hash> {
pub fn fake_justify<Block: BlockT>(header: &Block::Header, authorities: &[&ed25519::Pair]) -> bft::UncheckedJustification<Block::Hash> {
let hash = header.hash();
let authorities = vec![
Keyring::Alice.into(),
Keyring::Bob.into(),
Keyring::Charlie.into(),
];
bft::UncheckedJustification::new(
hash,
authorities.iter().map(|key| {
let msg = bft::sign_message::<runtime::Block>(
let msg = bft::sign_message::<Block>(
::rhododendron::Vote::Commit(1, hash).into(),
key,
header.parent_hash
header.parent_hash().clone(),
);
match msg {
+2 -3
View File
@@ -35,14 +35,13 @@ pub mod client_ext;
pub mod trait_tests;
mod block_builder_ext;
use std::sync::Arc;
pub use client_ext::TestClient;
pub use client_ext::{TestClient, fake_justify};
pub use block_builder_ext::BlockBuilderExt;
pub use client::blockchain;
pub use client::backend;
pub use executor::NativeExecutor;
use std::sync::Arc;
use primitives::Blake2Hasher;
use runtime_primitives::StorageMap;
use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
+3 -3
View File
@@ -31,7 +31,7 @@ extern crate log;
pub use cli::error;
use tokio::runtime::Runtime;
pub use service::{Components as ServiceComponents, Service, CustomConfiguration};
pub use service::{Components as ServiceComponents, Service, CustomConfiguration, ServiceFactory};
pub use cli::{VersionInfo, IntoExit};
/// The chain specification option.
@@ -94,8 +94,8 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
let mut runtime = Runtime::new()?;
let executor = runtime.executor();
match config.roles == service::Roles::LIGHT {
true => run_until_exit(&mut runtime, service::new_light(config, executor)?, exit)?,
false => run_until_exit(&mut runtime, service::new_full(config, executor)?, exit)?,
true => run_until_exit(&mut runtime, service::Factory::new_light(config, executor)?, exit)?,
false => run_until_exit(&mut runtime, service::Factory::new_full(config, executor)?, exit)?,
}
}
}
+4 -4
View File
@@ -209,6 +209,8 @@ pub struct ProposerFactory<N, C> where
pub handle: TaskExecutor,
/// Offline-tracker.
pub offline: SharedOfflineTracker,
/// Force delay in evaluation this long.
pub force_delay: Timestamp,
}
impl<N, C> bft::Environment<<C as AuthoringApi>::Block> for ProposerFactory<N, C> where
@@ -228,9 +230,7 @@ impl<N, C> bft::Environment<<C as AuthoringApi>::Block> for ProposerFactory<N, C
authorities: &[AuthorityId],
sign_with: Arc<ed25519::Pair>,
) -> Result<(Self::Proposer, Self::Input, Self::Output)> {
// force delay in evaluation this long.
const FORCE_DELAY: Timestamp = 5;
use runtime_primitives::traits::Hash as HashT;
let parent_hash = parent_header.hash();
let id = BlockId::hash(parent_hash);
@@ -261,7 +261,7 @@ impl<N, C> bft::Environment<<C as AuthoringApi>::Block> for ProposerFactory<N, C
transaction_pool: self.transaction_pool.clone(),
offline: self.offline.clone(),
validators,
minimum_timestamp: current_timestamp() + FORCE_DELAY,
minimum_timestamp: current_timestamp() + self.force_delay,
};
Ok((proposer, input, output))
+2
View File
@@ -79,6 +79,7 @@ impl Service {
transaction_pool: Arc<TransactionPool<A>>,
thread_pool: ThreadPoolHandle,
key: ed25519::Pair,
block_delay: u64,
) -> Service
where
A: AuthoringApi + TPClient<Block = <A as AuthoringApi>::Block> + 'static,
@@ -105,6 +106,7 @@ impl Service {
network,
handle: thread_pool.clone(),
offline: Arc::new(RwLock::new(OfflineTracker::new())),
force_delay: block_delay,
};
let bft_service = Arc::new(BftService::new(client.clone(), key, factory));
+6 -2
View File
@@ -32,6 +32,7 @@ use futures::sync::mpsc;
use std::sync::Arc;
use tokio::runtime::TaskExecutor;
use tokio::executor::Executor;
use super::NetworkService;
@@ -260,7 +261,7 @@ impl<P: AuthoringApi + Send + Sync + 'static> Network for ConsensusNetwork<P> {
&self, validators: &[SessionKey],
local_id: SessionKey,
parent_hash: Hash,
task_executor: TaskExecutor
mut task_executor: TaskExecutor
) -> (Self::Input, Self::Output)
{
let sink = BftSink {
@@ -284,7 +285,10 @@ impl<P: AuthoringApi + Send + Sync + 'static> Network for ConsensusNetwork<P> {
});
match process_task {
Some(task) => task_executor.spawn(task),
Some(task) =>
if let Err(e) = Executor::spawn(&mut task_executor, Box::new(task)) {
debug!(target: "node-network", "Cannot spawn message processing: {:?}", e)
},
None => warn!(target: "node-network", "Cannot process incoming messages: network appears to be down"),
}
+2
View File
@@ -73,6 +73,8 @@ pub use runtime_primitives::BuildStorage;
pub use consensus::Call as ConsensusCall;
pub use timestamp::Call as TimestampCall;
pub use runtime_primitives::{Permill, Perbill};
pub use timestamp::BlockPeriod;
pub use srml_support::StorageValue;
#[cfg(any(feature = "std", test))]
pub use checked_block::CheckedBlock;
+8
View File
@@ -11,6 +11,7 @@ log = "0.4"
slog = "^2"
tokio = "0.1.7"
hex-literal = "0.1"
parity-codec = { version = "2.0" }
node-primitives = { path = "../primitives" }
node-runtime = { path = "../runtime" }
node-executor = { path = "../executor" }
@@ -23,3 +24,10 @@ substrate-network = { path = "../../core/network" }
substrate-client = { path = "../../core/client" }
substrate-service = { path = "../../core/service" }
substrate-telemetry = { path = "../../core/telemetry" }
[dev-dependencies]
substrate-service-test = { path = "../../core/service/test" }
substrate-bft = { path = "../../core/bft" }
substrate-test-client = { path = "../../core/test-client" }
substrate-keyring = { path = "../../core/keyring" }
rhododendron = "0.3"
+11
View File
@@ -234,7 +234,18 @@ fn local_testnet_genesis() -> GenesisConfig {
])
}
fn local_testnet_genesis_instant() -> GenesisConfig {
let mut genesis = local_testnet_genesis();
genesis.timestamp = Some(TimestampConfig { period: 0 });
genesis
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None)
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn integration_test_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None)
}
+124 -73
View File
@@ -28,29 +28,40 @@ extern crate substrate_primitives as primitives;
extern crate substrate_network as network;
extern crate substrate_client as client;
extern crate substrate_service as service;
extern crate parity_codec as codec;
extern crate tokio;
#[cfg(test)]
extern crate substrate_service_test as service_test;
#[macro_use]
extern crate log;
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate parking_lot;
#[cfg(test)]
extern crate substrate_bft as bft;
#[cfg(test)]
extern crate substrate_test_client;
#[cfg(test)]
extern crate substrate_keyring as keyring;
pub mod chain_spec;
use std::sync::Arc;
use codec::Decode;
use transaction_pool::TransactionPool;
use node_primitives::{Block, Hash};
use node_runtime::GenesisConfig;
use node_primitives::{Block, Hash, Timestamp, BlockId};
use node_runtime::{GenesisConfig, BlockPeriod, StorageValue, Runtime};
use client::Client;
use consensus::AuthoringApi;
use node_network::{Protocol as DemoProtocol, consensus::ConsensusNetwork};
use transaction_pool::Client as TPApi;
use tokio::runtime::TaskExecutor;
use service::FactoryFullConfiguration;
use primitives::{Blake2Hasher};
use primitives::{Blake2Hasher, storage::StorageKey, twox_128};
pub use service::{Roles, PruningMode, TransactionPoolOptions,
pub use service::{Roles, PruningMode, TransactionPoolOptions, ServiceFactory,
ErrorKind, Error, ComponentBlock, LightComponents, FullComponents};
pub use client::ExecutionStrategy;
@@ -101,6 +112,8 @@ impl service::ServiceFactory for Factory {
type LightTransactionPoolApi = transaction_pool::ChainApi<service::LightClient<Self>>;
type Genesis = GenesisConfig;
type Configuration = CustomConfiguration;
type FullService = Service<service::FullComponents<Self>>;
type LightService = Service<service::LightComponents<Self>>;
fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc<service::FullClient<Self>>)
-> Result<TransactionPool<service::FullClient<Self>>, Error>
@@ -119,82 +132,65 @@ impl service::ServiceFactory for Factory {
{
Ok(DemoProtocol::new())
}
fn new_light(config: Configuration, executor: TaskExecutor)
-> Result<Service<LightComponents<Factory>>, Error>
{
let service = service::Service::<LightComponents<Factory>>::new(config, executor.clone())?;
Ok(Service {
inner: service,
_consensus: None,
})
}
fn new_full(config: Configuration, executor: TaskExecutor)
-> Result<Service<FullComponents<Factory>>, Error>
{
let is_validator = (config.roles & Roles::AUTHORITY) == Roles::AUTHORITY;
let service = service::Service::<FullComponents<Factory>>::new(config, executor.clone())?;
// Spin consensus service if configured
let consensus = if is_validator {
// Load the first available key
let key = service.keystore().load(&service.keystore().contents()?[0], "")?;
info!("Using authority key {}", key.public());
let client = service.client();
let consensus_net = ConsensusNetwork::new(service.network(), client.clone());
let block_id = BlockId::number(client.info().unwrap().chain.best_number);
// TODO: this needs to be dynamically adjustable
let block_delay = client.storage(&block_id, &StorageKey(twox_128(BlockPeriod::<Runtime>::key()).to_vec()))?
.and_then(|data| Timestamp::decode(&mut data.0.as_slice()))
.unwrap_or_else(|| {
warn!("Block period is missing in the storage.");
5
});
Some(consensus::Service::new(
client.clone(),
client.clone(),
consensus_net,
service.transaction_pool(),
executor,
key,
block_delay,
))
} else {
None
};
Ok(Service {
inner: service,
_consensus: consensus,
})
}
}
/// Demo service.
pub struct Service<C: Components> {
inner: service::Service<C>,
client: Arc<ComponentClient<C>>,
network: Arc<NetworkService>,
api: Arc<<C as Components>::Api>,
_consensus: Option<consensus::Service>,
}
impl <C: Components> Service<C> {
pub fn client(&self) -> Arc<ComponentClient<C>> {
self.client.clone()
}
pub fn network(&self) -> Arc<NetworkService> {
self.network.clone()
}
pub fn api(&self) -> Arc<<C as Components>::Api> {
self.api.clone()
}
}
/// Creates light client and register protocol with the network service
pub fn new_light(config: Configuration, executor: TaskExecutor)
-> Result<Service<LightComponents<Factory>>, Error>
{
let service = service::Service::<LightComponents<Factory>>::new(config, executor.clone())?;
let api = service.client();
Ok(Service {
client: service.client(),
network: service.network(),
api: api,
inner: service,
_consensus: None,
})
}
/// Creates full client and register protocol with the network service
pub fn new_full(config: Configuration, executor: TaskExecutor)
-> Result<Service<FullComponents<Factory>>, Error>
{
let is_validator = (config.roles & Roles::AUTHORITY) == Roles::AUTHORITY;
let service = service::Service::<FullComponents<Factory>>::new(config, executor.clone())?;
// Spin consensus service if configured
let consensus = if is_validator {
// Load the first available key
let key = service.keystore().load(&service.keystore().contents()?[0], "")?;
info!("Using authority key {}", key.public());
let client = service.client();
let consensus_net = ConsensusNetwork::new(service.network(), client.clone());
Some(consensus::Service::new(
client.clone(),
client.clone(),
consensus_net,
service.transaction_pool(),
executor,
key,
))
} else {
None
};
Ok(Service {
client: service.client(),
network: service.network(),
api: service.client(),
inner: service,
_consensus: consensus,
})
}
/// Creates bare client without any networking.
pub fn new_client(config: Configuration)
-> Result<Arc<service::ComponentClient<FullComponents<Factory>>>, Error>
@@ -209,3 +205,58 @@ impl<C: Components> ::std::ops::Deref for Service<C> {
&self.inner
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use parking_lot::RwLock;
use {service, service_test, Factory, chain_spec};
use consensus::{self, OfflineTracker};
use primitives::ed25519;
use node_primitives::Block;
use bft::{Proposer, Environment};
use node_network::consensus::ConsensusNetwork;
use substrate_test_client::fake_justify;
use node_primitives::BlockId;
use keyring::Keyring;
#[test]
fn test_connectivity() {
service_test::connectivity::<Factory>(chain_spec::integration_test_config());
}
#[test]
fn test_sync() {
let alice: Arc<ed25519::Pair> = Arc::new(Keyring::Alice.into());
let bob: Arc<ed25519::Pair> = Arc::new(Keyring::Bob.into());
let validators = vec![alice.public().0.into(), bob.public().0.into()];
let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob];
let offline = Arc::new(RwLock::new(OfflineTracker::new()));
let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap();
let block_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let block_id = BlockId::number(service.client().info().unwrap().chain.best_number);
let parent_header = service.client().header(&block_id).unwrap().unwrap();
let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone());
let proposer_factory = consensus::ProposerFactory {
client: service.client().clone(),
transaction_pool: service.transaction_pool().clone(),
network: consensus_net,
offline: offline.clone(),
force_delay: 0,
handle: dummy_runtime.executor(),
};
let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap();
let block = proposer.propose().expect("Error making test block");
let justification = fake_justify::<Block>(&block.header, &keys);
let justification = service.client().check_justification(block.header, justification).unwrap();
(justification, Some(block.extrinsics))
};
service_test::sync::<Factory, _>(chain_spec::integration_test_config(), block_factory);
}
#[test]
fn test_consensus() {
service_test::consensus::<Factory>(chain_spec::integration_test_config(), vec!["Alice".into(), "Bob".into()]);
}
}
+3
View File
@@ -431,6 +431,9 @@ impl<T: Trait> Module<T> {
/// Get the reward for the session, assuming it ends with this block.
fn this_session_reward(actual_elapsed: T::Moment) -> T::Balance {
let ideal_elapsed = <session::Module<T>>::ideal_session_duration();
if ideal_elapsed.is_zero() {
return Self::current_session_reward();
}
let per65536: u64 = (T::Moment::sa(65536u64) * ideal_elapsed.clone() / actual_elapsed.max(ideal_elapsed)).as_();
Self::current_session_reward() * T::Balance::sa(per65536) / T::Balance::sa(65536u64)
}
+2 -2
View File
@@ -111,7 +111,7 @@ impl<T: Trait> Module<T> {
);
assert!(
Self::now().is_zero() || now >= Self::now() + Self::block_period(),
"Timestamp but increment by at least <BlockPeriod> between sequential blocks"
"Timestamp must increment by at least <BlockPeriod> between sequential blocks"
);
<Self as Store>::Now::put(now);
<Self as Store>::DidUpdate::put(true);
@@ -228,7 +228,7 @@ mod tests {
}
#[test]
#[should_panic(expected = "Timestamp but increment by at least <BlockPeriod> between sequential blocks")]
#[should_panic(expected = "Timestamp must increment by at least <BlockPeriod> between sequential blocks")]
fn block_period_is_enforced() {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
t.extend(GenesisConfig::<Test> { period: 5 }.build_storage().unwrap());