Wait for relay chain block import before validating a block announcement (#227)

* Start with something

* Whatever

* Update

* MOARE

* Make cumulus-network compile and tests work

* Update more and fixes

* More stuff

* More fixes

* Make collator build

* Make test almost work

* Remove contracts runtime

* More test work

* Make service compile

* Fix test-service

* Fix test client

* More fixes

* Fix collator test

* Fix network tests (again)

* Make everything compile, finally

* Fix tests

* Write test that should fail

* Add `WaitOnRelayChainBlock`

* Update git versions

* Make it all work

* Update logging

* Switch to provided method for pushing an extrinsic

* Try to debug CI

* Aaaa

* Only use Debug

* Updates

* Use native execution to hopefully make CI happy...
This commit is contained in:
Bastian Köcher
2020-11-23 00:21:02 +01:00
committed by GitHub
parent b11ec7ea9f
commit e5b4e8cae7
17 changed files with 1310 additions and 683 deletions
-1
View File
@@ -62,5 +62,4 @@ test-linux-stable:
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
script:
- time cargo test --all --release --locked
- time cargo test --release -- --ignored integration_test
- sccache -s
+177 -174
View File
File diff suppressed because it is too large Load Diff
+21 -12
View File
@@ -41,8 +41,8 @@ use polkadot_node_primitives::{Collation, CollationGenerationConfig};
use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage};
use polkadot_overseer::OverseerHandler;
use polkadot_primitives::v1::{
Block as PBlock, BlockData, CollatorPair, Hash as PHash, HeadData, Id as ParaId, PoV,
UpwardMessage, BlockNumber as PBlockNumber,
Block as PBlock, BlockData, BlockNumber as PBlockNumber, CollatorPair, Hash as PHash, HeadData,
Id as ParaId, PoV, UpwardMessage,
};
use polkadot_service::RuntimeApiCollection;
@@ -299,7 +299,7 @@ where
info!(
target: "cumulus-collator",
"Starting collation for relay parent `{}` on parent `{}`.",
"Starting collation for relay parent {:?} on parent {:?}.",
relay_parent,
last_head_hash,
);
@@ -380,14 +380,20 @@ where
return None;
}
let collation = self.build_collation(b, block_hash, validation_data.persisted.block_number)?;
let collation =
self.build_collation(b, block_hash, validation_data.persisted.block_number)?;
let pov_hash = collation.proof_of_validity.hash();
self.wait_to_announce
.lock()
.wait_to_announce(block_hash, pov_hash);
info!(target: "cumulus-collator", "Produced proof-of-validity candidate `{:?}` from block `{:?}`.", pov_hash, block_hash);
info!(
target: "cumulus-collator",
"Produced proof-of-validity candidate {:?} from block {:?}.",
pov_hash,
block_hash,
);
Some(collation)
}
@@ -461,7 +467,8 @@ where
let retrieve_dmq_contents = {
let polkadot_client = polkadot_client.clone();
move |relay_parent: PHash| {
polkadot_client.runtime_api()
polkadot_client
.runtime_api()
.dmq_contents_with_context(
&BlockId::hash(relay_parent),
sp_core::ExecutionContext::Importing,
@@ -535,8 +542,8 @@ mod tests {
use sp_runtime::traits::DigestFor;
use cumulus_test_client::{
generate_block_inherents, Client, DefaultTestClientBuilderExt,
TestClientBuilder, TestClientBuilderExt,
generate_block_inherents, Client, DefaultTestClientBuilderExt, TestClientBuilder,
TestClientBuilderExt,
};
use cumulus_test_runtime::{Block, Header};
@@ -634,14 +641,16 @@ mod tests {
let (polkadot_client, relay_parent) = {
// Create a polkadot client with a block imported.
use polkadot_test_client::{
TestClientBuilderExt as _, DefaultTestClientBuilderExt as _,
InitPolkadotBlockBuilder as _, ClientBlockImportExt as _
ClientBlockImportExt as _, DefaultTestClientBuilderExt as _,
InitPolkadotBlockBuilder as _, TestClientBuilderExt as _,
};
let mut client = polkadot_test_client::TestClientBuilder::new().build();
let block_builder = client.init_polkadot_block_builder();
let block = block_builder.build().expect("Finalizes the block").block;
let hash = block.header().hash();
client.import_as_best(BlockOrigin::Own, block).expect("Imports the block");
client
.import_as_best(BlockOrigin::Own, block)
.expect("Imports the block");
(client, hash)
};
@@ -659,7 +668,7 @@ mod tests {
spawner,
para_id,
key: CollatorPair::generate().0,
polkadot_client: Arc::new(polkadot_client,),
polkadot_client: Arc::new(polkadot_client),
},
);
block_on(collator_start).expect("Should start collator");
+2 -2
View File
@@ -23,13 +23,13 @@
use frame_support::{
decl_module, decl_storage, storage,
weights::{DispatchClass, Weight},
traits::Get,
weights::{DispatchClass, Weight},
StorageValue,
};
use frame_system::ensure_none;
use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent};
use sp_std::{prelude::*, cmp};
use sp_std::{cmp, prelude::*};
use cumulus_primitives::{
inherents::{DownwardMessagesType, DOWNWARD_MESSAGES_IDENTIFIER},
+11 -4
View File
@@ -6,7 +6,7 @@ description = "Cumulus-specific networking protocol"
edition = "2018"
[dependencies]
# substrate deps
# Substrate deps
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -14,7 +14,7 @@ sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
# polkadot deps
# Polkadot deps
polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
polkadot-statement-table = { git = "https://github.com/paritytech/polkadot", branch = "master" }
polkadot-validation = { git = "https://github.com/paritytech/polkadot", branch = "master" }
@@ -23,20 +23,27 @@ polkadot-node-primitives = { git = "https://github.com/paritytech/polkadot", bra
polkadot-node-subsystem = { git = "https://github.com/paritytech/polkadot", branch = "master" }
polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
# cumulus deps
# Cumulus deps
cumulus-primitives = { path = "../primitives" }
# other deps
codec = { package = "parity-scale-codec", version = "1.3.0", features = [ "derive" ] }
futures = { version = "0.3.1", features = ["compat"] }
futures-timer = "3.0.2"
log = "0.4.8"
parking_lot = "0.10.2"
derive_more = "0.99.2"
[dev-dependencies]
cumulus-test-runtime = { path = "../test/runtime" }
# Cumulus deps
cumulus-test-service = { path = "../test/service" }
# Polkadot deps
polkadot-test-client = { git = "https://github.com/paritytech/polkadot", branch = "master" }
# substrate deps
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+219 -175
View File
@@ -22,7 +22,9 @@
#[cfg(test)]
mod tests;
mod wait_on_relay_chain_block;
use sc_client_api::{Backend, BlockchainEvents};
use sp_api::ProvideRuntimeApi;
use sp_blockchain::{Error as ClientError, HeaderBackend};
use sp_consensus::{
@@ -32,7 +34,7 @@ use sp_consensus::{
use sp_core::traits::SpawnNamed;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
traits::{Block as BlockT, HashFor, Header as HeaderT},
};
use polkadot_node_primitives::{SignedFullStatement, Statement};
@@ -54,6 +56,10 @@ use log::{trace, warn};
use std::{marker::PhantomData, pin::Pin, sync::Arc};
use wait_on_relay_chain_block::WaitOnRelayChainBlock;
type BlockAnnounceError = Box<dyn std::error::Error + Send>;
/// Parachain specific block announce validator.
///
/// This block announce validator is required if the parachain is running
@@ -81,260 +87,298 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc};
/// chain. If it is at the tip, it is required to provide a justification or otherwise we reject
/// it. However, if the announcement is for a block below the tip the announcement is accepted
/// as it probably comes from a node that is currently syncing the chain.
pub struct BlockAnnounceValidator<B, P> {
phantom: PhantomData<B>,
polkadot_client: Arc<P>,
pub struct BlockAnnounceValidator<Block, P, B, BCE> {
phantom: PhantomData<Block>,
relay_chain_client: Arc<P>,
relay_chain_backend: Arc<B>,
para_id: ParaId,
polkadot_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_sync_oracle: Box<dyn SyncOracle + Send>,
wait_on_relay_chain_block: WaitOnRelayChainBlock<B, BCE>,
}
impl<B, P> BlockAnnounceValidator<B, P> {
impl<Block, P, B, BCE> BlockAnnounceValidator<Block, P, B, BCE> {
/// Create a new [`BlockAnnounceValidator`].
pub fn new(
polkadot_client: Arc<P>,
relay_chain_client: Arc<P>,
para_id: ParaId,
polkadot_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_backend: Arc<B>,
relay_chain_blockchain_events: Arc<BCE>,
) -> Self {
Self {
phantom: Default::default(),
polkadot_client,
relay_chain_client,
para_id,
polkadot_sync_oracle,
relay_chain_sync_oracle,
relay_chain_backend: relay_chain_backend.clone(),
wait_on_relay_chain_block: WaitOnRelayChainBlock::new(
relay_chain_backend,
relay_chain_blockchain_events,
),
}
}
}
impl<B: BlockT, P> BlockAnnounceValidatorT<B> for BlockAnnounceValidator<B, P>
impl<Block: BlockT, P, B, BCE> BlockAnnounceValidator<Block, P, B, BCE>
where
P: ProvideRuntimeApi<PBlock> + HeaderBackend<PBlock> + 'static,
P: ProvideRuntimeApi<PBlock> + Send + Sync + 'static,
P::Api: ParachainHost<PBlock>,
B: Backend<PBlock> + 'static,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
/// Handle a block announcement with empty data (no statement) attached to it.
fn handle_empty_block_announce_data(
&self,
header: Block::Header,
) -> impl Future<Output = Result<Validation, BlockAnnounceError>> {
let relay_chain_client = self.relay_chain_client.clone();
let relay_chain_backend = self.relay_chain_backend.clone();
let para_id = self.para_id;
async move {
// Check if block is equal or higher than best (this requires a justification)
let relay_chain_info = relay_chain_backend.blockchain().info();
let runtime_api_block_id = BlockId::Hash(relay_chain_info.best_hash);
let block_number = header.number();
let local_validation_data = relay_chain_client
.runtime_api()
.persisted_validation_data(
&runtime_api_block_id,
para_id,
OccupiedCoreAssumption::TimedOut,
)
.map_err(|e| Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>)?
.ok_or_else(|| {
Box::new(ClientError::Msg(
"Could not find parachain head in relay chain".into(),
)) as Box<_>
})?;
let parent_head = Block::Header::decode(&mut &local_validation_data.parent_head.0[..])
.map_err(|e| {
Box::new(ClientError::Msg(format!(
"Failed to decode parachain head: {:?}",
e
))) as Box<_>
})?;
let known_best_number = parent_head.number();
if block_number >= known_best_number {
trace!(
target: "cumulus-network",
"validation failed because a justification is needed if the block at the top of the chain."
);
Ok(Validation::Failure)
} else {
Ok(Validation::Success { is_new_best: false })
}
}
}
}
impl<Block: BlockT, P, B, BCE> BlockAnnounceValidatorT<Block>
for BlockAnnounceValidator<Block, P, B, BCE>
where
P: ProvideRuntimeApi<PBlock> + Send + Sync + 'static,
P::Api: ParachainHost<PBlock>,
B: Backend<PBlock> + 'static,
BCE: BlockchainEvents<PBlock> + 'static + Send + Sync,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
fn validate(
&mut self,
header: &B::Header,
header: &Block::Header,
mut data: &[u8],
) -> Pin<Box<dyn Future<Output = Result<Validation, Box<dyn std::error::Error + Send>>> + Send>>
{
if self.polkadot_sync_oracle.is_major_syncing() {
) -> Pin<Box<dyn Future<Output = Result<Validation, BlockAnnounceError>> + Send>> {
if self.relay_chain_sync_oracle.is_major_syncing() {
return ready(Ok(Validation::Success { is_new_best: false })).boxed();
}
let runtime_api = self.polkadot_client.runtime_api();
let polkadot_info = self.polkadot_client.info();
if data.is_empty() {
let polkadot_client = self.polkadot_client.clone();
let header = header.clone();
let para_id = self.para_id;
return async move {
// Check if block is equal or higher than best (this requires a justification)
let runtime_api_block_id = BlockId::Hash(polkadot_info.best_hash);
let block_number = header.number();
let local_validation_data = polkadot_client
.runtime_api()
.persisted_validation_data(
&runtime_api_block_id,
para_id,
OccupiedCoreAssumption::TimedOut,
)
.map_err(|e| Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>)?
.ok_or_else(|| {
Box::new(ClientError::Msg(
"Could not find parachain head in relay chain".into(),
)) as Box<_>
})?;
let parent_head = B::Header::decode(&mut &local_validation_data.parent_head.0[..])
.map_err(|e| {
Box::new(ClientError::Msg(format!(
"Failed to decode parachain head: {:?}",
e
))) as Box<_>
})?;
let known_best_number = parent_head.number();
if block_number >= known_best_number {
trace!(
target: "cumulus-network",
"validation failed because a justification is needed if the block at the top of the chain."
);
Ok(Validation::Failure)
} else {
Ok(Validation::Success { is_new_best: false })
}
}
.boxed();
return self
.handle_empty_block_announce_data(header.clone())
.boxed();
}
let signed_stmt = match SignedFullStatement::decode(&mut data) {
Ok(r) => r,
Err(_) => return ready(Err(Box::new(ClientError::BadJustification(
Err(_) => return ready(Err(Box::new(ClientError::Msg(
"cannot decode block announcement justification, must be a `SignedFullStatement`"
.to_string(),
.into(),
)) as Box<_>))
.boxed(),
};
// Check statement is a candidate statement.
let candidate_receipt = match signed_stmt.payload() {
Statement::Seconded(ref candidate_receipt) => candidate_receipt,
_ => {
return ready(Err(Box::new(ClientError::BadJustification(
"block announcement justification must be a `Statement::Seconded`".to_string(),
)) as Box<_>))
.boxed()
}
};
let relay_chain_client = self.relay_chain_client.clone();
let header_encoded = header.encode();
let wait_on_relay_chain_block = self.wait_on_relay_chain_block.clone();
// Check that the relay chain parent of the block is the relay chain head
let best_number = polkadot_info.best_number;
let validator_index = signed_stmt.validator_index();
let relay_parent = &candidate_receipt.descriptor.relay_parent;
async move {
// Check statement is a candidate statement.
let candidate_receipt = match signed_stmt.payload() {
Statement::Seconded(ref candidate_receipt) => candidate_receipt,
_ => {
return Err(Box::new(ClientError::Msg(
"block announcement justification must be a `Statement::Seconded`".into(),
)) as Box<_>)
}
};
match self.polkadot_client.number(*relay_parent) {
Err(err) => {
return ready(Err(Box::new(ClientError::Backend(format!(
"could not find block number for {}: {}",
relay_parent, err,
))) as Box<_>))
.boxed();
// Check that the header in the candidate_receipt matches the given header.
if header_encoded != candidate_receipt.commitments.head_data.0 {
return Err(Box::new(ClientError::Msg(
"block announcement header does not match the one justified".into(),
)) as Box<_>);
}
Ok(Some(x)) if x == best_number => {}
Ok(None) => {
return ready(Err(
Box::new(ClientError::UnknownBlock(relay_parent.to_string())) as Box<_>,
))
.boxed();
}
Ok(Some(_)) => {
trace!(
target: "cumulus-network",
"validation failed because the relay chain parent ({}) is not the relay chain \
head ({})",
relay_parent,
best_number,
);
return ready(Ok(Validation::Failure)).boxed();
let relay_parent = &candidate_receipt.descriptor.relay_parent;
wait_on_relay_chain_block
.wait_on_relay_chain_block(*relay_parent)
.await
.map_err(|e| Box::new(ClientError::Msg(e.to_string())) as Box<_>)?;
let runtime_api = relay_chain_client.runtime_api();
let validator_index = signed_stmt.validator_index();
let runtime_api_block_id = BlockId::Hash(*relay_parent);
let session_index = match runtime_api.session_index_for_child(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return Err(Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>);
}
};
let signing_context = SigningContext {
parent_hash: *relay_parent,
session_index,
};
// Check that the signer is a legit validator.
let authorities = match runtime_api.validators(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return Err(Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>);
}
};
let signer = match authorities.get(validator_index as usize) {
Some(r) => r,
None => {
return Err(Box::new(ClientError::Msg(
"block accouncement justification signer is a validator index out of bound"
.to_string(),
)) as Box<_>);
}
};
// Check statement is correctly signed.
if signed_stmt
.check_signature(&signing_context, &signer)
.is_err()
{
return Err(Box::new(ClientError::Msg(
"block announcement justification signature is invalid".to_string(),
)) as Box<_>);
}
Ok(Validation::Success { is_new_best: true })
}
let runtime_api_block_id = BlockId::Hash(*relay_parent);
let session_index = match runtime_api.session_index_for_child(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return ready(Err(Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>)).boxed()
}
};
let signing_context = SigningContext {
parent_hash: *relay_parent,
session_index,
};
// Check that the signer is a legit validator.
let authorities = match runtime_api.validators(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return ready(Err(Box::new(ClientError::Msg(format!("{:?}", e))) as Box<_>)).boxed()
}
};
let signer = match authorities.get(validator_index as usize) {
Some(r) => r,
None => {
return ready(Err(Box::new(ClientError::BadJustification(
"block accouncement justification signer is a validator index out of bound"
.to_string(),
)) as Box<_>))
.boxed()
}
};
// Check statement is correctly signed.
if signed_stmt
.check_signature(&signing_context, &signer)
.is_err()
{
return ready(Err(Box::new(ClientError::BadJustification(
"block announced justification signature is invalid".to_string(),
)) as Box<_>))
.boxed();
}
// Check that the header in the candidate_receipt matches the given header.
if header.encode() != candidate_receipt.commitments.head_data.0 {
return ready(Err(Box::new(ClientError::BadJustification(
"block announced header does not match the one justified".to_string(),
)) as Box<_>))
.boxed();
}
ready(Ok(Validation::Success { is_new_best: true })).boxed()
.boxed()
}
}
/// Build a block announce validator instance.
///
/// Returns a boxed [`BlockAnnounceValidator`].
pub fn build_block_announce_validator<B: BlockT>(
polkadot_client: polkadot_service::Client,
pub fn build_block_announce_validator<Block: BlockT, B>(
relay_chain_client: polkadot_service::Client,
para_id: ParaId,
polkadot_sync_oracle: Box<dyn SyncOracle + Send>,
) -> Box<dyn BlockAnnounceValidatorT<B> + Send> {
BlockAnnounceValidatorBuilder::new(polkadot_client, para_id, polkadot_sync_oracle).build()
relay_chain_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_backend: Arc<B>,
) -> Box<dyn BlockAnnounceValidatorT<Block> + Send>
where
B: Backend<PBlock> + Send + 'static,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
BlockAnnounceValidatorBuilder::new(
relay_chain_client,
para_id,
relay_chain_sync_oracle,
relay_chain_backend,
)
.build()
}
/// Block announce validator builder.
///
/// Builds a [`BlockAnnounceValidator`] for a parachain. As this requires
/// a concrete Polkadot client instance, the builder takes a [`polkadot_service::Client`]
/// a concrete relay chain client instance, the builder takes a [`polkadot_service::Client`]
/// that wraps this concrete instance. By using [`polkadot_service::ExecuteWithClient`]
/// the builder gets access to this concrete instance.
struct BlockAnnounceValidatorBuilder<B> {
phantom: PhantomData<B>,
polkadot_client: polkadot_service::Client,
struct BlockAnnounceValidatorBuilder<Block, B> {
phantom: PhantomData<Block>,
relay_chain_client: polkadot_service::Client,
para_id: ParaId,
polkadot_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_backend: Arc<B>,
}
impl<B: BlockT> BlockAnnounceValidatorBuilder<B> {
impl<Block: BlockT, B> BlockAnnounceValidatorBuilder<Block, B>
where
B: Backend<PBlock> + Send + 'static,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
/// Create a new instance of the builder.
fn new(
polkadot_client: polkadot_service::Client,
relay_chain_client: polkadot_service::Client,
para_id: ParaId,
polkadot_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_sync_oracle: Box<dyn SyncOracle + Send>,
relay_chain_backend: Arc<B>,
) -> Self {
Self {
polkadot_client,
relay_chain_client,
para_id,
polkadot_sync_oracle,
relay_chain_sync_oracle,
relay_chain_backend,
phantom: PhantomData,
}
}
/// Build the block announce validator.
fn build(self) -> Box<dyn BlockAnnounceValidatorT<B> + Send> {
self.polkadot_client.clone().execute_with(self)
fn build(self) -> Box<dyn BlockAnnounceValidatorT<Block> + Send> {
self.relay_chain_client.clone().execute_with(self)
}
}
impl<B: BlockT> polkadot_service::ExecuteWithClient for BlockAnnounceValidatorBuilder<B> {
type Output = Box<dyn BlockAnnounceValidatorT<B> + Send>;
impl<Block: BlockT, B> polkadot_service::ExecuteWithClient
for BlockAnnounceValidatorBuilder<Block, B>
where
B: Backend<PBlock> + Send + 'static,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
type Output = Box<dyn BlockAnnounceValidatorT<Block> + Send>;
fn execute_with_client<PClient, Api, PBackend>(self, client: Arc<PClient>) -> Self::Output
where
<Api as sp_api::ApiExt<PBlock>>::StateBackend:
sp_api::StateBackend<sp_runtime::traits::BlakeTwo256>,
PBackend: sc_client_api::Backend<PBlock>,
PBackend: Backend<PBlock>,
PBackend::State: sp_api::StateBackend<sp_runtime::traits::BlakeTwo256>,
Api: polkadot_service::RuntimeApiCollection<StateBackend = PBackend::State>,
PClient: polkadot_service::AbstractClient<PBlock, PBackend, Api = Api> + 'static,
{
Box::new(BlockAnnounceValidator::new(
client,
client.clone(),
self.para_id,
self.polkadot_sync_oracle,
self.relay_chain_sync_oracle,
self.relay_chain_backend,
client,
))
}
}
+118 -136
View File
@@ -15,28 +15,38 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use cumulus_test_runtime::{Block, Header};
use futures::executor::block_on;
use cumulus_test_service::runtime::{Block, Header};
use futures::{executor::block_on, poll, task::Poll};
use polkadot_node_primitives::{SignedFullStatement, Statement};
use polkadot_primitives::v1::{
AuthorityDiscoveryId, Block as PBlock, BlockNumber, CandidateCommitments, CandidateDescriptor,
CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash as PHash,
HeadData, Header as PHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage,
OccupiedCoreAssumption, ParachainHost, PersistedValidationData, SessionIndex, SigningContext,
ValidationCode, ValidationData, ValidationOutputs, ValidatorId, ValidatorIndex,
HeadData, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption,
ParachainHost, PersistedValidationData, SessionIndex, SigningContext, ValidationCode,
ValidationData, ValidationOutputs, ValidatorId, ValidatorIndex,
};
use polkadot_test_client::{
Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend,
InitPolkadotBlockBuilder, TestClientBuilder, TestClientBuilderExt,
};
use sp_api::{ApiRef, ProvideRuntimeApi};
use sp_blockchain::{Error as ClientError, HeaderBackend};
use sp_consensus::block_validation::BlockAnnounceValidator as _;
use sp_consensus::{block_validation::BlockAnnounceValidator as _, BlockOrigin};
use sp_core::H256;
use sp_keyring::Sr25519Keyring;
use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr};
use sp_runtime::{
traits::{NumberFor, Zero},
RuntimeAppPublic,
};
use sp_runtime::RuntimeAppPublic;
use std::collections::BTreeMap;
fn check_error(error: crate::BlockAnnounceError, check_error: impl Fn(&ClientError) -> bool) {
let error = *error
.downcast::<ClientError>()
.expect("Downcasts error to `ClientError`");
if !check_error(&error) {
panic!("Invalid error: {:?}", error);
}
}
#[derive(Clone)]
struct DummyCollatorNetwork;
@@ -50,7 +60,10 @@ impl SyncOracle for DummyCollatorNetwork {
}
}
fn make_validator_and_api() -> (BlockAnnounceValidator<Block, TestApi>, Arc<TestApi>) {
fn make_validator_and_api() -> (
BlockAnnounceValidator<Block, TestApi, PBackend, PClient>,
Arc<TestApi>,
) {
let api = Arc::new(TestApi::new());
(
@@ -58,6 +71,8 @@ fn make_validator_and_api() -> (BlockAnnounceValidator<Block, TestApi>, Arc<Test
api.clone(),
ParaId::from(56),
Box::new(DummyCollatorNetwork),
api.relay_backend.clone(),
api.relay_client.clone(),
),
api,
)
@@ -73,7 +88,22 @@ fn default_header() -> Header {
}
}
fn make_gossip_message_and_header(
/// Same as [`make_gossip_message_and_header`], but using the genesis header as relay parent.
async fn make_gossip_message_and_header_using_genesis(
api: Arc<TestApi>,
validator_index: u32,
) -> (SignedFullStatement, Header) {
let relay_parent = api
.relay_client
.hash(0)
.ok()
.flatten()
.expect("Genesis hash exists");
make_gossip_message_and_header(api, relay_parent, validator_index).await
}
async fn make_gossip_message_and_header(
api: Arc<TestApi>,
relay_parent: H256,
validator_index: u32,
@@ -106,13 +136,14 @@ fn make_gossip_message_and_header(
},
};
let statement = Statement::Seconded(candidate_receipt);
let signed = block_on(SignedFullStatement::sign(
let signed = SignedFullStatement::sign(
&keystore,
statement,
&signing_context,
validator_index,
&alice_public.into(),
))
)
.await
.expect("Signing statement");
(signed, header)
@@ -158,65 +189,19 @@ fn check_statement_is_encoded_correctly() {
.err()
.expect("Should fail on invalid encoded statement");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::BadJustification(x) if x.contains("must be a `SignedFullStatement`")
));
}
#[test]
fn check_relay_parent_is_head() {
let (mut validator, api) = make_validator_and_api();
let relay_chain_leaf = H256::zero();
let (gossip_message, header) = make_gossip_message_and_header(api, relay_chain_leaf, 0);
let data = gossip_message.encode();
let res = block_on(validator.validate(&header, data.as_slice()));
assert_eq!(
res.unwrap(),
Validation::Failure,
"validation fails if the relay chain parent is not the relay chain head",
);
}
#[test]
fn check_relay_parent_actually_exists() {
let (mut validator, api) = make_validator_and_api();
let relay_parent = H256::from_low_u64_be(42);
let (signed_statement, header) = make_gossip_message_and_header(api, relay_parent, 0);
let data = signed_statement.encode();
let res = block_on(validator.validate(&header, &data))
.err()
.expect("Should fail on unknown relay parent");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::UnknownBlock(_)
));
}
#[test]
fn check_relay_parent_fails_if_cannot_retrieve_number() {
let (mut validator, api) = make_validator_and_api();
let relay_parent = H256::from_low_u64_be(0xdead);
let (signed_statement, header) = make_gossip_message_and_header(api, relay_parent, 0);
let data = signed_statement.encode();
let res = block_on(validator.validate(&header, &data))
.err()
.expect("Should fail when the relay chain number could not be retrieved");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::Backend(_)
));
check_error(res, |error| {
matches!(
error,
ClientError::Msg(x) if x.contains("must be a `SignedFullStatement`")
)
});
}
#[test]
fn check_signer_is_legit_validator() {
let (mut validator, api) = make_validator_and_api();
let relay_parent = H256::from_low_u64_be(1);
let (signed_statement, header) = make_gossip_message_and_header(api, relay_parent, 1);
let (signed_statement, header) = block_on(make_gossip_message_and_header_using_genesis(api, 1));
let data = signed_statement.encode();
let res = block_on(validator.validate(&header, &data))
@@ -225,16 +210,15 @@ fn check_signer_is_legit_validator() {
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::BadJustification(x) if x.contains("signer is a validator")
ClientError::Msg(x) if x.contains("signer is a validator")
));
}
#[test]
fn check_statement_is_correctly_signed() {
let (mut validator, api) = make_validator_and_api();
let relay_parent = H256::from_low_u64_be(1);
let (signed_statement, header) = make_gossip_message_and_header(api, relay_parent, 0);
let (signed_statement, header) = block_on(make_gossip_message_and_header_using_genesis(api, 0));
let mut data = signed_statement.encode();
@@ -246,10 +230,12 @@ fn check_statement_is_correctly_signed() {
.err()
.expect("Validation should fail if the statement is not signed correctly");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::BadJustification(x) if x.contains("signature is invalid")
));
check_error(res, |error| {
matches!(
error,
ClientError::Msg(x) if x.contains("signature is invalid")
)
});
}
#[test]
@@ -290,18 +276,20 @@ fn check_statement_seconded() {
.err()
.expect("validation should fail if not seconded statement");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::BadJustification(x) if x.contains("must be a `Statement::Seconded`")
));
check_error(res, |error| {
matches!(
error,
ClientError::Msg(x) if x.contains("must be a `Statement::Seconded`")
)
});
}
#[test]
fn check_header_match_candidate_receipt_header() {
let (mut validator, api) = make_validator_and_api();
let relay_parent = H256::from_low_u64_be(1);
let (signed_statement, mut header) = make_gossip_message_and_header(api, relay_parent, 0);
let (signed_statement, mut header) =
block_on(make_gossip_message_and_header_using_genesis(api, 0));
let data = signed_statement.encode();
header.number = 300;
@@ -309,10 +297,49 @@ fn check_header_match_candidate_receipt_header() {
.err()
.expect("validation should fail if the header in doesn't match");
assert!(matches!(
*res.downcast::<ClientError>().unwrap(),
ClientError::BadJustification(x) if x.contains("header does not match")
));
check_error(res, |error| {
matches!(
error,
ClientError::Msg(x) if x.contains("header does not match")
)
});
}
/// Test that ensures that we postpone the block announce verification until
/// a relay chain block is imported. This is important for when we receive a
/// block announcement before we have imported the associated relay chain block
/// which can happen on slow nodes or nodes with a slow network connection.
#[test]
fn relay_parent_not_imported_when_block_announce_is_processed() {
block_on(async move {
let (mut validator, api) = make_validator_and_api();
let mut client = api.relay_client.clone();
let block = client
.init_polkadot_block_builder()
.build()
.expect("Build new block")
.block;
let (signed_statement, header) = make_gossip_message_and_header(api, block.hash(), 0).await;
let data = signed_statement.encode();
let mut validation = validator.validate(&header, &data);
// The relay chain block is not available yet, so the first poll should return
// that the future is still pending.
assert!(poll!(&mut validation).is_pending());
client
.import(BlockOrigin::Own, block)
.expect("Imports the block");
assert!(matches!(
poll!(validation),
Poll::Ready(Ok(Validation::Success { is_new_best: true }))
));
});
}
#[derive(Default)]
@@ -322,14 +349,21 @@ struct ApiData {
struct TestApi {
data: Arc<ApiData>,
relay_client: Arc<PClient>,
relay_backend: Arc<PBackend>,
}
impl TestApi {
	/// Create a new `TestApi` backed by a fresh relay-chain test client, with
	/// Alice as the only validator.
	fn new() -> Self {
		let builder = TestClientBuilder::new();
		// Keep a handle to the backend before `build()` consumes the builder, so
		// tests can query/import blocks directly.
		let relay_backend = builder.backend();
		Self {
			data: Arc::new(ApiData {
				validators: vec![Sr25519Keyring::Alice.public().into()],
			}),
			relay_client: Arc::new(builder.build()),
			relay_backend,
		}
	}
}
@@ -416,55 +450,3 @@ sp_api::mock_impl_runtime_apis! {
}
}
}
/// Blockchain database header backend. Does not perform any validation.
impl HeaderBackend<PBlock> for TestApi {
	// No headers are stored; every lookup misses.
	fn header(
		&self,
		_id: BlockId<PBlock>,
	) -> std::result::Result<Option<PHeader>, sp_blockchain::Error> {
		Ok(None)
	}
	// Pretend the chain tip is block number 1 with a fixed, well-known hash.
	fn info(&self) -> sc_client_api::blockchain::Info<PBlock> {
		let best_hash = H256::from_low_u64_be(1);
		sc_client_api::blockchain::Info {
			best_hash,
			best_number: 1,
			finalized_hash: Default::default(),
			finalized_number: Zero::zero(),
			genesis_hash: Default::default(),
			number_leaves: Default::default(),
		}
	}
	// Every block is reported as unknown to the backend.
	fn status(
		&self,
		_id: BlockId<PBlock>,
	) -> std::result::Result<sc_client_api::blockchain::BlockStatus, sp_blockchain::Error> {
		Ok(sc_client_api::blockchain::BlockStatus::Unknown)
	}
	// Map a couple of well-known hashes to block numbers; the `0xdead` hash
	// simulates a backend failure for error-path tests.
	fn number(
		&self,
		hash: PHash,
	) -> std::result::Result<Option<NumberFor<PBlock>>, sp_blockchain::Error> {
		if hash == H256::zero() {
			Ok(Some(0))
		} else if hash == H256::from_low_u64_be(1) {
			Ok(Some(1))
		} else if hash == H256::from_low_u64_be(0xdead) {
			Err(sp_blockchain::Error::Backend("dead".to_string()))
		} else {
			Ok(None)
		}
	}
	// Reverse lookup (number -> hash) is not supported by this stub.
	fn hash(
		&self,
		_number: NumberFor<PBlock>,
	) -> std::result::Result<Option<PHash>, sp_blockchain::Error> {
		Ok(None)
	}
}
@@ -0,0 +1,264 @@
// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Cumulus.
// Cumulus is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Cumulus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
//! Provides the [`WaitOnRelayChainBlock`] type.
use futures::{future::ready, Future, FutureExt, StreamExt};
use polkadot_primitives::v1::{Block as PBlock, Hash as PHash};
use sc_client_api::{
blockchain::{self, BlockStatus, HeaderBackend},
Backend, BlockchainEvents,
};
use sp_runtime::{generic::BlockId, traits::HashFor};
use std::{sync::Arc, time::Duration};
/// Timeout (in seconds) after which the waiting for a relay-chain block is aborted.
const TIMEOUT_IN_SECONDS: u64 = 6;
/// Custom error type used by [`WaitOnRelayChainBlock`].
/// Custom error type used by [`WaitOnRelayChainBlock`].
#[derive(Debug, derive_more::Display)]
pub enum Error {
	/// Waiting timed out: the block was not imported within `TIMEOUT_IN_SECONDS`.
	#[display(
		fmt = "Timeout while waiting for relay-chain block `{}` to be imported.",
		_0
	)]
	Timeout(PHash),
	/// The import notification stream ended before the awaited block showed up.
	#[display(
		fmt = "Import listener closed while waiting for relay-chain block `{}` to be imported.",
		_0
	)]
	ImportListenerClosed(PHash),
	/// The blockchain backend returned an error while querying the block status.
	#[display(
		fmt = "Blockchain returned an error while waiting for relay-chain block `{}` to be imported: {:?}",
		_0,
		_1
	)]
	BlockchainError(PHash, blockchain::Error),
}
/// A helper to wait for a given relay chain block in an async way.
///
/// The caller needs to pass the hash of a block it waits for and the function will return when the
/// block is available or an error occurred.
///
/// The waiting for the block is implemented as follows:
///
/// 1. Get a read lock on the import lock from the backend.
///
/// 2. Check if the block is already imported. If yes, return from the function.
///
/// 3. If the block isn't imported yet, add an import notification listener.
///
/// 4. Poll the import notification listener until the block is imported or the timeout is fired.
///
/// The timeout is set to 6 seconds. This should be enough time to import the block in the current
/// round and if not, the new round of the relay chain already started anyway.
pub struct WaitOnRelayChainBlock<B, BCE> {
	// Source of block import notifications (see `BlockchainEvents`).
	block_chain_events: Arc<BCE>,
	// Relay-chain backend, used to query the current status of a block.
	backend: Arc<B>,
}
impl<B, BCE> Clone for WaitOnRelayChainBlock<B, BCE> {
fn clone(&self) -> Self {
Self {
backend: self.backend.clone(),
block_chain_events: self.block_chain_events.clone(),
}
}
}
impl<B, BCE> WaitOnRelayChainBlock<B, BCE> {
	/// Construct a new waiter from the relay-chain `backend` and the source of
	/// block import notifications.
	pub fn new(backend: Arc<B>, block_chain_events: Arc<BCE>) -> Self {
		Self { backend, block_chain_events }
	}
}
impl<B, BCE> WaitOnRelayChainBlock<B, BCE>
where
	B: Backend<PBlock>,
	BCE: BlockchainEvents<PBlock>,
	// Rust bug: https://github.com/rust-lang/rust/issues/24159
	sc_client_api::StateBackendFor<B, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
{
	/// Returns a future that resolves when the relay-chain block with the given
	/// `hash` is imported.
	///
	/// Resolves immediately with `Ok(())` if the block is already in the chain,
	/// with [`Error::BlockchainError`] if the status lookup fails, and with
	/// [`Error::Timeout`] if the block does not show up within
	/// [`TIMEOUT_IN_SECONDS`].
	pub fn wait_on_relay_chain_block(
		&self,
		hash: PHash,
	) -> impl Future<Output = Result<(), Error>> {
		// Hold the import lock while checking the status AND registering the
		// listener, so that an import racing with this call cannot be missed.
		let _lock = self.backend.get_import_lock().read();
		match self.backend.blockchain().status(BlockId::Hash(hash)) {
			Ok(BlockStatus::InChain) => {
				// Already imported; resolve right away.
				return ready(Ok(())).boxed();
			}
			Err(err) => return ready(Err(Error::BlockchainError(hash, err))).boxed(),
			_ => {}
		}
		let mut listener = self.block_chain_events.import_notification_stream();
		// Now it is safe to drop the lock, even when the block is now imported, it should show
		// up in our registered listener.
		drop(_lock);
		let mut timeout = futures_timer::Delay::new(Duration::from_secs(TIMEOUT_IN_SECONDS)).fuse();
		async move {
			loop {
				futures::select! {
					_ = timeout => return Err(Error::Timeout(hash)),
					evt = listener.next() => match evt {
						Some(evt) if evt.hash == hash => return Ok(()),
						// Not the event we waited on.
						Some(_) => continue,
						None => return Err(Error::ImportListenerClosed(hash)),
					}
				}
			}
		}
		.boxed()
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use polkadot_test_client::{
		construct_transfer_extrinsic, BlockBuilderExt, Client, ClientBlockImportExt,
		DefaultTestClientBuilderExt, ExecutionStrategy, FullBackend, InitPolkadotBlockBuilder,
		TestClientBuilder, TestClientBuilderExt,
	};
	use sp_consensus::BlockOrigin;
	use sp_runtime::traits::Block as BlockT;
	use futures::{executor::block_on, poll, task::Poll};
	/// Create a test client, its backend, and one block that is built but
	/// intentionally NOT imported yet.
	fn build_client_backend_and_block() -> (Arc<Client>, Arc<FullBackend>, PBlock) {
		let builder =
			TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible);
		let backend = builder.backend();
		let client = Arc::new(builder.build());
		let block_builder = client.init_polkadot_block_builder();
		let block = block_builder.build().expect("Finalizes the block").block;
		(client, backend, block)
	}
	/// An already-imported block must resolve on the first poll.
	#[test]
	fn returns_directly_for_available_block() {
		let (mut client, backend, block) = build_client_backend_and_block();
		let hash = block.hash();
		client
			.import(BlockOrigin::Own, block)
			.expect("Imports the block");
		let wait = WaitOnRelayChainBlock::new(backend, client);
		block_on(async move {
			// Should be ready on the first poll
			assert!(matches!(
				poll!(wait.wait_on_relay_chain_block(hash)),
				Poll::Ready(Ok(()))
			));
		});
	}
	/// A pending wait must resolve once the import notification arrives.
	#[test]
	fn resolve_after_block_import_notification_was_received() {
		let (mut client, backend, block) = build_client_backend_and_block();
		let hash = block.hash();
		let wait = WaitOnRelayChainBlock::new(backend, client.clone());
		block_on(async move {
			let mut future = wait.wait_on_relay_chain_block(hash);
			// As the block is not yet imported, the first poll should return `Pending`
			assert!(poll!(&mut future).is_pending());
			// Import the block that should fire the notification
			client
				.import(BlockOrigin::Own, block)
				.expect("Imports the block");
			// Now it should have received the notification and report that the block was imported
			assert!(matches!(poll!(future), Poll::Ready(Ok(()))));
		});
	}
	/// Without an import the wait must fail with `Error::Timeout`.
	/// NOTE: this blocks the test thread for ~`TIMEOUT_IN_SECONDS` seconds.
	#[test]
	fn wait_for_block_time_out_when_block_is_not_imported() {
		let (client, backend, block) = build_client_backend_and_block();
		let hash = block.hash();
		let wait = WaitOnRelayChainBlock::new(backend, client.clone());
		assert!(matches!(
			block_on(wait.wait_on_relay_chain_block(hash)),
			Err(Error::Timeout(_))
		));
	}
	/// A notification for a *different* block must not resolve the wait.
	#[test]
	fn do_not_resolve_after_different_block_import_notification_was_received() {
		let (mut client, backend, block) = build_client_backend_and_block();
		let hash = block.hash();
		// Include an extrinsic in the second block so its hash differs.
		let ext = construct_transfer_extrinsic(
			&*client,
			sp_keyring::Sr25519Keyring::Alice,
			sp_keyring::Sr25519Keyring::Bob,
			1000,
		);
		let mut block_builder = client.init_polkadot_block_builder();
		// Push an extrinsic to get a different block hash.
		block_builder
			.push_polkadot_extrinsic(ext)
			.expect("Push extrinsic");
		let block2 = block_builder.build().expect("Build second block").block;
		let hash2 = block2.hash();
		let wait = WaitOnRelayChainBlock::new(backend, client.clone());
		block_on(async move {
			let mut future = wait.wait_on_relay_chain_block(hash);
			let mut future2 = wait.wait_on_relay_chain_block(hash2);
			// As the block is not yet imported, the first poll should return `Pending`
			assert!(poll!(&mut future).is_pending());
			assert!(poll!(&mut future2).is_pending());
			// Import the block that should fire the notification
			client
				.import(BlockOrigin::Own, block2)
				.expect("Imports the second block");
			// The import notification of the second block should not make this one finish
			assert!(poll!(&mut future).is_pending());
			// Now it should have received the notification and report that the block was imported
			assert!(matches!(poll!(future2), Poll::Ready(Ok(()))));
			client
				.import(BlockOrigin::Own, block)
				.expect("Imports the first block");
			// Now it should be ready
			assert!(matches!(poll!(future), Poll::Ready(Ok(()))));
		});
	}
}
+1 -2
View File
@@ -20,8 +20,7 @@
pub use polkadot_core_primitives as relay_chain;
pub use polkadot_core_primitives::InboundDownwardMessage;
pub use polkadot_parachain::primitives::UpwardMessage;
pub use polkadot_parachain::primitives::{Id as ParaId, ValidationParams};
pub use polkadot_parachain::primitives::{Id as ParaId, UpwardMessage, ValidationParams};
pub use polkadot_primitives::v1::{
PersistedValidationData, TransientValidationData, ValidationData,
};
@@ -0,0 +1,410 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Cumulus.
// Cumulus is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Cumulus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]
// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use cumulus_pallet_contracts_rpc_runtime_api::ContractExecResult;
use rococo_parachain_primitives::*;
use sp_api::impl_runtime_apis;
use sp_core::OpaqueMetadata;
use sp_runtime::{
create_runtime_str, generic, impl_opaque_keys,
traits::{BlakeTwo256, Block as BlockT, IdentityLookup, Saturating},
transaction_validity::{TransactionSource, TransactionValidity},
ApplyExtrinsicResult,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;
// A few exports that help ease life for downstream crates.
pub use frame_support::{
construct_runtime, parameter_types,
traits::Randomness,
weights::{constants::WEIGHT_PER_SECOND, IdentityFee, Weight},
StorageValue,
};
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Permill};
/// No session handlers: this runtime does not run session-based consensus itself.
pub type SessionHandlers = ();
// No session keys are required by this runtime.
impl_opaque_keys! {
	pub struct SessionKeys {}
}
/// This runtime version.
pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("cumulus-contracts-parachain"),
	impl_name: create_runtime_str!("cumulus-contracts-parachain"),
	authoring_version: 1,
	// Bump on any change to the state-transition logic.
	spec_version: 4,
	impl_version: 1,
	apis: RUNTIME_API_VERSIONS,
	// Bump when the encoding of existing calls/extrinsics changes.
	transaction_version: 1,
};
/// Target block time in milliseconds (6 seconds).
pub const MILLISECS_PER_BLOCK: u64 = 6000;
// One slot per block.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES;
// These time units are defined in number of blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;
// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks.
// NOTE(review): no babe pallet is configured in this runtime — presumably kept
// for reference/compatibility; confirm whether this constant is still used.
pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
/// Cross-chain (XCMP) message format exchanged with other parachains.
#[derive(codec::Encode, codec::Decode)]
pub enum XCMPMessage<XAccountId, XBalance> {
	/// Transfer tokens to the given account from the Parachain account.
	TransferToken(XAccountId, XBalance),
}
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
	let runtime_version = VERSION;
	let can_author_with = Default::default();
	NativeVersion { runtime_version, can_author_with }
}
// System-level limits and constants shared across pallets.
parameter_types! {
	pub const BlockHashCount: BlockNumber = 250;
	pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND;
	/// Assume 10% of weight for average on_initialize calls.
	pub MaximumExtrinsicWeight: Weight = AvailableBlockRatio::get()
		.saturating_sub(Perbill::from_percent(10)) * MaximumBlockWeight::get();
	pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
	pub const MaximumBlockLength: u32 = 5 * 1024 * 1024;
	pub const Version: RuntimeVersion = VERSION;
	pub const ExtrinsicBaseWeight: Weight = 10_000_000;
}
impl frame_system::Trait for Runtime {
	/// The identifier used to distinguish between accounts.
	type AccountId = AccountId;
	/// The aggregated dispatch type that is available for extrinsics.
	type Call = Call;
	/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
	type Lookup = IdentityLookup<AccountId>;
	/// The index type for storing how many extrinsics an account has signed.
	type Index = Index;
	/// The index type for blocks.
	type BlockNumber = BlockNumber;
	/// The type for hashing blocks and tries.
	type Hash = Hash;
	/// The hashing algorithm used.
	type Hashing = BlakeTwo256;
	/// The header type.
	type Header = generic::Header<BlockNumber, BlakeTwo256>;
	/// The ubiquitous event type.
	type Event = Event;
	/// The ubiquitous origin type.
	type Origin = Origin;
	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
	type BlockHashCount = BlockHashCount;
	/// Maximum weight of each block. With a default weight system of 1byte == 1weight, 4mb is ok.
	type MaximumBlockWeight = MaximumBlockWeight;
	/// Maximum size of all encoded transactions (in bytes) that are allowed in one block.
	type MaximumBlockLength = MaximumBlockLength;
	/// Portion of the block weight that is available to all normal transactions.
	type AvailableBlockRatio = AvailableBlockRatio;
	/// Runtime version.
	type Version = Version;
	// Pallet name/index metadata, generated by `construct_runtime!` below.
	type PalletInfo = PalletInfo;
	// Account data stored in the System pallet: the balances record.
	type AccountData = pallet_balances::AccountData<Balance>;
	// No hooks on account creation/destruction.
	type OnNewAccount = ();
	type OnKilledAccount = ();
	// Database/block-execution weights left at their defaults.
	type DbWeight = ();
	type ExtrinsicBaseWeight = ExtrinsicBaseWeight;
	type BlockExecutionWeight = ();
	type MaximumExtrinsicWeight = MaximumExtrinsicWeight;
	// No call filtering: every call is allowed.
	type BaseCallFilter = ();
	type SystemWeightInfo = ();
}
parameter_types! {
	// Minimum spacing between block timestamps: half the slot duration.
	pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}
impl pallet_timestamp::Trait for Runtime {
	/// A timestamp: milliseconds since the unix epoch.
	type Moment = u64;
	// No additional action when the timestamp is set.
	type OnTimestampSet = ();
	type MinimumPeriod = MinimumPeriod;
	type WeightInfo = ();
}
parameter_types! {
	// Minimum balance required to keep an account alive.
	pub const ExistentialDeposit: u128 = 500;
	pub const TransferFee: u128 = 0;
	pub const CreationFee: u128 = 0;
	pub const TransactionByteFee: u128 = 1;
}
impl pallet_balances::Trait for Runtime {
	/// The type for recording an account's balance.
	type Balance = Balance;
	/// The ubiquitous event type.
	type Event = Event;
	// Dust from killed accounts is dropped (burned).
	type DustRemoval = ();
	type ExistentialDeposit = ExistentialDeposit;
	// Balances are stored in the System pallet's account data.
	type AccountStore = System;
	type WeightInfo = ();
}
impl pallet_transaction_payment::Trait for Runtime {
	type Currency = Balances;
	// Collected fees are dropped (burned).
	type OnTransactionPayment = ();
	type TransactionByteFee = TransactionByteFee;
	// One unit of weight maps to one unit of balance.
	type WeightToFee = IdentityFee<Balance>;
	// Fees are not adjusted based on block fullness.
	type FeeMultiplierUpdate = ();
}
impl pallet_sudo::Trait for Runtime {
	type Call = Call;
	type Event = Event;
}
impl cumulus_parachain_upgrade::Trait for Runtime {
	type Event = Event;
	// No extra handling of the validation function parameters.
	type OnValidationFunctionParams = ();
}
impl cumulus_message_broker::Trait for Runtime {
	type Event = Event;
	// Downward (relay chain -> parachain) messages are handled by the token dealer.
	type DownwardMessageHandlers = TokenDealer;
	type UpwardMessage = cumulus_upward_message::RococoUpwardMessage;
	type ParachainId = ParachainInfo;
	type XCMPMessage = cumulus_token_dealer::XCMPMessage<AccountId, Balance>;
	type XCMPMessageHandlers = TokenDealer;
}
impl cumulus_token_dealer::Trait for Runtime {
	type Event = Event;
	// Upward (parachain -> relay chain) messages are sent via the message broker.
	type UpwardMessageSender = MessageBroker;
	type UpwardMessage = cumulus_upward_message::RococoUpwardMessage;
	type Currency = Balances;
	type XCMPMessageSender = MessageBroker;
}
impl parachain_info::Trait for Runtime {}
// We disable the rent system for easier testing.
parameter_types! {
	pub const TombstoneDeposit: Balance = 0;
	pub const RentByteFee: Balance = 0;
	pub const RentDepositOffset: Balance = 0;
	pub const SurchargeReward: Balance = 0;
}
impl cumulus_pallet_contracts::Trait for Runtime {
	type Time = Timestamp;
	type Randomness = RandomnessCollectiveFlip;
	type Currency = Balances;
	type Call = Call;
	type Event = Event;
	// Contract addresses derived deterministically from deployer and code hash.
	type DetermineContractAddress = cumulus_pallet_contracts::SimpleAddressDeterminer<Runtime>;
	type TrieIdGenerator = cumulus_pallet_contracts::TrieIdFromParentCounter<Runtime>;
	// Rent is disabled via the zeroed parameters above; payments go nowhere.
	type RentPayment = ();
	type SignedClaimHandicap = cumulus_pallet_contracts::DefaultSignedClaimHandicap;
	type TombstoneDeposit = TombstoneDeposit;
	type StorageSizeOffset = cumulus_pallet_contracts::DefaultStorageSizeOffset;
	type RentByteFee = RentByteFee;
	type RentDepositOffset = RentDepositOffset;
	type SurchargeReward = SurchargeReward;
	type MaxDepth = cumulus_pallet_contracts::DefaultMaxDepth;
	type MaxValueSize = cumulus_pallet_contracts::DefaultMaxValueSize;
	// Weight is priced through the transaction-payment pallet.
	type WeightPrice = pallet_transaction_payment::Module<Self>;
}
// Assemble the runtime from the pallets configured above. The order here
// determines pallet indices and execution order of hooks.
construct_runtime! {
	pub enum Runtime where
		Block = Block,
		NodeBlock = rococo_parachain_primitives::Block,
		UncheckedExtrinsic = UncheckedExtrinsic
	{
		System: frame_system::{Module, Call, Storage, Config, Event<T>},
		Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
		Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
		Contracts: cumulus_pallet_contracts::{Module, Call, Config, Storage, Event<T>},
		Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>},
		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
		ParachainUpgrade: cumulus_parachain_upgrade::{Module, Call, Storage, Inherent, Event},
		MessageBroker: cumulus_message_broker::{Module, Call, Inherent, Event<T>},
		TokenDealer: cumulus_token_dealer::{Module, Call, Event<T>},
		TransactionPayment: pallet_transaction_payment::{Module, Storage},
		ParachainInfo: parachain_info::{Module, Storage, Config},
	}
}
/// The address format for describing accounts.
pub type Address = AccountId;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Block>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// The SignedExtension to the basic transaction logic.
// NOTE: the order of these checks matters — they run in the order listed.
pub type SignedExtra = (
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckEra<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Extrinsic type that has already been checked.
pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllModules,
>;
// Runtime API implementations: the interface the node (client) uses to talk to
// the runtime. All calls delegate to `Executive` or the respective pallet.
impl_runtime_apis! {
	impl sp_api::Core<Block> for Runtime {
		fn version() -> RuntimeVersion {
			VERSION
		}
		fn execute_block(block: Block) {
			Executive::execute_block(block)
		}
		fn initialize_block(header: &<Block as BlockT>::Header) {
			Executive::initialize_block(header)
		}
	}
	impl sp_api::Metadata<Block> for Runtime {
		fn metadata() -> OpaqueMetadata {
			Runtime::metadata().into()
		}
	}
	impl sp_block_builder::BlockBuilder<Block> for Runtime {
		fn apply_extrinsic(
			extrinsic: <Block as BlockT>::Extrinsic,
		) -> ApplyExtrinsicResult {
			Executive::apply_extrinsic(extrinsic)
		}
		fn finalize_block() -> <Block as BlockT>::Header {
			Executive::finalize_block()
		}
		fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
			data.create_extrinsics()
		}
		fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult {
			data.check_extrinsics(&block)
		}
		fn random_seed() -> <Block as BlockT>::Hash {
			RandomnessCollectiveFlip::random_seed()
		}
	}
	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
		fn validate_transaction(
			source: TransactionSource,
			tx: <Block as BlockT>::Extrinsic,
		) -> TransactionValidity {
			Executive::validate_transaction(source, tx)
		}
	}
	impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
		fn offchain_worker(header: &<Block as BlockT>::Header) {
			Executive::offchain_worker(header)
		}
	}
	impl sp_session::SessionKeys<Block> for Runtime {
		fn decode_session_keys(
			encoded: Vec<u8>,
		) -> Option<Vec<(Vec<u8>, sp_core::crypto::KeyTypeId)>> {
			SessionKeys::decode_into_raw_public_keys(&encoded)
		}
		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
			SessionKeys::generate(seed)
		}
	}
	impl cumulus_pallet_contracts_rpc_runtime_api::ContractsApi<Block, AccountId, Balance, BlockNumber>
		for Runtime
	{
		// Perform a contract call without submitting an extrinsic (RPC dry-run).
		fn call(
			origin: AccountId,
			dest: AccountId,
			value: Balance,
			gas_limit: u64,
			input_data: Vec<u8>,
		) -> ContractExecResult {
			let (exec_result, gas_consumed) =
				Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data);
			match exec_result {
				Ok(v) => ContractExecResult::Success {
					flags: v.status.into(),
					data: v.data,
					gas_consumed: gas_consumed,
				},
				Err(_) => ContractExecResult::Error,
			}
		}
		fn get_storage(
			address: AccountId,
			key: [u8; 32],
		) -> cumulus_pallet_contracts_primitives::GetStorageResult {
			Contracts::get_storage(address, key)
		}
		fn rent_projection(
			address: AccountId,
		) -> cumulus_pallet_contracts_primitives::RentProjectionResult<BlockNumber> {
			Contracts::rent_projection(address)
		}
	}
}
// Register the `validate_block` entry point required for parachain validation
// on the relay chain.
cumulus_runtime::register_validate_block!(Block, Executive);
+1
View File
@@ -131,6 +131,7 @@ where
polkadot_full_node.client.clone(),
id,
Box::new(polkadot_full_node.network.clone()),
polkadot_full_node.backend.clone(),
);
let prometheus_registry = parachain_config.prometheus_registry().cloned();
@@ -17,7 +17,7 @@
//! The actual implementation of the validate block functionality.
use frame_executive::ExecuteBlock;
use sp_runtime::traits::{Block as BlockT, HashFor, NumberFor, Header as HeaderT};
use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor};
use sp_std::{boxed::Box, vec::Vec};
@@ -33,11 +33,12 @@ use cumulus_primitives::{
},
UpwardMessage, ValidationData,
};
use sp_externalities::{set_and_run_with_externalities};
use sp_externalities::{Externalities, ExtensionStore, Error, Extension};
use sp_trie::MemoryDB;
use sp_std::{any::{TypeId, Any}};
use sp_core::storage::{ChildInfo, TrackedStorageKey};
use sp_externalities::{
set_and_run_with_externalities, Error, Extension, ExtensionStore, Externalities,
};
use sp_std::any::{Any, TypeId};
use sp_trie::MemoryDB;
type StorageValue = Vec<u8>;
type StorageKey = Vec<u8>;
@@ -50,8 +51,7 @@ type Ext<'a, B: BlockT> = sp_state_machine::Ext<
>;
fn with_externalities<F: FnOnce(&mut dyn Externalities) -> R, R>(f: F) -> R {
sp_externalities::with_externalities(f)
.expect("Environmental externalities not set.")
sp_externalities::with_externalities(f).expect("Environmental externalities not set.")
}
/// Implement `Encode` by forwarding the stored raw vec.
@@ -85,10 +85,7 @@ pub fn validate_block<B: BlockT, E: ExecuteBlock<B>>(params: ValidationParams) -
if !HashDB::<HashFor<B>, _>::contains(&db, &root, EMPTY_PREFIX) {
panic!("Witness data does not contain given storage root.");
}
let backend = sp_state_machine::TrieBackend::new(
db,
root,
);
let backend = sp_state_machine::TrieBackend::new(db, root);
let mut overlay = sp_state_machine::OverlayedChanges::default();
let mut cache = Default::default();
let mut ext = WitnessExt::<B> {
@@ -109,30 +106,30 @@ pub fn validate_block<B: BlockT, E: ExecuteBlock<B>>(params: ValidationParams) -
sp_io::storage::host_changes_root.replace_implementation(host_storage_changes_root),
sp_io::storage::host_append.replace_implementation(host_storage_append),
sp_io::storage::host_next_key.replace_implementation(host_storage_next_key),
sp_io::storage::host_start_transaction.replace_implementation(host_storage_start_transaction),
sp_io::storage::host_rollback_transaction.replace_implementation(
host_storage_rollback_transaction
),
sp_io::storage::host_commit_transaction.replace_implementation(
host_storage_commit_transaction
),
sp_io::default_child_storage::host_get.replace_implementation(host_default_child_storage_get),
sp_io::default_child_storage::host_read.replace_implementation(host_default_child_storage_read),
sp_io::default_child_storage::host_set.replace_implementation(host_default_child_storage_set),
sp_io::default_child_storage::host_clear.replace_implementation(
host_default_child_storage_clear
),
sp_io::default_child_storage::host_storage_kill.replace_implementation(
host_default_child_storage_storage_kill
),
sp_io::default_child_storage::host_exists.replace_implementation(
host_default_child_storage_exists
),
sp_io::default_child_storage::host_clear_prefix.replace_implementation(
host_default_child_storage_clear_prefix
),
sp_io::default_child_storage::host_root.replace_implementation(host_default_child_storage_root),
sp_io::default_child_storage::host_next_key.replace_implementation(host_default_child_storage_next_key),
sp_io::storage::host_start_transaction
.replace_implementation(host_storage_start_transaction),
sp_io::storage::host_rollback_transaction
.replace_implementation(host_storage_rollback_transaction),
sp_io::storage::host_commit_transaction
.replace_implementation(host_storage_commit_transaction),
sp_io::default_child_storage::host_get
.replace_implementation(host_default_child_storage_get),
sp_io::default_child_storage::host_read
.replace_implementation(host_default_child_storage_read),
sp_io::default_child_storage::host_set
.replace_implementation(host_default_child_storage_set),
sp_io::default_child_storage::host_clear
.replace_implementation(host_default_child_storage_clear),
sp_io::default_child_storage::host_storage_kill
.replace_implementation(host_default_child_storage_storage_kill),
sp_io::default_child_storage::host_exists
.replace_implementation(host_default_child_storage_exists),
sp_io::default_child_storage::host_clear_prefix
.replace_implementation(host_default_child_storage_clear_prefix),
sp_io::default_child_storage::host_root
.replace_implementation(host_default_child_storage_root),
sp_io::default_child_storage::host_next_key
.replace_implementation(host_default_child_storage_next_key),
)
};
@@ -142,7 +139,9 @@ pub fn validate_block<B: BlockT, E: ExecuteBlock<B>>(params: ValidationParams) -
// If in the course of block execution new validation code was set, insert
// its scheduled upgrade so we can validate that block number later.
let new_validation_code = overlay.storage(NEW_VALIDATION_CODE).flatten()
let new_validation_code = overlay
.storage(NEW_VALIDATION_CODE)
.flatten()
.map(|slice| slice.to_vec())
.map(ValidationCode);
@@ -153,17 +152,20 @@ pub fn validate_block<B: BlockT, E: ExecuteBlock<B>>(params: ValidationParams) -
None => Vec::new(),
};
let processed_downward_messages = overlay.storage(PROCESSED_DOWNWARD_MESSAGES)
let processed_downward_messages = overlay
.storage(PROCESSED_DOWNWARD_MESSAGES)
.flatten()
.map(|v|
.map(|v| {
Decode::decode(&mut &v[..])
.expect("Processed downward message count is not correctly encoded in the storage")
)
})
.unwrap_or_default();
let validation_data: ValidationData = overlay.storage(VALIDATION_DATA).flatten()
.and_then(|v| Decode::decode(&mut &v[..]).ok())
.expect("`ValidationData` is required to be placed into the storage!");
let validation_data: ValidationData = overlay
.storage(VALIDATION_DATA)
.flatten()
.and_then(|v| Decode::decode(&mut &v[..]).ok())
.expect("`ValidationData` is required to be placed into the storage!");
ValidationResult {
head_data,
@@ -220,19 +222,11 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.storage_hash(key)
}
fn child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageValue> {
fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option<StorageValue> {
self.inner.child_storage(child_info, key)
}
fn child_storage_hash(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>> {
fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>> {
self.inner.child_storage_hash(child_info, key)
}
@@ -240,11 +234,7 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.exists_storage(key)
}
fn exists_child_storage(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> bool {
fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool {
self.inner.exists_child_storage(child_info, key)
}
@@ -252,11 +242,7 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.next_storage_key(key)
}
fn next_child_storage_key(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageKey> {
fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option<StorageKey> {
self.inner.next_child_storage_key(child_info, key)
}
@@ -279,10 +265,7 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.place_child_storage(child_info, key, value)
}
fn kill_child_storage(
&mut self,
child_info: &ChildInfo,
) {
fn kill_child_storage(&mut self, child_info: &ChildInfo) {
self.inner.kill_child_storage(child_info)
}
@@ -290,19 +273,11 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.clear_prefix(prefix)
}
fn clear_child_prefix(
&mut self,
child_info: &ChildInfo,
prefix: &[u8],
) {
fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) {
self.inner.clear_child_prefix(child_info, prefix)
}
fn storage_append(
&mut self,
key: Vec<u8>,
value: Vec<u8>,
) {
fn storage_append(&mut self, key: Vec<u8>, value: Vec<u8>) {
self.inner.storage_append(key, value)
}
@@ -314,10 +289,7 @@ impl<'a, B: BlockT> Externalities for WitnessExt<'a, B> {
self.inner.storage_root()
}
fn child_storage_root(
&mut self,
child_info: &ChildInfo,
) -> Vec<u8> {
fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec<u8> {
self.inner.child_storage_root(child_info)
}
@@ -372,13 +344,11 @@ impl<'a, B: BlockT> ExtensionStore for WitnessExt<'a, B> {
type_id: TypeId,
extension: Box<dyn Extension>,
) -> Result<(), Error> {
self.inner.register_extension_with_type_id(type_id, extension)
self.inner
.register_extension_with_type_id(type_id, extension)
}
fn deregister_extension_by_type_id(
&mut self,
type_id: TypeId,
) -> Result<(), Error> {
fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), Error> {
self.inner.deregister_extension_by_type_id(type_id)
}
}
@@ -470,57 +440,39 @@ fn host_default_child_storage_read(
}
}
fn host_default_child_storage_set(
storage_key: &[u8],
key: &[u8],
value: &[u8],
) {
fn host_default_child_storage_set(storage_key: &[u8], key: &[u8], value: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.place_child_storage(&child_info, key.to_vec(), Some(value.to_vec())))
with_externalities(|ext| {
ext.place_child_storage(&child_info, key.to_vec(), Some(value.to_vec()))
})
}
fn host_default_child_storage_clear(
storage_key: &[u8],
key: &[u8],
) {
fn host_default_child_storage_clear(storage_key: &[u8], key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.place_child_storage(&child_info, key.to_vec(), None))
}
fn host_default_child_storage_storage_kill(
storage_key: &[u8],
) {
fn host_default_child_storage_storage_kill(storage_key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.kill_child_storage(&child_info))
}
fn host_default_child_storage_exists(
storage_key: &[u8],
key: &[u8],
) -> bool {
fn host_default_child_storage_exists(storage_key: &[u8], key: &[u8]) -> bool {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.exists_child_storage(&child_info, key))
}
fn host_default_child_storage_clear_prefix(
storage_key: &[u8],
prefix: &[u8],
) {
fn host_default_child_storage_clear_prefix(storage_key: &[u8], prefix: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.clear_child_prefix(&child_info, prefix))
}
fn host_default_child_storage_root(
storage_key: &[u8],
) -> Vec<u8> {
fn host_default_child_storage_root(storage_key: &[u8]) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.child_storage_root(&child_info))
}
fn host_default_child_storage_next_key(
storage_key: &[u8],
key: &[u8],
) -> Option<Vec<u8>> {
fn host_default_child_storage_next_key(storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| ext.next_child_storage_key(&child_info, key))
}
+11 -54
View File
@@ -12,29 +12,23 @@
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! A Cumulus test client.
//! A Polkadot test client.
mod block_builder;
pub use block_builder::*;
use codec::Encode;
pub use cumulus_test_runtime as runtime;
use runtime::{
Balance, Block, BlockHashCount, Call, GenesisConfig, Runtime, Signature, SignedExtra,
SignedPayload, UncheckedExtrinsic, VERSION,
};
use sc_service::client;
use sp_blockchain::HeaderBackend;
use sp_core::{map, storage::Storage, twox_128, ChangesTrieConfiguration};
use sp_runtime::{
generic::Era,
traits::{Block as BlockT, Hash as HashT, Header as HeaderT},
BuildStorage, SaturatedConversion,
};
use std::collections::BTreeMap;
use sp_core::storage::Storage;
use sp_runtime::{generic::Era, BuildStorage, SaturatedConversion};
pub use block_builder::*;
pub use cumulus_test_runtime as runtime;
pub use substrate_test_client::*;
mod local_executor {
@@ -67,42 +61,16 @@ pub type Client = client::Client<Backend, Executor, Block, runtime::RuntimeApi>;
/// Parameters of test-client builder with test-runtime.
#[derive(Default)]
pub struct GenesisParameters {
support_changes_trie: bool,
}
pub struct GenesisParameters;
impl substrate_test_client::GenesisInit for GenesisParameters {
fn genesis_storage(&self) -> Storage {
let changes_trie_config: Option<ChangesTrieConfiguration> = if self.support_changes_trie {
Some(sp_test_primitives::changes_trie_config())
} else {
None
};
let mut storage = genesis_config(changes_trie_config).build_storage().unwrap();
let child_roots = storage.children_default.iter().map(|(sk, child_content)| {
let state_root =
<<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
child_content.data.clone().into_iter().collect(),
);
(sk.clone(), state_root.encode())
});
let state_root =
<<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
storage.top.clone().into_iter().chain(child_roots).collect(),
);
let block: runtime::Block = client::genesis::construct_genesis_block(state_root);
storage.top.extend(additional_storage_with_genesis(&block));
storage
genesis_config().build_storage().unwrap()
}
}
/// A `test-runtime` extensions to `TestClientBuilder`.
pub trait TestClientBuilderExt: Sized {
/// Enable or disable support for changes trie in genesis.
fn set_support_changes_trie(self, support_changes_trie: bool) -> Self;
/// Build the test client.
fn build(self) -> Client {
self.build_with_longest_chain().0
@@ -113,11 +81,6 @@ pub trait TestClientBuilderExt: Sized {
}
impl TestClientBuilderExt for TestClientBuilder {
fn set_support_changes_trie(mut self, support_changes_trie: bool) -> Self {
self.genesis_init_mut().support_changes_trie = support_changes_trie;
self
}
fn build_with_longest_chain(self) -> (Client, LongestChain) {
self.build_with_native_executor(None)
}
@@ -135,14 +98,8 @@ impl DefaultTestClientBuilderExt for TestClientBuilder {
}
}
fn genesis_config(changes_trie_config: Option<ChangesTrieConfiguration>) -> GenesisConfig {
cumulus_test_service::local_testnet_genesis(changes_trie_config)
}
fn additional_storage_with_genesis(genesis_block: &Block) -> BTreeMap<Vec<u8>, Vec<u8>> {
map![
twox_128(&b"latest"[..]).to_vec() => genesis_block.hash().as_fixed_bytes().to_vec()
]
fn genesis_config() -> GenesisConfig {
cumulus_test_service::local_testnet_genesis()
}
/// Generate an extrinsic from the provided function call, origin and [`Client`].
+1 -1
View File
@@ -342,7 +342,7 @@ impl_runtime_apis! {
impl crate::GetLastTimestamp<Block> for Runtime {
fn get_last_timestamp() -> u64 {
<pallet_timestamp::Module<Self>>::now()
Timestamp::now()
}
}
}
+4 -8
View File
@@ -21,7 +21,7 @@ use cumulus_test_runtime::{AccountId, Signature};
use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup};
use sc_service::ChainType;
use serde::{Deserialize, Serialize};
use sp_core::{sr25519, ChangesTrieConfiguration, Pair, Public};
use sp_core::{sr25519, Pair, Public};
use sp_runtime::traits::{IdentifyAccount, Verify};
/// Specialized `ChainSpec` for the normal parachain runtime.
@@ -67,7 +67,7 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec {
"Local Testnet",
"local_testnet",
ChainType::Local,
move || local_testnet_genesis(None),
move || local_testnet_genesis(),
vec![],
None,
None,
@@ -80,9 +80,7 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec {
}
/// Local testnet genesis for testing.
pub fn local_testnet_genesis(
changes_trie_config: Option<ChangesTrieConfiguration>,
) -> cumulus_test_runtime::GenesisConfig {
pub fn local_testnet_genesis() -> cumulus_test_runtime::GenesisConfig {
testnet_genesis(
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
@@ -99,21 +97,19 @@ pub fn local_testnet_genesis(
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
changes_trie_config,
)
}
fn testnet_genesis(
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
changes_trie_config: Option<ChangesTrieConfiguration>,
) -> cumulus_test_runtime::GenesisConfig {
cumulus_test_runtime::GenesisConfig {
frame_system: Some(cumulus_test_runtime::SystemConfig {
code: cumulus_test_runtime::WASM_BINARY
.expect("WASM binary was not build, please build it!")
.to_vec(),
changes_trie_config,
..Default::default()
}),
pallet_balances: Some(cumulus_test_runtime::BalancesConfig {
balances: endowed_accounts
+3
View File
@@ -22,6 +22,7 @@ mod chain_spec;
mod genesis;
pub use chain_spec::*;
pub use cumulus_test_runtime as runtime;
pub use genesis::*;
use core::future::Future;
@@ -164,6 +165,8 @@ where
polkadot_full_node.client.clone(),
para_id,
Box::new(polkadot_full_node.network.clone()),
polkadot_full_node.backend.clone(),
polkadot_full_node.client.clone(),
);
let block_announce_validator_builder = move |_| Box::new(block_announce_validator) as Box<_>;
+2 -1
View File
@@ -27,7 +27,8 @@ async fn test_collating_and_non_collator_mode_catching_up(task_executor: TaskExe
let para_id = ParaId::from(100);
// start alice
let alice = polkadot_test_service::run_validator_node(task_executor.clone(), Alice, || {}, vec![]);
let alice =
polkadot_test_service::run_validator_node(task_executor.clone(), Alice, || {}, vec![]);
// start bob
let bob = polkadot_test_service::run_validator_node(