Update Substrate & Polkadot (#427)

* Update Substrate & Polkadot

* Fixes
This commit is contained in:
Bastian Köcher
2021-05-04 09:48:41 +02:00
committed by GitHub
parent 3f687171c2
commit 6364eeeef1
11 changed files with 427 additions and 402 deletions
Generated
+241 -232
View File
File diff suppressed because it is too large Load Diff
@@ -24,7 +24,7 @@ use sp_consensus::{
import_queue::{BasicQueue, CacheKeyId, Verifier as VerifierT},
BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy,
};
use sp_inherents::InherentDataProviders;
use sp_inherents::{CreateInherentDataProviders, InherentDataProvider};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
@@ -32,18 +32,19 @@ use sp_runtime::{
};
/// A verifier that just checks the inherents.
struct Verifier<Client, Block> {
struct Verifier<Client, Block, CIDP> {
client: Arc<Client>,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: CIDP,
_marker: PhantomData<Block>,
}
#[async_trait::async_trait]
impl<Client, Block> VerifierT<Block> for Verifier<Client, Block>
impl<Client, Block, CIDP> VerifierT<Block> for Verifier<Client, Block, CIDP>
where
Block: BlockT,
Client: ProvideRuntimeApi<Block> + Send + Sync,
<Client as ProvideRuntimeApi<Block>>::Api: BlockBuilderApi<Block>,
CIDP: CreateInherentDataProviders<Block, ()>,
{
async fn verify(
&mut self,
@@ -59,10 +60,15 @@ where
String,
> {
if let Some(inner_body) = body.take() {
let inherent_data = self
.inherent_data_providers
let inherent_data_providers = self
.create_inherent_data_providers
.create_inherent_data_providers(*header.parent_hash(), ())
.await
.map_err(|e| format!("{:?}", e))?;
let inherent_data = inherent_data_providers
.create_inherent_data()
.map_err(|e| e.into_string())?;
.map_err(|e| format!("{:?}", e))?;
let block = Block::new(header.clone(), inner_body);
@@ -77,9 +83,15 @@ where
.map_err(|e| format!("{:?}", e))?;
if !inherent_res.ok() {
inherent_res.into_errors().try_for_each(|(i, e)| {
Err(self.inherent_data_providers.error_to_string(&i, &e))
})?;
for (i, e) in inherent_res.into_errors() {
match inherent_data_providers.try_handle_error(&i, &e).await {
Some(r) => r.map_err(|e| format!("{:?}", e))?,
None => Err(format!(
"Unhandled inherent error from `{}`.",
String::from_utf8_lossy(&i)
))?,
}
}
}
let (_, inner_body) = block.deconstruct();
@@ -103,10 +115,10 @@ where
}
/// Start an import queue for a Cumulus collator that does not use any special authoring logic.
pub fn import_queue<Client, Block: BlockT, I>(
pub fn import_queue<Client, Block: BlockT, I, CIDP>(
client: Arc<Client>,
block_import: I,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: CIDP,
spawner: &impl sp_core::traits::SpawnEssentialNamed,
registry: Option<&substrate_prometheus_endpoint::Registry>,
) -> ClientResult<BasicQueue<Block, I::Transaction>>
@@ -115,10 +127,11 @@ where
I::Transaction: Send,
Client: ProvideRuntimeApi<Block> + Send + Sync + 'static,
<Client as ProvideRuntimeApi<Block>>::Api: BlockBuilderApi<Block>,
CIDP: CreateInherentDataProviders<Block, ()> + 'static,
{
let verifier = Verifier {
client,
inherent_data_providers,
create_inherent_data_providers,
_marker: PhantomData,
};
+49 -29
View File
@@ -39,7 +39,6 @@ use cumulus_primitives_core::{
ParaId, PersistedValidationData,
};
use cumulus_primitives_parachain_inherent::ParachainInherentData;
pub use import_queue::import_queue;
use parking_lot::Mutex;
use polkadot_service::ClientHandle;
use sc_client_api::Backend;
@@ -48,32 +47,35 @@ use sp_consensus::{
BlockImport, BlockImportParams, BlockOrigin, EnableProofRecording, Environment,
ForkChoiceStrategy, ProofRecording, Proposal, Proposer,
};
use sp_inherents::{InherentData, InherentDataProviders};
use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT};
use std::{marker::PhantomData, sync::Arc, time::Duration};
mod import_queue;
pub use import_queue::import_queue;
const LOG_TARGET: &str = "cumulus-consensus-relay-chain";
/// The implementation of the relay-chain provided consensus for parachains.
pub struct RelayChainConsensus<B, PF, BI, RClient, RBackend> {
pub struct RelayChainConsensus<B, PF, BI, RClient, RBackend, CIDP> {
para_id: ParaId,
_phantom: PhantomData<B>,
proposer_factory: Arc<Mutex<PF>>,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: Arc<CIDP>,
block_import: Arc<futures::lock::Mutex<BI>>,
relay_chain_client: Arc<RClient>,
relay_chain_backend: Arc<RBackend>,
}
impl<B, PF, BI, RClient, RBackend> Clone for RelayChainConsensus<B, PF, BI, RClient, RBackend> {
impl<B, PF, BI, RClient, RBackend, CIDP> Clone
for RelayChainConsensus<B, PF, BI, RClient, RBackend, CIDP>
{
fn clone(&self) -> Self {
Self {
para_id: self.para_id,
_phantom: PhantomData,
proposer_factory: self.proposer_factory.clone(),
inherent_data_providers: self.inherent_data_providers.clone(),
create_inherent_data_providers: self.create_inherent_data_providers.clone(),
block_import: self.block_import.clone(),
relay_chain_backend: self.relay_chain_backend.clone(),
relay_chain_client: self.relay_chain_client.clone(),
@@ -81,18 +83,19 @@ impl<B, PF, BI, RClient, RBackend> Clone for RelayChainConsensus<B, PF, BI, RCli
}
}
impl<B, PF, BI, RClient, RBackend> RelayChainConsensus<B, PF, BI, RClient, RBackend>
impl<B, PF, BI, RClient, RBackend, CIDP> RelayChainConsensus<B, PF, BI, RClient, RBackend, CIDP>
where
B: BlockT,
RClient: ProvideRuntimeApi<PBlock>,
RClient::Api: ParachainHost<PBlock>,
RBackend: Backend<PBlock>,
CIDP: CreateInherentDataProviders<B, ()>,
{
/// Create a new instance of relay-chain provided consensus.
pub fn new(
para_id: ParaId,
proposer_factory: PF,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: CIDP,
block_import: BI,
polkadot_client: Arc<RClient>,
polkadot_backend: Arc<RBackend>,
@@ -100,7 +103,7 @@ where
Self {
para_id,
proposer_factory: Arc::new(Mutex::new(proposer_factory)),
inherent_data_providers,
create_inherent_data_providers: Arc::new(create_inherent_data_providers),
block_import: Arc::new(futures::lock::Mutex::new(block_import)),
relay_chain_backend: polkadot_backend,
relay_chain_client: polkadot_client,
@@ -109,13 +112,26 @@ where
}
/// Get the inherent data with validation function parameters injected
fn inherent_data(
async fn inherent_data(
&self,
parent: B::Hash,
validation_data: &PersistedValidationData,
relay_parent: PHash,
) -> Option<InherentData> {
let mut inherent_data = self
.inherent_data_providers
let inherent_data_providers = self
.create_inherent_data_providers
.create_inherent_data_providers(parent, ())
.await
.map_err(|e| {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to create inherent data providers",
)
})
.ok()?;
let mut inherent_data = inherent_data_providers
.create_inherent_data()
.map_err(|e| {
tracing::error!(
@@ -153,8 +169,8 @@ where
}
#[async_trait::async_trait]
impl<B, PF, BI, RClient, RBackend> ParachainConsensus<B>
for RelayChainConsensus<B, PF, BI, RClient, RBackend>
impl<B, PF, BI, RClient, RBackend, CIDP> ParachainConsensus<B>
for RelayChainConsensus<B, PF, BI, RClient, RBackend, CIDP>
where
B: BlockT,
RClient: ProvideRuntimeApi<PBlock> + Send + Sync,
@@ -168,6 +184,7 @@ where
ProofRecording = EnableProofRecording,
Proof = <EnableProofRecording as ProofRecording>::Proof,
>,
CIDP: CreateInherentDataProviders<B, ()>,
{
async fn produce_candidate(
&mut self,
@@ -184,7 +201,7 @@ where
)
.ok()?;
let inherent_data = self.inherent_data(&validation_data, relay_parent)?;
let inherent_data = self.inherent_data(parent.hash(), &validation_data, relay_parent).await?;
let Proposal {
block,
@@ -236,10 +253,10 @@ where
}
/// Parameters of [`build_relay_chain_consensus`].
pub struct BuildRelayChainConsensusParams<PF, BI, RBackend> {
pub struct BuildRelayChainConsensusParams<PF, BI, RBackend, CIDP> {
pub para_id: ParaId,
pub proposer_factory: PF,
pub inherent_data_providers: InherentDataProviders,
pub create_inherent_data_providers: CIDP,
pub block_import: BI,
pub relay_chain_client: polkadot_service::Client,
pub relay_chain_backend: Arc<RBackend>,
@@ -248,15 +265,15 @@ pub struct BuildRelayChainConsensusParams<PF, BI, RBackend> {
/// Build the [`RelayChainConsensus`].
///
/// Returns a boxed [`ParachainConsensus`].
pub fn build_relay_chain_consensus<Block, PF, BI, RBackend>(
pub fn build_relay_chain_consensus<Block, PF, BI, RBackend, CIDP>(
BuildRelayChainConsensusParams {
para_id,
proposer_factory,
inherent_data_providers,
create_inherent_data_providers,
block_import,
relay_chain_client,
relay_chain_backend,
}: BuildRelayChainConsensusParams<PF, BI, RBackend>,
}: BuildRelayChainConsensusParams<PF, BI, RBackend, CIDP>,
) -> Box<dyn ParachainConsensus<Block>>
where
Block: BlockT,
@@ -271,12 +288,13 @@ where
RBackend: Backend<PBlock> + 'static,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
sc_client_api::StateBackendFor<RBackend, PBlock>: sc_client_api::StateBackend<HashFor<PBlock>>,
CIDP: CreateInherentDataProviders<Block, ()> + 'static,
{
RelayChainConsensusBuilder::new(
para_id,
proposer_factory,
block_import,
inherent_data_providers,
create_inherent_data_providers,
relay_chain_client,
relay_chain_backend,
)
@@ -289,17 +307,17 @@ where
/// a concrete relay chain client instance, the builder takes a [`polkadot_service::Client`]
/// that wraps this concrete instance. By using [`polkadot_service::ExecuteWithClient`]
/// the builder gets access to this concrete instance.
struct RelayChainConsensusBuilder<Block, PF, BI, RBackend> {
struct RelayChainConsensusBuilder<Block, PF, BI, RBackend, CIDP> {
para_id: ParaId,
_phantom: PhantomData<Block>,
proposer_factory: PF,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: CIDP,
block_import: BI,
relay_chain_backend: Arc<RBackend>,
relay_chain_client: polkadot_service::Client,
}
impl<Block, PF, BI, RBackend> RelayChainConsensusBuilder<Block, PF, BI, RBackend>
impl<Block, PF, BI, RBackend, CIDP> RelayChainConsensusBuilder<Block, PF, BI, RBackend, CIDP>
where
Block: BlockT,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
@@ -313,13 +331,14 @@ where
>,
BI: BlockImport<Block> + Send + Sync + 'static,
RBackend: Backend<PBlock> + 'static,
CIDP: CreateInherentDataProviders<Block, ()> + 'static,
{
/// Create a new instance of the builder.
fn new(
para_id: ParaId,
proposer_factory: PF,
block_import: BI,
inherent_data_providers: InherentDataProviders,
create_inherent_data_providers: CIDP,
relay_chain_client: polkadot_service::Client,
relay_chain_backend: Arc<RBackend>,
) -> Self {
@@ -328,7 +347,7 @@ where
_phantom: PhantomData,
proposer_factory,
block_import,
inherent_data_providers,
create_inherent_data_providers,
relay_chain_backend,
relay_chain_client,
}
@@ -340,8 +359,8 @@ where
}
}
impl<Block, PF, BI, RBackend> polkadot_service::ExecuteWithClient
for RelayChainConsensusBuilder<Block, PF, BI, RBackend>
impl<Block, PF, BI, RBackend, CIDP> polkadot_service::ExecuteWithClient
for RelayChainConsensusBuilder<Block, PF, BI, RBackend, CIDP>
where
Block: BlockT,
// Rust bug: https://github.com/rust-lang/rust/issues/24159
@@ -355,6 +374,7 @@ where
>,
BI: BlockImport<Block> + Send + Sync + 'static,
RBackend: Backend<PBlock> + 'static,
CIDP: CreateInherentDataProviders<Block, ()> + 'static,
{
type Output = Box<dyn ParachainConsensus<Block>>;
@@ -369,7 +389,7 @@ where
Box::new(RelayChainConsensus::new(
self.para_id,
self.proposer_factory,
self.inherent_data_providers,
self.create_inherent_data_providers,
self.block_import,
client.clone(),
self.relay_chain_backend,
+69 -76
View File
@@ -27,33 +27,36 @@
//!
//! Users must ensure that they register this pallet as an inherent provider.
use sp_std::{prelude::*, cmp, collections::btree_map::BTreeMap};
use sp_runtime::traits::{BlakeTwo256, Hash};
use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::{DispatchResult, DispatchResultWithPostInfo},
ensure, storage,
traits::Get,
weights::{DispatchClass, Weight, PostDispatchInfo, Pays},
};
use frame_system::{ensure_none, ensure_root};
use polkadot_parachain::primitives::RelayChainBlockNumber;
use cumulus_primitives_core::{
relay_chain,
well_known_keys::{self, NEW_VALIDATION_CODE},
AbridgedHostConfiguration, DmpMessageHandler, XcmpMessageHandler,
InboundDownwardMessage, InboundHrmpMessage, OnValidationData, OutboundHrmpMessage, ParaId,
PersistedValidationData, UpwardMessage, UpwardMessageSender, MessageSendError,
XcmpMessageSource, ChannelStatus, GetChannelInfo,
AbridgedHostConfiguration, ChannelStatus, DmpMessageHandler, GetChannelInfo,
InboundDownwardMessage, InboundHrmpMessage, MessageSendError, OnValidationData,
OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
XcmpMessageHandler, XcmpMessageSource,
};
use cumulus_primitives_parachain_inherent::ParachainInherentData;
use relay_state_snapshot::MessagingStateSnapshot;
use sp_runtime::transaction_validity::{
TransactionSource, TransactionValidity, InvalidTransaction, ValidTransaction,
TransactionLongevity,
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::{DispatchResult, DispatchResultWithPostInfo},
ensure,
inherent::{InherentData, InherentIdentifier, ProvideInherent},
storage,
traits::Get,
weights::{DispatchClass, Pays, PostDispatchInfo, Weight},
};
use sp_runtime::DispatchError;
use frame_system::{ensure_none, ensure_root};
use polkadot_parachain::primitives::RelayChainBlockNumber;
use relay_state_snapshot::MessagingStateSnapshot;
use sp_runtime::{
traits::{BlakeTwo256, Hash},
transaction_validity::{
InvalidTransaction, TransactionLongevity, TransactionSource, TransactionValidity,
ValidTransaction,
},
DispatchError,
};
use sp_std::{cmp, collections::btree_map::BTreeMap, prelude::*};
mod relay_state_snapshot;
#[macro_use]
@@ -414,8 +417,7 @@ decl_module! {
impl<T: Config> Module<T> {
fn validate_authorized_upgrade(code: &[u8]) -> Result<T::Hash, DispatchError> {
let required_hash = AuthorizedUpgrade::<T>::get()
.ok_or(Error::<T>::NothingAuthorized)?;
let required_hash = AuthorizedUpgrade::<T>::get().ok_or(Error::<T>::NothingAuthorized)?;
let actual_hash = T::Hashing::hash(&code[..]);
ensure!(actual_hash == required_hash, Error::<T>::Unauthorized);
Ok(actual_hash)
@@ -434,11 +436,11 @@ impl<T: Config> sp_runtime::traits::ValidateUnsigned for Module<T> {
provides: vec![hash.as_ref().to_vec()],
longevity: TransactionLongevity::max_value(),
propagate: true,
})
});
}
}
if let Call::set_validation_data(..) = call {
return Ok(Default::default())
return Ok(Default::default());
}
Err(InvalidTransaction::Call.into())
}
@@ -462,8 +464,8 @@ impl<T: Config> GetChannelInfo for Module<T> {
let channels = match Self::relevant_messaging_state() {
None => {
log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
return ChannelStatus::Closed
},
return ChannelStatus::Closed;
}
Some(d) => d.egress_channels,
};
// ^^^ NOTE: This storage field should carry over from the previous block. So if it's None
@@ -504,15 +506,16 @@ impl<T: Config> Module<T> {
/// # Panics
fn validate_validation_data(validation_data: &PersistedValidationData) {
validate_block::with_validation_params(|params| {
assert_eq!(params.parent_head, validation_data.parent_head, "Parent head doesn't match");
assert_eq!(
params.relay_parent_number,
validation_data.relay_parent_number,
params.parent_head, validation_data.parent_head,
"Parent head doesn't match"
);
assert_eq!(
params.relay_parent_number, validation_data.relay_parent_number,
"Relay parent number doesn't match",
);
assert_eq!(
params.relay_parent_storage_root,
validation_data.relay_parent_storage_root,
params.relay_parent_storage_root, validation_data.relay_parent_storage_root,
"Relay parent storage root doesn't match",
);
});
@@ -535,10 +538,14 @@ impl<T: Config> Module<T> {
let mut weight_used = 0;
if dm_count != 0 {
Self::deposit_event(RawEvent::DownwardMessagesReceived(dm_count));
let max_weight = ReservedDmpWeightOverride::get().unwrap_or_else(T::ReservedDmpWeight::get);
let max_weight =
ReservedDmpWeightOverride::get().unwrap_or_else(T::ReservedDmpWeight::get);
let message_iter = downward_messages.into_iter()
.inspect(|m| { dmq_head.extend_downward(m); })
let message_iter = downward_messages
.into_iter()
.inspect(|m| {
dmq_head.extend_downward(m);
})
.map(|m| (m.sent_at, m.msg));
weight_used += T::DmpMessageHandler::handle_dmp_messages(message_iter, max_weight);
LastDmqMqcHead::put(&dmq_head);
@@ -579,11 +586,9 @@ impl<T: Config> Module<T> {
// A violation of the assertion below indicates that one of the messages submitted by
// the collator was sent from a sender that doesn't have a channel opened to this parachain,
// according to the relay-parent state.
assert!(
ingress_channels
.binary_search_by_key(sender, |&(s, _)| s)
.is_ok(),
);
assert!(ingress_channels
.binary_search_by_key(sender, |&(s, _)| s)
.is_ok(),);
}
// Second, prepare horizontal messages for a more convenient processing:
@@ -628,10 +633,12 @@ impl<T: Config> Module<T> {
.extend_hrmp(horizontal_message);
}
}
let message_iter = horizontal_messages.iter()
let message_iter = horizontal_messages
.iter()
.map(|&(sender, ref message)| (sender, message.sent_at, &message.data[..]));
let max_weight = ReservedXcmpWeightOverride::get().unwrap_or_else(T::ReservedXcmpWeight::get);
let max_weight =
ReservedXcmpWeightOverride::get().unwrap_or_else(T::ReservedXcmpWeight::get);
let weight_used = T::XcmpMessageHandler::handle_xcmp_messages(message_iter, max_weight);
// Check that the MQC heads for each channel provided by the relay chain match the MQC heads
@@ -888,8 +895,8 @@ mod tests {
use codec::Encode;
use cumulus_primitives_core::{
AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage, PersistedValidationData,
relay_chain::BlockNumber as RelayBlockNumber,
relay_chain::BlockNumber as RelayBlockNumber, AbridgedHrmpChannel, InboundDownwardMessage,
InboundHrmpMessage, PersistedValidationData,
};
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
use frame_support::{
@@ -982,10 +989,7 @@ mod tests {
static SENT_MESSAGES: RefCell<Vec<(ParaId, Vec<u8>)>> = RefCell::new(Vec::new());
}
fn send_message(
dest: ParaId,
message: Vec<u8>,
) {
fn send_message(dest: ParaId, message: Vec<u8>) {
SENT_MESSAGES.with(|m| m.borrow_mut().push((dest, message)));
}
@@ -994,9 +998,9 @@ mod tests {
let mut ids = std::collections::BTreeSet::<ParaId>::new();
let mut taken = 0;
let mut result = Vec::new();
SENT_MESSAGES.with(|ms| ms.borrow_mut()
.retain(|m| {
let status = <Module::<Test> as GetChannelInfo>::get_channel_status(m.0);
SENT_MESSAGES.with(|ms| {
ms.borrow_mut().retain(|m| {
let status = <Module<Test> as GetChannelInfo>::get_channel_status(m.0);
let ready = matches!(status, ChannelStatus::Ready(..));
if ready && !ids.contains(&m.0) && taken < maximum_channels {
ids.insert(m.0);
@@ -1007,14 +1011,14 @@ mod tests {
true
}
})
);
});
result
}
}
impl DmpMessageHandler for SaveIntoThreadLocal {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
iter: impl Iterator<Item = (RelayBlockNumber, Vec<u8>)>,
_max_weight: Weight,
) -> Weight {
HANDLED_DMP_MESSAGES.with(|m| {
@@ -1027,7 +1031,7 @@ mod tests {
}
impl XcmpMessageHandler for SaveIntoThreadLocal {
fn handle_xcmp_messages<'a, I: Iterator<Item=(ParaId, RelayBlockNumber, &'a [u8])>>(
fn handle_xcmp_messages<'a, I: Iterator<Item = (ParaId, RelayBlockNumber, &'a [u8])>>(
iter: I,
_max_weight: Weight,
) -> Weight {
@@ -1284,16 +1288,15 @@ mod tests {
.add_with_post_test(
123,
|| {
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
},
|| {
let events = System::events();
assert_eq!(
events[0].event,
Event::parachain_system(crate::RawEvent::ValidationFunctionStored(1123).into())
Event::parachain_system(
crate::RawEvent::ValidationFunctionStored(1123).into()
)
);
},
)
@@ -1304,7 +1307,9 @@ mod tests {
let events = System::events();
assert_eq!(
events[0].event,
Event::parachain_system(crate::RawEvent::ValidationFunctionApplied(1234).into())
Event::parachain_system(
crate::RawEvent::ValidationFunctionApplied(1234).into()
)
);
},
);
@@ -1317,10 +1322,7 @@ mod tests {
builder.host_config.validation_upgrade_delay = 1000;
})
.add(123, || {
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
})
.add(234, || {
assert_eq!(
@@ -1338,10 +1340,7 @@ mod tests {
!PendingValidationFunction::exists(),
"validation function must not exist yet"
);
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
assert!(
PendingValidationFunction::exists(),
"validation function must now exist"
@@ -1511,14 +1510,8 @@ mod tests {
.add_with_post_test(
1,
|| {
send_message(
ParaId::from(300),
b"1".to_vec(),
);
send_message(
ParaId::from(400),
b"2".to_vec(),
);
send_message(ParaId::from(300), b"1".to_vec());
send_message(ParaId::from(400), b"2".to_vec());
},
|| {},
)
+1 -1
View File
@@ -55,7 +55,7 @@ hex-literal = "0.3.1"
hex = "0.4.3"
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = [ "std" ]
+1 -1
View File
@@ -46,7 +46,7 @@ hex = "0.4.3"
hex-literal = "0.3.1"
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = [ "std" ]
+36 -38
View File
@@ -28,9 +28,9 @@ use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sc_service::{Configuration, PartialComponents, Role, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};
use sp_api::ConstructRuntimeApi;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::PrefixedMemoryDB;
use sp_api::ConstructRuntimeApi;
use std::sync::Arc;
// Native executor instance.
@@ -63,7 +63,8 @@ pub fn new_partial<RuntimeApi, Executor>(
(Option<Telemetry>, Option<TelemetryWorkerHandle>),
>,
sc_service::Error,
> where
>
where
RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, Executor>>
+ Send
+ Sync
@@ -79,9 +80,9 @@ pub fn new_partial<RuntimeApi, Executor>(
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
Executor: sc_executor::NativeExecutionDispatch + 'static,
{
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
let telemetry = config.telemetry_endpoints.clone()
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
@@ -97,15 +98,12 @@ pub fn new_partial<RuntimeApi, Executor>(
)?;
let client = Arc::new(client);
let telemetry_worker_handle = telemetry
.as_ref()
.map(|(worker, _)| worker.handle());
let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
let telemetry = telemetry
.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
let registry = config.prometheus_registry();
@@ -120,7 +118,7 @@ pub fn new_partial<RuntimeApi, Executor>(
let import_queue = cumulus_client_consensus_relay_chain::import_queue(
client.clone(),
client.clone(),
inherent_data_providers.clone(),
|_, _| async { Ok(sp_timestamp::InherentDataProvider::from_system_time()) },
&task_manager.spawn_essential_handle(),
registry.clone(),
)?;
@@ -132,7 +130,6 @@ pub fn new_partial<RuntimeApi, Executor>(
keystore_container,
task_manager,
transaction_pool,
inherent_data_providers,
select_chain: (),
other: (telemetry, telemetry_worker_handle),
};
@@ -180,22 +177,17 @@ where
let parachain_config = prepare_node_config(parachain_config);
let params = new_partial::<RuntimeApi, Executor>(&parachain_config)?;
params
.inherent_data_providers
.register_provider(sp_timestamp::InherentDataProvider)
.unwrap();
let (mut telemetry, telemetry_worker_handle) = params.other;
let polkadot_full_node =
cumulus_client_service::build_polkadot_full_node(
polkadot_config,
collator_key.clone(),
telemetry_worker_handle,
)
.map_err(|e| match e {
polkadot_service::Error::Sub(x) => x,
s => format!("{}", s).into(),
})?;
let polkadot_full_node = cumulus_client_service::build_polkadot_full_node(
polkadot_config,
collator_key.clone(),
telemetry_worker_handle,
)
.map_err(|e| match e {
polkadot_service::Error::Sub(x) => x,
s => format!("{}", s).into(),
})?;
let client = params.client.clone();
let backend = params.backend.clone();
@@ -258,7 +250,9 @@ where
let parachain_consensus = build_relay_chain_consensus(BuildRelayChainConsensusParams {
para_id: id,
proposer_factory,
inherent_data_providers: params.inherent_data_providers,
create_inherent_data_providers: |_, _| async {
Ok(sp_timestamp::InherentDataProvider::from_system_time())
},
block_import: client.clone(),
relay_chain_client: polkadot_full_node.client.clone(),
relay_chain_backend: polkadot_full_node.backend.clone(),
@@ -302,9 +296,10 @@ pub async fn start_node(
polkadot_config: Configuration,
id: ParaId,
validator: bool,
) -> sc_service::error::Result<
(TaskManager, Arc<TFullClient<Block, parachain_runtime::RuntimeApi, RuntimeExecutor>>)
> {
) -> sc_service::error::Result<(
TaskManager,
Arc<TFullClient<Block, parachain_runtime::RuntimeApi, RuntimeExecutor>>,
)> {
start_node_impl::<parachain_runtime::RuntimeApi, RuntimeExecutor, _>(
parachain_config,
collator_key,
@@ -312,7 +307,8 @@ pub async fn start_node(
id,
validator,
|_| Default::default(),
).await
)
.await
}
/// Start a rococo-shell parachain node.
@@ -322,9 +318,10 @@ pub async fn start_shell_node(
polkadot_config: Configuration,
id: ParaId,
validator: bool,
) -> sc_service::error::Result<
(TaskManager, Arc<TFullClient<Block, shell_runtime::RuntimeApi, ShellRuntimeExecutor>>)
> {
) -> sc_service::error::Result<(
TaskManager,
Arc<TFullClient<Block, shell_runtime::RuntimeApi, ShellRuntimeExecutor>>,
)> {
start_node_impl::<shell_runtime::RuntimeApi, ShellRuntimeExecutor, _>(
parachain_config,
collator_key,
@@ -332,5 +329,6 @@ pub async fn start_shell_node(
id,
validator,
|_| Default::default(),
).await
)
.await
}
+1 -1
View File
@@ -37,7 +37,7 @@ cumulus-primitives-core = { path = "../../primitives/core", default-features = f
polkadot-parachain = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = [ "std", "upgrade" ]
+1 -1
View File
@@ -37,7 +37,7 @@ cumulus-primitives-core = { path = "../../primitives/core", default-features = f
polkadot-parachain = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = [ "std" ]
-1
View File
@@ -29,7 +29,6 @@ sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+2 -9
View File
@@ -89,8 +89,6 @@ pub fn new_partial(
>,
sc_service::Error,
> {
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, RuntimeExecutor>(&config, None)?;
let client = Arc::new(client);
@@ -108,7 +106,7 @@ pub fn new_partial(
let import_queue = cumulus_client_consensus_relay_chain::import_queue(
client.clone(),
client.clone(),
inherent_data_providers.clone(),
|_, _| async { Ok(sp_timestamp::InherentDataProvider::from_system_time()) },
&task_manager.spawn_essential_handle(),
registry.clone(),
)?;
@@ -120,7 +118,6 @@ pub fn new_partial(
keystore_container,
task_manager,
transaction_pool,
inherent_data_providers,
select_chain: (),
other: (),
};
@@ -158,10 +155,6 @@ where
let mut parachain_config = prepare_node_config(parachain_config);
let params = new_partial(&mut parachain_config)?;
params
.inherent_data_providers
.register_provider(sp_timestamp::InherentDataProvider)
.expect("Registers timestamp inherent data provider.");
let transaction_pool = params.transaction_pool.clone();
let mut task_manager = params.task_manager;
@@ -242,7 +235,7 @@ where
let parachain_consensus = cumulus_client_consensus_relay_chain::RelayChainConsensus::new(
para_id,
proposer_factory,
params.inherent_data_providers,
|_, _| async { Ok(sp_timestamp::InherentDataProvider::from_system_time()) },
client.clone(),
relay_chain_full_node.client.clone(),
relay_chain_full_node.backend.clone(),