ci: add quick-check with rustfmt (#615)

* ci: add quick-check with clippy and rustfmt

* chore: rustfmt round

* chore: set the same rustfmt config than substrate

* chore: fix formatting

* ci: remove clippy

* ci: switch to nightly for the checks

* ci: fix toolchains and naming

* ci: Limit the check to formatting

* chore: fix formatting

* Update .rustfmt.toml

* Update .rustfmt.toml

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
This commit is contained in:
Chevdor
2021-09-16 16:57:52 +02:00
committed by GitHub
parent e3eb3a0a12
commit 6b20f7a2c5
98 changed files with 1244 additions and 1872 deletions
+40
View File
@@ -0,0 +1,40 @@
name: Quick check Formatting
on:
push:
branches:
- "*"
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
jobs:
quick_check:
strategy:
matrix:
os: ["ubuntu-latest"]
runs-on: ${{ matrix.os }}
steps:
- name: Install Rust nightly toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: clippy, rustfmt
- name: Cache Dependencies & Build Outputs
uses: actions/cache@v2
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-${{ matrix.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- uses: actions/checkout@v2
- name: Cargo fmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
+23 -2
View File
@@ -1,2 +1,23 @@
hard_tabs=true
merge_imports=true
# Basic
hard_tabs = true
max_width = 100
use_small_heuristics = "Max"
# Imports
imports_granularity = "Crate"
reorder_imports = true
# Consistency
newline_style = "Unix"
# Misc
binop_separator = "Back"
chain_width = 80
match_arm_blocks = false
match_arm_leading_pipes = "Preserve"
match_block_trailing_comma = true
reorder_impl_items = false
spaces_around_ranges = false
trailing_comma = "Vertical"
trailing_semicolon = false
use_field_init_shorthand = true
+8 -13
View File
@@ -54,10 +54,8 @@ impl PurgeChainCmd {
relay_config: sc_service::Configuration,
) -> sc_cli::Result<()> {
let databases = match (self.parachain, self.relaychain) {
(true, true) | (false, false) => vec![
("parachain", para_config.database),
("relaychain", relay_config.database),
],
(true, true) | (false, false) =>
vec![("parachain", para_config.database), ("relaychain", relay_config.database)],
(true, false) => vec![("parachain", para_config.database)],
(false, true) => vec![("relaychain", relay_config.database)],
};
@@ -86,11 +84,11 @@ impl PurgeChainCmd {
let input = input.trim();
match input.chars().nth(0) {
Some('y') | Some('Y') => {}
Some('y') | Some('Y') => {},
_ => {
println!("Aborted");
return Ok(());
}
return Ok(())
},
}
}
@@ -98,10 +96,10 @@ impl PurgeChainCmd {
match fs::remove_dir_all(&db_path) {
Ok(_) => {
println!("{:?} removed.", &db_path);
}
},
Err(ref err) if err.kind() == io::ErrorKind::NotFound => {
eprintln!("{:?} did not exist.", &db_path);
}
},
Err(err) => return Err(err.into()),
}
}
@@ -155,10 +153,7 @@ impl RunCmd {
new_base.validator = self.base.validator || self.collator;
NormalizedRunCmd {
base: new_base,
parachain_id: self.parachain_id,
}
NormalizedRunCmd { base: new_base, parachain_id: self.parachain_id }
}
}
+19 -41
View File
@@ -81,12 +81,7 @@ where
) -> Self {
let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block)));
Self {
block_status,
wait_to_announce,
runtime_api,
parachain_consensus,
}
Self { block_status, wait_to_announce, runtime_api, parachain_consensus }
}
/// Checks the status of the given block hash in the Parachain.
@@ -101,7 +96,7 @@ where
"Skipping candidate production, because block is still queued for import.",
);
false
}
},
Ok(BlockStatus::InChainWithState) => true,
Ok(BlockStatus::InChainPruned) => {
tracing::error!(
@@ -110,7 +105,7 @@ where
hash,
);
false
}
},
Ok(BlockStatus::KnownBad) => {
tracing::error!(
target: LOG_TARGET,
@@ -118,7 +113,7 @@ where
"Block is tagged as known bad and is included in the relay chain! Skipping candidate production!",
);
false
}
},
Ok(BlockStatus::Unknown) => {
if header.number().is_zero() {
tracing::error!(
@@ -134,7 +129,7 @@ where
);
}
false
}
},
Err(e) => {
tracing::error!(
target: LOG_TARGET,
@@ -143,7 +138,7 @@ where
"Failed to get block status.",
);
false
}
},
}
}
@@ -168,8 +163,8 @@ where
error = ?e,
"Failed to collect collation info.",
);
return None;
}
return None
},
};
Some(Collation {
@@ -202,13 +197,13 @@ where
error = ?e,
"Could not decode the head data."
);
return None;
}
return None
},
};
let last_head_hash = last_head.hash();
if !self.check_block_status(last_head_hash, &last_head) {
return None;
return None
}
tracing::info!(
@@ -232,8 +227,8 @@ where
Ok(proof) => proof,
Err(e) => {
tracing::error!(target: "cumulus-collator", "Failed to compact proof: {:?}", e);
return None;
}
return None
},
};
// Create the parachain block data for the validators.
@@ -252,20 +247,11 @@ where
let (result_sender, signed_stmt_recv) = oneshot::channel();
self.wait_to_announce
.lock()
.wait_to_announce(block_hash, signed_stmt_recv);
self.wait_to_announce.lock().wait_to_announce(block_hash, signed_stmt_recv);
tracing::info!(
target: LOG_TARGET,
?block_hash,
"Produced proof-of-validity candidate.",
);
tracing::info!(target: LOG_TARGET, ?block_hash, "Produced proof-of-validity candidate.",);
Some(CollationResult {
collation,
result_sender: Some(result_sender),
})
Some(CollationResult { collation, result_sender: Some(result_sender) })
}
}
@@ -322,10 +308,7 @@ pub async fn start_collator<Block, RA, BS, Spawner>(
};
overseer_handle
.send_msg(
CollationGenerationMessage::Initialize(config),
"StartCollator",
)
.send_msg(CollationGenerationMessage::Initialize(config), "StartCollator")
.await;
overseer_handle
@@ -384,10 +367,7 @@ mod tests {
.await
.expect("Imports the block");
Some(ParachainCandidate {
block,
proof: proof.expect("Proof is returned"),
})
Some(ParachainCandidate { block, proof: proof.expect("Proof is returned") })
}
}
@@ -424,9 +404,7 @@ mod tests {
spawner,
para_id,
key: CollatorPair::generate().0,
parachain_consensus: Box::new(DummyParachainConsensus {
client: client.clone(),
}),
parachain_consensus: Box::new(DummyParachainConsensus { client: client.clone() }),
});
block_on(collator_start);
+3 -7
View File
@@ -215,9 +215,8 @@ where
relay_parent: PHash,
validation_data: &PersistedValidationData,
) -> Option<ParachainCandidate<B>> {
let (inherent_data, inherent_data_providers) = self
.inherent_data(parent.hash(), validation_data, relay_parent)
.await?;
let (inherent_data, inherent_data_providers) =
self.inherent_data(parent.hash(), validation_data, relay_parent).await?;
let info = SlotInfo::new(
inherent_data_providers.slot(),
@@ -234,10 +233,7 @@ where
let res = self.aura_worker.lock().await.on_slot(info).await?;
Some(ParachainCandidate {
block: res.block,
proof: res.storage_proof,
})
Some(ParachainCandidate { block: res.block, proof: res.storage_proof })
}
}
+1 -3
View File
@@ -65,9 +65,7 @@ impl<B: BlockT> ParachainConsensus<B> for Box<dyn ParachainConsensus<B> + Send +
relay_parent: PHash,
validation_data: &PersistedValidationData,
) -> Option<ParachainCandidate<B>> {
(*self)
.produce_candidate(parent, relay_parent, validation_data)
.await
(*self).produce_candidate(parent, relay_parent, validation_data).await
}
}
@@ -75,7 +75,7 @@ where
h
} else {
tracing::debug!(target: "cumulus-consensus", "Stopping following finalized head.");
return;
return
};
let header = match Block::Header::decode(&mut &finalized_head[..]) {
@@ -86,8 +86,8 @@ where
error = ?err,
"Could not decode parachain header while following finalized heads.",
);
continue;
}
continue
},
};
let hash = header.hash();
@@ -140,12 +140,8 @@ pub async fn run_parachain_consensus<P, R, Block, B>(
R: RelaychainClient,
B: Backend<Block>,
{
let follow_new_best = follow_new_best(
para_id,
parachain.clone(),
relay_chain.clone(),
announce_block,
);
let follow_new_best =
follow_new_best(para_id, parachain.clone(), relay_chain.clone(), announce_block);
let follow_finalized_head = follow_finalized_head(para_id, parachain, relay_chain);
select! {
_ = follow_new_best.fuse() => {},
@@ -242,12 +238,12 @@ async fn handle_new_block_imported<Block, P>(
};
let unset_hash = if notification.header.number() < unset_best_header.number() {
return;
return
} else if notification.header.number() == unset_best_header.number() {
let unset_hash = unset_best_header.hash();
if unset_hash != notification.hash {
return;
return
} else {
unset_hash
}
@@ -263,7 +259,7 @@ async fn handle_new_block_imported<Block, P>(
.expect("We checked above that the value is set; qed");
import_block_as_new_best(unset_hash, unset_best_header, parachain).await;
}
},
state => tracing::debug!(
target: "cumulus-consensus",
?unset_best_header,
@@ -292,8 +288,8 @@ async fn handle_new_best_parachain_head<Block, P>(
error = ?err,
"Could not decode Parachain header while following best heads.",
);
return;
}
return
},
};
let hash = parachain_head.hash();
@@ -311,14 +307,14 @@ async fn handle_new_best_parachain_head<Block, P>(
unset_best_header.take();
import_block_as_new_best(hash, parachain_head, parachain).await;
}
},
Ok(BlockStatus::InChainPruned) => {
tracing::error!(
target: "cumulus-collator",
block_hash = ?hash,
"Trying to set pruned block as new best!",
);
}
},
Ok(BlockStatus::Unknown) => {
*unset_best_header = Some(parachain_head);
@@ -327,7 +323,7 @@ async fn handle_new_best_parachain_head<Block, P>(
block_hash = ?hash,
"Parachain block not yet imported, waiting for import to enact as best block.",
);
}
},
Err(e) => {
tracing::error!(
target: "cumulus-collator",
@@ -335,8 +331,8 @@ async fn handle_new_best_parachain_head<Block, P>(
error = ?e,
"Failed to get block status of block.",
);
}
_ => {}
},
_ => {},
}
}
}
@@ -356,7 +352,7 @@ where
"Skipping importing block as new best block, because there already exists a \
best block with an higher number",
);
return;
return
}
// Make it the new best block
@@ -364,10 +360,7 @@ where
block_import_params.fork_choice = Some(ForkChoiceStrategy::Custom(true));
block_import_params.import_existing = true;
if let Err(err) = (&*parachain)
.import_block(block_import_params, Default::default())
.await
{
if let Err(err) = (&*parachain).import_block(block_import_params, Default::default()).await {
tracing::warn!(
target: "cumulus-consensus",
block_hash = ?hash,
@@ -392,10 +385,7 @@ where
self.import_notification_stream()
.filter_map(move |n| {
future::ready(if n.is_new_best {
relay_chain
.parachain_head_at(&BlockId::hash(n.hash), para_id)
.ok()
.flatten()
relay_chain.parachain_head_at(&BlockId::hash(n.hash), para_id).ok().flatten()
} else {
None
})
@@ -409,10 +399,7 @@ where
self.finality_notification_stream()
.filter_map(move |n| {
future::ready(
relay_chain
.parachain_head_at(&BlockId::hash(n.hash), para_id)
.ok()
.flatten(),
relay_chain.parachain_head_at(&BlockId::hash(n.hash), para_id).ok().flatten(),
)
})
.boxed()
+18 -60
View File
@@ -62,9 +62,7 @@ struct Relaychain {
impl Relaychain {
fn new() -> Self {
Self {
inner: Arc::new(Mutex::new(RelaychainInner::new())),
}
Self { inner: Arc::new(Mutex::new(RelaychainInner::new())) }
}
}
@@ -125,24 +123,17 @@ fn follow_new_best_works() {
let block = build_and_import_block(client.clone(), false);
let relay_chain = Relaychain::new();
let new_best_heads_sender = relay_chain
.inner
.lock()
.unwrap()
.new_best_heads_sender
.clone();
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
let work = async move {
new_best_heads_sender
.unbounded_send(block.header().clone())
.unwrap();
new_best_heads_sender.unbounded_send(block.header().clone()).unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
if block.hash() == client.usage_info().chain.best_hash {
break;
break
}
}
};
@@ -166,24 +157,17 @@ fn follow_finalized_works() {
let block = build_and_import_block(client.clone(), false);
let relay_chain = Relaychain::new();
let finalized_sender = relay_chain
.inner
.lock()
.unwrap()
.finalized_heads_sender
.clone();
let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
let work = async move {
finalized_sender
.unbounded_send(block.header().clone())
.unwrap();
finalized_sender.unbounded_send(block.header().clone()).unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
if block.hash() == client.usage_info().chain.finalized_hash {
break;
break
}
}
};
@@ -214,32 +198,23 @@ fn follow_finalized_does_not_stop_on_unknown_block() {
};
let relay_chain = Relaychain::new();
let finalized_sender = relay_chain
.inner
.lock()
.unwrap()
.finalized_heads_sender
.clone();
let finalized_sender = relay_chain.inner.lock().unwrap().finalized_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
let work = async move {
for _ in 0..3usize {
finalized_sender
.unbounded_send(unknown_block.header().clone())
.unwrap();
finalized_sender.unbounded_send(unknown_block.header().clone()).unwrap();
Delay::new(Duration::from_millis(100)).await;
}
finalized_sender
.unbounded_send(block.header().clone())
.unwrap();
finalized_sender.unbounded_send(block.header().clone()).unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
if block.hash() == client.usage_info().chain.finalized_hash {
break;
break
}
}
};
@@ -273,32 +248,23 @@ fn follow_new_best_sets_best_after_it_is_imported() {
};
let relay_chain = Relaychain::new();
let new_best_heads_sender = relay_chain
.inner
.lock()
.unwrap()
.new_best_heads_sender
.clone();
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
let work = async move {
new_best_heads_sender
.unbounded_send(block.header().clone())
.unwrap();
new_best_heads_sender.unbounded_send(block.header().clone()).unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
if block.hash() == client.usage_info().chain.best_hash {
break;
break
}
}
// Announce the unknown block
new_best_heads_sender
.unbounded_send(unknown_block.header().clone())
.unwrap();
new_best_heads_sender.unbounded_send(unknown_block.header().clone()).unwrap();
// Do some iterations. As this is a local task executor, only one task can run at a time.
// Meaning that it should already have processed the unknown block.
@@ -313,15 +279,12 @@ fn follow_new_best_sets_best_after_it_is_imported() {
block_import_params.body = Some(body);
// Now import the unkown block to make it "known"
client
.import_block(block_import_params, Default::default())
.await
.unwrap();
client.import_block(block_import_params, Default::default()).await.unwrap();
loop {
Delay::new(Duration::from_millis(100)).await;
if unknown_block.hash() == client.usage_info().chain.best_hash {
break;
break
}
}
};
@@ -362,12 +325,7 @@ fn do_not_set_best_block_to_older_block() {
assert_eq!(NUM_BLOCKS as u32, client.usage_info().chain.best_number);
let relay_chain = Relaychain::new();
let new_best_heads_sender = relay_chain
.inner
.lock()
.unwrap()
.new_best_heads_sender
.clone();
let new_best_heads_sender = relay_chain.inner.lock().unwrap().new_best_heads_sender.clone();
let consensus =
run_parachain_consensus(100.into(), client.clone(), relay_chain, Arc::new(|_, _| {}));
@@ -40,11 +40,7 @@ pub struct Verifier<Client, Block, CIDP> {
impl<Client, Block, CIDP> Verifier<Client, Block, CIDP> {
/// Create a new instance.
pub fn new(client: Arc<Client>, create_inherent_data_providers: CIDP) -> Self {
Self {
client,
create_inherent_data_providers,
_marker: PhantomData,
}
Self { client, create_inherent_data_providers, _marker: PhantomData }
}
}
@@ -59,13 +55,7 @@ where
async fn verify(
&mut self,
mut block_params: BlockImportParams<Block, ()>,
) -> Result<
(
BlockImportParams<Block, ()>,
Option<Vec<(CacheKeyId, Vec<u8>)>>,
),
String,
> {
) -> Result<(BlockImportParams<Block, ()>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> {
if let Some(inner_body) = block_params.body.take() {
let inherent_data_providers = self
.create_inherent_data_providers
@@ -73,9 +63,8 @@ where
.await
.map_err(|e| e.to_string())?;
let inherent_data = inherent_data_providers
.create_inherent_data()
.map_err(|e| format!("{:?}", e))?;
let inherent_data =
inherent_data_providers.create_inherent_data().map_err(|e| format!("{:?}", e))?;
let block = Block::new(block_params.header.clone(), inner_body);
@@ -130,9 +119,7 @@ where
Ok(BasicQueue::new(
verifier,
Box::new(cumulus_client_consensus_common::ParachainBlockImport::new(
block_import,
)),
Box::new(cumulus_client_consensus_common::ParachainBlockImport::new(block_import)),
None,
spawner,
registry,
@@ -180,15 +180,10 @@ where
)
.ok()?;
let inherent_data = self
.inherent_data(parent.hash(), &validation_data, relay_parent)
.await?;
let inherent_data =
self.inherent_data(parent.hash(), &validation_data, relay_parent).await?;
let Proposal {
block,
storage_changes,
proof,
} = proposer
let Proposal { block, storage_changes, proof } = proposer
.propose(
inherent_data,
Default::default(),
@@ -226,7 +221,7 @@ where
"Error importing build block.",
);
return None;
return None
}
Some(ParachainCandidate { block, proof })
+37 -66
View File
@@ -33,13 +33,13 @@ use sp_runtime::{
traits::{Block as BlockT, HashFor, Header as HeaderT},
};
use polkadot_node_primitives::{SignedFullStatement, Statement, CollationSecondedSignal};
use polkadot_client::ClientHandle;
use polkadot_node_primitives::{CollationSecondedSignal, SignedFullStatement, Statement};
use polkadot_parachain::primitives::HeadData;
use polkadot_primitives::v1::{
Block as PBlock, Hash as PHash, CandidateReceipt, CompactStatement, Id as ParaId,
OccupiedCoreAssumption, ParachainHost, UncheckedSigned, SigningContext,
Block as PBlock, CandidateReceipt, CompactStatement, Hash as PHash, Id as ParaId,
OccupiedCoreAssumption, ParachainHost, SigningContext, UncheckedSigned,
};
use polkadot_client::ClientHandle;
use codec::{Decode, Encode};
use futures::{
@@ -85,14 +85,13 @@ impl BlockAnnounceData {
///
/// This will not check the signature, for this you should use [`BlockAnnounceData::check_signature`].
fn validate(&self, encoded_header: Vec<u8>) -> Result<(), Validation> {
let candidate_hash = if let CompactStatement::Seconded(h) = self.statement.unchecked_payload() {
let candidate_hash = if let CompactStatement::Seconded(h) =
self.statement.unchecked_payload()
{
h
} else {
tracing::debug!(
target: LOG_TARGET,
"`CompactStatement` isn't the candidate variant!",
);
return Err(Validation::Failure { disconnect: true });
tracing::debug!(target: LOG_TARGET, "`CompactStatement` isn't the candidate variant!",);
return Err(Validation::Failure { disconnect: true })
};
if *candidate_hash != self.receipt.hash() {
@@ -100,7 +99,7 @@ impl BlockAnnounceData {
target: LOG_TARGET,
"Receipt candidate hash doesn't match candidate hash in statement",
);
return Err(Validation::Failure { disconnect: true });
return Err(Validation::Failure { disconnect: true })
}
if HeadData(encoded_header).hash() != self.receipt.descriptor.para_head {
@@ -108,7 +107,7 @@ impl BlockAnnounceData {
target: LOG_TARGET,
"Receipt para head hash doesn't match the hash of the header in the block announcement",
);
return Err(Validation::Failure { disconnect: true });
return Err(Validation::Failure { disconnect: true })
}
Ok(())
@@ -131,22 +130,16 @@ impl BlockAnnounceData {
let runtime_api_block_id = BlockId::Hash(self.receipt.descriptor.relay_parent);
let session_index = match runtime_api.session_index_for_child(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return Err(BlockAnnounceError(format!("{:?}", e)));
}
Err(e) => return Err(BlockAnnounceError(format!("{:?}", e))),
};
let signing_context = SigningContext {
parent_hash: self.receipt.descriptor.relay_parent,
session_index,
};
let signing_context =
SigningContext { parent_hash: self.receipt.descriptor.relay_parent, session_index };
// Check that the signer is a legit validator.
let authorities = match runtime_api.validators(&runtime_api_block_id) {
Ok(r) => r,
Err(e) => {
return Err(BlockAnnounceError(format!("{:?}", e)));
}
Err(e) => return Err(BlockAnnounceError(format!("{:?}", e))),
};
let signer = match authorities.get(validator_index.0 as usize) {
Some(r) => r,
@@ -156,22 +149,18 @@ impl BlockAnnounceData {
"Block announcement justification signer is a validator index out of bound",
);
return Ok(Validation::Failure { disconnect: true });
}
return Ok(Validation::Failure { disconnect: true })
},
};
// Check statement is correctly signed.
if self
.statement
.try_into_checked(&signing_context, &signer)
.is_err()
{
if self.statement.try_into_checked(&signing_context, &signer).is_err() {
tracing::debug!(
target: LOG_TARGET,
"Block announcement justification signature is invalid.",
);
return Ok(Validation::Failure { disconnect: true });
return Ok(Validation::Failure { disconnect: true })
}
Ok(Validation::Success { is_new_best: true })
@@ -185,13 +174,10 @@ impl TryFrom<&'_ SignedFullStatement> for BlockAnnounceData {
let receipt = if let Statement::Seconded(receipt) = stmt.payload() {
receipt.to_plain()
} else {
return Err(());
return Err(())
};
Ok(BlockAnnounceData {
receipt,
statement: stmt.convert_payload().into(),
})
Ok(BlockAnnounceData { receipt, statement: stmt.convert_payload().into() })
}
}
@@ -273,16 +259,13 @@ where
.persisted_validation_data(block_id, para_id, OccupiedCoreAssumption::TimedOut)
.map_err(|e| Box::new(BlockAnnounceError(format!("{:?}", e))) as Box<_>)?
.ok_or_else(|| {
Box::new(BlockAnnounceError(
"Could not find parachain head in relay chain".into(),
)) as Box<_>
Box::new(BlockAnnounceError("Could not find parachain head in relay chain".into()))
as Box<_>
})?;
let para_head =
Block::Header::decode(&mut &validation_data.parent_head.0[..]).map_err(|e| {
Box::new(BlockAnnounceError(format!(
"Failed to decode parachain head: {:?}",
e
))) as Box<_>
Box::new(BlockAnnounceError(format!("Failed to decode parachain head: {:?}", e)))
as Box<_>
})?;
Ok(para_head)
@@ -320,21 +303,15 @@ where
let best_head =
Self::included_block(&*relay_chain_client, &runtime_api_block_id, para_id)?;
let known_best_number = best_head.number();
let backed_block = ||
Self::backed_block_hash(&*relay_chain_client, &runtime_api_block_id, para_id);
let backed_block =
|| Self::backed_block_hash(&*relay_chain_client, &runtime_api_block_id, para_id);
if best_head == header {
tracing::debug!(
target: LOG_TARGET,
"Announced block matches best block.",
);
tracing::debug!(target: LOG_TARGET, "Announced block matches best block.",);
Ok(Validation::Success { is_new_best: true })
} else if Some(HeadData(header.encode()).hash()) == backed_block()? {
tracing::debug!(
target: LOG_TARGET,
"Announced block matches latest backed block.",
);
tracing::debug!(target: LOG_TARGET, "Announced block matches latest backed block.",);
Ok(Validation::Success { is_new_best: true })
} else if block_number >= known_best_number {
@@ -367,23 +344,20 @@ where
mut data: &[u8],
) -> Pin<Box<dyn Future<Output = Result<Validation, BoxedError>> + Send>> {
if self.relay_chain_sync_oracle.is_major_syncing() {
return ready(Ok(Validation::Success { is_new_best: false })).boxed();
return ready(Ok(Validation::Success { is_new_best: false })).boxed()
}
if data.is_empty() {
return self
.handle_empty_block_announce_data(header.clone())
.boxed();
return self.handle_empty_block_announce_data(header.clone()).boxed()
}
let block_announce_data = match BlockAnnounceData::decode(&mut data) {
Ok(r) => r,
Err(_) => {
Err(_) =>
return ready(Err(Box::new(BlockAnnounceError(
"Can not decode the `BlockAnnounceData`".into(),
)) as Box<_>))
.boxed()
}
.boxed(),
};
let relay_chain_client = self.relay_chain_client.clone();
@@ -392,7 +366,7 @@ where
async move {
if let Err(e) = block_announce_data.validate(header_encoded) {
return Ok(e);
return Ok(e)
}
let relay_parent = block_announce_data.receipt.descriptor.relay_parent;
@@ -519,10 +493,7 @@ impl<Block: BlockT> WaitToAnnounce<Block> {
spawner: Arc<dyn SpawnNamed + Send + Sync>,
announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
) -> WaitToAnnounce<Block> {
WaitToAnnounce {
spawner,
announce_block,
}
WaitToAnnounce { spawner, announce_block }
}
/// Wait for a candidate message for the block, then announce the block. The candidate
@@ -567,8 +538,8 @@ async fn wait_to_announce<Block: BlockT>(
block = ?block_hash,
"Wait to announce stopped, because sender was dropped.",
);
return;
}
return
},
};
if let Ok(data) = BlockAnnounceData::try_from(&statement) {
+23 -68
View File
@@ -15,15 +15,16 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use cumulus_test_service::runtime::{Block, Header, Hash};
use cumulus_test_service::runtime::{Block, Hash, Header};
use futures::{executor::block_on, poll, task::Poll};
use parking_lot::Mutex;
use polkadot_node_primitives::{SignedFullStatement, Statement};
use polkadot_primitives::v1::{
Block as PBlock, BlockNumber, CandidateCommitments, CandidateDescriptor, CandidateEvent,
CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash as PHash, HeadData, Id as ParaId,
InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, ParachainHost,
PersistedValidationData, SessionIndex, SessionInfo, SigningContext, ValidationCode, ValidationCodeHash,
ValidatorId, ValidatorIndex,
PersistedValidationData, SessionIndex, SessionInfo, SigningContext, ValidationCode,
ValidationCodeHash, ValidatorId, ValidatorIndex,
};
use polkadot_test_client::{
Client as PClient, ClientBlockImportExt, DefaultTestClientBuilderExt, FullBackend as PBackend,
@@ -37,7 +38,6 @@ use sp_keyring::Sr25519Keyring;
use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr};
use sp_runtime::RuntimeAppPublic;
use std::collections::BTreeMap;
use parking_lot::Mutex;
fn check_error(error: crate::BoxedError, check_error: impl Fn(&BlockAnnounceError) -> bool) {
let error = *error
@@ -61,10 +61,8 @@ impl SyncOracle for DummyCollatorNetwork {
}
}
fn make_validator_and_api() -> (
BlockAnnounceValidator<Block, TestApi, PBackend, PClient>,
Arc<TestApi>,
) {
fn make_validator_and_api(
) -> (BlockAnnounceValidator<Block, TestApi, PBackend, PClient>, Arc<TestApi>) {
let api = Arc::new(TestApi::new());
(
@@ -94,12 +92,7 @@ async fn make_gossip_message_and_header_using_genesis(
api: Arc<TestApi>,
validator_index: u32,
) -> (SignedFullStatement, Header) {
let relay_parent = api
.relay_client
.hash(0)
.ok()
.flatten()
.expect("Genesis hash exists");
let relay_parent = api.relay_client.hash(0).ok().flatten().expect("Genesis hash exists");
make_gossip_message_and_header(api, relay_parent, validator_index).await
}
@@ -116,14 +109,9 @@ async fn make_gossip_message_and_header(
Some(&Sr25519Keyring::Alice.to_seed()),
)
.unwrap();
let session_index = api
.runtime_api()
.session_index_for_child(&BlockId::Hash(relay_parent))
.unwrap();
let signing_context = SigningContext {
parent_hash: relay_parent,
session_index,
};
let session_index =
api.runtime_api().session_index_for_child(&BlockId::Hash(relay_parent)).unwrap();
let signing_context = SigningContext { parent_hash: relay_parent, session_index };
let header = default_header();
let candidate_receipt = CommittedCandidateReceipt {
@@ -156,10 +144,7 @@ async fn make_gossip_message_and_header(
#[test]
fn valid_if_no_data_and_less_than_best_known_number() {
let mut validator = make_validator_and_api().0;
let header = Header {
number: 0,
..default_header()
};
let header = Header { number: 0, ..default_header() };
let res = block_on(validator.validate(&header, &[]));
assert_eq!(
@@ -172,11 +157,7 @@ fn valid_if_no_data_and_less_than_best_known_number() {
#[test]
fn invalid_if_no_data_exceeds_best_known_number() {
let mut validator = make_validator_and_api().0;
let header = Header {
number: 1,
state_root: Hash::random(),
..default_header()
};
let header = Header { number: 1, state_root: Hash::random(), ..default_header() };
let res = block_on(validator.validate(&header, &[]));
assert_eq!(
@@ -219,9 +200,7 @@ fn check_signer_is_legit_validator() {
let (mut validator, api) = make_validator_and_api();
let (signed_statement, header) = block_on(make_gossip_message_and_header_using_genesis(api, 1));
let data = BlockAnnounceData::try_from(&signed_statement)
.unwrap()
.encode();
let data = BlockAnnounceData::try_from(&signed_statement).unwrap().encode();
let res = block_on(validator.validate(&header, &data));
assert_eq!(Validation::Failure { disconnect: true }, res.unwrap());
@@ -233,9 +212,7 @@ fn check_statement_is_correctly_signed() {
let (signed_statement, header) = block_on(make_gossip_message_and_header_using_genesis(api, 0));
let mut data = BlockAnnounceData::try_from(&signed_statement)
.unwrap()
.encode();
let mut data = BlockAnnounceData::try_from(&signed_statement).unwrap().encode();
// The signature comes at the end of the type, so change a bit to make the signature invalid.
let last = data.len() - 1;
@@ -258,14 +235,9 @@ fn check_statement_seconded() {
Some(&Sr25519Keyring::Alice.to_seed()),
)
.unwrap();
let session_index = api
.runtime_api()
.session_index_for_child(&BlockId::Hash(relay_parent))
.unwrap();
let signing_context = SigningContext {
parent_hash: relay_parent,
session_index,
};
let session_index =
api.runtime_api().session_index_for_child(&BlockId::Hash(relay_parent)).unwrap();
let signing_context = SigningContext { parent_hash: relay_parent, session_index };
let statement = Statement::Valid(Default::default());
@@ -296,9 +268,7 @@ fn check_header_match_candidate_receipt_header() {
let (signed_statement, mut header) =
block_on(make_gossip_message_and_header_using_genesis(api, 0));
let data = BlockAnnounceData::try_from(&signed_statement)
.unwrap()
.encode();
let data = BlockAnnounceData::try_from(&signed_statement).unwrap().encode();
header.number = 300;
let res = block_on(validator.validate(&header, &data));
@@ -315,17 +285,11 @@ fn relay_parent_not_imported_when_block_announce_is_processed() {
let (mut validator, api) = make_validator_and_api();
let mut client = api.relay_client.clone();
let block = client
.init_polkadot_block_builder()
.build()
.expect("Build new block")
.block;
let block = client.init_polkadot_block_builder().build().expect("Build new block").block;
let (signed_statement, header) = make_gossip_message_and_header(api, block.hash(), 0).await;
let data = BlockAnnounceData::try_from(&signed_statement)
.unwrap()
.encode();
let data = BlockAnnounceData::try_from(&signed_statement).unwrap().encode();
let mut validation = validator.validate(&header, &data);
@@ -333,10 +297,7 @@ fn relay_parent_not_imported_when_block_announce_is_processed() {
// that the future is still pending.
assert!(poll!(&mut validation).is_pending());
client
.import(BlockOrigin::Own, block)
.await
.expect("Imports the block");
client.import(BlockOrigin::Own, block).await.expect("Imports the block");
assert!(matches!(
poll!(validation),
@@ -357,10 +318,7 @@ fn block_announced_without_statement_and_block_only_backed() {
let validation = validator.validate(&header, &[]);
assert!(matches!(
validation.await,
Ok(Validation::Success { is_new_best: true })
));
assert!(matches!(validation.await, Ok(Validation::Success { is_new_best: true })));
});
}
@@ -401,10 +359,7 @@ impl ProvideRuntimeApi<PBlock> for TestApi {
type Api = RuntimeApi;
fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> {
RuntimeApi {
data: self.data.clone(),
}
.into()
RuntimeApi { data: self.data.clone() }.into()
}
}
@@ -31,10 +31,7 @@ const TIMEOUT_IN_SECONDS: u64 = 6;
/// Custom error type used by [`WaitOnRelayChainBlock`].
#[derive(Debug, derive_more::Display)]
pub enum Error {
#[display(
fmt = "Timeout while waiting for relay-chain block `{}` to be imported.",
_0
)]
#[display(fmt = "Timeout while waiting for relay-chain block `{}` to be imported.", _0)]
Timeout(PHash),
#[display(
fmt = "Import listener closed while waiting for relay-chain block `{}` to be imported.",
@@ -73,20 +70,14 @@ pub struct WaitOnRelayChainBlock<B, BCE> {
impl<B, BCE> Clone for WaitOnRelayChainBlock<B, BCE> {
fn clone(&self) -> Self {
Self {
backend: self.backend.clone(),
block_chain_events: self.block_chain_events.clone(),
}
Self { backend: self.backend.clone(), block_chain_events: self.block_chain_events.clone() }
}
}
impl<B, BCE> WaitOnRelayChainBlock<B, BCE> {
/// Creates a new instance of `Self`.
pub fn new(backend: Arc<B>, block_chain_events: Arc<BCE>) -> Self {
Self {
backend,
block_chain_events,
}
Self { backend, block_chain_events }
}
}
@@ -103,11 +94,9 @@ where
) -> impl Future<Output = Result<(), Error>> {
let _lock = self.backend.get_import_lock().read();
match self.backend.blockchain().status(BlockId::Hash(hash)) {
Ok(BlockStatus::InChain) => {
return ready(Ok(())).boxed();
}
Ok(BlockStatus::InChain) => return ready(Ok(())).boxed(),
Err(err) => return ready(Err(Error::BlockchainError(hash, err))).boxed(),
_ => {}
_ => {},
}
let mut listener = self.block_chain_events.import_notification_stream();
@@ -171,10 +160,7 @@ mod tests {
block_on(async move {
// Should be ready on the first poll
assert!(matches!(
poll!(wait.wait_on_relay_chain_block(hash)),
Poll::Ready(Ok(()))
));
assert!(matches!(poll!(wait.wait_on_relay_chain_block(hash)), Poll::Ready(Ok(()))));
});
}
@@ -191,10 +177,7 @@ mod tests {
assert!(poll!(&mut future).is_pending());
// Import the block that should fire the notification
client
.import(BlockOrigin::Own, block)
.await
.expect("Imports the block");
client.import(BlockOrigin::Own, block).await.expect("Imports the block");
// Now it should have received the notification and report that the block was imported
assert!(matches!(poll!(future), Poll::Ready(Ok(()))));
@@ -208,10 +191,7 @@ mod tests {
let wait = WaitOnRelayChainBlock::new(backend, client.clone());
assert!(matches!(
block_on(wait.wait_on_relay_chain_block(hash)),
Err(Error::Timeout(_))
));
assert!(matches!(block_on(wait.wait_on_relay_chain_block(hash)), Err(Error::Timeout(_))));
}
#[test]
@@ -227,9 +207,7 @@ mod tests {
);
let mut block_builder = client.init_polkadot_block_builder();
// Push an extrinsic to get a different block hash.
block_builder
.push_polkadot_extrinsic(ext)
.expect("Push extrinsic");
block_builder.push_polkadot_extrinsic(ext).expect("Push extrinsic");
let block2 = block_builder.build().expect("Build second block").block;
let hash2 = block2.hash();
@@ -243,20 +221,14 @@ mod tests {
assert!(poll!(&mut future2).is_pending());
// Import the block that should fire the notification
client
.import(BlockOrigin::Own, block2)
.await
.expect("Imports the second block");
client.import(BlockOrigin::Own, block2).await.expect("Imports the second block");
// The import notification of the second block should not make this one finish
assert!(poll!(&mut future).is_pending());
// Now it should have received the notification and report that the block was imported
assert!(matches!(poll!(future2), Poll::Ready(Ok(()))));
client
.import(BlockOrigin::Own, block)
.await
.expect("Imports the first block");
client.import(BlockOrigin::Own, block).await.expect("Imports the first block");
// Now it should be ready
assert!(matches!(poll!(future), Poll::Ready(Ok(()))));
@@ -39,11 +39,7 @@ pub(crate) struct ActiveCandidateRecovery<Block: BlockT> {
impl<Block: BlockT> ActiveCandidateRecovery<Block> {
pub fn new(overseer_handle: OverseerHandle) -> Self {
Self {
recoveries: Default::default(),
candidates: Default::default(),
overseer_handle,
}
Self { recoveries: Default::default(), candidates: Default::default(), overseer_handle }
}
/// Recover the given `pending_candidate`.
@@ -80,14 +76,14 @@ impl<Block: BlockT> ActiveCandidateRecovery<Block> {
"Availability recovery failed",
);
(block_hash, None)
}
},
Err(_) => {
tracing::debug!(
target: crate::LOG_TARGET,
"Availability recovery oneshot channel closed",
);
(block_hash, None)
}
},
}
}
.boxed(),
@@ -106,7 +102,7 @@ impl<Block: BlockT> ActiveCandidateRecovery<Block> {
loop {
if let Some(res) = self.recoveries.next().await {
self.candidates.remove(&res.0);
return res;
return res
} else {
futures::pending!()
}
+39 -45
View File
@@ -149,12 +149,12 @@ where
error = ?e,
"Failed to decode parachain header from pending candidate",
);
return;
}
return
},
};
if *header.number() <= self.parachain_client.usage_info().chain.finalized_number {
return;
return
}
let hash = header.hash();
@@ -169,8 +169,8 @@ where
block_hash = ?hash,
"Failed to get block status",
);
return;
}
return
},
}
if self
@@ -185,7 +185,7 @@ where
)
.is_some()
{
return;
return
}
// Wait some random time, with the maximum being the slot duration of the relay chain
@@ -207,8 +207,7 @@ where
/// Handle a finalized block with the given `block_number`.
fn handle_block_finalized(&mut self, block_number: NumberFor<Block>) {
self.pending_candidates
.retain(|_, pc| pc.block_number > block_number);
self.pending_candidates.retain(|_, pc| pc.block_number > block_number);
}
/// Recover the candidate for the given `block_hash`.
@@ -245,8 +244,8 @@ where
Some(data) => data,
None => {
self.clear_waiting_for_parent(block_hash);
return;
}
return
},
};
let raw_block_data = match sp_maybe_compressed_blob::decompress(
@@ -259,8 +258,8 @@ where
self.clear_waiting_for_parent(block_hash);
return;
}
return
},
};
let block_data = match ParachainBlockData::<Block>::decode(&mut &raw_block_data[..]) {
@@ -274,8 +273,8 @@ where
self.clear_waiting_for_parent(block_hash);
return;
}
return
},
};
let block = block_data.into_block();
@@ -292,11 +291,8 @@ where
"Parent is still being recovered, waiting.",
);
self.waiting_for_parent
.entry(parent)
.or_default()
.push(block);
return;
self.waiting_for_parent.entry(parent).or_default().push(block);
return
} else {
tracing::debug!(
target: "cumulus-consensus",
@@ -307,9 +303,9 @@ where
self.clear_waiting_for_parent(block_hash);
return;
return
}
}
},
Err(error) => {
tracing::debug!(
target: "cumulus-consensus",
@@ -320,8 +316,8 @@ where
self.clear_waiting_for_parent(block_hash);
return;
}
return
},
// Any other status is fine to "ignore/accept"
_ => (),
}
@@ -431,27 +427,25 @@ where
RC: ProvideRuntimeApi<PBlock> + BlockchainEvents<PBlock>,
RC::Api: ParachainHost<PBlock>,
{
relay_chain_client
.import_notification_stream()
.filter_map(move |n| {
let runtime_api = relay_chain_client.runtime_api();
let res = runtime_api
.candidate_pending_availability(&BlockId::hash(n.hash), para_id)
.and_then(|pa| {
runtime_api
.session_index_for_child(&BlockId::hash(n.hash))
.map(|v| pa.map(|pa| (pa, v)))
})
.map_err(|e| {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed fetch pending candidates.",
)
})
.ok()
.flatten();
relay_chain_client.import_notification_stream().filter_map(move |n| {
let runtime_api = relay_chain_client.runtime_api();
let res = runtime_api
.candidate_pending_availability(&BlockId::hash(n.hash), para_id)
.and_then(|pa| {
runtime_api
.session_index_for_child(&BlockId::hash(n.hash))
.map(|v| pa.map(|pa| (pa, v)))
})
.map_err(|e| {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed fetch pending candidates.",
)
})
.ok()
.flatten();
async move { res }
})
async move { res }
})
}
+15 -26
View File
@@ -118,19 +118,17 @@ where
_phantom: PhantomData,
});
relay_chain_full_node
.client
.execute_with(StartPoVRecovery {
para_id,
client: client.clone(),
import_queue,
task_manager,
overseer_handle: relay_chain_full_node
.overseer_handle
.clone()
.ok_or_else(|| "Polkadot full node did not provide an `OverseerHandle`!")?,
_phantom: PhantomData,
})?;
relay_chain_full_node.client.execute_with(StartPoVRecovery {
para_id,
client: client.clone(),
import_queue,
task_manager,
overseer_handle: relay_chain_full_node
.overseer_handle
.clone()
.ok_or_else(|| "Polkadot full node did not provide an `OverseerHandle`!")?,
_phantom: PhantomData,
})?;
cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams {
runtime_api: client.clone(),
@@ -239,9 +237,7 @@ where
self.announce_block,
);
self.task_manager
.spawn_essential_handle()
.spawn("cumulus-consensus", consensus);
self.task_manager.spawn_essential_handle().spawn("cumulus-consensus", consensus);
}
}
@@ -311,9 +307,7 @@ pub fn build_polkadot_full_node(
) -> Result<RFullNode<PClient>, polkadot_service::Error> {
let is_light = matches!(config.role, Role::Light);
if is_light {
Err(polkadot_service::Error::Sub(
"Light client not supported.".into(),
))
Err(polkadot_service::Error::Sub("Light client not supported.".into()))
} else {
let collator_key = CollatorPair::generate().0;
@@ -327,10 +321,7 @@ pub fn build_polkadot_full_node(
polkadot_service::RealOverseerGen,
)?;
Ok(RFullNode {
relay_chain_full_node,
collator_key,
})
Ok(RFullNode { relay_chain_full_node, collator_key })
}
}
@@ -359,9 +350,7 @@ impl<Block: BlockT> ImportQueue<Block> for SharedImportQueue<Block> {
number: NumberFor<Block>,
justifications: Justifications,
) {
self.0
.lock()
.import_justifications(who, hash, number, justifications)
self.0.lock().import_justifications(who, hash, number, justifications)
}
fn poll_actions(&mut self, cx: &mut std::task::Context, link: &mut dyn Link<Block>) {
+4 -8
View File
@@ -131,18 +131,14 @@ where
(Some(s), false) => {
seal = Some(s);
false
}
},
}
});
let seal = seal.expect("Could not find an AuRa seal digest!");
let author = Aura::<T>::find_author(
header
.digest()
.logs()
.iter()
.filter_map(|d| d.as_pre_runtime()),
header.digest().logs().iter().filter_map(|d| d.as_pre_runtime()),
)
.expect("Could not find AuRa author index!");
@@ -150,9 +146,9 @@ where
if !authorities
.get(author as usize)
.unwrap_or_else(||
.unwrap_or_else(|| {
panic!("Invalid AuRa author index {} for authorities: {:?}", author, authorities)
)
})
.verify(&pre_hash, &seal)
{
panic!("Invalid AuRa seal");
@@ -19,15 +19,15 @@ use super::*;
#[allow(unused)]
use crate::Pallet as CollatorSelection;
use sp_std::prelude::*;
use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller, account};
use frame_system::{RawOrigin, EventRecord};
use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller};
use frame_support::{
assert_ok,
traits::{Currency, Get, EnsureOrigin},
traits::{Currency, EnsureOrigin, Get},
};
use frame_system::{EventRecord, RawOrigin};
use pallet_authorship::EventHandler;
use pallet_session::SessionManager;
use sp_std::prelude::*;
pub type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
@@ -38,7 +38,7 @@ const SEED: u32 = 0;
macro_rules! whitelist {
($acc:ident) => {
frame_benchmarking::benchmarking::add_to_whitelist(
frame_system::Account::<T>::hashed_key_for(&$acc).into()
frame_system::Account::<T>::hashed_key_for(&$acc).into(),
);
};
}
+82 -60
View File
@@ -74,29 +74,27 @@ pub mod weights;
#[frame_support::pallet]
pub mod pallet {
pub use crate::weights::WeightInfo;
use core::ops::Div;
use frame_support::{
dispatch::DispatchResultWithPostInfo,
pallet_prelude::*,
inherent::Vec,
traits::{
Currency, ReservableCurrency, EnsureOrigin, ExistenceRequirement::KeepAlive, ValidatorRegistration
},
PalletId,
};
use frame_system::pallet_prelude::*;
use frame_system::Config as SystemConfig;
use frame_support::{
pallet_prelude::*,
sp_runtime::{
traits::{AccountIdConversion, CheckedSub, Saturating, Zero},
RuntimeDebug,
traits::{AccountIdConversion, CheckedSub, Zero, Saturating},
},
traits::{
Currency, EnsureOrigin, ExistenceRequirement::KeepAlive, ReservableCurrency,
ValidatorRegistration,
},
weights::DispatchClass,
PalletId,
};
use sp_runtime::traits::Convert;
use core::ops::Div;
use frame_system::{pallet_prelude::*, Config as SystemConfig};
use pallet_session::SessionManager;
use sp_runtime::traits::Convert;
use sp_staking::SessionIndex;
pub use crate::weights::WeightInfo;
type BalanceOf<T> =
<<T as Config>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
@@ -136,7 +134,6 @@ pub mod pallet {
/// This does not take into account the invulnerables.
type MinCandidates: Get<u32>;
/// Maximum number of invulnerables.
///
/// Used only for benchmarking.
@@ -156,7 +153,6 @@ pub mod pallet {
/// Validate a user is registered
type ValidatorRegistration: ValidatorRegistration<Self::ValidatorId>;
/// The weight information of this pallet.
type WeightInfo: WeightInfo;
}
@@ -182,16 +178,14 @@ pub mod pallet {
/// The (community, limited) collation candidates.
#[pallet::storage]
#[pallet::getter(fn candidates)]
pub type Candidates<T: Config> = StorageValue<
_,
Vec<CandidateInfo<T::AccountId, BalanceOf<T>>>,
ValueQuery,
>;
pub type Candidates<T: Config> =
StorageValue<_, Vec<CandidateInfo<T::AccountId, BalanceOf<T>>>, ValueQuery>;
/// Last block authored by collator.
#[pallet::storage]
#[pallet::getter(fn last_authored_block)]
pub type LastAuthoredBlock<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, T::BlockNumber, ValueQuery>;
pub type LastAuthoredBlock<T: Config> =
StorageMap<_, Twox64Concat, T::AccountId, T::BlockNumber, ValueQuery>;
/// Desired number of candidates.
///
@@ -205,7 +199,6 @@ pub mod pallet {
#[pallet::getter(fn candidacy_bond)]
pub type CandidacyBond<T> = StorageValue<_, BalanceOf<T>, ValueQuery>;
#[pallet::genesis_config]
pub struct GenesisConfig<T: Config> {
pub invulnerables: Vec<T::AccountId>,
@@ -227,9 +220,12 @@ pub mod pallet {
#[pallet::genesis_build]
impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
fn build(&self) {
let duplicate_invulnerables = self.invulnerables.iter().collect::<std::collections::BTreeSet<_>>();
assert!(duplicate_invulnerables.len() == self.invulnerables.len(), "duplicate invulnerables in genesis.");
let duplicate_invulnerables =
self.invulnerables.iter().collect::<std::collections::BTreeSet<_>>();
assert!(
duplicate_invulnerables.len() == self.invulnerables.len(),
"duplicate invulnerables in genesis."
);
assert!(
T::MaxInvulnerables::get() >= (self.invulnerables.len() as u32),
@@ -276,7 +272,7 @@ pub mod pallet {
/// Account has no associated validator ID
NoAssociatedValidatorId,
/// Validator ID is not yet registered
ValidatorNotRegistered
ValidatorNotRegistered,
}
#[pallet::hooks]
@@ -302,13 +298,14 @@ pub mod pallet {
}
#[pallet::weight(T::WeightInfo::set_desired_candidates())]
pub fn set_desired_candidates(origin: OriginFor<T>, max: u32) -> DispatchResultWithPostInfo {
pub fn set_desired_candidates(
origin: OriginFor<T>,
max: u32,
) -> DispatchResultWithPostInfo {
T::UpdateOrigin::ensure_origin(origin)?;
// we trust origin calls, this is just a for more accurate benchmarking
if max > T::MaxCandidates::get() {
log::warn!(
"max > T::MaxCandidates; you might need to run benchmarks again"
);
log::warn!("max > T::MaxCandidates; you might need to run benchmarks again");
}
<DesiredCandidates<T>>::put(&max);
Self::deposit_event(Event::NewDesiredCandidates(max));
@@ -316,7 +313,10 @@ pub mod pallet {
}
#[pallet::weight(T::WeightInfo::set_candidacy_bond())]
pub fn set_candidacy_bond(origin: OriginFor<T>, bond: BalanceOf<T>) -> DispatchResultWithPostInfo {
pub fn set_candidacy_bond(
origin: OriginFor<T>,
bond: BalanceOf<T>,
) -> DispatchResultWithPostInfo {
T::UpdateOrigin::ensure_origin(origin)?;
<CandidacyBond<T>>::put(&bond);
Self::deposit_event(Event::NewCandidacyBond(bond));
@@ -332,8 +332,12 @@ pub mod pallet {
ensure!((length as u32) < Self::desired_candidates(), Error::<T>::TooManyCandidates);
ensure!(!Self::invulnerables().contains(&who), Error::<T>::AlreadyInvulnerable);
let validator_key = T::ValidatorIdOf::convert(who.clone()).ok_or(Error::<T>::NoAssociatedValidatorId)?;
ensure!(T::ValidatorRegistration::is_registered(&validator_key), Error::<T>::ValidatorNotRegistered);
let validator_key = T::ValidatorIdOf::convert(who.clone())
.ok_or(Error::<T>::NoAssociatedValidatorId)?;
ensure!(
T::ValidatorRegistration::is_registered(&validator_key),
Error::<T>::ValidatorNotRegistered
);
let deposit = Self::candidacy_bond();
// First authored block is current block plus kick threshold to handle session delay
@@ -346,7 +350,10 @@ pub mod pallet {
} else {
T::Currency::reserve(&who, deposit)?;
candidates.push(incoming);
<LastAuthoredBlock<T>>::insert(who.clone(), frame_system::Pallet::<T>::block_number() + T::KickThreshold::get());
<LastAuthoredBlock<T>>::insert(
who.clone(),
frame_system::Pallet::<T>::block_number() + T::KickThreshold::get(),
);
Ok(candidates.len())
}
})?;
@@ -358,7 +365,10 @@ pub mod pallet {
#[pallet::weight(T::WeightInfo::leave_intent(T::MaxCandidates::get()))]
pub fn leave_intent(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
let who = ensure_signed(origin)?;
ensure!(Self::candidates().len() as u32 > T::MinCandidates::get(), Error::<T>::TooFewCandidates);
ensure!(
Self::candidates().len() as u32 > T::MinCandidates::get(),
Error::<T>::TooFewCandidates
);
let current_count = Self::try_remove_candidate(&who)?;
Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into())
@@ -372,13 +382,17 @@ pub mod pallet {
}
/// Removes a candidate if they exist and sends them back their deposit
fn try_remove_candidate(who: &T::AccountId) -> Result<usize, DispatchError> {
let current_count = <Candidates<T>>::try_mutate(|candidates| -> Result<usize, DispatchError> {
let index = candidates.iter().position(|candidate| candidate.who == *who).ok_or(Error::<T>::NotCandidate)?;
T::Currency::unreserve(&who, candidates[index].deposit);
candidates.remove(index);
<LastAuthoredBlock<T>>::remove(who.clone());
Ok(candidates.len())
})?;
let current_count =
<Candidates<T>>::try_mutate(|candidates| -> Result<usize, DispatchError> {
let index = candidates
.iter()
.position(|candidate| candidate.who == *who)
.ok_or(Error::<T>::NotCandidate)?;
T::Currency::unreserve(&who, candidates[index].deposit);
candidates.remove(index);
<LastAuthoredBlock<T>>::remove(who.clone());
Ok(candidates.len())
})?;
Self::deposit_event(Event::CandidateRemoved(who.clone()));
Ok(current_count)
}
@@ -388,29 +402,34 @@ pub mod pallet {
/// This is done on the fly, as frequent as we are told to do so, as the session manager.
pub fn assemble_collators(candidates: Vec<T::AccountId>) -> Vec<T::AccountId> {
let mut collators = Self::invulnerables();
collators.extend(
candidates.into_iter().collect::<Vec<_>>(),
);
collators.extend(candidates.into_iter().collect::<Vec<_>>());
collators
}
/// Kicks out and candidates that did not produce a block in the kick threshold.
pub fn kick_stale_candidates(candidates: Vec<CandidateInfo<T::AccountId, BalanceOf<T>>>) -> Vec<T::AccountId> {
pub fn kick_stale_candidates(
candidates: Vec<CandidateInfo<T::AccountId, BalanceOf<T>>>,
) -> Vec<T::AccountId> {
let now = frame_system::Pallet::<T>::block_number();
let kick_threshold = T::KickThreshold::get();
let new_candidates = candidates.into_iter().filter_map(|c| {
let last_block = <LastAuthoredBlock<T>>::get(c.who.clone());
let since_last = now.saturating_sub(last_block);
if since_last < kick_threshold || Self::candidates().len() as u32 <= T::MinCandidates::get() {
Some(c.who)
} else {
let outcome = Self::try_remove_candidate(&c.who);
if let Err(why) = outcome {
log::warn!("Failed to remove candidate {:?}", why);
debug_assert!(false, "failed to remove candidate {:?}", why);
let new_candidates = candidates
.into_iter()
.filter_map(|c| {
let last_block = <LastAuthoredBlock<T>>::get(c.who.clone());
let since_last = now.saturating_sub(last_block);
if since_last < kick_threshold ||
Self::candidates().len() as u32 <= T::MinCandidates::get()
{
Some(c.who)
} else {
let outcome = Self::try_remove_candidate(&c.who);
if let Err(why) = outcome {
log::warn!("Failed to remove candidate {:?}", why);
debug_assert!(false, "failed to remove candidate {:?}", why);
}
None
}
None
}
}).collect::<Vec<_>>();
})
.collect::<Vec<_>>();
new_candidates
}
}
@@ -423,7 +442,10 @@ pub mod pallet {
fn note_author(author: T::AccountId) {
let pot = Self::account_id();
// assumes an ED will be sent to pot.
let reward = T::Currency::free_balance(&pot).checked_sub(&T::Currency::minimum_balance()).unwrap_or_else(Zero::zero).div(2u32.into());
let reward = T::Currency::free_balance(&pot)
.checked_sub(&T::Currency::minimum_balance())
.unwrap_or_else(Zero::zero)
.div(2u32.into());
// `reward` is half of pot account minus ED, this should never fail.
let _success = T::Currency::transfer(&pot, &author, reward, KeepAlive);
debug_assert!(_success.is_ok());
+17 -25
View File
@@ -15,19 +15,19 @@
use super::*;
use crate as collator_selection;
use sp_core::H256;
use frame_support::{
parameter_types, ord_parameter_types,
ord_parameter_types, parameter_types,
traits::{FindAuthor, GenesisBuild, ValidatorRegistration},
PalletId
PalletId,
};
use sp_runtime::{
RuntimeAppPublic,
traits::{BlakeTwo256, IdentityLookup, OpaqueKeys},
testing::{Header, UintAuthorityId},
};
use frame_system::{EnsureSignedBy};
use frame_system as system;
use frame_system::EnsureSignedBy;
use sp_core::H256;
use sp_runtime::{
testing::{Header, UintAuthorityId},
traits::{BlakeTwo256, IdentityLookup, OpaqueKeys},
RuntimeAppPublic,
};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;
@@ -100,7 +100,8 @@ impl pallet_balances::Config for Test {
pub struct Author4;
impl FindAuthor<u64> for Author4 {
fn find_author<'a, I>(_digests: I) -> Option<u64>
where I: 'a + IntoIterator<Item = (frame_support::ConsensusEngineId, &'a [u8])>,
where
I: 'a + IntoIterator<Item = (frame_support::ConsensusEngineId, &'a [u8])>,
{
Some(4)
}
@@ -224,22 +225,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
sp_tracing::try_init_simple();
let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
let invulnerables = vec![1, 2];
let keys = invulnerables.iter().map(|i|
(
*i,
*i,
MockSessionKeys { aura: UintAuthorityId(*i) },
)
).collect::<Vec<_>>();
let keys = invulnerables
.iter()
.map(|i| (*i, *i, MockSessionKeys { aura: UintAuthorityId(*i) }))
.collect::<Vec<_>>();
let balances = pallet_balances::GenesisConfig::<Test> {
balances: vec![
(1, 100),
(2, 100),
(3, 100),
(4, 100),
(5, 100),
],
balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)],
};
let collator_selection = collator_selection::GenesisConfig::<Test> {
desired_candidates: 2,
@@ -256,7 +248,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
}
pub fn initialize_to_block(n: u64) {
for i in System::block_number()+1..=n {
for i in System::block_number() + 1..=n {
System::set_block_number(i);
<AllPallets as frame_support::traits::OnInitialize<u64>>::on_initialize(i);
}
+11 -25
View File
@@ -13,15 +13,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate as collator_selection;
use crate::{mock::*, Error, CandidateInfo};
use crate::{mock::*, CandidateInfo, Error};
use frame_support::{
assert_noop, assert_ok,
traits::{OnInitialize, Currency, GenesisBuild},
traits::{Currency, GenesisBuild, OnInitialize},
};
use sp_runtime::traits::BadOrigin;
use pallet_balances::Error as BalancesError;
use sp_runtime::traits::BadOrigin;
#[test]
fn basic_setup_works() {
@@ -59,7 +58,10 @@ fn set_desired_candidates_works() {
assert_eq!(CollatorSelection::desired_candidates(), 2);
// can set
assert_ok!(CollatorSelection::set_desired_candidates(Origin::signed(RootAccount::get()), 7));
assert_ok!(CollatorSelection::set_desired_candidates(
Origin::signed(RootAccount::get()),
7
));
assert_eq!(CollatorSelection::desired_candidates(), 7);
// rejects bad origin
@@ -239,11 +241,7 @@ fn authorship_event_handler() {
// triggers `note_author`
Authorship::on_initialize(1);
let collator = CandidateInfo {
who: 4,
deposit: 10,
};
let collator = CandidateInfo { who: 4, deposit: 10 };
assert_eq!(CollatorSelection::candidates(), vec![collator]);
assert_eq!(CollatorSelection::last_authored_block(4), 0);
@@ -268,11 +266,7 @@ fn fees_edgecases() {
// triggers `note_author`
Authorship::on_initialize(1);
let collator = CandidateInfo {
who: 4,
deposit: 10,
};
let collator = CandidateInfo { who: 4, deposit: 10 };
assert_eq!(CollatorSelection::candidates(), vec![collator]);
assert_eq!(CollatorSelection::last_authored_block(4), 0);
@@ -334,10 +328,7 @@ fn kick_mechanism() {
assert_eq!(CollatorSelection::candidates().len(), 1);
// 3 will be kicked after 1 session delay
assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]);
let collator = CandidateInfo {
who: 4,
deposit: 10,
};
let collator = CandidateInfo { who: 4, deposit: 10 };
assert_eq!(CollatorSelection::candidates(), vec![collator]);
assert_eq!(CollatorSelection::last_authored_block(4), 20);
initialize_to_block(30);
@@ -362,10 +353,7 @@ fn should_not_kick_mechanism_too_few() {
assert_eq!(CollatorSelection::candidates().len(), 1);
// 3 will be kicked after 1 session delay
assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 5]);
let collator = CandidateInfo {
who: 5,
deposit: 10,
};
let collator = CandidateInfo { who: 5, deposit: 10 };
assert_eq!(CollatorSelection::candidates(), vec![collator]);
assert_eq!(CollatorSelection::last_authored_block(4), 20);
initialize_to_block(30);
@@ -376,7 +364,6 @@ fn should_not_kick_mechanism_too_few() {
});
}
#[test]
#[should_panic = "duplicate invulnerables in genesis."]
fn cannot_set_genesis_value_twice() {
@@ -391,5 +378,4 @@ fn cannot_set_genesis_value_twice() {
};
// collator selection must be initialized before session.
collator_selection.assimilate_storage(&mut t).unwrap();
}
@@ -18,7 +18,10 @@
#![allow(unused_parens)]
#![allow(unused_imports)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use frame_support::{
traits::Get,
weights::{constants::RocksDbWeight, Weight},
};
use sp_std::marker::PhantomData;
// The weight info trait for `pallet_collator_selection`.
@@ -35,28 +38,26 @@ pub trait WeightInfo {
/// Weights for pallet_collator_selection using the Substrate node and recommended hardware.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
fn set_invulnerables(b: u32, ) -> Weight {
fn set_invulnerables(b: u32) -> Weight {
(18_563_000 as Weight)
// Standard Error: 0
.saturating_add((68_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_desired_candidates() -> Weight {
(16_363_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_363_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_candidacy_bond() -> Weight {
(16_840_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_840_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn register_as_candidate(c: u32, ) -> Weight {
fn register_as_candidate(c: u32) -> Weight {
(71_196_000 as Weight)
// Standard Error: 0
.saturating_add((198_000 as Weight).saturating_mul(c as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn leave_intent(c: u32, ) -> Weight {
fn leave_intent(c: u32) -> Weight {
(55_336_000 as Weight)
// Standard Error: 0
.saturating_add((151_000 as Weight).saturating_mul(c as Weight))
@@ -68,7 +69,7 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
fn new_session(r: u32, c: u32, ) -> Weight {
fn new_session(r: u32, c: u32) -> Weight {
(0 as Weight)
// Standard Error: 1_010_000
.saturating_add((109_961_000 as Weight).saturating_mul(r as Weight))
@@ -81,31 +82,28 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
fn set_invulnerables(b: u32, ) -> Weight {
fn set_invulnerables(b: u32) -> Weight {
(18_563_000 as Weight)
// Standard Error: 0
.saturating_add((68_000 as Weight).saturating_mul(b as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
fn set_desired_candidates() -> Weight {
(16_363_000 as Weight)
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(16_363_000 as Weight).saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
fn set_candidacy_bond() -> Weight {
(16_840_000 as Weight)
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(16_840_000 as Weight).saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
fn register_as_candidate(c: u32, ) -> Weight {
fn register_as_candidate(c: u32) -> Weight {
(71_196_000 as Weight)
// Standard Error: 0
.saturating_add((198_000 as Weight).saturating_mul(c as Weight))
.saturating_add(RocksDbWeight::get().reads(4 as Weight))
.saturating_add(RocksDbWeight::get().writes(2 as Weight))
}
fn leave_intent(c: u32, ) -> Weight {
fn leave_intent(c: u32) -> Weight {
(55_336_000 as Weight)
// Standard Error: 0
.saturating_add((151_000 as Weight).saturating_mul(c as Weight))
@@ -117,7 +115,7 @@ impl WeightInfo for () {
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(4 as Weight))
}
fn new_session(r: u32, c: u32, ) -> Weight {
fn new_session(r: u32, c: u32) -> Weight {
(0 as Weight)
// Standard Error: 1_010_000
.saturating_add((109_961_000 as Weight).saturating_mul(r as Weight))
+148 -157
View File
@@ -21,15 +21,16 @@
#![cfg_attr(not(feature = "std"), no_std)]
use scale_info::TypeInfo;
use sp_std::{prelude::*, convert::TryFrom};
use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber;
use cumulus_primitives_core::DmpMessageHandler;
use codec::{Encode, Decode};
use sp_runtime::RuntimeDebug;
use xcm::{VersionedXcm, latest::prelude::*};
use frame_support::{traits::EnsureOrigin, dispatch::Weight, weights::constants::WEIGHT_PER_MILLIS};
use codec::{Decode, Encode};
use cumulus_primitives_core::{relay_chain::BlockNumber as RelayBlockNumber, DmpMessageHandler};
use frame_support::{
dispatch::Weight, traits::EnsureOrigin, weights::constants::WEIGHT_PER_MILLIS,
};
pub use pallet::*;
use scale_info::TypeInfo;
use sp_runtime::RuntimeDebug;
use sp_std::{convert::TryFrom, prelude::*};
use xcm::{latest::prelude::*, VersionedXcm};
#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
pub struct ConfigData {
@@ -42,7 +43,7 @@ pub struct ConfigData {
impl Default for ConfigData {
fn default() -> Self {
Self {
max_individual: 10 * WEIGHT_PER_MILLIS, // 10 ms of execution time maximum by default
max_individual: 10 * WEIGHT_PER_MILLIS, // 10 ms of execution time maximum by default
}
}
}
@@ -100,23 +101,13 @@ pub mod pallet {
/// The queue pages.
#[pallet::storage]
pub(super) type Pages<T> = StorageMap<
_,
Blake2_128Concat,
PageCounter,
Vec<(RelayBlockNumber, Vec<u8>)>,
ValueQuery,
>;
pub(super) type Pages<T> =
StorageMap<_, Blake2_128Concat, PageCounter, Vec<(RelayBlockNumber, Vec<u8>)>, ValueQuery>;
/// The overweight messages.
#[pallet::storage]
pub(super) type Overweight<T> = StorageMap<
_,
Blake2_128Concat,
OverweightIndex,
(RelayBlockNumber, Vec<u8>),
OptionQuery,
>;
pub(super) type Overweight<T> =
StorageMap<_, Blake2_128Concat, OverweightIndex, (RelayBlockNumber, Vec<u8>), OptionQuery>;
#[pallet::error]
pub enum Error<T> {
@@ -203,13 +194,14 @@ pub mod pallet {
while page_index.begin_used < page_index.end_used {
let page = Pages::<T>::take(page_index.begin_used);
for (i, &(sent_at, ref data)) in page.iter().enumerate() {
match Self::try_service_message(limit.saturating_sub(used), sent_at, &data[..]) {
match Self::try_service_message(limit.saturating_sub(used), sent_at, &data[..])
{
Ok(w) => used += w,
Err(..) => {
// Too much weight needed - put the remaining messages back and bail
Pages::<T>::insert(page_index.begin_used, &page[i..]);
return used;
}
return used
},
}
}
page_index.begin_used += 1;
@@ -233,8 +225,8 @@ pub mod pallet {
data: &[u8],
) -> Result<Weight, (MessageId, Weight)> {
let id = sp_io::hashing::blake2_256(&data[..]);
let maybe_msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
.map(Xcm::<T::Call>::try_from);
let maybe_msg =
VersionedXcm::<T::Call>::decode(&mut &data[..]).map(Xcm::<T::Call>::try_from);
match maybe_msg {
Err(_) => {
Self::deposit_event(Event::InvalidFormat(id));
@@ -247,14 +239,15 @@ pub mod pallet {
Ok(Ok(x)) => {
let outcome = T::XcmExecutor::execute_xcm(Parent.into(), x, limit);
match outcome {
Outcome::Error(XcmError::WeightLimitReached(required)) => Err((id, required)),
Outcome::Error(XcmError::WeightLimitReached(required)) =>
Err((id, required)),
outcome => {
let weight_used = outcome.weight_used();
Self::deposit_event(Event::ExecutedDownward(id, outcome));
Ok(weight_used)
}
},
}
}
},
}
}
}
@@ -264,7 +257,7 @@ pub mod pallet {
/// the `Parent` location.
impl<T: Config> DmpMessageHandler for Pallet<T> {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
iter: impl Iterator<Item = (RelayBlockNumber, Vec<u8>)>,
limit: Weight,
) -> Weight {
let mut page_index = PageIndex::<T>::get();
@@ -290,7 +283,8 @@ pub mod pallet {
match Self::try_service_message(remaining, sent_at, &data[..]) {
Ok(consumed) => used += consumed,
Err((id, required)) =>
// Too much weight required right now.
// Too much weight required right now.
{
if required > config.max_individual {
// overweight - add to overweight queue and continue with
// message execution.
@@ -300,14 +294,17 @@ pub mod pallet {
page_index.overweight_count += 1;
// Not needed for control flow, but only to ensure that the compiler
// understands that we won't attempt to re-use `data` later.
continue;
continue
} else {
// not overweight. stop executing inline and enqueue normally
// from here on.
let item_count_left = item_count.saturating_sub(i);
maybe_enqueue_page = Some(Vec::with_capacity(item_count_left));
Self::deposit_event(Event::WeightExhausted(id, remaining, required));
Self::deposit_event(Event::WeightExhausted(
id, remaining, required,
));
}
}
}
}
// Cannot be an `else` here since the `maybe_enqueue_page` may have changed.
@@ -333,14 +330,17 @@ mod tests {
use super::*;
use crate as dmp_queue;
use std::cell::RefCell;
use codec::Encode;
use cumulus_primitives_core::ParaId;
use frame_support::{parameter_types, assert_noop, traits::OnIdle};
use frame_support::{assert_noop, parameter_types, traits::OnIdle};
use sp_core::H256;
use sp_runtime::{testing::Header, traits::{IdentityLookup, BlakeTwo256}};
use sp_runtime::DispatchError::BadOrigin;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, IdentityLookup},
DispatchError::BadOrigin,
};
use sp_version::RuntimeVersion;
use std::cell::RefCell;
use xcm::latest::{MultiLocation, OriginKind};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
@@ -451,9 +451,12 @@ mod tests {
fn enqueue(enqueued: &[Xcm]) {
if !enqueued.is_empty() {
let mut index = PageIndex::<Test>::get();
Pages::<Test>::insert(index.end_used, enqueued.iter()
.map(|m| (0, VersionedXcm::<Call>::from(m.clone()).encode()))
.collect::<Vec<_>>()
Pages::<Test>::insert(
index.end_used,
enqueued
.iter()
.map(|m| (0, VersionedXcm::<Call>::from(m.clone()).encode()))
.collect::<Vec<_>>(),
);
index.end_used += 1;
PageIndex::<Test>::put(index);
@@ -508,7 +511,7 @@ mod tests {
#[test]
fn service_inline_complete_works() {
new_test_ext().execute_with(|| {
let incoming = vec![ msg(1000), msg(1001) ];
let incoming = vec![msg(1000), msg(1001)];
let weight_used = handle_messages(&incoming, 2500);
assert_eq!(weight_used, 2001);
assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001)]);
@@ -519,41 +522,40 @@ mod tests {
#[test]
fn service_enqueued_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(1001), msg(1002) ];
let enqueued = vec![msg(1000), msg(1001), msg(1002)];
enqueue(&enqueued);
let weight_used = handle_messages(&[], 2500);
assert_eq!(weight_used, 2001);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_limit_reached(1002),
]);
assert_eq!(
take_trace(),
vec![msg_complete(1000), msg_complete(1001), msg_limit_reached(1002),]
);
});
}
#[test]
fn enqueue_works() {
new_test_ext().execute_with(|| {
let incoming = vec![ msg(1000), msg(1001), msg(1002) ];
let incoming = vec![msg(1000), msg(1001), msg(1002)];
let weight_used = handle_messages(&incoming, 999);
assert_eq!(weight_used, 0);
assert_eq!(PageIndex::<Test>::get(), PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0});
assert_eq!(
PageIndex::<Test>::get(),
PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0 }
);
assert_eq!(Pages::<Test>::get(0).len(), 3);
assert_eq!(take_trace(), vec![ msg_limit_reached(1000) ]);
assert_eq!(take_trace(), vec![msg_limit_reached(1000)]);
let weight_used = handle_messages(&[], 2500);
assert_eq!(weight_used, 2001);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_limit_reached(1002),
]);
assert_eq!(
take_trace(),
vec![msg_complete(1000), msg_complete(1001), msg_limit_reached(1002),]
);
let weight_used = handle_messages(&[], 2500);
assert_eq!(weight_used, 1002);
assert_eq!(take_trace(), vec![
msg_complete(1002),
]);
assert_eq!(take_trace(), vec![msg_complete(1002),]);
assert!(queue_is_empty());
});
}
@@ -561,22 +563,16 @@ mod tests {
#[test]
fn service_inline_then_enqueue_works() {
new_test_ext().execute_with(|| {
let incoming = vec![ msg(1000), msg(1001), msg(1002) ];
let incoming = vec![msg(1000), msg(1001), msg(1002)];
let weight_used = handle_messages(&incoming, 1500);
assert_eq!(weight_used, 1000);
assert_eq!(pages_queued(), 1);
assert_eq!(Pages::<Test>::get(0).len(), 2);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_limit_reached(1001),
]);
assert_eq!(take_trace(), vec![msg_complete(1000), msg_limit_reached(1001),]);
let weight_used = handle_messages(&[], 2500);
assert_eq!(weight_used, 2003);
assert_eq!(take_trace(), vec![
msg_complete(1001),
msg_complete(1002),
]);
assert_eq!(take_trace(), vec![msg_complete(1001), msg_complete(1002),]);
assert!(queue_is_empty());
});
}
@@ -584,17 +580,20 @@ mod tests {
#[test]
fn service_enqueued_and_inline_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(1001) ];
let incoming = vec![ msg(1002), msg(1003) ];
let enqueued = vec![msg(1000), msg(1001)];
let incoming = vec![msg(1002), msg(1003)];
enqueue(&enqueued);
let weight_used = handle_messages(&incoming, 5000);
assert_eq!(weight_used, 4006);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_complete(1003),
]);
assert_eq!(
take_trace(),
vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_complete(1003),
]
);
assert!(queue_is_empty());
});
}
@@ -602,32 +601,26 @@ mod tests {
#[test]
fn service_enqueued_partially_and_then_enqueue_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(10001) ];
let incoming = vec![ msg(1002), msg(1003) ];
let enqueued = vec![msg(1000), msg(10001)];
let incoming = vec![msg(1002), msg(1003)];
enqueue(&enqueued);
let weight_used = handle_messages(&incoming, 5000);
assert_eq!(weight_used, 1000);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_limit_reached(10001),
]);
assert_eq!(take_trace(), vec![msg_complete(1000), msg_limit_reached(10001),]);
assert_eq!(pages_queued(), 2);
// 5000 is not enough to process the 10001 blocker, so nothing happens.
let weight_used = handle_messages(&[], 5000);
assert_eq!(weight_used, 0);
assert_eq!(take_trace(), vec![
msg_limit_reached(10001),
]);
assert_eq!(take_trace(), vec![msg_limit_reached(10001),]);
// 20000 is now enough to process everything.
let weight_used = handle_messages(&[], 20000);
assert_eq!(weight_used, 12006);
assert_eq!(take_trace(), vec![
msg_complete(10001),
msg_complete(1002),
msg_complete(1003),
]);
assert_eq!(
take_trace(),
vec![msg_complete(10001), msg_complete(1002), msg_complete(1003),]
);
assert!(queue_is_empty());
});
}
@@ -635,25 +628,21 @@ mod tests {
#[test]
fn service_enqueued_completely_and_then_enqueue_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(1001) ];
let incoming = vec![ msg(10002), msg(1003) ];
let enqueued = vec![msg(1000), msg(1001)];
let incoming = vec![msg(10002), msg(1003)];
enqueue(&enqueued);
let weight_used = handle_messages(&incoming, 5000);
assert_eq!(weight_used, 2001);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_limit_reached(10002),
]);
assert_eq!(
take_trace(),
vec![msg_complete(1000), msg_complete(1001), msg_limit_reached(10002),]
);
assert_eq!(pages_queued(), 1);
// 20000 is now enough to process everything.
let weight_used = handle_messages(&[], 20000);
assert_eq!(weight_used, 11005);
assert_eq!(take_trace(), vec![
msg_complete(10002),
msg_complete(1003),
]);
assert_eq!(take_trace(), vec![msg_complete(10002), msg_complete(1003),]);
assert!(queue_is_empty());
});
}
@@ -661,25 +650,26 @@ mod tests {
#[test]
fn service_enqueued_then_inline_then_enqueue_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(1001) ];
let incoming = vec![ msg(1002), msg(10003) ];
let enqueued = vec![msg(1000), msg(1001)];
let incoming = vec![msg(1002), msg(10003)];
enqueue(&enqueued);
let weight_used = handle_messages(&incoming, 5000);
assert_eq!(weight_used, 3003);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_limit_reached(10003),
]);
assert_eq!(
take_trace(),
vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_limit_reached(10003),
]
);
assert_eq!(pages_queued(), 1);
// 20000 is now enough to process everything.
let weight_used = handle_messages(&[], 20000);
assert_eq!(weight_used, 10003);
assert_eq!(take_trace(), vec![
msg_complete(10003),
]);
assert_eq!(take_trace(), vec![msg_complete(10003),]);
assert!(queue_is_empty());
});
}
@@ -687,32 +677,23 @@ mod tests {
#[test]
fn page_crawling_works() {
new_test_ext().execute_with(|| {
let enqueued = vec![ msg(1000), msg(1001) ];
let enqueued = vec![msg(1000), msg(1001)];
enqueue(&enqueued);
let weight_used = handle_messages(&vec![ msg(1002) ], 1500);
let weight_used = handle_messages(&vec![msg(1002)], 1500);
assert_eq!(weight_used, 1000);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_limit_reached(1001),
]);
assert_eq!(take_trace(), vec![msg_complete(1000), msg_limit_reached(1001),]);
assert_eq!(pages_queued(), 2);
assert_eq!(PageIndex::<Test>::get().begin_used, 0);
let weight_used = handle_messages(&vec![ msg(1003) ], 1500);
let weight_used = handle_messages(&vec![msg(1003)], 1500);
assert_eq!(weight_used, 1001);
assert_eq!(take_trace(), vec![
msg_complete(1001),
msg_limit_reached(1002),
]);
assert_eq!(take_trace(), vec![msg_complete(1001), msg_limit_reached(1002),]);
assert_eq!(pages_queued(), 2);
assert_eq!(PageIndex::<Test>::get().begin_used, 1);
let weight_used = handle_messages(&vec![ msg(1004) ], 1500);
let weight_used = handle_messages(&vec![msg(1004)], 1500);
assert_eq!(weight_used, 1002);
assert_eq!(take_trace(), vec![
msg_complete(1002),
msg_limit_reached(1003),
]);
assert_eq!(take_trace(), vec![msg_complete(1002), msg_limit_reached(1003),]);
assert_eq!(pages_queued(), 2);
assert_eq!(PageIndex::<Test>::get().begin_used, 2);
});
@@ -724,15 +705,14 @@ mod tests {
// Set the overweight threshold to 9999.
Configuration::<Test>::put(ConfigData { max_individual: 9999 });
let incoming = vec![ msg(1000), msg(10001), msg(1002) ];
let incoming = vec![msg(1000), msg(10001), msg(1002)];
let weight_used = handle_messages(&incoming, 2500);
assert_eq!(weight_used, 2002);
assert!(queue_is_empty());
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_limit_reached(10001),
msg_complete(1002),
]);
assert_eq!(
take_trace(),
vec![msg_complete(1000), msg_limit_reached(10001), msg_complete(1002),]
);
assert_eq!(overweights(), vec![0]);
});
@@ -744,49 +724,60 @@ mod tests {
// Set the overweight threshold to 9999.
Configuration::<Test>::put(ConfigData { max_individual: 9999 });
let incoming = vec![ msg(10000) ];
let incoming = vec![msg(10000)];
let weight_used = handle_messages(&incoming, 2500);
assert_eq!(weight_used, 0);
assert_eq!(take_trace(), vec![ msg_limit_reached(10000) ]);
assert_eq!(take_trace(), vec![msg_limit_reached(10000)]);
assert_eq!(overweights(), vec![0]);
assert_noop!(DmpQueue::service_overweight(Origin::signed(1), 0, 20000), BadOrigin);
assert_noop!(DmpQueue::service_overweight(Origin::root(), 1, 20000), Error::<Test>::Unknown);
assert_noop!(DmpQueue::service_overweight(Origin::root(), 0, 9999), Error::<Test>::OverLimit);
assert_eq!(take_trace(), vec![ msg_limit_reached(10000) ]);
assert_noop!(
DmpQueue::service_overweight(Origin::root(), 1, 20000),
Error::<Test>::Unknown
);
assert_noop!(
DmpQueue::service_overweight(Origin::root(), 0, 9999),
Error::<Test>::OverLimit
);
assert_eq!(take_trace(), vec![msg_limit_reached(10000)]);
let base_weight = super::Call::<Test>::service_overweight {
index: 0,
weight_limit: 0,
}.get_dispatch_info().weight;
let base_weight = super::Call::<Test>::service_overweight { index: 0, weight_limit: 0 }
.get_dispatch_info()
.weight;
use frame_support::weights::GetDispatchInfo;
let info = DmpQueue::service_overweight(Origin::root(), 0, 20000).unwrap();
let actual_weight = info.actual_weight.unwrap();
assert_eq!(actual_weight, base_weight + 10000);
assert_eq!(take_trace(), vec![ msg_complete(10000) ]);
assert_eq!(take_trace(), vec![msg_complete(10000)]);
assert!(overweights().is_empty());
assert_noop!(DmpQueue::service_overweight(Origin::root(), 0, 20000), Error::<Test>::Unknown);
assert_noop!(
DmpQueue::service_overweight(Origin::root(), 0, 20000),
Error::<Test>::Unknown
);
});
}
#[test]
fn on_idle_should_service_queue() {
new_test_ext().execute_with(|| {
enqueue(&vec![ msg(1000), msg(1001) ]);
enqueue(&vec![ msg(1002), msg(1003) ]);
enqueue(&vec![ msg(1004), msg(1005) ]);
enqueue(&vec![msg(1000), msg(1001)]);
enqueue(&vec![msg(1002), msg(1003)]);
enqueue(&vec![msg(1004), msg(1005)]);
let weight_used = DmpQueue::on_idle(1, 6000);
assert_eq!(weight_used, 5010);
assert_eq!(take_trace(), vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_complete(1003),
msg_complete(1004),
msg_limit_reached(1005),
]);
assert_eq!(
take_trace(),
vec![
msg_complete(1000),
msg_complete(1001),
msg_complete(1002),
msg_complete(1003),
msg_complete(1004),
msg_limit_reached(1005),
]
);
assert_eq!(pages_queued(), 1);
});
}
@@ -69,13 +69,13 @@ impl Parse for Input {
} else if lookahead.peek(keywords::CheckInherents) {
parse_inner::<keywords::CheckInherents>(input, &mut check_inherents)?;
} else {
return Err(lookahead.error());
return Err(lookahead.error())
}
}
let rest = input.parse::<TokenStream>()?;
if !rest.is_empty() {
return Err(Error::new(rest.span(), "Unexpected input data"));
return Err(Error::new(rest.span(), "Unexpected input data"))
}
Ok(Self {
@@ -88,10 +88,8 @@ impl Parse for Input {
fn crate_() -> Result<Ident, Error> {
match crate_name("cumulus-pallet-parachain-system") {
Ok(FoundCrate::Itself) => Ok(syn::Ident::new(
"cumulus_pallet_parachain_system",
Span::call_site(),
)),
Ok(FoundCrate::Itself) =>
Ok(syn::Ident::new("cumulus_pallet_parachain_system", Span::call_site())),
Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())),
Err(e) => Err(Error::new(Span::call_site(), e)),
}
@@ -99,11 +97,7 @@ fn crate_() -> Result<Ident, Error> {
#[proc_macro]
pub fn register_validate_block(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let Input {
runtime,
check_inherents,
block_executor,
} = match syn::parse(input) {
let Input { runtime, check_inherents, block_executor } = match syn::parse(input) {
Ok(t) => t,
Err(e) => return e.into_compile_error().into(),
};
+37 -59
View File
@@ -144,8 +144,8 @@ pub mod pallet {
false,
"host configuration is promised to set until `on_finalize`; qed",
);
return;
}
return
},
};
let relevant_messaging_state = match Self::relevant_messaging_state() {
Some(ok) => ok,
@@ -155,8 +155,8 @@ pub mod pallet {
"relevant messaging state is promised to be set until `on_finalize`; \
qed",
);
return;
}
return
},
};
<PendingUpwardMessages<T>>::mutate(|up| {
@@ -172,19 +172,16 @@ pub mod pallet {
// available_capacity and available_size.
let num = up
.iter()
.scan(
(available_capacity as usize, available_size as usize),
|state, msg| {
let (cap_left, size_left) = *state;
match (cap_left.checked_sub(1), size_left.checked_sub(msg.len())) {
(Some(new_cap), Some(new_size)) => {
*state = (new_cap, new_size);
Some(())
}
_ => None,
}
},
)
.scan((available_capacity as usize, available_size as usize), |state, msg| {
let (cap_left, size_left) = *state;
match (cap_left.checked_sub(1), size_left.checked_sub(msg.len())) {
(Some(new_cap), Some(new_size)) => {
*state = (new_cap, new_size);
Some(())
},
_ => None,
}
})
.count();
// TODO: #274 Return back messages that do not longer fit into the queue.
@@ -366,10 +363,7 @@ pub mod pallet {
vfp.relay_parent_number,
);
Ok(PostDispatchInfo {
actual_weight: Some(total_weight),
pays_fee: Pays::No,
})
Ok(PostDispatchInfo { actual_weight: Some(total_weight), pays_fee: Pays::No })
}
#[pallet::weight((1_000, DispatchClass::Operational))]
@@ -572,11 +566,10 @@ pub mod pallet {
cumulus_primitives_parachain_inherent::INHERENT_IDENTIFIER;
fn create_inherent(data: &InherentData) -> Option<Self::Call> {
let data: ParachainInherentData = data
.get_data(&Self::INHERENT_IDENTIFIER)
.ok()
.flatten()
.expect("validation function params are always injected into inherent data; qed");
let data: ParachainInherentData =
data.get_data(&Self::INHERENT_IDENTIFIER).ok().flatten().expect(
"validation function params are always injected into inherent data; qed",
);
Some(Call::set_validation_data { data })
}
@@ -611,11 +604,11 @@ pub mod pallet {
provides: vec![hash.as_ref().to_vec()],
longevity: TransactionLongevity::max_value(),
propagate: true,
});
})
}
}
if let Call::set_validation_data { .. } = call {
return Ok(Default::default());
return Ok(Default::default())
}
Err(InvalidTransaction::Call.into())
}
@@ -650,8 +643,8 @@ impl<T: Config> GetChannelInfo for Pallet<T> {
let channels = match Self::relevant_messaging_state() {
None => {
log::warn!("calling `get_channel_status` with no RelevantMessagingState?!");
return ChannelStatus::Closed;
}
return ChannelStatus::Closed
},
Some(d) => d.egress_channels,
};
// ^^^ NOTE: This storage field should carry over from the previous block. So if it's
@@ -667,7 +660,7 @@ impl<T: Config> GetChannelInfo for Pallet<T> {
let meta = &channels[index].1;
if meta.msg_count + 1 > meta.max_capacity {
// The channel is at its capacity. Skip it for now.
return ChannelStatus::Full;
return ChannelStatus::Full
}
let max_size_now = meta.max_total_size - meta.total_size;
let max_size_ever = meta.max_message_size;
@@ -771,9 +764,7 @@ impl<T: Config> Pallet<T> {
// A violation of the assertion below indicates that one of the messages submitted
// by the collator was sent from a sender that doesn't have a channel opened to
// this parachain, according to the relay-parent state.
assert!(ingress_channels
.binary_search_by_key(sender, |&(s, _)| s)
.is_ok(),);
assert!(ingress_channels.binary_search_by_key(sender, |&(s, _)| s).is_ok(),);
}
// Second, prepare horizontal messages for a more convenient processing:
@@ -787,9 +778,7 @@ impl<T: Config> Pallet<T> {
let mut horizontal_messages = horizontal_messages
.into_iter()
.flat_map(|(sender, channel_contents)| {
channel_contents
.into_iter()
.map(move |message| (sender, message))
channel_contents.into_iter().map(move |message| (sender, message))
})
.collect::<Vec<_>>();
horizontal_messages.sort_by(|a, b| {
@@ -806,10 +795,7 @@ impl<T: Config> Pallet<T> {
{
for (sender, ref horizontal_message) in &horizontal_messages {
if hrmp_watermark
.map(|w| w < horizontal_message.sent_at)
.unwrap_or(true)
{
if hrmp_watermark.map(|w| w < horizontal_message.sent_at).unwrap_or(true) {
hrmp_watermark = Some(horizontal_message.sent_at);
}
@@ -882,16 +868,15 @@ impl<T: Config> Pallet<T> {
) -> Option<relay_chain::BlockNumber> {
if <PendingRelayChainBlockNumber<T>>::get().is_some() {
// There is already upgrade scheduled. Upgrade is not allowed.
return None;
return None
}
let relay_blocks_since_last_upgrade = vfp
.relay_parent_number
.saturating_sub(<LastUpgrade<T>>::get());
let relay_blocks_since_last_upgrade =
vfp.relay_parent_number.saturating_sub(<LastUpgrade<T>>::get());
if relay_blocks_since_last_upgrade <= cfg.validation_upgrade_frequency {
// The cooldown after the last upgrade hasn't elapsed yet. Upgrade is not allowed.
return None;
return None
}
Some(vfp.relay_parent_number + cfg.validation_upgrade_delay)
@@ -899,16 +884,10 @@ impl<T: Config> Pallet<T> {
/// The implementation of the runtime upgrade functionality for parachains.
fn set_code_impl(validation_function: Vec<u8>) -> DispatchResult {
ensure!(
!<PendingValidationCode<T>>::exists(),
Error::<T>::OverlappingUpgrades
);
ensure!(!<PendingValidationCode<T>>::exists(), Error::<T>::OverlappingUpgrades);
let vfp = Self::validation_data().ok_or(Error::<T>::ValidationDataNotAvailable)?;
let cfg = Self::host_configuration().ok_or(Error::<T>::HostConfigurationNotAvailable)?;
ensure!(
validation_function.len() <= cfg.max_code_size as usize,
Error::<T>::TooBig
);
ensure!(validation_function.len() <= cfg.max_code_size as usize, Error::<T>::TooBig);
let apply_block =
Self::code_upgrade_allowed(&vfp, &cfg).ok_or(Error::<T>::ProhibitedByPolkadot)?;
@@ -1002,11 +981,10 @@ impl<T: Config> Pallet<T> {
//
// However, changing this setting is expected to be rare.
match Self::host_configuration() {
Some(cfg) => {
Some(cfg) =>
if message.len() > cfg.max_upward_message_size as usize {
return Err(MessageSendError::TooBig);
}
}
return Err(MessageSendError::TooBig)
},
None => {
// This storage field should carry over from the previous block. So if it's None
// then it must be that this is an edge-case where a message is attempted to be
@@ -1017,7 +995,7 @@ impl<T: Config> Pallet<T> {
// returned back to the sender.
//
// Thus fall through here.
}
},
};
<PendingUpwardMessages<T>>::append(message);
Ok(0)
@@ -19,11 +19,10 @@ use cumulus_primitives_core::{
relay_chain, AbridgedHostConfiguration, AbridgedHrmpChannel, ParaId,
};
use scale_info::TypeInfo;
use sp_trie::{MemoryDB, HashDBT, EMPTY_PREFIX};
use sp_runtime::traits::HashFor;
use sp_state_machine::{Backend, TrieBackend};
use sp_std::vec::Vec;
use sp_trie::StorageProof;
use sp_trie::{HashDBT, MemoryDB, StorageProof, EMPTY_PREFIX};
/// A snapshot of some messaging related state of relay chain pertaining to the current parachain.
///
@@ -130,14 +129,11 @@ impl RelayChainStateProof {
) -> Result<Self, Error> {
let db = proof.into_memory_db::<HashFor<relay_chain::Block>>();
if !db.contains(&relay_parent_storage_root, EMPTY_PREFIX) {
return Err(Error::RootMismatch);
return Err(Error::RootMismatch)
}
let trie_backend = TrieBackend::new(db, relay_parent_storage_root);
Ok(Self {
para_id,
trie_backend,
})
Ok(Self { para_id, trie_backend })
}
/// Read the [`MessagingStateSnapshot`] from the relay chain state proof.
@@ -174,10 +170,7 @@ impl RelayChainStateProof {
let mut ingress_channels = Vec::with_capacity(ingress_channel_index.len());
for sender in ingress_channel_index {
let channel_id = relay_chain::v1::HrmpChannelId {
sender,
recipient: self.para_id,
};
let channel_id = relay_chain::v1::HrmpChannelId { sender, recipient: self.para_id };
let hrmp_channel: AbridgedHrmpChannel = read_entry(
&self.trie_backend,
&relay_chain::well_known_keys::hrmp_channels(channel_id),
@@ -189,10 +182,7 @@ impl RelayChainStateProof {
let mut egress_channels = Vec::with_capacity(egress_channel_index.len());
for recipient in egress_channel_index {
let channel_id = relay_chain::v1::HrmpChannelId {
sender: self.para_id,
recipient,
};
let channel_id = relay_chain::v1::HrmpChannelId { sender: self.para_id, recipient };
let hrmp_channel: AbridgedHrmpChannel = read_entry(
&self.trie_backend,
&relay_chain::well_known_keys::hrmp_channels(channel_id),
@@ -226,6 +216,7 @@ impl RelayChainStateProof {
///
/// Returns an error if anything failed at reading or decoding.
pub fn read_slot(&self) -> Result<relay_chain::v1::Slot, Error> {
read_entry(&self.trie_backend, relay_chain::well_known_keys::CURRENT_SLOT, None).map_err(Error::Slot)
read_entry(&self.trie_backend, relay_chain::well_known_keys::CURRENT_SLOT, None)
.map_err(Error::Slot)
}
}
+53 -119
View File
@@ -17,17 +17,17 @@ use super::*;
use codec::Encode;
use cumulus_primitives_core::{
AbridgedHrmpChannel, InboundDownwardMessage, InboundHrmpMessage, PersistedValidationData,
relay_chain::BlockNumber as RelayBlockNumber,
relay_chain::BlockNumber as RelayBlockNumber, AbridgedHrmpChannel, InboundDownwardMessage,
InboundHrmpMessage, PersistedValidationData,
};
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
use frame_support::{
assert_ok,
dispatch::UnfilteredDispatchable,
inherent::{InherentData, ProvideInherent},
parameter_types,
traits::{OnFinalize, OnInitialize},
weights::Weight,
inherent::{InherentData, ProvideInherent},
};
use frame_system::{InitKind, RawOrigin};
use hex_literal::hex;
@@ -113,10 +113,7 @@ std::thread_local! {
static SENT_MESSAGES: RefCell<Vec<(ParaId, Vec<u8>)>> = RefCell::new(Vec::new());
}
fn send_message(
dest: ParaId,
message: Vec<u8>,
) {
fn send_message(dest: ParaId, message: Vec<u8>) {
SENT_MESSAGES.with(|m| m.borrow_mut().push((dest, message)));
}
@@ -125,9 +122,9 @@ impl XcmpMessageSource for FromThreadLocal {
let mut ids = std::collections::BTreeSet::<ParaId>::new();
let mut taken = 0;
let mut result = Vec::new();
SENT_MESSAGES.with(|ms| ms.borrow_mut()
.retain(|m| {
let status = <Pallet::<Test> as GetChannelInfo>::get_channel_status(m.0);
SENT_MESSAGES.with(|ms| {
ms.borrow_mut().retain(|m| {
let status = <Pallet<Test> as GetChannelInfo>::get_channel_status(m.0);
let ready = matches!(status, ChannelStatus::Ready(..));
if ready && !ids.contains(&m.0) && taken < maximum_channels {
ids.insert(m.0);
@@ -138,14 +135,14 @@ impl XcmpMessageSource for FromThreadLocal {
true
}
})
);
});
result
}
}
impl DmpMessageHandler for SaveIntoThreadLocal {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
iter: impl Iterator<Item = (RelayBlockNumber, Vec<u8>)>,
_max_weight: Weight,
) -> Weight {
HANDLED_DMP_MESSAGES.with(|m| {
@@ -158,7 +155,7 @@ impl DmpMessageHandler for SaveIntoThreadLocal {
}
impl XcmpMessageHandler for SaveIntoThreadLocal {
fn handle_xcmp_messages<'a, I: Iterator<Item=(ParaId, RelayBlockNumber, &'a [u8])>>(
fn handle_xcmp_messages<'a, I: Iterator<Item = (ParaId, RelayBlockNumber, &'a [u8])>>(
iter: I,
_max_weight: Weight,
) -> Weight {
@@ -177,10 +174,7 @@ fn new_test_ext() -> sp_io::TestExternalities {
HANDLED_DMP_MESSAGES.with(|m| m.borrow_mut().clear());
HANDLED_XCMP_MESSAGES.with(|m| m.borrow_mut().clear());
frame_system::GenesisConfig::default()
.build_storage::<Test>()
.unwrap()
.into()
frame_system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
struct ReadRuntimeVersion(Vec<u8>);
@@ -204,9 +198,9 @@ fn wasm_ext() -> sp_io::TestExternalities {
};
let mut ext = new_test_ext();
ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(
ReadRuntimeVersion(version.encode()),
));
ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(ReadRuntimeVersion(
version.encode(),
)));
ext
}
@@ -247,11 +241,7 @@ impl BlockTests {
where
F: 'static + Fn(),
{
self.add_raw(BlockTest {
n,
within_block: Box::new(within_block),
after_block: None,
})
self.add_raw(BlockTest { n, within_block: Box::new(within_block), after_block: None })
}
fn add_with_post_test<F1, F2>(
@@ -299,12 +289,7 @@ impl BlockTests {
fn run(&mut self) {
self.ran = true;
wasm_ext().execute_with(|| {
for BlockTest {
n,
within_block,
after_block,
} in self.tests.iter()
{
for BlockTest { n, within_block, after_block } in self.tests.iter() {
// clear pending updates, as applicable
if let Some(upgrade_block) = self.pending_upgrade {
if n >= &upgrade_block.into() {
@@ -313,12 +298,7 @@ impl BlockTests {
}
// begin initialization
System::initialize(
&n,
&Default::default(),
&Default::default(),
InitKind::Full,
);
System::initialize(&n, &Default::default(), &Default::default(), InitKind::Full);
// now mess with the storage the way validate_block does
let mut sproof_builder = RelayStateSproofBuilder::default();
@@ -398,9 +378,7 @@ impl Drop for BlockTests {
#[test]
#[should_panic]
fn block_tests_run_on_drop() {
BlockTests::new().add(123, || {
panic!("if this test passes, block tests run properly")
});
BlockTests::new().add(123, || panic!("if this test passes, block tests run properly"));
}
#[test]
@@ -412,10 +390,7 @@ fn events() {
.add_with_post_test(
123,
|| {
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
},
|| {
let events = System::events();
@@ -445,10 +420,7 @@ fn non_overlapping() {
builder.host_config.validation_upgrade_delay = 1000;
})
.add(123, || {
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
})
.add(234, || {
assert_eq!(
@@ -466,14 +438,8 @@ fn manipulates_storage() {
!<PendingValidationCode<Test>>::exists(),
"validation function must not exist yet"
);
assert_ok!(System::set_code(
RawOrigin::Root.into(),
Default::default()
));
assert!(
<PendingValidationCode<Test>>::exists(),
"validation function must now exist"
);
assert_ok!(System::set_code(RawOrigin::Root.into(), Default::default()));
assert!(<PendingValidationCode<Test>>::exists(), "validation function must now exist");
})
.add_with_post_test(
1234,
@@ -573,10 +539,7 @@ fn send_hrmp_message_buffer_channel_close() {
sproof.para_id = ParaId::from(200);
sproof.hrmp_egress_channel_index = Some(vec![ParaId::from(300), ParaId::from(400)]);
sproof.hrmp_channels.insert(
HrmpChannelId {
sender: ParaId::from(200),
recipient: ParaId::from(300),
},
HrmpChannelId { sender: ParaId::from(200), recipient: ParaId::from(300) },
AbridgedHrmpChannel {
max_capacity: 1,
msg_count: 1, // <- 1/1 means the channel is full
@@ -587,10 +550,7 @@ fn send_hrmp_message_buffer_channel_close() {
},
);
sproof.hrmp_channels.insert(
HrmpChannelId {
sender: ParaId::from(200),
recipient: ParaId::from(400),
},
HrmpChannelId { sender: ParaId::from(200), recipient: ParaId::from(400) },
AbridgedHrmpChannel {
max_capacity: 1,
msg_count: 1,
@@ -605,8 +565,8 @@ fn send_hrmp_message_buffer_channel_close() {
// Adjustment according to block
//
match relay_block_num {
1 => {}
2 => {}
1 => {},
2 => {},
3 => {
// The channel 200->400 ceases to exist at the relay chain block 3
sproof
@@ -628,21 +588,15 @@ fn send_hrmp_message_buffer_channel_close() {
})
.unwrap()
.msg_count = 0;
}
},
_ => unreachable!(),
}
})
.add_with_post_test(
1,
|| {
send_message(
ParaId::from(300),
b"1".to_vec(),
);
send_message(
ParaId::from(400),
b"2".to_vec(),
);
send_message(ParaId::from(300), b"1".to_vec());
send_message(ParaId::from(400), b"2".to_vec());
},
|| {},
)
@@ -662,10 +616,7 @@ fn send_hrmp_message_buffer_channel_close() {
let v = HrmpOutboundMessages::<Test>::get();
assert_eq!(
v,
vec![OutboundHrmpMessage {
recipient: ParaId::from(300),
data: b"1".to_vec(),
}]
vec![OutboundHrmpMessage { recipient: ParaId::from(300), data: b"1".to_vec() }]
);
},
);
@@ -682,27 +633,15 @@ fn message_queue_chain() {
// These cases are taken from https://github.com/paritytech/polkadot/pull/2351
assert_eq!(
MessageQueueChain::default()
.extend_downward(&InboundDownwardMessage {
sent_at: 2,
msg: vec![1, 2, 3],
})
.extend_downward(&InboundDownwardMessage {
sent_at: 3,
msg: vec![4, 5, 6],
})
.extend_downward(&InboundDownwardMessage { sent_at: 2, msg: vec![1, 2, 3] })
.extend_downward(&InboundDownwardMessage { sent_at: 3, msg: vec![4, 5, 6] })
.head(),
hex!["88dc00db8cc9d22aa62b87807705831f164387dfa49f80a8600ed1cbe1704b6b"].into(),
);
assert_eq!(
MessageQueueChain::default()
.extend_hrmp(&InboundHrmpMessage {
sent_at: 2,
data: vec![1, 2, 3],
})
.extend_hrmp(&InboundHrmpMessage {
sent_at: 3,
data: vec![4, 5, 6],
})
.extend_hrmp(&InboundHrmpMessage { sent_at: 2, data: vec![1, 2, 3] })
.extend_hrmp(&InboundHrmpMessage { sent_at: 3, data: vec![4, 5, 6] })
.head(),
hex!["88dc00db8cc9d22aa62b87807705831f164387dfa49f80a8600ed1cbe1704b6b"].into(),
);
@@ -722,13 +661,13 @@ fn receive_dmp() {
1 => {
sproof.dmq_mqc_head =
Some(MessageQueueChain::default().extend_downward(&MSG).head());
}
},
_ => unreachable!(),
})
.with_inherent_data(|_, relay_block_num, data| match relay_block_num {
1 => {
data.downward_messages.push(MSG.clone());
}
},
_ => unreachable!(),
})
.add(1, || {
@@ -771,7 +710,7 @@ fn receive_hrmp() {
// 300 - one new message
sproof.upsert_inbound_channel(ParaId::from(300)).mqc_head =
Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head());
}
},
2 => {
// 200 - two new messages
// 300 - now present with one message.
@@ -784,20 +723,19 @@ fn receive_hrmp() {
.extend_hrmp(&MSG_3)
.head(),
);
}
},
3 => {
// 200 - no new messages
// 300 - is gone
sproof.upsert_inbound_channel(ParaId::from(200)).mqc_head =
Some(MessageQueueChain::default().extend_hrmp(&MSG_4).head());
}
},
_ => unreachable!(),
})
.with_inherent_data(|_, relay_block_num, data| match relay_block_num {
1 => {
data.horizontal_messages
.insert(ParaId::from(300), vec![MSG_1.clone()]);
}
data.horizontal_messages.insert(ParaId::from(300), vec![MSG_1.clone()]);
},
2 => {
data.horizontal_messages.insert(
ParaId::from(300),
@@ -809,10 +747,9 @@ fn receive_hrmp() {
MSG_3.clone(),
],
);
data.horizontal_messages
.insert(ParaId::from(200), vec![MSG_4.clone()]);
}
3 => {}
data.horizontal_messages.insert(ParaId::from(200), vec![MSG_4.clone()]);
},
3 => {},
_ => unreachable!(),
})
.add(1, || {
@@ -845,12 +782,12 @@ fn receive_hrmp_empty_channel() {
.with_relay_sproof_builder(|_, relay_block_num, sproof| match relay_block_num {
1 => {
// no channels
}
},
2 => {
// one new channel
sproof.upsert_inbound_channel(ParaId::from(300)).mqc_head =
Some(MessageQueueChain::default().head());
}
},
_ => unreachable!(),
})
.add(1, || {})
@@ -878,33 +815,30 @@ fn receive_hrmp_after_pause() {
1 => {
sproof.upsert_inbound_channel(ALICE).mqc_head =
Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head());
}
},
2 => {
// 300 - no new messages, mqc stayed the same.
sproof.upsert_inbound_channel(ALICE).mqc_head =
Some(MessageQueueChain::default().extend_hrmp(&MSG_1).head());
}
},
3 => {
// 300 - new message.
sproof.upsert_inbound_channel(ALICE).mqc_head = Some(
MessageQueueChain::default()
.extend_hrmp(&MSG_1)
.extend_hrmp(&MSG_2)
.head(),
MessageQueueChain::default().extend_hrmp(&MSG_1).extend_hrmp(&MSG_2).head(),
);
}
},
_ => unreachable!(),
})
.with_inherent_data(|_, relay_block_num, data| match relay_block_num {
1 => {
data.horizontal_messages.insert(ALICE, vec![MSG_1.clone()]);
}
},
2 => {
// no new messages
}
},
3 => {
data.horizontal_messages.insert(ALICE, vec![MSG_2.clone()]);
}
},
_ => unreachable!(),
})
.add(1, || {
@@ -16,7 +16,7 @@
//! The actual implementation of the validate block functionality.
use frame_support::traits::{ExecuteBlock, ExtrinsicCall, IsSubType, Get};
use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType};
use sp_runtime::traits::{Block as BlockT, Extrinsic, HashFor, Header as HeaderT, NumberFor};
use sp_io::KillStorageResult;
@@ -64,10 +64,7 @@ where
let head_data = HeadData(header.encode());
let block = B::new(header, extrinsics);
assert!(
parent_head.hash() == *block.header().parent_hash(),
"Invalid parent hash",
);
assert!(parent_head.hash() == *block.header().parent_hash(), "Invalid parent hash",);
// Uncompress
let mut db = MemoryDB::default();
@@ -128,7 +125,8 @@ where
.iter()
.filter_map(|e| e.call().is_sub_type())
.find_map(|c| match c {
crate::Call::set_validation_data { data: validation_data } => Some(validation_data.clone()),
crate::Call::set_validation_data { data: validation_data } =>
Some(validation_data.clone()),
_ => None,
})
.expect("Could not find `set_validation_data` inherent");
@@ -196,7 +194,7 @@ fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Opt
let written = sp_std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
Some(value.len() as u32)
}
},
None => None,
}
}
@@ -276,7 +274,7 @@ fn host_default_child_storage_read(
let written = sp_std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
Some(value.len() as u32)
}
},
None => None,
}
}
@@ -312,7 +310,11 @@ fn host_default_child_storage_exists(storage_key: &[u8], key: &[u8]) -> bool {
with_externalities(|ext| ext.exists_child_storage(&child_info, key))
}
fn host_default_child_storage_clear_prefix(storage_key: &[u8], prefix: &[u8], limit: Option<u32>) -> KillStorageResult {
fn host_default_child_storage_clear_prefix(
storage_key: &[u8],
prefix: &[u8],
limit: Option<u32>,
) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
with_externalities(|ext| {
let (all_removed, num_removed) = ext.clear_child_prefix(&child_info, prefix, limit);
@@ -79,16 +79,11 @@ fn build_block_with_witness(
validation_data.relay_parent_storage_root = relay_parent_storage_root;
extra_extrinsics
.into_iter()
.for_each(|e| builder.push(e).unwrap());
extra_extrinsics.into_iter().for_each(|e| builder.push(e).unwrap());
let block = builder.build_parachain_block(*parent_head.state_root());
TestBlockData {
block,
validation_data,
}
TestBlockData { block, validation_data }
}
#[test]
@@ -96,18 +91,13 @@ fn validate_block_no_extra_extrinsics() {
sp_tracing::try_init_simple();
let (client, parent_head) = create_test_client();
let TestBlockData {
block,
validation_data,
} = build_block_with_witness(&client, vec![], parent_head.clone(), Default::default());
let TestBlockData { block, validation_data } =
build_block_with_witness(&client, vec![], parent_head.clone(), Default::default());
let header = block.header().clone();
let res_header = call_validate_block(
parent_head,
block,
validation_data.relay_parent_storage_root,
)
.expect("Calls `validate_block`");
let res_header =
call_validate_block(parent_head, block, validation_data.relay_parent_storage_root)
.expect("Calls `validate_block`");
assert_eq!(header, res_header);
}
@@ -122,10 +112,7 @@ fn validate_block_with_extra_extrinsics() {
transfer(&client, Charlie, Alice, 500),
];
let TestBlockData {
block,
validation_data,
} = build_block_with_witness(
let TestBlockData { block, validation_data } = build_block_with_witness(
&client,
extra_extrinsics,
parent_head.clone(),
@@ -133,12 +120,9 @@ fn validate_block_with_extra_extrinsics() {
);
let header = block.header().clone();
let res_header = call_validate_block(
parent_head,
block,
validation_data.relay_parent_storage_root,
)
.expect("Calls `validate_block`");
let res_header =
call_validate_block(parent_head, block, validation_data.relay_parent_storage_root)
.expect("Calls `validate_block`");
assert_eq!(header, res_header);
}
@@ -148,20 +132,14 @@ fn validate_block_invalid_parent_hash() {
if env::var("RUN_TEST").is_ok() {
let (client, parent_head) = create_test_client();
let TestBlockData {
block,
validation_data,
} = build_block_with_witness(&client, vec![], parent_head.clone(), Default::default());
let TestBlockData { block, validation_data } =
build_block_with_witness(&client, vec![], parent_head.clone(), Default::default());
let (mut header, extrinsics, witness) = block.deconstruct();
header.set_parent_hash(Hash::from_low_u64_be(1));
let block_data = ParachainBlockData::new(header, extrinsics, witness);
call_validate_block(
parent_head,
block_data,
validation_data.relay_parent_storage_root,
)
.unwrap_err();
call_validate_block(parent_head, block_data, validation_data.relay_parent_storage_root)
.unwrap_err();
} else {
let output = Command::new(env::current_exe().unwrap())
.args(&["validate_block_invalid_parent_hash", "--", "--nocapture"])
@@ -186,11 +164,7 @@ fn validate_block_fails_on_invalid_validation_data() {
call_validate_block(parent_head, block, Hash::random()).unwrap_err();
} else {
let output = Command::new(env::current_exe().unwrap())
.args(&[
"validate_block_fails_on_invalid_validation_data",
"--",
"--nocapture",
])
.args(&["validate_block_fails_on_invalid_validation_data", "--", "--nocapture"])
.env("RUN_TEST", "1")
.output()
.expect("Runs the test");
@@ -208,32 +182,18 @@ fn check_inherent_fails_on_validate_block_as_expected() {
if env::var("RUN_TEST").is_ok() {
let (client, parent_head) = create_test_client();
let TestBlockData {
block,
validation_data,
} = build_block_with_witness(
let TestBlockData { block, validation_data } = build_block_with_witness(
&client,
vec![],
parent_head.clone(),
RelayStateSproofBuilder {
current_slot: 1337.into(),
..Default::default()
},
RelayStateSproofBuilder { current_slot: 1337.into(), ..Default::default() },
);
call_validate_block(
parent_head,
block,
validation_data.relay_parent_storage_root,
)
.unwrap_err();
call_validate_block(parent_head, block, validation_data.relay_parent_storage_root)
.unwrap_err();
} else {
let output = Command::new(env::current_exe().unwrap())
.args(&[
"check_inherent_fails_on_validate_block_as_expected",
"--",
"--nocapture",
])
.args(&["check_inherent_fails_on_validate_block_as_expected", "--", "--nocapture"])
.env("RUN_TEST", "1")
.output()
.expect("Runs the test");
@@ -16,16 +16,14 @@
//! Benchmarking setup for pallet-session
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg(feature = "runtime-benchmarks")]
use sp_std::prelude::*;
use sp_std::vec;
use sp_std::{prelude::*, vec};
use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
use frame_system::{RawOrigin};
use frame_system::RawOrigin;
use pallet_session::*;
pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
pub trait Config: pallet_session::Config {}
benchmarks! {
set_keys {
let caller: T::AccountId = whitelisted_caller();
@@ -43,9 +41,4 @@ benchmarks! {
}: _(RawOrigin::Signed(caller))
}
impl_benchmark_test_suite!(
Pallet,
crate::mock::new_test_ext(),
crate::mock::Test,
extra = false,
);
impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false,);
+22 -19
View File
@@ -20,15 +20,19 @@
#![cfg_attr(not(feature = "std"), no_std)]
use sp_std::{prelude::*, convert::TryFrom};
use cumulus_primitives_core::{ParaId, DmpMessageHandler};
use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber;
use codec::{Encode, Decode};
use scale_info::TypeInfo;
use sp_runtime::traits::BadOrigin;
use xcm::{VersionedXcm, latest::{Xcm, Outcome, Parent, ExecuteXcm}};
use codec::{Decode, Encode};
use cumulus_primitives_core::{
relay_chain::BlockNumber as RelayBlockNumber, DmpMessageHandler, ParaId,
};
use frame_support::dispatch::Weight;
pub use pallet::*;
use scale_info::TypeInfo;
use sp_runtime::traits::BadOrigin;
use sp_std::{convert::TryFrom, prelude::*};
use xcm::{
latest::{ExecuteXcm, Outcome, Parent, Xcm},
VersionedXcm,
};
#[frame_support::pallet]
pub mod pallet {
@@ -50,8 +54,7 @@ pub mod pallet {
}
#[pallet::error]
pub enum Error<T> {
}
pub enum Error<T> {}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
@@ -106,14 +109,13 @@ pub mod pallet {
pub struct UnlimitedDmpExecution<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> DmpMessageHandler for UnlimitedDmpExecution<T> {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
iter: impl Iterator<Item = (RelayBlockNumber, Vec<u8>)>,
limit: Weight,
) -> Weight {
let mut used = 0;
for (_sent_at, data) in iter {
let id = sp_io::hashing::twox_64(&data[..]);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
.map(Xcm::<T::Call>::try_from);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..]).map(Xcm::<T::Call>::try_from);
match msg {
Err(_) => Pallet::<T>::deposit_event(Event::InvalidFormat(id)),
Ok(Err(())) => Pallet::<T>::deposit_event(Event::UnsupportedVersion(id)),
@@ -121,7 +123,7 @@ impl<T: Config> DmpMessageHandler for UnlimitedDmpExecution<T> {
let outcome = T::XcmExecutor::execute_xcm(Parent.into(), x, limit);
used += outcome.weight_used();
Pallet::<T>::deposit_event(Event::ExecutedDownward(id, outcome));
}
},
}
}
used
@@ -136,14 +138,13 @@ impl<T: Config> DmpMessageHandler for UnlimitedDmpExecution<T> {
pub struct LimitAndDropDmpExecution<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> DmpMessageHandler for LimitAndDropDmpExecution<T> {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
iter: impl Iterator<Item = (RelayBlockNumber, Vec<u8>)>,
limit: Weight,
) -> Weight {
let mut used = 0;
for (_sent_at, data) in iter {
let id = sp_io::hashing::twox_64(&data[..]);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
.map(Xcm::<T::Call>::try_from);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..]).map(Xcm::<T::Call>::try_from);
match msg {
Err(_) => Pallet::<T>::deposit_event(Event::InvalidFormat(id)),
Ok(Err(())) => Pallet::<T>::deposit_event(Event::UnsupportedVersion(id)),
@@ -152,7 +153,7 @@ impl<T: Config> DmpMessageHandler for LimitAndDropDmpExecution<T> {
let outcome = T::XcmExecutor::execute_xcm(Parent.into(), x, weight_limit);
used += outcome.weight_used();
Pallet::<T>::deposit_event(Event::ExecutedDownward(id, outcome));
}
},
}
}
used
@@ -162,7 +163,8 @@ impl<T: Config> DmpMessageHandler for LimitAndDropDmpExecution<T> {
/// Ensure that the origin `o` represents a sibling parachain.
/// Returns `Ok` with the parachain ID of the sibling or an `Err` otherwise.
pub fn ensure_sibling_para<OuterOrigin>(o: OuterOrigin) -> Result<ParaId, BadOrigin>
where OuterOrigin: Into<Result<Origin, OuterOrigin>>
where
OuterOrigin: Into<Result<Origin, OuterOrigin>>,
{
match o.into() {
Ok(Origin::SiblingParachain(id)) => Ok(id),
@@ -173,7 +175,8 @@ pub fn ensure_sibling_para<OuterOrigin>(o: OuterOrigin) -> Result<ParaId, BadOri
/// Ensure that the origin `o` represents is the relay chain.
/// Returns `Ok` if it does or an `Err` otherwise.
pub fn ensure_relay<OuterOrigin>(o: OuterOrigin) -> Result<(), BadOrigin>
where OuterOrigin: Into<Result<Origin, OuterOrigin>>
where
OuterOrigin: Into<Result<Origin, OuterOrigin>>,
{
match o.into() {
Ok(Origin::Relay) => Ok(()),
+54 -84
View File
@@ -34,7 +34,7 @@ mod tests;
use codec::{Decode, Encode};
use cumulus_primitives_core::{
relay_chain::BlockNumber as RelayBlockNumber, ChannelStatus, GetChannelInfo, MessageSendError,
ParaId, XcmpMessageHandler, XcmpMessageSource, XcmpMessageFormat,
ParaId, XcmpMessageFormat, XcmpMessageHandler, XcmpMessageSource,
};
use frame_support::weights::Weight;
use rand_chacha::{
@@ -43,8 +43,8 @@ use rand_chacha::{
};
use scale_info::TypeInfo;
use sp_runtime::{traits::Hash, RuntimeDebug};
use sp_std::{prelude::*, convert::TryFrom};
use xcm::{latest::prelude::*, WrapVersion, VersionedXcm};
use sp_std::{convert::TryFrom, prelude::*};
use xcm::{latest::prelude::*, VersionedXcm, WrapVersion};
pub use pallet::*;
@@ -126,11 +126,7 @@ pub mod pallet {
#[pallet::storage]
pub(super) type InboundXcmpStatus<T: Config> = StorageValue<
_,
Vec<(
ParaId,
InboundStatus,
Vec<(RelayBlockNumber, XcmpMessageFormat)>,
)>,
Vec<(ParaId, InboundStatus, Vec<(RelayBlockNumber, XcmpMessageFormat)>)>,
ValueQuery,
>;
@@ -242,28 +238,25 @@ impl<T: Config> Pallet<T> {
let max_message_size =
T::ChannelInfo::get_channel_max(recipient).ok_or(MessageSendError::NoChannel)?;
if data.len() > max_message_size {
return Err(MessageSendError::TooBig);
return Err(MessageSendError::TooBig)
}
let mut s = <OutboundXcmpStatus<T>>::get();
let index = s
.iter()
.position(|item| item.0 == recipient)
.unwrap_or_else(|| {
s.push((recipient, OutboundStatus::Ok, false, 0, 0));
s.len() - 1
});
let index = s.iter().position(|item| item.0 == recipient).unwrap_or_else(|| {
s.push((recipient, OutboundStatus::Ok, false, 0, 0));
s.len() - 1
});
let have_active = s[index].4 > s[index].3;
let appended = have_active
&& <OutboundXcmpMessages<T>>::mutate(recipient, s[index].4 - 1, |s| {
let appended = have_active &&
<OutboundXcmpMessages<T>>::mutate(recipient, s[index].4 - 1, |s| {
if XcmpMessageFormat::decode(&mut &s[..]) != Ok(format) {
return false;
return false
}
if s.len() + data.len() > max_message_size {
return false;
return false
}
s.extend_from_slice(&data[..]);
return true;
return true
});
if appended {
Ok((s[index].4 - s[index].3 - 1) as u32)
@@ -316,10 +309,9 @@ impl<T: Config> Pallet<T> {
// Create a shuffled order for use to iterate through.
// Not a great random seed, but good enough for our purposes.
let seed = frame_system::Pallet::<T>::parent_hash();
let seed = <[u8; 32]>::decode(&mut sp_runtime::traits::TrailingZeroInput::new(
seed.as_ref(),
))
.expect("input is padded with zeroes; qed");
let seed =
<[u8; 32]>::decode(&mut sp_runtime::traits::TrailingZeroInput::new(seed.as_ref()))
.expect("input is padded with zeroes; qed");
let mut rng = ChaChaRng::from_seed(seed);
let mut shuffled = (0..len).collect::<Vec<_>>();
for i in 0..len {
@@ -359,11 +351,8 @@ impl<T: Config> Pallet<T> {
// we just report the weight used.
Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)),
}
}
Err(()) => (
Err(XcmError::UnhandledXcmVersion),
Event::BadVersion(Some(hash)),
),
},
Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))),
};
Self::deposit_event(event);
result
@@ -390,18 +379,18 @@ impl<T: Config> Pallet<T> {
// That message didn't get processed this time because of being
// too heavy. We leave it around for next time and bail.
remaining_fragments = last_remaining_fragments;
break;
}
break
},
Err(_) => {
// Message looks invalid; don't attempt to retry
}
},
}
} else {
debug_assert!(false, "Invalid incoming XCMP message data");
remaining_fragments = &b""[..];
}
}
}
},
XcmpMessageFormat::ConcatenatedEncodedBlob => {
while !remaining_fragments.is_empty() {
last_remaining_fragments = remaining_fragments;
@@ -413,22 +402,22 @@ impl<T: Config> Pallet<T> {
// That message didn't get processed this time because of being
// too heavy. We leave it around for next time and bail.
remaining_fragments = last_remaining_fragments;
break;
}
break
},
Err(false) => {
// Message invalid; don't attempt to retry
}
},
}
} else {
debug_assert!(false, "Invalid incoming blob message data");
remaining_fragments = &b""[..];
}
}
}
},
XcmpMessageFormat::Signals => {
debug_assert!(false, "All signals are handled immediately; qed");
remaining_fragments = &b""[..];
}
},
}
let is_empty = remaining_fragments.is_empty();
if is_empty {
@@ -469,15 +458,11 @@ impl<T: Config> Pallet<T> {
fn service_xcmp_queue(max_weight: Weight) -> Weight {
let mut status = <InboundXcmpStatus<T>>::get(); // <- sorted.
if status.len() == 0 {
return 0;
return 0
}
let QueueConfigData {
resume_threshold,
threshold_weight,
weight_restrict_decay,
..
} = <QueueConfig<T>>::get();
let QueueConfigData { resume_threshold, threshold_weight, weight_restrict_decay, .. } =
<QueueConfig<T>>::get();
let mut shuffled = Self::create_shuffle(status.len());
let mut weight_used = 0;
@@ -492,8 +477,8 @@ impl<T: Config> Pallet<T> {
// send more, heavier messages.
let mut shuffle_index = 0;
while shuffle_index < shuffled.len()
&& max_weight.saturating_sub(weight_used) >= threshold_weight
while shuffle_index < shuffled.len() &&
max_weight.saturating_sub(weight_used) >= threshold_weight
{
let index = shuffled[shuffle_index];
let sender = status[index].0;
@@ -514,10 +499,7 @@ impl<T: Config> Pallet<T> {
}
let weight_processed = if status[index].2.is_empty() {
debug_assert!(
false,
"channel exists in status; there must be messages; qed"
);
debug_assert!(false, "channel exists in status; there must be messages; qed");
0
} else {
// Process up to one block's worth for now.
@@ -531,26 +513,24 @@ impl<T: Config> Pallet<T> {
};
weight_used += weight_processed;
if status[index].2.len() as u32 <= resume_threshold
&& status[index].1 == InboundStatus::Suspended
if status[index].2.len() as u32 <= resume_threshold &&
status[index].1 == InboundStatus::Suspended
{
// Resume
let r = Self::send_signal(sender, ChannelSignal::Resume);
debug_assert!(
r.is_ok(),
"WARNING: Failed sending resume into suspended channel"
);
debug_assert!(r.is_ok(), "WARNING: Failed sending resume into suspended channel");
status[index].1 = InboundStatus::Ok;
}
// If there are more and we're making progress, we process them after we've given the
// other channels a look in. If we've still not unlocked all weight, then we set them
// up for processing a second time anyway.
if !status[index].2.is_empty() && (weight_processed > 0 || weight_available != max_weight)
if !status[index].2.is_empty() &&
(weight_processed > 0 || weight_available != max_weight)
{
if shuffle_index + 1 == shuffled.len() {
// Only this queue left. Just run around this loop once more.
continue;
continue
}
shuffled.push(index);
}
@@ -590,10 +570,7 @@ impl<T: Config> Pallet<T> {
s[index].1 = OutboundStatus::Ok;
}
} else {
debug_assert!(
false,
"WARNING: Attempt to resume channel that was not suspended."
);
debug_assert!(false, "WARNING: Attempt to resume channel that was not suspended.");
}
});
}
@@ -606,11 +583,7 @@ impl<T: Config> XcmpMessageHandler for Pallet<T> {
) -> Weight {
let mut status = <InboundXcmpStatus<T>>::get();
let QueueConfigData {
suspend_threshold,
drop_threshold,
..
} = <QueueConfig<T>>::get();
let QueueConfigData { suspend_threshold, drop_threshold, .. } = <QueueConfig<T>>::get();
for (sender, sent_at, data) in iter {
// Figure out the message format.
@@ -618,12 +591,9 @@ impl<T: Config> XcmpMessageHandler for Pallet<T> {
let format = match XcmpMessageFormat::decode(&mut data_ref) {
Ok(f) => f,
Err(_) => {
debug_assert!(
false,
"Unknown XCMP message format. Silently dropping message"
);
continue;
}
debug_assert!(false, "Unknown XCMP message format. Silently dropping message");
continue
},
};
if format == XcmpMessageFormat::Signals {
while !data_ref.is_empty() {
@@ -656,7 +626,7 @@ impl<T: Config> XcmpMessageHandler for Pallet<T> {
"XCMP channel queue full. Silently dropping message"
);
}
}
},
Err(_) => status.push((sender, InboundStatus::Ok, vec![(sent_at, format)])),
}
// Queue the payload for later execution.
@@ -686,10 +656,10 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
if result.len() == max_message_count {
// We check this condition in the beginning of the loop so that we don't include
// a message where the limit is 0.
break;
break
}
if outbound_status == OutboundStatus::Suspended {
continue;
continue
}
let (max_size_now, max_size_ever) = match T::ChannelInfo::get_channel_status(para_id) {
ChannelStatus::Closed => {
@@ -702,8 +672,8 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
<SignalMessages<T>>::remove(para_id);
}
*status = (para_id, OutboundStatus::Ok, false, 0, 0);
continue;
}
continue
},
ChannelStatus::Full => continue,
ChannelStatus::Ready(n, e) => (n, e),
};
@@ -715,7 +685,7 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
signalling = false;
page
} else {
continue;
continue
}
} else if end > begin {
let page = <OutboundXcmpMessages<T>>::get(para_id, begin);
@@ -724,10 +694,10 @@ impl<T: Config> XcmpMessageSource for Pallet<T> {
begin += 1;
page
} else {
continue;
continue
}
} else {
continue;
continue
};
if begin == end {
begin = 0;
@@ -789,7 +759,7 @@ impl<T: Config> SendXcm for Pallet<T> {
.map_err(|e| SendError::Transport(<&'static str>::from(e)))?;
Self::deposit_event(Event::XcmpMessageSent(Some(hash)));
Ok(())
}
},
// Anything else is unhandled. This includes a message this is meant for us.
_ => Err(SendError::CannotReachDestination(dest, msg)),
}
+7 -7
View File
@@ -15,12 +15,14 @@
use super::*;
use crate as xcmp_queue;
use sp_core::H256;
use frame_support::parameter_types;
use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header};
use sp_core::H256;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, IdentityLookup},
};
use xcm_builder::{
FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset, CurrencyAdapter,
ParentIsDefault,
CurrencyAdapter, FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset, ParentIsDefault,
};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
@@ -124,9 +126,7 @@ pub type LocalAssetTransactor = CurrencyAdapter<
(),
>;
pub type LocationToAccountId = (
ParentIsDefault<AccountId>,
);
pub type LocationToAccountId = (ParentIsDefault<AccountId>,);
pub struct XcmConfig;
impl xcm_executor::Config for XcmConfig {
+3 -12
View File
@@ -14,25 +14,16 @@
// limitations under the License.
use super::*;
use mock::{new_test_ext, XcmpQueue};
use cumulus_primitives_core::XcmpMessageHandler;
use mock::{new_test_ext, XcmpQueue};
#[test]
fn one_message_does_not_panic() {
new_test_ext().execute_with(|| {
let message_format = XcmpMessageFormat::ConcatenatedVersionedXcm.encode();
let messages = vec![
(
Default::default(),
1u32.into(),
message_format.as_slice(),
),
];
let messages = vec![(Default::default(), 1u32.into(), message_format.as_slice())];
// This shouldn't cause a panic
XcmpQueue::handle_xcmp_messages(
messages.into_iter(),
Weight::max_value(),
);
XcmpQueue::handle_xcmp_messages(messages.into_iter(), Weight::max_value());
})
}
@@ -22,9 +22,9 @@ pub use pallet::*;
#[frame_support::pallet]
pub mod pallet {
use cumulus_primitives_core::ParaId;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
use cumulus_primitives_core::ParaId;
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
@@ -47,9 +47,7 @@ pub mod pallet {
#[cfg(feature = "std")]
impl Default for GenesisConfig {
fn default() -> Self {
Self {
parachain_id: 100.into()
}
Self { parachain_id: 100.into() }
}
}
@@ -61,11 +59,14 @@ pub mod pallet {
}
#[pallet::type_value]
pub(super) fn DefaultForParachainId() -> ParaId { 100.into() }
pub(super) fn DefaultForParachainId() -> ParaId {
100.into()
}
#[pallet::storage]
#[pallet::getter(fn parachain_id)]
pub(super) type ParachainId<T: Config> = StorageValue<_, ParaId, ValueQuery, DefaultForParachainId>;
pub(super) type ParachainId<T: Config> =
StorageValue<_, ParaId, ValueQuery, DefaultForParachainId>;
impl<T: Config> Get<ParaId> for Pallet<T> {
fn get() -> ParaId {
@@ -18,20 +18,20 @@
#![cfg_attr(not(feature = "std"), no_std)]
use cumulus_pallet_xcm::{ensure_sibling_para, Origin as CumulusOrigin};
use cumulus_primitives_core::ParaId;
use frame_system::Config as SystemConfig;
use sp_runtime::traits::Saturating;
use sp_std::prelude::*;
use xcm::latest::prelude::*;
use sp_runtime::traits::Saturating;
use frame_system::Config as SystemConfig;
use cumulus_primitives_core::ParaId;
use cumulus_pallet_xcm::{Origin as CumulusOrigin, ensure_sibling_para};
pub use pallet::*;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
use super::*;
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
@@ -43,7 +43,8 @@ pub mod pallet {
/// The overarching event type.
type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
type Origin: From<<Self as SystemConfig>::Origin> + Into<Result<CumulusOrigin, <Self as Config>::Origin>>;
type Origin: From<<Self as SystemConfig>::Origin>
+ Into<Result<CumulusOrigin, <Self as Config>::Origin>>;
/// The overarching call type; we assume sibling chains use the same type.
type Call: From<Call<Self>> + Encode;
@@ -53,29 +54,16 @@ pub mod pallet {
/// The target parachains to ping.
#[pallet::storage]
pub(super) type Targets<T: Config> = StorageValue<
_,
Vec<(ParaId, Vec<u8>)>,
ValueQuery,
>;
pub(super) type Targets<T: Config> = StorageValue<_, Vec<(ParaId, Vec<u8>)>, ValueQuery>;
/// The total number of pings sent.
#[pallet::storage]
pub(super) type PingCount<T: Config> = StorageValue<
_,
u32,
ValueQuery,
>;
pub(super) type PingCount<T: Config> = StorageValue<_, u32, ValueQuery>;
/// The sent pings.
#[pallet::storage]
pub(super) type Pings<T: Config> = StorageMap<
_,
Blake2_128Concat,
u32,
T::BlockNumber,
OptionQuery,
>;
pub(super) type Pings<T: Config> =
StorageMap<_, Blake2_128Concat, u32, T::BlockNumber, OptionQuery>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
@@ -94,17 +82,23 @@ pub mod pallet {
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_finalize(
n: T::BlockNumber,
) {
fn on_finalize(n: T::BlockNumber) {
for (para, payload) in Targets::<T>::get().into_iter() {
let seq = PingCount::<T>::mutate(|seq| { *seq += 1; *seq });
let seq = PingCount::<T>::mutate(|seq| {
*seq += 1;
*seq
});
match T::XcmSender::send_xcm(
(1, Junction::Parachain(para.into())).into(),
Xcm(vec![Transact {
origin_type: OriginKind::Native,
require_weight_at_most: 1_000,
call: <T as Config>::Call::from(Call::<T>::ping { seq, payload: payload.clone() }).encode().into(),
call: <T as Config>::Call::from(Call::<T>::ping {
seq,
payload: payload.clone(),
})
.encode()
.into(),
}]),
) {
Ok(()) => {
@@ -113,7 +107,7 @@ pub mod pallet {
},
Err(e) => {
Self::deposit_event(Event::ErrorSendingPing(e, para, seq, payload));
}
},
}
}
}
@@ -129,7 +123,12 @@ pub mod pallet {
}
#[pallet::weight(0)]
pub fn start_many(origin: OriginFor<T>, para: ParaId, count: u32, payload: Vec<u8>) -> DispatchResult {
pub fn start_many(
origin: OriginFor<T>,
para: ParaId,
count: u32,
payload: Vec<u8>,
) -> DispatchResult {
ensure_root(origin)?;
for _ in 0..count {
Targets::<T>::mutate(|t| t.push((para, payload.clone())));
@@ -140,7 +139,11 @@ pub mod pallet {
#[pallet::weight(0)]
pub fn stop(origin: OriginFor<T>, para: ParaId) -> DispatchResult {
ensure_root(origin)?;
Targets::<T>::mutate(|t| if let Some(p) = t.iter().position(|(p, _)| p == &para) { t.swap_remove(p); });
Targets::<T>::mutate(|t| {
if let Some(p) = t.iter().position(|(p, _)| p == &para) {
t.swap_remove(p);
}
});
Ok(())
}
@@ -166,7 +169,12 @@ pub mod pallet {
Xcm(vec![Transact {
origin_type: OriginKind::Native,
require_weight_at_most: 1_000,
call: <T as Config>::Call::from(Call::<T>::pong { seq, payload: payload.clone() } ).encode().into(),
call: <T as Config>::Call::from(Call::<T>::pong {
seq,
payload: payload.clone(),
})
.encode()
.into(),
}]),
) {
Ok(()) => Self::deposit_event(Event::PongSent(para, seq, payload)),
@@ -181,7 +189,12 @@ pub mod pallet {
let para = ensure_sibling_para(<T as Config>::Origin::from(origin))?;
if let Some(sent_at) = Pings::<T>::take(seq) {
Self::deposit_event(Event::Ponged(para, seq, payload, frame_system::Pallet::<T>::block_number().saturating_sub(sent_at)));
Self::deposit_event(Event::Ponged(
para,
seq,
payload,
frame_system::Pallet::<T>::block_number().saturating_sub(sent_at),
));
} else {
// Pong received for a ping we apparently didn't send?!
Self::deposit_event(Event::UnknownPong(para, seq, payload));
@@ -185,9 +185,7 @@ mod tests {
}
pub fn new_test_ext() -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.unwrap();
let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
// We use default for brevity, but you can configure as desired if needed.
pallet_balances::GenesisConfig::<Test>::default()
.assimilate_storage(&mut t)
@@ -107,10 +107,7 @@ pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers.
+1 -4
View File
@@ -75,10 +75,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers.
+18 -72
View File
@@ -72,10 +72,7 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec {
move || {
testnet_genesis(
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
get_from_seed::<AuraId>("Alice"),
get_from_seed::<AuraId>("Bob"),
],
vec![get_from_seed::<AuraId>("Alice"), get_from_seed::<AuraId>("Bob")],
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
@@ -97,10 +94,7 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec {
None,
None,
None,
Extensions {
relay_chain: "westend".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend".into(), para_id: id.into() },
)
}
@@ -114,10 +108,7 @@ pub fn get_shell_chain_spec(id: ParaId) -> ShellChainSpec {
None,
None,
None,
Extensions {
relay_chain: "westend".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend".into(), para_id: id.into() },
)
}
@@ -138,7 +129,7 @@ pub fn staging_test_net(id: ParaId) -> ChainSpec {
.unchecked_into(),
],
vec![
hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into(),
hex!["9ed7705e3c7da027ba0583a22a3212042f7e715d3c168ba14f1424e2bc111d00"].into()
],
id,
)
@@ -147,10 +138,7 @@ pub fn staging_test_net(id: ParaId) -> ChainSpec {
None,
None,
None,
Extensions {
relay_chain: "westend".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend".into(), para_id: id.into() },
)
}
@@ -168,17 +156,11 @@ fn testnet_genesis(
changes_trie_config: Default::default(),
},
balances: rococo_parachain_runtime::BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, 1 << 60))
.collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
},
sudo: rococo_parachain_runtime::SudoConfig { key: root_key },
parachain_info: rococo_parachain_runtime::ParachainInfoConfig { parachain_id: id },
aura: rococo_parachain_runtime::AuraConfig {
authorities: initial_authorities,
},
aura: rococo_parachain_runtime::AuraConfig { authorities: initial_authorities },
aura_ext: Default::default(),
parachain_system: Default::default(),
}
@@ -277,10 +259,7 @@ pub fn statemint_development_config(id: ParaId) -> StatemintChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "polkadot-dev".into(),
para_id: id.into(),
},
Extensions { relay_chain: "polkadot-dev".into(), para_id: id.into() },
)
}
@@ -329,10 +308,7 @@ pub fn statemint_local_config(id: ParaId) -> StatemintChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "polkadot-local".into(),
para_id: id.into(),
},
Extensions { relay_chain: "polkadot-local".into(), para_id: id.into() },
)
}
@@ -349,11 +325,7 @@ fn statemint_genesis(
changes_trie_config: Default::default(),
},
balances: statemint_runtime::BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, STATEMINT_ED * 4096))
.collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, STATEMINT_ED * 4096)).collect(),
},
parachain_info: statemint_runtime::ParachainInfoConfig { parachain_id: id },
collator_selection: statemint_runtime::CollatorSelectionConfig {
@@ -413,10 +385,7 @@ pub fn statemine_development_config(id: ParaId) -> StatemineChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "kusama-dev".into(),
para_id: id.into(),
},
Extensions { relay_chain: "kusama-dev".into(), para_id: id.into() },
)
}
@@ -465,10 +434,7 @@ pub fn statemine_local_config(id: ParaId) -> StatemineChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "kusama-local".into(),
para_id: id.into(),
},
Extensions { relay_chain: "kusama-local".into(), para_id: id.into() },
)
}
@@ -520,10 +486,7 @@ pub fn statemine_config(id: ParaId) -> StatemineChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "kusama".into(),
para_id: id.into(),
},
Extensions { relay_chain: "kusama".into(), para_id: id.into() },
)
}
@@ -540,11 +503,7 @@ fn statemine_genesis(
changes_trie_config: Default::default(),
},
balances: statemine_runtime::BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, STATEMINE_ED * 4096))
.collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, STATEMINE_ED * 4096)).collect(),
},
parachain_info: statemine_runtime::ParachainInfoConfig { parachain_id: id },
collator_selection: statemine_runtime::CollatorSelectionConfig {
@@ -603,10 +562,7 @@ pub fn westmint_development_config(id: ParaId) -> WestmintChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "westend".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend".into(), para_id: id.into() },
)
}
@@ -656,10 +612,7 @@ pub fn westmint_local_config(id: ParaId) -> WestmintChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "westend-local".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend-local".into(), para_id: id.into() },
)
}
@@ -713,10 +666,7 @@ pub fn westmint_config(id: ParaId) -> WestmintChainSpec {
None,
None,
Some(properties),
Extensions {
relay_chain: "westend".into(),
para_id: id.into(),
},
Extensions { relay_chain: "westend".into(), para_id: id.into() },
)
}
@@ -734,11 +684,7 @@ fn westmint_genesis(
changes_trie_config: Default::default(),
},
balances: westmint_runtime::BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, WESTMINT_ED * 4096))
.collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, WESTMINT_ED * 4096)).collect(),
},
sudo: westmint_runtime::SudoConfig { key: root_key },
parachain_info: westmint_runtime::ParachainInfoConfig { parachain_id: id },
+2 -9
View File
@@ -132,14 +132,7 @@ impl RelayChainCli {
) -> Self {
let extension = chain_spec::Extensions::try_get(&*para_config.chain_spec);
let chain_id = extension.map(|e| e.relay_chain.clone());
let base_path = para_config
.base_path
.as_ref()
.map(|x| x.path().join("polkadot"));
Self {
base_path,
chain_id,
base: polkadot_cli::RunCmd::from_iter(relay_chain_args),
}
let base_path = para_config.base_path.as_ref().map(|x| x.path().join("polkadot"));
Self { base_path, chain_id, base: polkadot_cli::RunCmd::from_iter(relay_chain_args) }
}
}
+14 -26
View File
@@ -124,7 +124,7 @@ fn load_spec(
} else {
Box::new(chain_spec)
}
}
},
})
}
@@ -292,27 +292,27 @@ pub fn run() -> Result<()> {
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
}
},
Some(Subcommand::CheckBlock(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.import_queue))
})
}
},
Some(Subcommand::ExportBlocks(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, config.database))
})
}
},
Some(Subcommand::ExportState(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, config.chain_spec))
})
}
},
Some(Subcommand::ImportBlocks(cmd)) => {
construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.import_queue))
})
}
},
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
@@ -333,7 +333,7 @@ pub fn run() -> Result<()> {
cmd.run(config, polkadot_config)
})
}
},
Some(Subcommand::Revert(cmd)) => construct_async_run!(|components, cli, cmd, config| {
Ok(cmd.run(components.client, components.backend))
}),
@@ -360,7 +360,7 @@ pub fn run() -> Result<()> {
}
Ok(())
}
},
Some(Subcommand::ExportGenesisWasm(params)) => {
let mut builder = sc_cli::LoggerBuilder::new("");
builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
@@ -381,8 +381,8 @@ pub fn run() -> Result<()> {
}
Ok(())
}
Some(Subcommand::Benchmark(cmd)) => {
},
Some(Subcommand::Benchmark(cmd)) =>
if cfg!(feature = "runtime-benchmarks") {
let runner = cli.create_runner(cmd)?;
if runner.config().chain_spec.is_statemine() {
@@ -398,8 +398,7 @@ pub fn run() -> Result<()> {
Err("Benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
.into())
}
}
},
None => {
let runner = cli.create_runner(&cli.run.normalize())?;
@@ -431,14 +430,7 @@ pub fn run() -> Result<()> {
info!("Parachain id: {:?}", id);
info!("Parachain Account: {}", parachain_account);
info!("Parachain genesis state: {}", genesis_state);
info!(
"Is collating: {}",
if config.role.is_authority() {
"yes"
} else {
"no"
}
);
info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
if config.chain_spec.is_statemint() {
crate::service::start_statemint_node::<
@@ -476,7 +468,7 @@ pub fn run() -> Result<()> {
.map_err(Into::into)
}
})
}
},
}
}
@@ -545,11 +537,7 @@ impl CliConfiguration<Self> for RelayChainCli {
fn chain_id(&self, is_dev: bool) -> Result<String> {
let chain_id = self.base.base.chain_id(is_dev)?;
Ok(if chain_id.is_empty() {
self.chain_id.clone().unwrap_or_default()
} else {
chain_id
})
Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { chain_id })
}
fn role(&self, is_dev: bool) -> Result<sc_service::Role> {
+3 -13
View File
@@ -61,20 +61,10 @@ where
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi};
let mut io = jsonrpc_core::IoHandler::default();
let FullDeps {
client,
pool,
deny_unsafe,
} = deps;
let FullDeps { client, pool, deny_unsafe } = deps;
io.extend_with(SystemApi::to_delegate(FullSystem::new(
client.clone(),
pool,
deny_unsafe,
)));
io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(
client.clone(),
)));
io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)));
io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())));
io
}
+26 -43
View File
@@ -308,7 +308,7 @@ where
) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
{
if matches!(parachain_config.role, Role::Light) {
return Err("Light client not supported!".into());
return Err("Light client not supported!".into())
}
let parachain_config = prepare_node_config(parachain_config);
@@ -486,7 +486,7 @@ where
) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
{
if matches!(parachain_config.role, Role::Light) {
return Err("Light client not supported!".into());
return Err("Light client not supported!".into())
}
let parachain_config = prepare_node_config(parachain_config);
@@ -800,13 +800,7 @@ pub async fn start_shell_node(
TFullClient<Block, shell_runtime::RuntimeApi, NativeElseWasmExecutor<ShellRuntimeExecutor>>,
>,
)> {
start_shell_node_impl::<
shell_runtime::RuntimeApi,
ShellRuntimeExecutor,
_,
_,
_,
>(
start_shell_node_impl::<shell_runtime::RuntimeApi, ShellRuntimeExecutor, _, _, _>(
parachain_config,
polkadot_config,
id,
@@ -832,17 +826,15 @@ pub async fn start_shell_node(
let relay_chain_backend = relay_chain_node.backend.clone();
let relay_chain_client = relay_chain_node.client.clone();
Ok(
cumulus_client_consensus_relay_chain::build_relay_chain_consensus(
cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams {
para_id: id,
proposer_factory,
block_import: client.clone(),
relay_chain_client: relay_chain_node.client.clone(),
relay_chain_backend: relay_chain_node.backend.clone(),
create_inherent_data_providers:
move |_, (relay_parent, validation_data)| {
let parachain_inherent =
Ok(cumulus_client_consensus_relay_chain::build_relay_chain_consensus(
cumulus_client_consensus_relay_chain::BuildRelayChainConsensusParams {
para_id: id,
proposer_factory,
block_import: client.clone(),
relay_chain_client: relay_chain_node.client.clone(),
relay_chain_backend: relay_chain_node.backend.clone(),
create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client(
relay_parent,
&relay_chain_client,
@@ -850,19 +842,17 @@ pub async fn start_shell_node(
&validation_data,
id,
);
async move {
let parachain_inherent =
parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(
"Failed to create parachain inherent",
)
})?;
Ok(parachain_inherent)
}
},
async move {
let parachain_inherent = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(
"Failed to create parachain inherent",
)
})?;
Ok(parachain_inherent)
}
},
),
)
},
))
},
)
.await
@@ -879,7 +869,7 @@ impl<R> BuildOnAccess<R> {
match self {
Self::Uninitialized(f) => {
*self = Self::Initialized((f.take().unwrap())());
}
},
Self::Initialized(ref mut r) => return r,
}
}
@@ -954,13 +944,7 @@ where
async fn verify(
&mut self,
block_import: BlockImportParams<Block, ()>,
) -> Result<
(
BlockImportParams<Block, ()>,
Option<Vec<(CacheKeyId, Vec<u8>)>>,
),
String,
> {
) -> Result<(BlockImportParams<Block, ()>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> {
let block_id = BlockId::hash(*block_import.header.parent_hash());
if self
@@ -1036,9 +1020,8 @@ where
})) as Box<_>
};
let relay_chain_verifier = Box::new(RelayChainVerifier::new(client.clone(), |_, _| async {
Ok(())
})) as Box<_>;
let relay_chain_verifier =
Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>;
let verifier = Verifier {
client: client.clone(),
@@ -32,13 +32,13 @@ pub mod currency {
/// Fee-related.
pub mod fee {
use node_primitives::Balance;
pub use sp_runtime::Perbill;
use frame_support::weights::{
constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients,
WeightToFeePolynomial,
};
use node_primitives::Balance;
use smallvec::smallvec;
pub use sp_runtime::Perbill;
/// The block saturation level. Fees will be updates based on this value.
pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25);
@@ -99,10 +99,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
parameter_types! {
@@ -320,7 +317,16 @@ parameter_types! {
/// The type used to represent the kinds of proxying allowed.
#[derive(
Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
MaxEncodedLen,
scale_info::TypeInfo,
)]
pub enum ProxyType {
@@ -348,58 +354,56 @@ impl InstanceFilter<Call> for ProxyType {
fn filter(&self, c: &Call) -> bool {
match self {
ProxyType::Any => true,
ProxyType::NonTransfer => {
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. })
}
ProxyType::NonTransfer =>
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. }),
ProxyType::CancelProxy => matches!(
c,
Call::Proxy(pallet_proxy::Call::reject_announcement { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Proxy(pallet_proxy::Call::reject_announcement { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Assets => {
matches!(
c,
Call::Assets { .. }
| Call::Utility { .. }
| Call::Multisig { .. }
| Call::Uniques { .. }
Call::Assets { .. } |
Call::Utility { .. } | Call::Multisig { .. } |
Call::Uniques { .. }
)
}
},
ProxyType::AssetOwner => matches!(
c,
Call::Assets(pallet_assets::Call::create { .. })
| Call::Assets(pallet_assets::Call::destroy { .. })
| Call::Assets(pallet_assets::Call::transfer_ownership { .. })
| Call::Assets(pallet_assets::Call::set_team { .. })
| Call::Assets(pallet_assets::Call::set_metadata { .. })
| Call::Assets(pallet_assets::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::create { .. })
| Call::Uniques(pallet_uniques::Call::destroy { .. })
| Call::Uniques(pallet_uniques::Call::transfer_ownership { .. })
| Call::Uniques(pallet_uniques::Call::set_team { .. })
| Call::Uniques(pallet_uniques::Call::set_metadata { .. })
| Call::Uniques(pallet_uniques::Call::set_attribute { .. })
| Call::Uniques(pallet_uniques::Call::set_class_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_attribute { .. })
| Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::create { .. }) |
Call::Assets(pallet_assets::Call::destroy { .. }) |
Call::Assets(pallet_assets::Call::transfer_ownership { .. }) |
Call::Assets(pallet_assets::Call::set_team { .. }) |
Call::Assets(pallet_assets::Call::set_metadata { .. }) |
Call::Assets(pallet_assets::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::create { .. }) |
Call::Uniques(pallet_uniques::Call::destroy { .. }) |
Call::Uniques(pallet_uniques::Call::transfer_ownership { .. }) |
Call::Uniques(pallet_uniques::Call::set_team { .. }) |
Call::Uniques(pallet_uniques::Call::set_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::set_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::set_class_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::AssetManager => matches!(
c,
Call::Assets(pallet_assets::Call::mint { .. })
| Call::Assets(pallet_assets::Call::burn { .. })
| Call::Assets(pallet_assets::Call::freeze { .. })
| Call::Assets(pallet_assets::Call::thaw { .. })
| Call::Assets(pallet_assets::Call::freeze_asset { .. })
| Call::Assets(pallet_assets::Call::thaw_asset { .. })
| Call::Uniques(pallet_uniques::Call::mint { .. })
| Call::Uniques(pallet_uniques::Call::burn { .. })
| Call::Uniques(pallet_uniques::Call::freeze { .. })
| Call::Uniques(pallet_uniques::Call::thaw { .. })
| Call::Uniques(pallet_uniques::Call::freeze_class { .. })
| Call::Uniques(pallet_uniques::Call::thaw_class { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::mint { .. }) |
Call::Assets(pallet_assets::Call::burn { .. }) |
Call::Assets(pallet_assets::Call::freeze { .. }) |
Call::Assets(pallet_assets::Call::thaw { .. }) |
Call::Assets(pallet_assets::Call::freeze_asset { .. }) |
Call::Assets(pallet_assets::Call::thaw_asset { .. }) |
Call::Uniques(pallet_uniques::Call::mint { .. }) |
Call::Uniques(pallet_uniques::Call::burn { .. }) |
Call::Uniques(pallet_uniques::Call::freeze { .. }) |
Call::Uniques(pallet_uniques::Call::thaw { .. }) |
Call::Uniques(pallet_uniques::Call::freeze_class { .. }) |
Call::Uniques(pallet_uniques::Call::thaw_class { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Collator => matches!(
c,
@@ -1,7 +1,7 @@
pub mod pallet_assets;
pub mod pallet_balances;
pub mod pallet_multisig;
pub mod pallet_collator_selection;
pub mod pallet_multisig;
pub mod pallet_proxy;
pub mod pallet_session;
pub mod pallet_timestamp;
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_assets
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -38,7 +36,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(c: u32, s: u32, a: u32, ) -> Weight {
fn destroy(c: u32, s: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 37_000
.saturating_add((21_529_000 as Weight).saturating_mul(c as Weight))
@@ -109,7 +107,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_metadata(_n: u32, s: u32, ) -> Weight {
fn set_metadata(_n: u32, s: u32) -> Weight {
(50_315_000 as Weight)
// Standard Error: 0
.saturating_add((8_000 as Weight).saturating_mul(s as Weight))
@@ -121,7 +119,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn force_set_metadata(_n: u32, s: u32, ) -> Weight {
fn force_set_metadata(_n: u32, s: u32) -> Weight {
(25_933_000 as Weight)
// Standard Error: 0
.saturating_add((7_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_balances
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_collator_selection
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,28 +26,26 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_collator_selection.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> {
fn set_invulnerables(b: u32, ) -> Weight {
fn set_invulnerables(b: u32) -> Weight {
(18_481_000 as Weight)
// Standard Error: 0
.saturating_add((67_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_desired_candidates() -> Weight {
(16_376_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_376_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_candidacy_bond() -> Weight {
(17_031_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(17_031_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn register_as_candidate(c: u32, ) -> Weight {
fn register_as_candidate(c: u32) -> Weight {
(72_345_000 as Weight)
// Standard Error: 0
.saturating_add((197_000 as Weight).saturating_mul(c as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn leave_intent(c: u32, ) -> Weight {
fn leave_intent(c: u32) -> Weight {
(55_446_000 as Weight)
// Standard Error: 0
.saturating_add((153_000 as Weight).saturating_mul(c as Weight))
@@ -61,7 +57,7 @@ impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightIn
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
fn new_session(r: u32, c: u32, ) -> Weight {
fn new_session(r: u32, c: u32) -> Weight {
(0 as Weight)
// Standard Error: 1_004_000
.saturating_add((110_066_000 as Weight).saturating_mul(r as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_multisig
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,12 +26,12 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_multisig.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
fn as_multi_threshold_1(z: u32, ) -> Weight {
fn as_multi_threshold_1(z: u32) -> Weight {
(15_911_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
}
fn as_multi_create(s: u32, z: u32, ) -> Weight {
fn as_multi_create(s: u32, z: u32) -> Weight {
(55_326_000 as Weight)
// Standard Error: 0
.saturating_add((133_000 as Weight).saturating_mul(s as Weight))
@@ -42,7 +40,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_create_store(s: u32, z: u32, ) -> Weight {
fn as_multi_create_store(s: u32, z: u32) -> Weight {
(62_423_000 as Weight)
// Standard Error: 0
.saturating_add((133_000 as Weight).saturating_mul(s as Weight))
@@ -51,7 +49,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_approve(s: u32, z: u32, ) -> Weight {
fn as_multi_approve(s: u32, z: u32) -> Weight {
(32_430_000 as Weight)
// Standard Error: 0
.saturating_add((148_000 as Weight).saturating_mul(s as Weight))
@@ -60,7 +58,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_approve_store(s: u32, z: u32, ) -> Weight {
fn as_multi_approve_store(s: u32, z: u32) -> Weight {
(59_789_000 as Weight)
// Standard Error: 0
.saturating_add((165_000 as Weight).saturating_mul(s as Weight))
@@ -69,7 +67,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_complete(s: u32, z: u32, ) -> Weight {
fn as_multi_complete(s: u32, z: u32) -> Weight {
(80_926_000 as Weight)
// Standard Error: 0
.saturating_add((276_000 as Weight).saturating_mul(s as Weight))
@@ -78,28 +76,28 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn approve_as_multi_create(s: u32, ) -> Weight {
fn approve_as_multi_create(s: u32) -> Weight {
(54_860_000 as Weight)
// Standard Error: 0
.saturating_add((134_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_approve(s: u32, ) -> Weight {
fn approve_as_multi_approve(s: u32) -> Weight {
(31_924_000 as Weight)
// Standard Error: 0
.saturating_add((154_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_complete(s: u32, ) -> Weight {
fn approve_as_multi_complete(s: u32) -> Weight {
(154_001_000 as Weight)
// Standard Error: 0
.saturating_add((281_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn cancel_as_multi(s: u32, ) -> Weight {
fn cancel_as_multi(s: u32) -> Weight {
(103_770_000 as Weight)
// Standard Error: 0
.saturating_add((130_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_proxy
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,13 +26,13 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_proxy.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
fn proxy(p: u32, ) -> Weight {
fn proxy(p: u32) -> Weight {
(27_318_000 as Weight)
// Standard Error: 1_000
.saturating_add((208_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
}
fn proxy_announced(a: u32, p: u32, ) -> Weight {
fn proxy_announced(a: u32, p: u32) -> Weight {
(60_665_000 as Weight)
// Standard Error: 2_000
.saturating_add((677_000 as Weight).saturating_mul(a as Weight))
@@ -43,7 +41,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn remove_announcement(a: u32, p: u32, ) -> Weight {
fn remove_announcement(a: u32, p: u32) -> Weight {
(39_455_000 as Weight)
// Standard Error: 2_000
.saturating_add((687_000 as Weight).saturating_mul(a as Weight))
@@ -52,7 +50,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn reject_announcement(a: u32, p: u32, ) -> Weight {
fn reject_announcement(a: u32, p: u32) -> Weight {
(39_411_000 as Weight)
// Standard Error: 2_000
.saturating_add((686_000 as Weight).saturating_mul(a as Weight))
@@ -61,7 +59,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn announce(a: u32, p: u32, ) -> Weight {
fn announce(a: u32, p: u32) -> Weight {
(54_386_000 as Weight)
// Standard Error: 2_000
.saturating_add((677_000 as Weight).saturating_mul(a as Weight))
@@ -70,35 +68,35 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn add_proxy(p: u32, ) -> Weight {
fn add_proxy(p: u32) -> Weight {
(37_411_000 as Weight)
// Standard Error: 2_000
.saturating_add((298_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxy(p: u32, ) -> Weight {
fn remove_proxy(p: u32) -> Weight {
(36_658_000 as Weight)
// Standard Error: 2_000
.saturating_add((332_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxies(p: u32, ) -> Weight {
fn remove_proxies(p: u32) -> Weight {
(34_893_000 as Weight)
// Standard Error: 1_000
.saturating_add((209_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn anonymous(p: u32, ) -> Weight {
fn anonymous(p: u32) -> Weight {
(51_243_000 as Weight)
// Standard Error: 1_000
.saturating_add((44_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn kill_anonymous(p: u32, ) -> Weight {
fn kill_anonymous(p: u32) -> Weight {
(37_188_000 as Weight)
// Standard Error: 1_000
.saturating_add((208_000 as Weight).saturating_mul(p as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_session
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./polkadot-parachains/statemine-runtime/src/weights
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_timestamp
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -34,7 +34,6 @@
// --header=./file_header.txt
// --output=./polkadot-parachains/statemine-runtime/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -54,7 +53,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(n: u32, m: u32, a: u32, ) -> Weight {
fn destroy(n: u32, m: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 14_000
.saturating_add((16_814_000 as Weight).saturating_mul(n as Weight))
@@ -84,7 +83,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn redeposit(i: u32, ) -> Weight {
fn redeposit(i: u32) -> Weight {
(0 as Weight)
// Standard Error: 11_000
.saturating_add((26_921_000 as Weight).saturating_mul(i as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_utility
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemine/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,7 +26,7 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_utility.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn batch(c: u32, ) -> Weight {
fn batch(c: u32) -> Weight {
(16_177_000 as Weight)
// Standard Error: 0
.saturating_add((4_582_000 as Weight).saturating_mul(c as Weight))
@@ -36,7 +34,7 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn as_derivative() -> Weight {
(7_848_000 as Weight)
}
fn batch_all(c: u32, ) -> Weight {
fn batch_all(c: u32) -> Weight {
(17_745_000 as Weight)
// Standard Error: 0
.saturating_add((4_578_000 as Weight).saturating_mul(c as Weight))
@@ -21,7 +21,7 @@ pub mod currency {
pub const UNITS: Balance = 10_000_000_000;
pub const DOLLARS: Balance = UNITS;
pub const CENTS: Balance = UNITS / 100; // 100_000_000
pub const CENTS: Balance = UNITS / 100; // 100_000_000
pub const MILLICENTS: Balance = CENTS / 1_000; // 100_000
pub const fn deposit(items: u32, bytes: u32) -> Balance {
@@ -32,13 +32,13 @@ pub mod currency {
/// Fee-related.
pub mod fee {
use node_primitives::Balance;
pub use sp_runtime::Perbill;
use frame_support::weights::{
constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients,
WeightToFeePolynomial,
};
use node_primitives::Balance;
use smallvec::smallvec;
pub use sp_runtime::Perbill;
/// The block saturation level. Fees will be updates based on this value.
pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25);
@@ -99,10 +99,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
parameter_types! {
@@ -287,7 +284,16 @@ parameter_types! {
/// The type used to represent the kinds of proxying allowed.
#[derive(
Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
MaxEncodedLen,
scale_info::TypeInfo,
)]
pub enum ProxyType {
@@ -315,63 +321,63 @@ impl InstanceFilter<Call> for ProxyType {
fn filter(&self, c: &Call) -> bool {
match self {
ProxyType::Any => true,
ProxyType::NonTransfer => {
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. })
}
ProxyType::NonTransfer =>
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. }),
ProxyType::CancelProxy => matches!(
c,
Call::Proxy(pallet_proxy::Call::reject_announcement { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Proxy(pallet_proxy::Call::reject_announcement { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Assets => {
matches!(
c,
Call::Assets { .. }
| Call::Utility { .. }
| Call::Multisig { .. }
| Call::Uniques { .. }
Call::Assets { .. } |
Call::Utility { .. } | Call::Multisig { .. } |
Call::Uniques { .. }
)
}
},
ProxyType::AssetOwner => matches!(
c,
Call::Assets(pallet_assets::Call::create { .. })
| Call::Assets(pallet_assets::Call::destroy { .. })
| Call::Assets(pallet_assets::Call::transfer_ownership { .. })
| Call::Assets(pallet_assets::Call::set_team { .. })
| Call::Assets(pallet_assets::Call::set_metadata { .. })
| Call::Assets(pallet_assets::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::create { .. })
| Call::Uniques(pallet_uniques::Call::destroy { .. })
| Call::Uniques(pallet_uniques::Call::transfer_ownership { .. })
| Call::Uniques(pallet_uniques::Call::set_team { .. })
| Call::Uniques(pallet_uniques::Call::set_metadata { .. })
| Call::Uniques(pallet_uniques::Call::set_attribute { .. })
| Call::Uniques(pallet_uniques::Call::set_class_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_attribute { .. })
| Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::create { .. }) |
Call::Assets(pallet_assets::Call::destroy { .. }) |
Call::Assets(pallet_assets::Call::transfer_ownership { .. }) |
Call::Assets(pallet_assets::Call::set_team { .. }) |
Call::Assets(pallet_assets::Call::set_metadata { .. }) |
Call::Assets(pallet_assets::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::create { .. }) |
Call::Uniques(pallet_uniques::Call::destroy { .. }) |
Call::Uniques(pallet_uniques::Call::transfer_ownership { .. }) |
Call::Uniques(pallet_uniques::Call::set_team { .. }) |
Call::Uniques(pallet_uniques::Call::set_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::set_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::set_class_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::AssetManager => matches!(
c,
Call::Assets(pallet_assets::Call::mint { .. })
| Call::Assets(pallet_assets::Call::burn { .. })
| Call::Assets(pallet_assets::Call::freeze { .. })
| Call::Assets(pallet_assets::Call::thaw { .. })
| Call::Assets(pallet_assets::Call::freeze_asset { .. })
| Call::Assets(pallet_assets::Call::thaw_asset { .. })
| Call::Uniques(pallet_uniques::Call::mint { .. })
| Call::Uniques(pallet_uniques::Call::burn { .. })
| Call::Uniques(pallet_uniques::Call::freeze { .. })
| Call::Uniques(pallet_uniques::Call::thaw { .. })
| Call::Uniques(pallet_uniques::Call::freeze_class { .. })
| Call::Uniques(pallet_uniques::Call::thaw_class { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::mint { .. }) |
Call::Assets(pallet_assets::Call::burn { .. }) |
Call::Assets(pallet_assets::Call::freeze { .. }) |
Call::Assets(pallet_assets::Call::thaw { .. }) |
Call::Assets(pallet_assets::Call::freeze_asset { .. }) |
Call::Assets(pallet_assets::Call::thaw_asset { .. }) |
Call::Uniques(pallet_uniques::Call::mint { .. }) |
Call::Uniques(pallet_uniques::Call::burn { .. }) |
Call::Uniques(pallet_uniques::Call::freeze { .. }) |
Call::Uniques(pallet_uniques::Call::thaw { .. }) |
Call::Uniques(pallet_uniques::Call::freeze_class { .. }) |
Call::Uniques(pallet_uniques::Call::thaw_class { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Collator => matches!(
c,
Call::CollatorSelection { .. } | Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Collator =>
matches!(c, Call::CollatorSelection(..) | Call::Utility(..) | Call::Multisig(..)),
}
}
fn is_superset(&self, o: &Self) -> bool {
@@ -1,7 +1,7 @@
pub mod pallet_assets;
pub mod pallet_balances;
pub mod pallet_multisig;
pub mod pallet_collator_selection;
pub mod pallet_multisig;
pub mod pallet_proxy;
pub mod pallet_session;
pub mod pallet_timestamp;
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_assets
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -38,7 +36,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(c: u32, s: u32, a: u32, ) -> Weight {
fn destroy(c: u32, s: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 37_000
.saturating_add((21_822_000 as Weight).saturating_mul(c as Weight))
@@ -109,7 +107,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_metadata(_n: u32, s: u32, ) -> Weight {
fn set_metadata(_n: u32, s: u32) -> Weight {
(50_330_000 as Weight)
// Standard Error: 0
.saturating_add((9_000 as Weight).saturating_mul(s as Weight))
@@ -121,7 +119,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn force_set_metadata(_n: u32, s: u32, ) -> Weight {
fn force_set_metadata(_n: u32, s: u32) -> Weight {
(26_249_000 as Weight)
// Standard Error: 0
.saturating_add((6_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_balances
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_collator_selection
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,28 +26,26 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_collator_selection.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> {
fn set_invulnerables(b: u32, ) -> Weight {
fn set_invulnerables(b: u32) -> Weight {
(18_563_000 as Weight)
// Standard Error: 0
.saturating_add((68_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_desired_candidates() -> Weight {
(16_363_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_363_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_candidacy_bond() -> Weight {
(16_840_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_840_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn register_as_candidate(c: u32, ) -> Weight {
fn register_as_candidate(c: u32) -> Weight {
(71_196_000 as Weight)
// Standard Error: 0
.saturating_add((198_000 as Weight).saturating_mul(c as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn leave_intent(c: u32, ) -> Weight {
fn leave_intent(c: u32) -> Weight {
(55_336_000 as Weight)
// Standard Error: 0
.saturating_add((151_000 as Weight).saturating_mul(c as Weight))
@@ -61,7 +57,7 @@ impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightIn
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
fn new_session(r: u32, c: u32, ) -> Weight {
fn new_session(r: u32, c: u32) -> Weight {
(0 as Weight)
// Standard Error: 1_010_000
.saturating_add((109_961_000 as Weight).saturating_mul(r as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_multisig
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,12 +26,12 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_multisig.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
fn as_multi_threshold_1(z: u32, ) -> Weight {
fn as_multi_threshold_1(z: u32) -> Weight {
(14_936_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
}
fn as_multi_create(s: u32, z: u32, ) -> Weight {
fn as_multi_create(s: u32, z: u32) -> Weight {
(56_090_000 as Weight)
// Standard Error: 1_000
.saturating_add((63_000 as Weight).saturating_mul(s as Weight))
@@ -42,7 +40,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_create_store(s: u32, z: u32, ) -> Weight {
fn as_multi_create_store(s: u32, z: u32) -> Weight {
(62_519_000 as Weight)
// Standard Error: 1_000
.saturating_add((66_000 as Weight).saturating_mul(s as Weight))
@@ -51,7 +49,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_approve(s: u32, z: u32, ) -> Weight {
fn as_multi_approve(s: u32, z: u32) -> Weight {
(30_781_000 as Weight)
// Standard Error: 0
.saturating_add((111_000 as Weight).saturating_mul(s as Weight))
@@ -60,7 +58,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_approve_store(s: u32, z: u32, ) -> Weight {
fn as_multi_approve_store(s: u32, z: u32) -> Weight {
(60_393_000 as Weight)
// Standard Error: 0
.saturating_add((118_000 as Weight).saturating_mul(s as Weight))
@@ -69,7 +67,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_complete(s: u32, z: u32, ) -> Weight {
fn as_multi_complete(s: u32, z: u32) -> Weight {
(81_704_000 as Weight)
// Standard Error: 1_000
.saturating_add((248_000 as Weight).saturating_mul(s as Weight))
@@ -78,28 +76,28 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn approve_as_multi_create(s: u32, ) -> Weight {
fn approve_as_multi_create(s: u32) -> Weight {
(55_585_000 as Weight)
// Standard Error: 1_000
.saturating_add((115_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_approve(s: u32, ) -> Weight {
fn approve_as_multi_approve(s: u32) -> Weight {
(33_483_000 as Weight)
// Standard Error: 1_000
.saturating_add((82_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_complete(s: u32, ) -> Weight {
fn approve_as_multi_complete(s: u32) -> Weight {
(154_732_000 as Weight)
// Standard Error: 1_000
.saturating_add((253_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn cancel_as_multi(s: u32, ) -> Weight {
fn cancel_as_multi(s: u32) -> Weight {
(104_447_000 as Weight)
// Standard Error: 1_000
.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_proxy
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,13 +26,13 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_proxy.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
fn proxy(p: u32, ) -> Weight {
fn proxy(p: u32) -> Weight {
(27_585_000 as Weight)
// Standard Error: 1_000
.saturating_add((203_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
}
fn proxy_announced(a: u32, p: u32, ) -> Weight {
fn proxy_announced(a: u32, p: u32) -> Weight {
(61_093_000 as Weight)
// Standard Error: 2_000
.saturating_add((680_000 as Weight).saturating_mul(a as Weight))
@@ -43,7 +41,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn remove_announcement(a: u32, p: u32, ) -> Weight {
fn remove_announcement(a: u32, p: u32) -> Weight {
(39_494_000 as Weight)
// Standard Error: 2_000
.saturating_add((686_000 as Weight).saturating_mul(a as Weight))
@@ -52,7 +50,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn reject_announcement(a: u32, p: u32, ) -> Weight {
fn reject_announcement(a: u32, p: u32) -> Weight {
(39_817_000 as Weight)
// Standard Error: 2_000
.saturating_add((685_000 as Weight).saturating_mul(a as Weight))
@@ -61,7 +59,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn announce(a: u32, p: u32, ) -> Weight {
fn announce(a: u32, p: u32) -> Weight {
(54_835_000 as Weight)
// Standard Error: 2_000
.saturating_add((684_000 as Weight).saturating_mul(a as Weight))
@@ -70,35 +68,35 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn add_proxy(p: u32, ) -> Weight {
fn add_proxy(p: u32) -> Weight {
(37_625_000 as Weight)
// Standard Error: 2_000
.saturating_add((300_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxy(p: u32, ) -> Weight {
fn remove_proxy(p: u32) -> Weight {
(36_945_000 as Weight)
// Standard Error: 3_000
.saturating_add((325_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxies(p: u32, ) -> Weight {
fn remove_proxies(p: u32) -> Weight {
(35_128_000 as Weight)
// Standard Error: 1_000
.saturating_add((209_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn anonymous(p: u32, ) -> Weight {
fn anonymous(p: u32) -> Weight {
(51_624_000 as Weight)
// Standard Error: 1_000
.saturating_add((41_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn kill_anonymous(p: u32, ) -> Weight {
fn kill_anonymous(p: u32) -> Weight {
(37_469_000 as Weight)
// Standard Error: 1_000
.saturating_add((204_000 as Weight).saturating_mul(p as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_session
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./polkadot-parachains/statemint-runtime/src/weights
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_timestamp
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -34,7 +34,6 @@
// --header=./file_header.txt
// --output=./polkadot-parachains/statemine-runtime/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -54,7 +53,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(n: u32, m: u32, a: u32, ) -> Weight {
fn destroy(n: u32, m: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 14_000
.saturating_add((16_814_000 as Weight).saturating_mul(n as Weight))
@@ -84,7 +83,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn redeposit(i: u32, ) -> Weight {
fn redeposit(i: u32) -> Weight {
(0 as Weight)
// Standard Error: 11_000
.saturating_add((26_921_000 as Weight).saturating_mul(i as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_utility
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,7 +26,7 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_utility.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn batch(c: u32, ) -> Weight {
fn batch(c: u32) -> Weight {
(15_408_000 as Weight)
// Standard Error: 0
.saturating_add((4_571_000 as Weight).saturating_mul(c as Weight))
@@ -36,7 +34,7 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn as_derivative() -> Weight {
(7_817_000 as Weight)
}
fn batch_all(c: u32, ) -> Weight {
fn batch_all(c: u32) -> Weight {
(16_520_000 as Weight)
// Standard Error: 0
.saturating_add((4_571_000 as Weight).saturating_mul(c as Weight))
@@ -48,10 +48,7 @@ fn polkadot_argument_parsing() {
.unwrap();
thread::sleep(Duration::from_secs(20));
assert!(
cmd.try_wait().unwrap().is_none(),
"the process should still be running"
);
assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running");
kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap();
assert_eq!(
common::wait_for(&mut cmd, 30).map(|x| x.success()),
@@ -39,10 +39,7 @@ fn interrupt_polkadot_mdns_issue_test() {
.unwrap();
thread::sleep(Duration::from_secs(20));
assert!(
cmd.try_wait().unwrap().is_none(),
"the process should still be running"
);
assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running");
kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap();
assert_eq!(
common::wait_for(&mut cmd, 30).map(|x| x.success()),
@@ -40,16 +40,11 @@ fn purge_chain_works() {
// Let it produce some blocks.
thread::sleep(Duration::from_secs(30));
assert!(
cmd.try_wait().unwrap().is_none(),
"the process should still be running"
);
assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running");
// Stop the process
kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap();
assert!(common::wait_for(&mut cmd, 30)
.map(|x| x.success())
.unwrap_or_default());
assert!(common::wait_for(&mut cmd, 30).map(|x| x.success()).unwrap_or_default());
base_path
}
@@ -39,10 +39,7 @@ fn running_the_node_works_and_can_be_interrupted() {
.unwrap();
thread::sleep(Duration::from_secs(30));
assert!(
cmd.try_wait().unwrap().is_none(),
"the process should still be running"
);
assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running");
kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap();
assert_eq!(
common::wait_for(&mut cmd, 30).map(|x| x.success()),
@@ -32,13 +32,13 @@ pub mod currency {
/// Fee-related.
pub mod fee {
use node_primitives::Balance;
pub use sp_runtime::Perbill;
use frame_support::weights::{
constants::ExtrinsicBaseWeight, WeightToFeeCoefficient, WeightToFeeCoefficients,
WeightToFeePolynomial,
};
use node_primitives::Balance;
use smallvec::smallvec;
pub use sp_runtime::Perbill;
/// The block saturation level. Fees will be updates based on this value.
pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25);
+51 -45
View File
@@ -99,10 +99,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
parameter_types! {
@@ -286,7 +283,16 @@ parameter_types! {
/// The type used to represent the kinds of proxying allowed.
#[derive(
Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Encode,
Decode,
RuntimeDebug,
MaxEncodedLen,
scale_info::TypeInfo,
)]
pub enum ProxyType {
@@ -314,63 +320,63 @@ impl InstanceFilter<Call> for ProxyType {
fn filter(&self, c: &Call) -> bool {
match self {
ProxyType::Any => true,
ProxyType::NonTransfer => {
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. })
}
ProxyType::NonTransfer =>
!matches!(c, Call::Balances { .. } | Call::Assets { .. } | Call::Uniques { .. }),
ProxyType::CancelProxy => matches!(
c,
Call::Proxy(pallet_proxy::Call::reject_announcement { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Proxy(pallet_proxy::Call::reject_announcement { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Assets => {
matches!(
c,
Call::Assets { .. }
| Call::Utility { .. }
| Call::Multisig { .. }
| Call::Uniques { .. }
Call::Assets { .. } |
Call::Utility { .. } | Call::Multisig { .. } |
Call::Uniques { .. }
)
}
},
ProxyType::AssetOwner => matches!(
c,
Call::Assets(pallet_assets::Call::create { .. })
| Call::Assets(pallet_assets::Call::destroy { .. })
| Call::Assets(pallet_assets::Call::transfer_ownership { .. })
| Call::Assets(pallet_assets::Call::set_team { .. })
| Call::Assets(pallet_assets::Call::set_metadata { .. })
| Call::Assets(pallet_assets::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::create { .. })
| Call::Uniques(pallet_uniques::Call::destroy { .. })
| Call::Uniques(pallet_uniques::Call::transfer_ownership { .. })
| Call::Uniques(pallet_uniques::Call::set_team { .. })
| Call::Uniques(pallet_uniques::Call::set_metadata { .. })
| Call::Uniques(pallet_uniques::Call::set_attribute { .. })
| Call::Uniques(pallet_uniques::Call::set_class_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_metadata { .. })
| Call::Uniques(pallet_uniques::Call::clear_attribute { .. })
| Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::create { .. }) |
Call::Assets(pallet_assets::Call::destroy { .. }) |
Call::Assets(pallet_assets::Call::transfer_ownership { .. }) |
Call::Assets(pallet_assets::Call::set_team { .. }) |
Call::Assets(pallet_assets::Call::set_metadata { .. }) |
Call::Assets(pallet_assets::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::create { .. }) |
Call::Uniques(pallet_uniques::Call::destroy { .. }) |
Call::Uniques(pallet_uniques::Call::transfer_ownership { .. }) |
Call::Uniques(pallet_uniques::Call::set_team { .. }) |
Call::Uniques(pallet_uniques::Call::set_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::set_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::set_class_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_metadata { .. }) |
Call::Uniques(pallet_uniques::Call::clear_attribute { .. }) |
Call::Uniques(pallet_uniques::Call::clear_class_metadata { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::AssetManager => matches!(
c,
Call::Assets(pallet_assets::Call::mint { .. })
| Call::Assets(pallet_assets::Call::burn { .. })
| Call::Assets(pallet_assets::Call::freeze { .. })
| Call::Assets(pallet_assets::Call::thaw { .. })
| Call::Assets(pallet_assets::Call::freeze_asset { .. })
| Call::Assets(pallet_assets::Call::thaw_asset { .. })
| Call::Uniques(pallet_uniques::Call::mint { .. })
| Call::Uniques(pallet_uniques::Call::burn { .. })
| Call::Uniques(pallet_uniques::Call::freeze { .. })
| Call::Uniques(pallet_uniques::Call::thaw { .. })
| Call::Uniques(pallet_uniques::Call::freeze_class { .. })
| Call::Uniques(pallet_uniques::Call::thaw_class { .. })
| Call::Utility { .. } | Call::Multisig { .. }
Call::Assets(pallet_assets::Call::mint { .. }) |
Call::Assets(pallet_assets::Call::burn { .. }) |
Call::Assets(pallet_assets::Call::freeze { .. }) |
Call::Assets(pallet_assets::Call::thaw { .. }) |
Call::Assets(pallet_assets::Call::freeze_asset { .. }) |
Call::Assets(pallet_assets::Call::thaw_asset { .. }) |
Call::Uniques(pallet_uniques::Call::mint { .. }) |
Call::Uniques(pallet_uniques::Call::burn { .. }) |
Call::Uniques(pallet_uniques::Call::freeze { .. }) |
Call::Uniques(pallet_uniques::Call::thaw { .. }) |
Call::Uniques(pallet_uniques::Call::freeze_class { .. }) |
Call::Uniques(pallet_uniques::Call::thaw_class { .. }) |
Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Collator => matches!(
c,
Call::CollatorSelection { .. } | Call::Utility { .. } | Call::Multisig { .. }
),
ProxyType::Collator =>
matches!(c, Call::CollatorSelection(..) | Call::Utility(..) | Call::Multisig(..)),
}
}
fn is_superset(&self, o: &Self) -> bool {
@@ -1,7 +1,7 @@
pub mod pallet_assets;
pub mod pallet_balances;
pub mod pallet_multisig;
pub mod pallet_collator_selection;
pub mod pallet_multisig;
pub mod pallet_proxy;
pub mod pallet_session;
pub mod pallet_timestamp;
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_assets
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -38,7 +36,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(c: u32, s: u32, a: u32, ) -> Weight {
fn destroy(c: u32, s: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 37_000
.saturating_add((21_822_000 as Weight).saturating_mul(c as Weight))
@@ -109,7 +107,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_metadata(_n: u32, s: u32, ) -> Weight {
fn set_metadata(_n: u32, s: u32) -> Weight {
(50_330_000 as Weight)
// Standard Error: 0
.saturating_add((9_000 as Weight).saturating_mul(s as Weight))
@@ -121,7 +119,7 @@ impl<T: frame_system::Config> pallet_assets::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn force_set_metadata(_n: u32, s: u32, ) -> Weight {
fn force_set_metadata(_n: u32, s: u32) -> Weight {
(26_249_000 as Weight)
// Standard Error: 0
.saturating_add((6_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_balances
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_collator_selection
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,28 +26,26 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_collator_selection.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> {
fn set_invulnerables(b: u32, ) -> Weight {
fn set_invulnerables(b: u32) -> Weight {
(18_563_000 as Weight)
// Standard Error: 0
.saturating_add((68_000 as Weight).saturating_mul(b as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_desired_candidates() -> Weight {
(16_363_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_363_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn set_candidacy_bond() -> Weight {
(16_840_000 as Weight)
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(16_840_000 as Weight).saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn register_as_candidate(c: u32, ) -> Weight {
fn register_as_candidate(c: u32) -> Weight {
(71_196_000 as Weight)
// Standard Error: 0
.saturating_add((198_000 as Weight).saturating_mul(c as Weight))
.saturating_add(T::DbWeight::get().reads(4 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn leave_intent(c: u32, ) -> Weight {
fn leave_intent(c: u32) -> Weight {
(55_336_000 as Weight)
// Standard Error: 0
.saturating_add((151_000 as Weight).saturating_mul(c as Weight))
@@ -61,7 +57,7 @@ impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightIn
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
fn new_session(r: u32, c: u32, ) -> Weight {
fn new_session(r: u32, c: u32) -> Weight {
(0 as Weight)
// Standard Error: 1_010_000
.saturating_add((109_961_000 as Weight).saturating_mul(r as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_multisig
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,12 +26,12 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_multisig.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
fn as_multi_threshold_1(z: u32, ) -> Weight {
fn as_multi_threshold_1(z: u32) -> Weight {
(14_936_000 as Weight)
// Standard Error: 0
.saturating_add((1_000 as Weight).saturating_mul(z as Weight))
}
fn as_multi_create(s: u32, z: u32, ) -> Weight {
fn as_multi_create(s: u32, z: u32) -> Weight {
(56_090_000 as Weight)
// Standard Error: 1_000
.saturating_add((63_000 as Weight).saturating_mul(s as Weight))
@@ -42,7 +40,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_create_store(s: u32, z: u32, ) -> Weight {
fn as_multi_create_store(s: u32, z: u32) -> Weight {
(62_519_000 as Weight)
// Standard Error: 1_000
.saturating_add((66_000 as Weight).saturating_mul(s as Weight))
@@ -51,7 +49,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_approve(s: u32, z: u32, ) -> Weight {
fn as_multi_approve(s: u32, z: u32) -> Weight {
(30_781_000 as Weight)
// Standard Error: 0
.saturating_add((111_000 as Weight).saturating_mul(s as Weight))
@@ -60,7 +58,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn as_multi_approve_store(s: u32, z: u32, ) -> Weight {
fn as_multi_approve_store(s: u32, z: u32) -> Weight {
(60_393_000 as Weight)
// Standard Error: 0
.saturating_add((118_000 as Weight).saturating_mul(s as Weight))
@@ -69,7 +67,7 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn as_multi_complete(s: u32, z: u32, ) -> Weight {
fn as_multi_complete(s: u32, z: u32) -> Weight {
(81_704_000 as Weight)
// Standard Error: 1_000
.saturating_add((248_000 as Weight).saturating_mul(s as Weight))
@@ -78,28 +76,28 @@ impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn approve_as_multi_create(s: u32, ) -> Weight {
fn approve_as_multi_create(s: u32) -> Weight {
(55_585_000 as Weight)
// Standard Error: 1_000
.saturating_add((115_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_approve(s: u32, ) -> Weight {
fn approve_as_multi_approve(s: u32) -> Weight {
(33_483_000 as Weight)
// Standard Error: 1_000
.saturating_add((82_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn approve_as_multi_complete(s: u32, ) -> Weight {
fn approve_as_multi_complete(s: u32) -> Weight {
(154_732_000 as Weight)
// Standard Error: 1_000
.saturating_add((253_000 as Weight).saturating_mul(s as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn cancel_as_multi(s: u32, ) -> Weight {
fn cancel_as_multi(s: u32) -> Weight {
(104_447_000 as Weight)
// Standard Error: 1_000
.saturating_add((114_000 as Weight).saturating_mul(s as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_proxy
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,13 +26,13 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_proxy.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
fn proxy(p: u32, ) -> Weight {
fn proxy(p: u32) -> Weight {
(27_585_000 as Weight)
// Standard Error: 1_000
.saturating_add((203_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
}
fn proxy_announced(a: u32, p: u32, ) -> Weight {
fn proxy_announced(a: u32, p: u32) -> Weight {
(61_093_000 as Weight)
// Standard Error: 2_000
.saturating_add((680_000 as Weight).saturating_mul(a as Weight))
@@ -43,7 +41,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn remove_announcement(a: u32, p: u32, ) -> Weight {
fn remove_announcement(a: u32, p: u32) -> Weight {
(39_494_000 as Weight)
// Standard Error: 2_000
.saturating_add((686_000 as Weight).saturating_mul(a as Weight))
@@ -52,7 +50,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn reject_announcement(a: u32, p: u32, ) -> Weight {
fn reject_announcement(a: u32, p: u32) -> Weight {
(39_817_000 as Weight)
// Standard Error: 2_000
.saturating_add((685_000 as Weight).saturating_mul(a as Weight))
@@ -61,7 +59,7 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn announce(a: u32, p: u32, ) -> Weight {
fn announce(a: u32, p: u32) -> Weight {
(54_835_000 as Weight)
// Standard Error: 2_000
.saturating_add((684_000 as Weight).saturating_mul(a as Weight))
@@ -70,35 +68,35 @@ impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(2 as Weight))
}
fn add_proxy(p: u32, ) -> Weight {
fn add_proxy(p: u32) -> Weight {
(37_625_000 as Weight)
// Standard Error: 2_000
.saturating_add((300_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxy(p: u32, ) -> Weight {
fn remove_proxy(p: u32) -> Weight {
(36_945_000 as Weight)
// Standard Error: 3_000
.saturating_add((325_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn remove_proxies(p: u32, ) -> Weight {
fn remove_proxies(p: u32) -> Weight {
(35_128_000 as Weight)
// Standard Error: 1_000
.saturating_add((209_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn anonymous(p: u32, ) -> Weight {
fn anonymous(p: u32) -> Weight {
(51_624_000 as Weight)
// Standard Error: 1_000
.saturating_add((41_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn kill_anonymous(p: u32, ) -> Weight {
fn kill_anonymous(p: u32) -> Weight {
(37_469_000 as Weight)
// Standard Error: 1_000
.saturating_add((204_000 as Weight).saturating_mul(p as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_session
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./polkadot-parachains/statemint-runtime/src/weights
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_timestamp
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -34,7 +34,6 @@
// --header=./file_header.txt
// --output=./polkadot-parachains/statemine-runtime/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -54,7 +53,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn destroy(n: u32, m: u32, a: u32, ) -> Weight {
fn destroy(n: u32, m: u32, a: u32) -> Weight {
(0 as Weight)
// Standard Error: 14_000
.saturating_add((16_814_000 as Weight).saturating_mul(n as Weight))
@@ -84,7 +83,7 @@ impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
.saturating_add(T::DbWeight::get().reads(2 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn redeposit(i: u32, ) -> Weight {
fn redeposit(i: u32) -> Weight {
(0 as Weight)
// Standard Error: 11_000
.saturating_add((26_921_000 as Weight).saturating_mul(i as Weight))
@@ -1,4 +1,3 @@
//! Autogenerated weights for pallet_utility
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
@@ -18,7 +17,6 @@
// --raw
// --output=./runtime/statemint/src/weights/
#![allow(unused_parens)]
#![allow(unused_imports)]
@@ -28,7 +26,7 @@ use sp_std::marker::PhantomData;
/// Weight functions for pallet_utility.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn batch(c: u32, ) -> Weight {
fn batch(c: u32) -> Weight {
(15_408_000 as Weight)
// Standard Error: 0
.saturating_add((4_571_000 as Weight).saturating_mul(c as Weight))
@@ -36,7 +34,7 @@ impl<T: frame_system::Config> pallet_utility::WeightInfo for WeightInfo<T> {
fn as_derivative() -> Weight {
(7_817_000 as Weight)
}
fn batch_all(c: u32, ) -> Weight {
fn batch_all(c: u32) -> Weight {
(16_520_000 as Weight)
// Standard Error: 0
.saturating_add((4_571_000 as Weight).saturating_mul(c as Weight))
+2 -12
View File
@@ -165,11 +165,7 @@ impl<B: BlockT> ParachainBlockData<B> {
extrinsics: sp_std::vec::Vec<<B as BlockT>::Extrinsic>,
storage_proof: sp_trie::CompactProof,
) -> Self {
Self {
header,
extrinsics,
storage_proof,
}
Self { header, extrinsics, storage_proof }
}
/// Convert `self` into the stored block.
@@ -198,13 +194,7 @@ impl<B: BlockT> ParachainBlockData<B> {
}
/// Deconstruct into the inner parts.
pub fn deconstruct(
self,
) -> (
B::Header,
sp_std::vec::Vec<B::Extrinsic>,
sp_trie::CompactProof,
) {
pub fn deconstruct(self) -> (B::Header, sp_std::vec::Vec<B::Extrinsic>, sp_trie::CompactProof) {
(self.header, self.extrinsics, self.storage_proof)
}
}
@@ -173,16 +173,10 @@ fn collect_relay_storage_proof(
relevant_keys.push(relay_well_known_keys::hrmp_ingress_channel_index(para_id));
relevant_keys.push(relay_well_known_keys::hrmp_egress_channel_index(para_id));
relevant_keys.extend(ingress_channels.into_iter().map(|sender| {
relay_well_known_keys::hrmp_channels(HrmpChannelId {
sender,
recipient: para_id,
})
relay_well_known_keys::hrmp_channels(HrmpChannelId { sender, recipient: para_id })
}));
relevant_keys.extend(egress_channels.into_iter().map(|recipient| {
relay_well_known_keys::hrmp_channels(HrmpChannelId {
sender: para_id,
recipient,
})
relay_well_known_keys::hrmp_channels(HrmpChannelId { sender: para_id, recipient })
}));
sp_state_machine::prove_read(relay_parent_state_backend, relevant_keys)
@@ -14,8 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
use cumulus_primitives_core::PersistedValidationData;
use crate::{ParachainInherentData, INHERENT_IDENTIFIER};
use cumulus_primitives_core::PersistedValidationData;
use sp_inherents::{InherentData, InherentDataProvider};
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
+11 -26
View File
@@ -48,25 +48,21 @@ impl InherentDataProvider {
relay_chain_slot: Slot,
relay_chain_slot_duration: Duration,
) -> Self {
Self {
relay_chain_slot,
relay_chain_slot_duration,
}
Self { relay_chain_slot, relay_chain_slot_duration }
}
/// Create the inherent data.
pub fn create_inherent_data(&self) -> Result<InherentData, Error> {
let mut inherent_data = InherentData::new();
self.provide_inherent_data(&mut inherent_data)
.map(|_| inherent_data)
self.provide_inherent_data(&mut inherent_data).map(|_| inherent_data)
}
/// Provide the inherent data into the given `inherent_data`.
pub fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> {
// As the parachain starts building at around `relay_chain_slot + 1` we use that slot to
// calculate the timestamp.
let data: InherentType = ((*self.relay_chain_slot + 1)
* self.relay_chain_slot_duration.as_millis() as u64)
let data: InherentType = ((*self.relay_chain_slot + 1) *
self.relay_chain_slot_duration.as_millis() as u64)
.into();
inherent_data.put_data(INHERENT_IDENTIFIER, &data)
@@ -114,16 +110,10 @@ mod tests {
timestamp: u64,
relay_chain_slot: Slot,
) -> (ParachainBlockData, PHash) {
let sproof_builder = RelayStateSproofBuilder {
current_slot: relay_chain_slot,
..Default::default()
};
let sproof_builder =
RelayStateSproofBuilder { current_slot: relay_chain_slot, ..Default::default() };
let parent_header = client
.header(&at)
.ok()
.flatten()
.expect("Genesis header exists");
let parent_header = client.header(&at).ok().flatten().expect("Genesis header exists");
let relay_parent_storage_root = sproof_builder.clone().into_state_root_and_proof().0;
@@ -155,18 +145,13 @@ mod tests {
let timestamp = u64::from_str(&env::var("TIMESTAMP").expect("TIMESTAMP is set"))
.expect("TIMESTAMP is a valid `u64`");
let block = build_block(&client, BlockId::number(0), SLOT_DURATION, 1.into())
.0
.into_block();
let block =
build_block(&client, BlockId::number(0), SLOT_DURATION, 1.into()).0.into_block();
futures::executor::block_on(client.import(sp_consensus::BlockOrigin::Own, block))
.unwrap();
let (block, relay_chain_root) = build_block(
&client,
BlockId::number(1),
timestamp,
relay_chain_slot.into(),
);
let (block, relay_chain_root) =
build_block(&client, BlockId::number(1), timestamp, relay_chain_slot.into());
let header = call_validate_block(
client
+5 -7
View File
@@ -19,10 +19,10 @@
#![cfg_attr(not(feature = "std"), no_std)]
use sp_std::marker::PhantomData;
use codec::Encode;
use cumulus_primitives_core::UpwardMessageSender;
use xcm::{WrapVersion, latest::prelude::*};
use sp_std::marker::PhantomData;
use xcm::{latest::prelude::*, WrapVersion};
/// Xcm router which recognises the `Parent` destination and handles it by sending the message into
/// the given UMP `UpwardMessageSender` implementation. Thus this essentially adapts an
@@ -36,12 +36,11 @@ impl<T: UpwardMessageSender, W: WrapVersion> SendXcm for ParentAsUmp<T, W> {
fn send_xcm(dest: MultiLocation, msg: Xcm<()>) -> Result<(), SendError> {
if dest.contains_parents_only(1) {
// An upward message for the relay chain.
let versioned_xcm = W::wrap_version(&dest, msg)
.map_err(|()| SendError::DestinationUnsupported)?;
let versioned_xcm =
W::wrap_version(&dest, msg).map_err(|()| SendError::DestinationUnsupported)?;
let data = versioned_xcm.encode();
T::send_upward_message(data)
.map_err(|e| SendError::Transport(e.into()))?;
T::send_upward_message(data).map_err(|e| SendError::Transport(e.into()))?;
Ok(())
} else {
@@ -50,4 +49,3 @@ impl<T: UpwardMessageSender, W: WrapVersion> SendXcm for ParentAsUmp<T, W> {
}
}
}
+3 -7
View File
@@ -107,9 +107,7 @@ fn init_block_builder<'a>(
)
.expect("Put validation function params failed");
let inherents = block_builder
.create_inherents(inherent_data)
.expect("Creates inherents");
let inherents = block_builder.create_inherents(inherent_data).expect("Creates inherents");
inherents
.into_iter()
@@ -138,10 +136,8 @@ impl InitBlockBuilder for Client {
validation_data: Option<PersistedValidationData<PHash, PBlockNumber>>,
relay_sproof_builder: RelayStateSproofBuilder,
) -> BlockBuilder<Block, Client, Backend> {
let last_timestamp = self
.runtime_api()
.get_last_timestamp(&at)
.expect("Get last timestamp");
let last_timestamp =
self.runtime_api().get_last_timestamp(&at).expect("Get last timestamp");
let timestamp = last_timestamp + cumulus_test_runtime::MinimumPeriod::get();
+5 -16
View File
@@ -128,10 +128,8 @@ pub fn generate_extrinsic(
let current_block = client.info().best_number.saturated_into();
let genesis_block = client.hash(0).unwrap().unwrap();
let nonce = 0;
let period = BlockHashCount::get()
.checked_next_power_of_two()
.map(|c| c / 2)
.unwrap_or(2) as u64;
let period =
BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64;
let tip = 0;
let extra: SignedExtra = (
frame_system::CheckSpecVersion::<Runtime>::new(),
@@ -144,14 +142,7 @@ pub fn generate_extrinsic(
let raw_payload = SignedPayload::from_raw(
function.clone(),
extra.clone(),
(
VERSION.spec_version,
genesis_block,
current_block_hash,
(),
(),
(),
),
(VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
);
let signature = raw_payload.using_encoded(|e| origin.sign(e));
@@ -170,10 +161,8 @@ pub fn transfer(
dest: sp_keyring::AccountKeyring,
value: Balance,
) -> UncheckedExtrinsic {
let function = Call::Balances(pallet_balances::Call::transfer {
dest: dest.public().into(),
value,
});
let function =
Call::Balances(pallet_balances::Call::transfer { dest: dest.public().into(), value });
generate_extrinsic(client, origin, function)
}
+5 -20
View File
@@ -81,10 +81,7 @@ impl RelayStateSproofBuilder {
}
self.hrmp_channels
.entry(relay_chain::v1::HrmpChannelId {
sender,
recipient: self.para_id,
})
.entry(relay_chain::v1::HrmpChannelId { sender, recipient: self.para_id })
.or_insert_with(|| AbridgedHrmpChannel {
max_capacity: 0,
max_total_size: 0,
@@ -97,10 +94,7 @@ impl RelayStateSproofBuilder {
pub fn into_state_root_and_proof(
self,
) -> (
polkadot_primitives::v1::Hash,
sp_state_machine::StorageProof,
) {
) -> (polkadot_primitives::v1::Hash, sp_state_machine::StorageProof) {
let (db, root) = MemoryDB::<HashFor<polkadot_primitives::v1::Block>>::default_with_root();
let mut backend = sp_state_machine::TrieBackend::new(db, root);
@@ -113,10 +107,7 @@ impl RelayStateSproofBuilder {
backend.insert(vec![(None, vec![(key, Some(value))])]);
};
insert(
relay_chain::well_known_keys::ACTIVE_CONFIG.to_vec(),
self.host_config.encode(),
);
insert(relay_chain::well_known_keys::ACTIVE_CONFIG.to_vec(), self.host_config.encode());
if let Some(dmq_mqc_head) = self.dmq_mqc_head {
insert(
relay_chain::well_known_keys::dmq_mqc_head(self.para_id),
@@ -150,16 +141,10 @@ impl RelayStateSproofBuilder {
);
}
for (channel, metadata) in self.hrmp_channels {
insert(
relay_chain::well_known_keys::hrmp_channels(channel),
metadata.encode(),
);
insert(relay_chain::well_known_keys::hrmp_channels(channel), metadata.encode());
}
insert(
relay_chain::well_known_keys::CURRENT_SLOT.to_vec(),
self.current_slot.encode(),
);
insert(relay_chain::well_known_keys::CURRENT_SLOT.to_vec(), self.current_slot.encode());
}
let root = backend.root().clone();
@@ -27,7 +27,7 @@ const SKIP_ENV: &str = "SKIP_BUILD";
fn main() {
if env::var(SKIP_ENV).is_ok() {
return;
return
}
let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo"));
@@ -35,11 +35,8 @@ fn main() {
let project = create_project(&out_dir);
build_project(&project.join("Cargo.toml"));
fs::copy(
project.join("target/release").join(PROJECT_NAME),
out_dir.join(PROJECT_NAME),
)
.expect("Copies validation worker");
fs::copy(project.join("target/release").join(PROJECT_NAME), out_dir.join(PROJECT_NAME))
.expect("Copies validation worker");
}
fn find_cargo_lock() -> PathBuf {
@@ -49,7 +46,7 @@ fn find_cargo_lock() -> PathBuf {
loop {
if path.join("Cargo.lock").exists() {
return path.join("Cargo.lock");
return path.join("Cargo.lock")
}
if !path.pop() {
@@ -109,7 +106,7 @@ fn build_project(cargo_toml: &Path) {
.status();
match status.map(|s| s.success()) {
Ok(true) => {}
Ok(true) => {},
// Use `process.exit(1)` to have a clean error output.
_ => process::exit(1),
}
+3 -9
View File
@@ -113,10 +113,7 @@ pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
NativeVersion {
runtime_version: VERSION,
can_author_with: Default::default(),
}
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers.
@@ -421,11 +418,8 @@ impl cumulus_pallet_parachain_system::CheckInherents<Block> for CheckInherents {
) -> sp_inherents::CheckInherentsResult {
if relay_state_proof.read_slot().expect("Reads slot") == 1337u64 {
let mut res = sp_inherents::CheckInherentsResult::new();
res.put_error(
[1u8; 8],
&sp_inherents::MakeFatalError::from("You are wrong"),
)
.expect("Puts error");
res.put_error([1u8; 8], &sp_inherents::MakeFatalError::from("You are wrong"))
.expect("Puts error");
res
} else {
let relay_chain_slot = relay_state_proof
+2 -8
View File
@@ -70,9 +70,7 @@ pub fn get_chain_spec(id: ParaId) -> ChainSpec {
None,
None,
None,
Extensions {
para_id: id.into(),
},
Extensions { para_id: id.into() },
)
}
@@ -110,11 +108,7 @@ fn testnet_genesis(
},
parachain_system: Default::default(),
balances: cumulus_test_runtime::BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, 1 << 60))
.collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
},
sudo: cumulus_test_runtime::SudoConfig { key: root_key },
}
+1 -1
View File
@@ -15,8 +15,8 @@
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
use codec::Encode;
use cumulus_primitives_core::ParaId;
use cumulus_client_service::genesis::generate_genesis_block;
use cumulus_primitives_core::ParaId;
use cumulus_test_runtime::Block;
use polkadot_primitives::v0::HeadData;
use sp_runtime::traits::Block as BlockT;
+38 -72
View File
@@ -181,7 +181,7 @@ where
+ 'static,
{
if matches!(parachain_config.role, Role::Light) {
return Err("Light client not supported!".into());
return Err("Light client not supported!".into())
}
let mut parachain_config = prepare_node_config(parachain_config);
@@ -274,37 +274,35 @@ where
let relay_chain_client = relay_chain_full_node.client.clone();
let relay_chain_backend = relay_chain_full_node.backend.clone();
Box::new(
cumulus_client_consensus_relay_chain::RelayChainConsensus::new(
para_id,
proposer_factory,
move |_, (relay_parent, validation_data)| {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
relay_parent,
&*relay_chain_client,
&*relay_chain_backend,
&validation_data,
para_id,
);
Box::new(cumulus_client_consensus_relay_chain::RelayChainConsensus::new(
para_id,
proposer_factory,
move |_, (relay_parent, validation_data)| {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
relay_parent,
&*relay_chain_client,
&*relay_chain_backend,
&validation_data,
para_id,
);
async move {
let time = sp_timestamp::InherentDataProvider::from_system_time();
async move {
let time = sp_timestamp::InherentDataProvider::from_system_time();
let parachain_inherent = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(String::from(
"error",
))
})?;
Ok((time, parachain_inherent))
}
},
client.clone(),
relay_chain_full_node.client.clone(),
relay_chain_full_node.backend.clone(),
),
)
}
let parachain_inherent = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(String::from(
"error",
))
})?;
Ok((time, parachain_inherent))
}
},
client.clone(),
relay_chain_full_node.client.clone(),
relay_chain_full_node.backend.clone(),
))
},
Consensus::Null => Box::new(NullConsensus),
};
@@ -466,8 +464,7 @@ impl TestNodeBuilder {
mut self,
nodes: impl IntoIterator<Item = &'a polkadot_test_service::PolkadotTestNode>,
) -> Self {
self.relay_chain_nodes
.extend(nodes.into_iter().map(|n| n.addr.clone()));
self.relay_chain_nodes.extend(nodes.into_iter().map(|n| n.addr.clone()));
self
}
@@ -501,8 +498,7 @@ impl TestNodeBuilder {
/// Build the [`TestNode`].
pub async fn build(self) -> TestNode {
let parachain_config = node_config(
self.storage_update_func_parachain
.unwrap_or_else(|| Box::new(|| ())),
self.storage_update_func_parachain.unwrap_or_else(|| Box::new(|| ())),
self.tokio_handle.clone(),
self.key.clone(),
self.parachain_nodes,
@@ -512,8 +508,7 @@ impl TestNodeBuilder {
)
.expect("could not generate Configuration");
let mut relay_chain_config = polkadot_test_service::node_config(
self.storage_update_func_relay_chain
.unwrap_or_else(|| Box::new(|| ())),
self.storage_update_func_relay_chain.unwrap_or_else(|| Box::new(|| ())),
self.tokio_handle,
self.key,
self.relay_chain_nodes,
@@ -539,13 +534,7 @@ impl TestNodeBuilder {
let peer_id = network.local_peer_id().clone();
let addr = MultiaddrWithPeerId { multiaddr, peer_id };
TestNode {
task_manager,
client,
network,
addr,
rpc_handlers,
}
TestNode { task_manager, client, network, addr, rpc_handlers }
}
}
@@ -566,18 +555,11 @@ pub fn node_config(
) -> Result<Configuration, ServiceError> {
let base_path = BasePath::new_temp_dir()?;
let root = base_path.path().to_path_buf();
let role = if is_collator {
Role::Authority
} else {
Role::Full
};
let role = if is_collator { Role::Authority } else { Role::Full };
let key_seed = key.to_seed();
let mut spec = Box::new(chain_spec::get_chain_spec(para_id));
let mut storage = spec
.as_storage_builder()
.build_storage()
.expect("could not build storage");
let mut storage = spec.as_storage_builder().build_storage().expect("could not build storage");
BasicExternalities::execute_with_storage(&mut storage, storage_update_func);
spec.set_storage(storage);
@@ -614,10 +596,7 @@ pub fn node_config(
network: network_config,
keystore: KeystoreConfig::InMemory,
keystore_remote: Default::default(),
database: DatabaseSource::RocksDb {
path: root.join("db"),
cache_size: 128,
},
database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
state_cache_size: 67108864,
state_cache_child_ratio: None,
state_pruning: PruningMode::ArchiveAll,
@@ -643,10 +622,7 @@ pub fn node_config(
prometheus_config: None,
telemetry_endpoints: None,
default_heap_pages: None,
offchain_worker: OffchainWorkerConfig {
enabled: true,
indexing_enabled: false,
},
offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false },
force_authoring: false,
disable_grandpa: false,
dev_key_seed: Some(key_seed),
@@ -685,10 +661,7 @@ impl TestNode {
let call = frame_system::Call::set_code { code: validation };
self.send_extrinsic(
runtime::SudoCall::sudo_unchecked_weight {
call: Box::new(call.into()),
weight: 1_000,
},
runtime::SudoCall::sudo_unchecked_weight { call: Box::new(call.into()), weight: 1_000 },
Sr25519Keyring::Alice,
)
.await
@@ -726,14 +699,7 @@ pub fn construct_extrinsic(
let raw_payload = runtime::SignedPayload::from_raw(
function.clone(),
extra.clone(),
(
runtime::VERSION.spec_version,
genesis_block,
current_block_hash,
(),
(),
(),
),
(runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()),
);
let signature = raw_payload.using_encoded(|e| caller.sign(e));
runtime::UncheckedExtrinsic::new_signed(
@@ -17,8 +17,8 @@
use cumulus_primitives_core::ParaId;
use cumulus_test_service::{initial_head_data, run_relay_chain_validator_node, Keyring::*};
use futures::{join, StreamExt};
use sp_runtime::generic::BlockId;
use sc_client_api::BlockchainEvents;
use sp_runtime::generic::BlockId;
#[substrate_test_utils::test]
#[ignore]
@@ -95,7 +95,7 @@ async fn test_runtime_upgrade() {
.expect("Runtime version exists");
if expected_runtime_version == runtime_version {
break;
break
}
}
}