[runtime] follow up relay chain cleanups (#4657)

* fix miscalculation of remaining weight

* rename a var

* move out enforcing filtering by dropping inherents

* prepare for dispute statement validity check being split off

* refactor

* refactor, only check disputes we actually want to include

* more refactor and documentation

* refactor and minimize inherent checks

* chore: warnings

* fix a few tests

* fix dedup regression

* fix

* more asserts in tests

* remove some asserts

* chore: fmt

* skip signatures checks, some more

* undo unwanted changes

* Update runtime/parachains/src/paras_inherent/mod.rs

Co-authored-by: sandreim <54316454+sandreim@users.noreply.github.com>

* cleanups, checking CheckedDisputeStatements makes no sense

* integrity: if `create_inherent_inner` is called, it shall do the checks itself, and not rely on `enter_inner`

* review comments

* use from impl rather than into

* remove outdated comment

* adjust tests accordingly

* assure no weight is lost

* address review comments

* remove unused import

* split error into two and document

* use assurance, O(n)

* Revert "adjust tests accordingly"

This reverts commit 3cc9a3c449f82db38cea22c48f4a21876603374b.

* fix comment

* fix sorting

* comment

Co-authored-by: sandreim <54316454+sandreim@users.noreply.github.com>
This commit is contained in:
Bernhard Schuster
2022-01-20 12:00:29 +01:00
committed by GitHub
parent 883b490cae
commit b7a05fd40b
14 changed files with 1215 additions and 544 deletions
+8
View File
@@ -6991,6 +6991,7 @@ dependencies = [
name = "polkadot-runtime-parachains"
version = "0.9.13"
dependencies = [
"assert_matches",
"bitflags",
"bitvec",
"derive_more",
@@ -7030,6 +7031,7 @@ dependencies = [
"sp-staking",
"sp-std",
"sp-tracing",
"thousands",
"xcm",
"xcm-executor",
]
@@ -10693,6 +10695,12 @@ dependencies = [
"syn",
]
[[package]]
name = "thousands"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820"
[[package]]
name = "thread_local"
version = "1.1.3"
+1 -1
View File
@@ -623,7 +623,7 @@ async fn request_votes(
}
}
/// Extend `acc` by `n` random, picks of not-yet-present in `acc` items of `recent` without repetition and additions of recent.
/// Extend `acc` by `n` random, picks of not-yet-present in `acc` items of `recent` without repetition and additions of recent.
fn extend_by_random_subset_without_repetition(
acc: &mut Vec<(SessionIndex, CandidateHash)>,
extension: Vec<(SessionIndex, CandidateHash)>,
+39
View File
@@ -1285,9 +1285,48 @@ pub struct DisputeStatementSet {
pub statements: Vec<(DisputeStatement, ValidatorIndex, ValidatorSignature)>,
}
impl From<CheckedDisputeStatementSet> for DisputeStatementSet {
fn from(other: CheckedDisputeStatementSet) -> Self {
other.0
}
}
impl AsRef<DisputeStatementSet> for DisputeStatementSet {
	/// Identity `as_ref`, so checked and unchecked statement sets can be
	/// handled uniformly through `AsRef<DisputeStatementSet>` bounds.
	fn as_ref(&self) -> &DisputeStatementSet {
		// `self` already has type `&DisputeStatementSet`; returning `&self`
		// created a needless `&&Self` borrow that only compiled via deref
		// coercion (clippy: `needless_borrow`).
		self
	}
}
/// A set of dispute statements.
pub type MultiDisputeStatementSet = Vec<DisputeStatementSet>;
/// A _checked_ set of dispute statements.
#[derive(Clone, PartialEq, RuntimeDebug)]
pub struct CheckedDisputeStatementSet(DisputeStatementSet);
impl AsRef<DisputeStatementSet> for CheckedDisputeStatementSet {
fn as_ref(&self) -> &DisputeStatementSet {
&self.0
}
}
impl core::cmp::PartialEq<DisputeStatementSet> for CheckedDisputeStatementSet {
	/// A checked set equals an unchecked one iff the wrapped data is equal.
	fn eq(&self, other: &DisputeStatementSet) -> bool {
		self.0 == *other
	}
}
impl CheckedDisputeStatementSet {
/// Convert from an unchecked, the verification of correctness of the `unchecked` statement set
/// _must_ be done before calling this function!
pub fn unchecked_from_unchecked(unchecked: DisputeStatementSet) -> Self {
Self(unchecked)
}
}
/// A set of _checked_ dispute statements.
pub type CheckedMultiDisputeStatementSet = Vec<CheckedDisputeStatementSet>;
/// The entire state of a dispute.
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, TypeInfo)]
pub struct DisputeState<N = BlockNumber> {
+3 -2
View File
@@ -247,8 +247,9 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> UncheckedSigned<Payloa
}))
}
/// Validate the payload given the context and public key.
fn check_signature<H: Encode>(
/// Validate the payload given the context and public key
/// without creating a `Signed` type.
pub fn check_signature<H: Encode>(
&self,
context: &SigningContext<H>,
key: &ValidatorId,
+2
View File
@@ -52,6 +52,8 @@ keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substra
frame-support-test = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers"}
thousands = "0.2.0"
assert_matches = "1"
[features]
default = ["std"]
+1 -1
View File
@@ -606,7 +606,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
self.dispute_statements.get(&seed).cloned().unwrap_or(validators.len() as u32);
let statements = (0..statements_len)
.map(|validator_index| {
let validator_public = &validators.get(validator_index as usize).unwrap();
let validator_public = &validators.get(validator_index as usize).expect("Test case is not borked. `ValidatorIndex` out of bounds of `ValidatorId`s.");
// We need dispute statements on each side. And we don't want a revert log
// so we make sure that we have a super majority with valid statements.
File diff suppressed because it is too large Load Diff
@@ -21,10 +21,8 @@
//! to included.
use crate::{
configuration, disputes, dmp, hrmp, paras,
paras_inherent::{sanitize_bitfields, DisputedBitfield},
scheduler::CoreAssignment,
shared, ump,
configuration, disputes, dmp, hrmp, paras, paras_inherent::DisputedBitfield,
scheduler::CoreAssignment, shared, ump,
};
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use frame_support::pallet_prelude::*;
@@ -58,7 +56,7 @@ pub struct AvailabilityBitfieldRecord<N> {
/// Determines if all checks should be applied or if a subset was already completed
/// in a code path that will be executed afterwards or was already executed before.
#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
#[derive(Clone, Copy, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
pub(crate) enum FullCheck {
/// Yes, do a full check, skip nothing.
Yes,
@@ -214,8 +212,18 @@ pub mod pallet {
#[pallet::error]
pub enum Error<T> {
/// Validator indices are out of order or contains duplicates.
UnsortedOrDuplicateValidatorIndices,
/// Dispute statement sets are out of order or contain duplicates.
UnsortedOrDuplicateDisputeStatementSet,
/// Backed candidates are out of order (core index) or contain duplicates.
UnsortedOrDuplicateBackedCandidates,
/// A different relay parent was provided compared to the on-chain stored one.
UnexpectedRelayParent,
/// Availability bitfield has unexpected size.
WrongBitfieldSize,
/// Bitfield consists of zeros only.
BitfieldAllZeros,
/// Multiple bitfields submitted by same validator or validators out of order by index.
BitfieldDuplicateOrUnordered,
/// Validator index out of bounds.
@@ -311,11 +319,12 @@ impl<T: Config> Pallet<T> {
/// Extract the freed cores based on cores that became available.
///
/// Updates storage items `PendingAvailability` and `AvailabilityBitfields`.
pub(crate) fn update_pending_availability_and_get_freed_cores<F, const ON_CHAIN_USE: bool>(
pub(crate) fn update_pending_availability_and_get_freed_cores<F>(
expected_bits: usize,
validators: &[ValidatorId],
signed_bitfields: UncheckedSignedAvailabilityBitfields,
core_lookup: F,
enact_candidate: bool,
) -> Vec<(CoreIndex, CandidateHash)>
where
F: Fn(CoreIndex) -> Option<ParaId>,
@@ -387,7 +396,7 @@ impl<T: Config> Pallet<T> {
},
};
if ON_CHAIN_USE {
if enact_candidate {
let receipt = CommittedCandidateReceipt {
descriptor: pending_availability.descriptor,
commitments,
@@ -420,29 +429,31 @@ impl<T: Config> Pallet<T> {
signed_bitfields: UncheckedSignedAvailabilityBitfields,
disputed_bitfield: DisputedBitfield,
core_lookup: impl Fn(CoreIndex) -> Option<ParaId>,
) -> Vec<(CoreIndex, CandidateHash)> {
full_check: FullCheck,
) -> Result<Vec<(CoreIndex, CandidateHash)>, crate::inclusion::Error<T>> {
let validators = shared::Pallet::<T>::active_validator_keys();
let session_index = shared::Pallet::<T>::session_index();
let parent_hash = frame_system::Pallet::<T>::parent_hash();
let checked_bitfields = sanitize_bitfields::<T>(
let checked_bitfields = crate::paras_inherent::assure_sanity_bitfields::<T>(
signed_bitfields,
disputed_bitfield,
expected_bits,
parent_hash,
session_index,
&validators[..],
FullCheck::Yes,
);
full_check,
)?;
let freed_cores = Self::update_pending_availability_and_get_freed_cores::<_, true>(
let freed_cores = Self::update_pending_availability_and_get_freed_cores::<_>(
expected_bits,
&validators[..],
checked_bitfields,
core_lookup,
true,
);
freed_cores
Ok(freed_cores)
}
/// Process candidates that have been backed. Provide the relay storage root, a set of candidates
@@ -26,6 +26,7 @@ use crate::{
paras_inherent::DisputedBitfield,
scheduler::AssignmentKind,
};
use assert_matches::assert_matches;
use frame_support::assert_noop;
use futures::executor::block_on;
use keyring::Sr25519Keyring;
@@ -41,7 +42,7 @@ use sc_keystore::LocalKeystore;
use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
use std::sync::Arc;
use test_helpers::{
dummy_candidate_descriptor, dummy_collator, dummy_collator_signature, dummy_hash,
dummy_candidate_receipt, dummy_collator, dummy_collator_signature, dummy_hash,
dummy_validation_code,
};
@@ -406,17 +407,18 @@ fn bitfield_checks() {
// mark all candidates as pending availability
let set_pending_av = || {
for (p_id, _) in paras {
let receipt = dummy_candidate_receipt(dummy_hash());
PendingAvailability::<Test>::insert(
p_id,
CandidatePendingAvailability {
availability_votes: default_availability_votes(),
core: Default::default(),
hash: Default::default(),
descriptor: dummy_candidate_descriptor(dummy_hash()),
backers: Default::default(),
relay_parent_number: Default::default(),
backed_in_number: Default::default(),
backing_group: Default::default(),
core: CoreIndex(0),
hash: receipt.hash(),
descriptor: receipt.descriptor,
backers: BitVec::default(),
relay_parent_number: BlockNumber::from(0_u32),
backed_in_number: BlockNumber::from(0_u32),
backing_group: GroupIndex(0),
},
)
}
@@ -434,14 +436,15 @@ fn bitfield_checks() {
&signing_context,
));
assert_eq!(
assert_matches!(
ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
FullCheck::Yes,
),
vec![]
Err(Error::<Test>::WrongBitfieldSize)
);
}
@@ -456,14 +459,15 @@ fn bitfield_checks() {
&signing_context,
));
assert_eq!(
assert_matches!(
ParaInclusion::process_bitfields(
expected_bits() + 1,
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
FullCheck::Yes,
),
vec![]
Err(Error::<Test>::WrongBitfieldSize)
);
}
@@ -494,20 +498,23 @@ fn bitfield_checks() {
// the threshold to free a core is 4 availability votes, but we only expect 1 valid
// valid bitfield.
assert!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.clone(), signed],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
assert_matches!(
ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.clone(), signed],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
FullCheck::Yes,
),
Err(Error::<Test>::UnsortedOrDuplicateValidatorIndices)
);
assert_eq!(
<PendingAvailability<Test>>::get(chain_a)
.unwrap()
.availability_votes
.count_ones(),
1
0
);
// clean up
@@ -550,20 +557,23 @@ fn bitfield_checks() {
// the threshold to free a core is 4 availability votes, but we only expect 1 valid
// valid bitfield because `signed_0` will get skipped for being out of order.
assert!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed_1, signed_0],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
assert_matches!(
ParaInclusion::process_bitfields(
expected_bits(),
vec![signed_1, signed_0],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
FullCheck::Yes,
),
Err(Error::<Test>::UnsortedOrDuplicateValidatorIndices)
);
assert_eq!(
<PendingAvailability<Test>>::get(chain_a)
.unwrap()
.availability_votes
.count_ones(),
1
0
);
PendingAvailability::<Test>::remove_all(None);
@@ -581,13 +591,13 @@ fn bitfield_checks() {
&signing_context,
));
assert!(ParaInclusion::process_bitfields(
assert_matches!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
FullCheck::Yes,
), Ok(x) => { assert!(x.is_empty())});
}
// empty bitfield signed: always ok, but kind of useless.
@@ -601,13 +611,13 @@ fn bitfield_checks() {
&signing_context,
));
assert!(ParaInclusion::process_bitfields(
assert_matches!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
FullCheck::Yes,
), Ok(x) => { assert!(x.is_empty())});
}
// bitfield signed with pending bit signed.
@@ -641,13 +651,13 @@ fn bitfield_checks() {
&signing_context,
));
assert!(ParaInclusion::process_bitfields(
assert_matches!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
FullCheck::Yes,
), Ok(v) => { assert!(v.is_empty())} );
<PendingAvailability<Test>>::remove(chain_a);
PendingAvailabilityCommitments::<Test>::remove(chain_a);
@@ -684,13 +694,13 @@ fn bitfield_checks() {
));
// no core is freed
assert!(ParaInclusion::process_bitfields(
assert_matches!(ParaInclusion::process_bitfields(
expected_bits(),
vec![signed.into()],
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
)
.is_empty());
FullCheck::Yes,
), Ok(v) => { assert!(v.is_empty()) });
}
});
}
@@ -827,14 +837,17 @@ fn supermajority_bitfields_trigger_availability() {
.collect();
// only chain A's core is freed.
assert_eq!(
assert_matches!(
ParaInclusion::process_bitfields(
expected_bits(),
signed_bitfields,
DisputedBitfield::zeros(expected_bits()),
&core_lookup,
FullCheck::Yes,
),
vec![(CoreIndex(0), candidate_a.hash())]
Ok(v) => {
assert_eq!(vec![(CoreIndex(0), candidate_a.hash())], v);
}
);
// chain A had 4 signing off, which is >= threshold.
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use sp_std::vec::Vec;
use sp_std::{cmp::Ordering, vec::Vec};
/// A helper trait to allow calling retain while getting access
/// to the index of the item in the `vec`.
@@ -38,3 +38,75 @@ impl<T> IndexedRetain<T> for Vec<T> {
})
}
}
/// Helper trait until `is_sorted_by` is stabilized.
/// TODO: https://github.com/rust-lang/rust/issues/53485
pub trait IsSortedBy<T> {
	fn is_sorted_by<F>(self, cmp: F) -> bool
	where
		F: FnMut(&T, &T) -> Ordering;
}

impl<'x, T, X> IsSortedBy<T> for X
where
	X: 'x + IntoIterator<Item = &'x T>,
	T: 'x,
{
	/// Walk adjacent pairs; the sequence is sorted iff no pair compares
	/// as `Ordering::Greater`.
	fn is_sorted_by<F>(self, mut cmp: F) -> bool
	where
		F: FnMut(&T, &T) -> Ordering,
	{
		let mut iter = self.into_iter();
		// An empty (or single element) sequence is trivially sorted.
		let mut previous = match iter.next() {
			Some(first) => first,
			None => return true,
		};
		// `all` short-circuits on the first out-of-order pair, matching
		// the early `return false` of a hand-rolled loop.
		iter.all(|current| {
			let in_order = cmp(previous, current) != Ordering::Greater;
			previous = current;
			in_order
		})
	}
}
#[cfg(test)]
mod tests {
	use super::*;

	// An ascending sequence satisfies the ascending comparator but fails the
	// descending one; a constant sequence satisfies both orderings.
	#[test]
	fn is_sorted_simple() {
		let v = vec![1_i32, 2, 3, 1000];
		assert!(IsSortedBy::<i32>::is_sorted_by(v.as_slice(), |a: &i32, b: &i32| { a.cmp(b) }));
		assert!(!IsSortedBy::<i32>::is_sorted_by(&v, |a, b| { b.cmp(a) }));
		let v = vec![8_i32, 8, 8, 8];
		assert!(IsSortedBy::<i32>::is_sorted_by(v.as_slice(), |a: &i32, b: &i32| { a.cmp(b) }));
		assert!(IsSortedBy::<i32>::is_sorted_by(v.as_slice(), |a: &i32, b: &i32| { b.cmp(a) }));
	}

	// A sequence out of order in both directions fails either comparator.
	#[test]
	fn is_not_sorted() {
		let v = vec![7, 1, 3];
		assert!(!IsSortedBy::is_sorted_by(&v, |a, b| { a.cmp(b) }));
		assert!(!IsSortedBy::is_sorted_by(&v, |a, b| { b.cmp(a) }));
	}

	// The comparator must never be invoked for an empty sequence.
	#[test]
	fn empty_is_sorted() {
		let v = Vec::<u8>::new();
		assert!(IsSortedBy::is_sorted_by(&v, |_a, _b| { unreachable!() }));
	}

	// The comparator must never be invoked for a single-element sequence.
	#[test]
	fn single_items_is_sorted() {
		let v = vec![7_u8];
		assert!(IsSortedBy::is_sorted_by(&v, |_a, _b| { unreachable!() }));
	}
}
@@ -22,7 +22,8 @@
//! this module.
use crate::{
disputes::DisputesHandler,
configuration,
disputes::{DisputesHandler, VerifyDisputeSignatures},
inclusion,
inclusion::{CandidateCheckContext, FullCheck},
initializer,
@@ -39,10 +40,11 @@ use frame_support::{
use frame_system::pallet_prelude::*;
use pallet_babe::{self, CurrentBlockRandomness};
use primitives::v1::{
BackedCandidate, CandidateHash, CoreIndex, DisputeStatementSet,
BackedCandidate, CandidateHash, CandidateReceipt, CheckedDisputeStatementSet,
CheckedMultiDisputeStatementSet, CoreIndex, DisputeStatementSet,
InherentData as ParachainsInherentData, MultiDisputeStatementSet, ScrapedOnChainVotes,
SessionIndex, SigningContext, UncheckedSignedAvailabilityBitfield,
UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex,
UncheckedSignedAvailabilityBitfields, ValidatorId, ValidatorIndex, ValidityAttestation,
PARACHAINS_INHERENT_IDENTIFIER,
};
use rand::{seq::SliceRandom, SeedableRng};
@@ -60,10 +62,11 @@ mod misc;
mod weights;
pub use self::{
misc::IndexedRetain,
misc::{IndexedRetain, IsSortedBy},
weights::{
backed_candidate_weight, backed_candidates_weight, dispute_statements_weight,
paras_inherent_total_weight, signed_bitfields_weight, TestWeightInfo, WeightInfo,
backed_candidate_weight, backed_candidates_weight, dispute_statement_set_weight,
multi_dispute_statement_sets_weight, paras_inherent_total_weight, signed_bitfields_weight,
TestWeightInfo, WeightInfo,
},
};
@@ -124,6 +127,10 @@ pub mod pallet {
CandidateConcludedInvalid,
/// The data given to the inherent will result in an overweight block.
InherentOverweight,
/// The ordering of dispute statements was invalid.
DisputeStatementsUnsortedOrDuplicates,
/// A dispute statement was invalid.
DisputeInvalid,
}
/// Whether the paras inherent was included within this block.
@@ -140,6 +147,48 @@ pub mod pallet {
#[pallet::getter(fn on_chain_votes)]
pub(crate) type OnChainVotes<T: Config> = StorageValue<_, ScrapedOnChainVotes<T::Hash>>;
/// Update the disputes statements set part of the on-chain votes.
pub(crate) fn set_scrapable_on_chain_disputes<T: Config>(
	session: SessionIndex,
	checked_disputes: CheckedMultiDisputeStatementSet,
) {
	crate::paras_inherent::OnChainVotes::<T>::mutate(move |slot| {
		// Strip the `Checked` wrapper for storage.
		let disputes: Vec<_> =
			checked_disputes.into_iter().map(DisputeStatementSet::from).collect();
		match slot {
			// Keep existing backing votes; only swap out the disputes.
			Some(votes) => votes.disputes = disputes,
			None =>
				*slot = Some(ScrapedOnChainVotes::<T::Hash> {
					backing_validators_per_candidate: Vec::new(),
					disputes,
					session,
				}),
		}
	})
}
/// Update the backing votes including part of the on-chain votes.
pub(crate) fn set_scrapable_on_chain_backings<T: Config>(
	session: SessionIndex,
	backing_validators_per_candidate: Vec<(
		CandidateReceipt<T::Hash>,
		Vec<(ValidatorIndex, ValidityAttestation)>,
	)>,
) {
	crate::paras_inherent::OnChainVotes::<T>::mutate(move |slot| match slot {
		Some(votes) => {
			// Replace the stored backings wholesale, keeping disputes intact.
			votes.backing_validators_per_candidate.clear();
			votes.backing_validators_per_candidate.extend(backing_validators_per_candidate);
		},
		None =>
			*slot = Some(ScrapedOnChainVotes::<T::Hash> {
				backing_validators_per_candidate,
				disputes: MultiDisputeStatementSet::default(),
				session,
			}),
	})
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_initialize(_: T::BlockNumber) -> Weight {
@@ -282,68 +331,98 @@ impl<T: Config> Pallet<T> {
let now = <frame_system::Pallet<T>>::block_number();
let mut candidate_weight = backed_candidates_weight::<T>(&backed_candidates);
let mut candidates_weight = backed_candidates_weight::<T>(&backed_candidates);
let mut bitfields_weight = signed_bitfields_weight::<T>(signed_bitfields.len());
let disputes_weight = dispute_statements_weight::<T>(&disputes);
let disputes_weight = multi_dispute_statement_sets_weight::<T, _, _>(&disputes);
let current_session = <shared::Pallet<T>>::session_index();
let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
METRICS.on_before_filter(candidate_weight + bitfields_weight + disputes_weight);
METRICS.on_before_filter(candidates_weight + bitfields_weight + disputes_weight);
// Potentially trim inherent data to ensure processing will be within weight limits
let total_weight = {
if candidate_weight
T::DisputesHandler::assure_deduplicated_and_sorted(&mut disputes)
.map_err(|_e| Error::<T>::DisputeStatementsUnsortedOrDuplicates)?;
let (checked_disputes, total_consumed_weight) = {
// Obtain config params..
let config = <configuration::Pallet<T>>::config();
let max_spam_slots = config.dispute_max_spam_slots;
let post_conclusion_acceptance_period =
config.dispute_post_conclusion_acceptance_period;
let verify_dispute_sigs = if let FullCheck::Yes = full_check {
VerifyDisputeSignatures::Yes
} else {
VerifyDisputeSignatures::Skip
};
// .. and prepare a helper closure.
let dispute_set_validity_check = move |set| {
T::DisputesHandler::filter_dispute_data(
set,
max_spam_slots,
post_conclusion_acceptance_period,
verify_dispute_sigs,
)
};
// In case of an overweight block, consume up to the entire block weight
// in disputes, since we will never process anything else, but invalidate
// the block. It's still reasonable to protect against a massive amount of disputes.
if candidates_weight
.saturating_add(bitfields_weight)
.saturating_add(disputes_weight) >
max_block_weight
{
// if the total weight is over the max block weight, first try clearing backed
// candidates and bitfields.
log::warn!("Overweight para inherent data reached the runtime {:?}", parent_hash);
backed_candidates.clear();
candidate_weight = 0;
candidates_weight = 0;
signed_bitfields.clear();
bitfields_weight = 0;
}
if disputes_weight > max_block_weight {
// if disputes are by themselves overweight already, trim the disputes.
debug_assert!(candidate_weight == 0 && bitfields_weight == 0);
let entropy = compute_entropy::<T>(parent_hash);
let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into());
let entropy = compute_entropy::<T>(parent_hash);
let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into());
let remaining_weight =
limit_disputes::<T>(&mut disputes, max_block_weight, &mut rng);
max_block_weight.saturating_sub(remaining_weight)
} else {
candidate_weight
.saturating_add(bitfields_weight)
.saturating_add(disputes_weight)
}
let (checked_disputes, checked_disputes_weight) = limit_and_sanitize_disputes::<T, _>(
disputes,
&dispute_set_validity_check,
max_block_weight,
&mut rng,
);
(
checked_disputes,
checked_disputes_weight
.saturating_add(candidates_weight)
.saturating_add(bitfields_weight),
)
};
let expected_bits = <scheduler::Pallet<T>>::availability_cores().len();
// Handle disputes logic.
let current_session = <shared::Pallet<T>>::session_index();
let disputed_bitfield = {
let new_current_dispute_sets: Vec<_> = disputes
let new_current_dispute_sets: Vec<_> = checked_disputes
.iter()
.map(AsRef::as_ref)
.filter(|s| s.session == current_session)
.map(|s| (s.session, s.candidate_hash))
.collect();
// Note that `provide_multi_dispute_data` will iterate, verify, and import each
// dispute; so the input here must be reasonably bounded.
let _ = T::DisputesHandler::provide_multi_dispute_data(disputes.clone())?;
METRICS.on_disputes_imported(disputes.len() as u64);
// Note that `process_checked_multi_dispute_data` will iterate and import each
// dispute; so the input here must be reasonably bounded,
// which is guaranteed by the checks and weight limitation above.
let _ =
T::DisputesHandler::process_checked_multi_dispute_data(checked_disputes.clone())?;
METRICS.on_disputes_imported(checked_disputes.len() as u64);
if T::DisputesHandler::is_frozen() {
// Relay chain freeze, at this point we will not include any parachain blocks.
METRICS.on_relay_chain_freeze();
// The relay chain we are currently on is invalid. Proceed no further on parachains.
return Ok(Some(dispute_statements_weight::<T>(&disputes)).into())
return Ok(Some(total_consumed_weight).into())
}
// Process the dispute sets of the current session.
@@ -374,6 +453,8 @@ impl<T: Config> Pallet<T> {
// Create a bit index from the set of core indices where each index corresponds to
// a core index that was freed due to a dispute.
//
// I.e. 010100 would indicate, the candidates on Core 1 and 3 would be disputed.
let disputed_bitfield = create_disputed_bitfield(
expected_bits,
freed_disputed.iter().map(|(core_index, _)| core_index),
@@ -398,7 +479,11 @@ impl<T: Config> Pallet<T> {
signed_bitfields,
disputed_bitfield,
<scheduler::Pallet<T>>::core_para,
);
full_check,
)?;
// any error in the previous function will cause an invalid block and not include
// the `DisputeState` to be written to the storage, hence this is ok.
set_scrapable_on_chain_disputes::<T>(current_session, checked_disputes.clone());
// Inform the disputes module of all included candidates.
for (_, candidate_hash) in &freed_concluded {
@@ -414,15 +499,15 @@ impl<T: Config> Pallet<T> {
METRICS.on_candidates_processed_total(backed_candidates.len() as u64);
let scheduled = <scheduler::Pallet<T>>::scheduled();
let backed_candidates = sanitize_backed_candidates::<T, _>(
assure_sanity_backed_candidates::<T, _>(
parent_hash,
backed_candidates,
&backed_candidates,
move |_candidate_index: usize, backed_candidate: &BackedCandidate<T::Hash>| -> bool {
<T>::DisputesHandler::concluded_invalid(current_session, backed_candidate.hash())
// `fn process_candidates` does the verification checks
},
&scheduled[..],
);
)?;
METRICS.on_candidates_sanitized(backed_candidates.len() as u64);
@@ -439,15 +524,12 @@ impl<T: Config> Pallet<T> {
full_check,
)?;
METRICS.on_disputes_included(disputes.len() as u64);
METRICS.on_disputes_included(checked_disputes.len() as u64);
// The number of disputes included in a block is
// limited by the weight as well as the number of candidate blocks.
OnChainVotes::<T>::put(ScrapedOnChainVotes::<<T::Header as HeaderT>::Hash> {
session: current_session,
backing_validators_per_candidate: candidate_receipt_with_backing_validator_indices,
disputes,
});
set_scrapable_on_chain_backings::<T>(
current_session,
candidate_receipt_with_backing_validator_indices,
);
// Note which of the scheduled cores were actually occupied by a backed candidate.
<scheduler::Pallet<T>>::occupied(&occupied);
@@ -456,9 +538,9 @@ impl<T: Config> Pallet<T> {
// this is max config.ump_service_total_weight
let _ump_weight = <ump::Pallet<T>>::process_pending_upward_messages();
METRICS.on_after_filter(total_weight);
METRICS.on_after_filter(total_consumed_weight);
Ok(Some(total_weight).into())
Ok(Some(total_consumed_weight).into())
}
}
@@ -501,109 +583,142 @@ impl<T: Config> Pallet<T> {
let current_session = <shared::Pallet<T>>::session_index();
let expected_bits = <scheduler::Pallet<T>>::availability_cores().len();
let validator_public = shared::Pallet::<T>::active_validator_keys();
let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
T::DisputesHandler::filter_multi_dispute_data(&mut disputes);
let entropy = compute_entropy::<T>(parent_hash);
let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into());
let (mut backed_candidates, mut bitfields) =
frame_support::storage::with_transaction(|| {
// we don't care about fresh or not disputes
// this writes them to storage, so let's query it via those means
// if this fails for whatever reason, that's ok
let _ =
T::DisputesHandler::provide_multi_dispute_data(disputes.clone()).map_err(|e| {
log::warn!(
target: LOG_TARGET,
"MultiDisputesData failed to update: {:?}",
e
);
e
});
// Filter out duplicates and continue.
if let Err(_) = T::DisputesHandler::deduplicate_and_sort_dispute_data(&mut disputes) {
log::debug!(target: LOG_TARGET, "Found duplicate statement sets, retaining the first");
}
// Contains the disputes that are concluded in the current session only,
// since these are the only ones that are relevant for the occupied cores
// and lightens the load on `collect_disputed` significantly.
// Cores can't be occupied with candidates of the previous sessions, and only
// things with new votes can have just concluded. We only need to collect
// cores with disputes that conclude just now, because disputes that
// concluded longer ago have already had any corresponding cores cleaned up.
let current_concluded_invalid_disputes = disputes
.iter()
.filter(|dss| dss.session == current_session)
.map(|dss| (dss.session, dss.candidate_hash))
.filter(|(session, candidate)| {
<T>::DisputesHandler::concluded_invalid(*session, *candidate)
})
.map(|(_session, candidate)| candidate)
.collect::<BTreeSet<CandidateHash>>();
let config = <configuration::Pallet<T>>::config();
let max_spam_slots = config.dispute_max_spam_slots;
let post_conclusion_acceptance_period = config.dispute_post_conclusion_acceptance_period;
// All concluded invalid disputes, that are relevant for the set of candidates
// the inherent provided.
let concluded_invalid_disputes = backed_candidates
.iter()
.map(|backed_candidate| backed_candidate.hash())
.filter(|candidate| {
<T>::DisputesHandler::concluded_invalid(current_session, *candidate)
})
.collect::<BTreeSet<CandidateHash>>();
let (
mut backed_candidates,
mut bitfields,
checked_disputes_sets,
checked_disputes_sets_consumed_weight,
) = frame_support::storage::with_transaction(|| {
let dispute_statement_set_valid = move |set: DisputeStatementSet| {
T::DisputesHandler::filter_dispute_data(
set,
max_spam_slots,
post_conclusion_acceptance_period,
// `DisputeCoordinator` on the node side only forwards
// valid dispute statement sets and hence this does not
// need to be checked.
VerifyDisputeSignatures::Skip,
)
};
let mut freed_disputed: Vec<_> =
<inclusion::Pallet<T>>::collect_disputed(&current_concluded_invalid_disputes)
.into_iter()
.map(|core| (core, FreedReason::Concluded))
.collect();
let disputed_bitfield =
create_disputed_bitfield(expected_bits, freed_disputed.iter().map(|(x, _)| x));
if !freed_disputed.is_empty() {
// unstable sort is fine, because core indices are unique
// i.e. the same candidate can't occupy 2 cores at once.
freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index
<scheduler::Pallet<T>>::free_cores(freed_disputed.clone());
}
// The following 3 calls are equiv to a call to `process_bitfields`
// but we can retain access to `bitfields`.
let bitfields = sanitize_bitfields::<T>(
bitfields,
disputed_bitfield,
expected_bits,
parent_hash,
current_session,
&validator_public[..],
FullCheck::Skip,
// Limit the disputes first, since the following statements depend on the votes include here.
let (checked_disputes_sets, checked_disputes_sets_consumed_weight) =
limit_and_sanitize_disputes::<T, _>(
disputes,
dispute_statement_set_valid,
max_block_weight,
&mut rng,
);
let freed_concluded =
<inclusion::Pallet<T>>::update_pending_availability_and_get_freed_cores::<
_,
false,
>(
expected_bits,
&validator_public[..],
bitfields.clone(),
<scheduler::Pallet<T>>::core_para,
);
// we don't care about fresh or not disputes
// this writes them to storage, so let's query it via those means
// if this fails for whatever reason, that's ok
let _ = T::DisputesHandler::process_checked_multi_dispute_data(
checked_disputes_sets.clone(),
)
.map_err(|e| {
log::warn!(target: LOG_TARGET, "MultiDisputesData failed to update: {:?}", e);
e
});
let freed = collect_all_freed_cores::<T, _>(freed_concluded.iter().cloned());
// Contains the disputes that are concluded in the current session only,
// since these are the only ones that are relevant for the occupied cores
// and lightens the load on `collect_disputed` significantly.
// Cores can't be occupied with candidates of the previous sessions, and only
// things with new votes can have just concluded. We only need to collect
// cores with disputes that conclude just now, because disputes that
// concluded longer ago have already had any corresponding cores cleaned up.
let current_concluded_invalid_disputes = checked_disputes_sets
.iter()
.map(AsRef::as_ref)
.filter(|dss| dss.session == current_session)
.map(|dss| (dss.session, dss.candidate_hash))
.filter(|(session, candidate)| {
<T>::DisputesHandler::concluded_invalid(*session, *candidate)
})
.map(|(_session, candidate)| candidate)
.collect::<BTreeSet<CandidateHash>>();
<scheduler::Pallet<T>>::clear();
let now = <frame_system::Pallet<T>>::block_number();
<scheduler::Pallet<T>>::schedule(freed, now);
// All concluded invalid disputes, that are relevant for the set of candidates
// the inherent provided.
let concluded_invalid_disputes = backed_candidates
.iter()
.map(|backed_candidate| backed_candidate.hash())
.filter(|candidate| {
<T>::DisputesHandler::concluded_invalid(current_session, *candidate)
})
.collect::<BTreeSet<CandidateHash>>();
let scheduled = <scheduler::Pallet<T>>::scheduled();
let mut freed_disputed: Vec<_> =
<inclusion::Pallet<T>>::collect_disputed(&current_concluded_invalid_disputes)
.into_iter()
.map(|core| (core, FreedReason::Concluded))
.collect();
let relay_parent_number = now - One::one();
let disputed_bitfield =
create_disputed_bitfield(expected_bits, freed_disputed.iter().map(|(x, _)| x));
let check_ctx = CandidateCheckContext::<T>::new(now, relay_parent_number);
let backed_candidates = sanitize_backed_candidates::<T, _>(
parent_hash,
backed_candidates,
move |candidate_idx: usize,
backed_candidate: &BackedCandidate<<T as frame_system::Config>::Hash>|
-> bool {
// never include a concluded-invalid candidate
concluded_invalid_disputes.contains(&backed_candidate.hash()) ||
if !freed_disputed.is_empty() {
// unstable sort is fine, because core indices are unique
// i.e. the same candidate can't occupy 2 cores at once.
freed_disputed.sort_unstable_by_key(|pair| pair.0); // sort by core index
<scheduler::Pallet<T>>::free_cores(freed_disputed.clone());
}
// The following 3 calls are equiv to a call to `process_bitfields`
// but we can retain access to `bitfields`.
let bitfields = sanitize_bitfields::<T>(
bitfields,
disputed_bitfield,
expected_bits,
parent_hash,
current_session,
&validator_public[..],
FullCheck::Yes,
);
let freed_concluded =
<inclusion::Pallet<T>>::update_pending_availability_and_get_freed_cores::<_>(
expected_bits,
&validator_public[..],
bitfields.clone(),
<scheduler::Pallet<T>>::core_para,
false,
);
let freed = collect_all_freed_cores::<T, _>(freed_concluded.iter().cloned());
<scheduler::Pallet<T>>::clear();
let now = <frame_system::Pallet<T>>::block_number();
<scheduler::Pallet<T>>::schedule(freed, now);
let scheduled = <scheduler::Pallet<T>>::scheduled();
let relay_parent_number = now - One::one();
let check_ctx = CandidateCheckContext::<T>::new(now, relay_parent_number);
let backed_candidates = sanitize_backed_candidates::<T, _>(
parent_hash,
backed_candidates,
move |candidate_idx: usize,
backed_candidate: &BackedCandidate<<T as frame_system::Config>::Hash>|
-> bool {
// never include a concluded-invalid candidate
concluded_invalid_disputes.contains(&backed_candidate.hash()) ||
// Instead of checking the candidates with code upgrades twice
// move the checking up here and skip it in the training wheels fallback.
// That way we avoid possible duplicate checks while assuring all
@@ -611,31 +726,39 @@ impl<T: Config> Pallet<T> {
check_ctx
.verify_backed_candidate(parent_hash, candidate_idx, backed_candidate)
.is_err()
},
&scheduled[..],
);
},
&scheduled[..],
);
frame_support::storage::TransactionOutcome::Rollback((
// filtered backed candidates
backed_candidates,
// filtered bitfields
bitfields,
))
});
frame_support::storage::TransactionOutcome::Rollback((
// filtered backed candidates
backed_candidates,
// filtered bitfields
bitfields,
// checked disputes sets
checked_disputes_sets,
checked_disputes_sets_consumed_weight,
))
});
let entropy = compute_entropy::<T>(parent_hash);
let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into());
// Assure the maximum block weight is adhered.
let max_block_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
let _consumed_weight = apply_weight_limit::<T>(
// Assure the maximum block weight is adhered, by limiting bitfields and backed
// candidates. Dispute statement sets were already limited before.
let actual_weight = apply_weight_limit::<T>(
&mut backed_candidates,
&mut bitfields,
&mut disputes,
max_block_weight,
max_block_weight.saturating_sub(checked_disputes_sets_consumed_weight),
&mut rng,
);
if actual_weight > max_block_weight {
log::warn!(target: LOG_TARGET, "Post weight limiting weight is still too large.");
}
let disputes = checked_disputes_sets
.into_iter()
.map(|checked| checked.into())
.collect::<Vec<_>>();
Some(ParachainsInherentData::<T::Header> {
bitfields,
backed_candidates,
@@ -716,33 +839,34 @@ fn random_sel<X, F: Fn(&X) -> Weight>(
}
// sorting indices, so the ordering is retained
// unstable sorting is fine, since there are no duplicates
// unstable sorting is fine, since there are no duplicates in indices
// and even if there were, they don't have an identity
picked_indices.sort_unstable();
(weight_acc, picked_indices)
}
/// Considers an upper threshold that the inherent data must not exceed.
///
/// If there is sufficient space, all disputes, all bitfields and all candidates
/// If there is sufficient space, all bitfields and all candidates
/// will be included.
///
/// Otherwise tries to include all disputes, and then tries to fill the remaining space with bitfields and then candidates.
///
/// The selection process is random. For candidates, there is an exception for code upgrades as they are preferred.
/// And for disputes, local and older disputes are preferred (see `limit_disputes`).
/// And for disputes, local and older disputes are preferred (see `limit_and_sanitize_disputes`).
/// for backed candidates, since with a increasing number of parachains their chances of
/// inclusion become slim. All backed candidates are checked beforehands in `fn create_inherent_inner`
/// which guarantees sanity.
///
/// Assumes disputes are already filtered by the time this is called.
///
/// Returns the total weight consumed by `bitfields` and `candidates`.
fn apply_weight_limit<T: Config + inclusion::Config>(
candidates: &mut Vec<BackedCandidate<<T>::Hash>>,
bitfields: &mut UncheckedSignedAvailabilityBitfields,
disputes: &mut MultiDisputeStatementSet,
max_block_weight: Weight,
max_consumable_weight: Weight,
rng: &mut rand_chacha::ChaChaRng,
) -> Weight {
// include as many disputes as possible, always
let remaining_weight = limit_disputes::<T>(disputes, max_block_weight, rng);
let total_candidates_weight = backed_candidates_weight::<T>(candidates.as_slice());
let total_bitfields_weight = signed_bitfields_weight::<T>(bitfields.len());
@@ -750,12 +874,12 @@ fn apply_weight_limit<T: Config + inclusion::Config>(
let total = total_bitfields_weight.saturating_add(total_candidates_weight);
// candidates + bitfields fit into the block
if remaining_weight >= total {
if max_consumable_weight >= total {
return total
}
// Prefer code upgrades, they tend to be large and hence stand no chance to be picked
// late while maintaining the weight bounds
// late while maintaining the weight bounds.
let preferred_indices = candidates
.iter()
.enumerate()
@@ -766,37 +890,40 @@ fn apply_weight_limit<T: Config + inclusion::Config>(
// There is weight remaining to be consumed by a subset of candidates
// which are going to be picked now.
if let Some(remaining_weight) = remaining_weight.checked_sub(total_bitfields_weight) {
if let Some(max_consumable_by_candidates) =
max_consumable_weight.checked_sub(total_bitfields_weight)
{
let (acc_candidate_weight, indices) =
random_sel::<BackedCandidate<<T as frame_system::Config>::Hash>, _>(
rng,
candidates.clone(),
preferred_indices,
|c| backed_candidate_weight::<T>(c),
remaining_weight,
max_consumable_by_candidates,
);
candidates.indexed_retain(|idx, _backed_candidate| indices.binary_search(&idx).is_ok());
// pick all bitfields, and
// fill the remaining space with candidates
let total = acc_candidate_weight.saturating_add(total_bitfields_weight);
return total
let total_consumed = acc_candidate_weight.saturating_add(total_bitfields_weight);
return total_consumed
}
candidates.clear();
// insufficient space for even the bitfields alone, so only try to fit as many of those
// into the block and skip the candidates entirely
let (total, indices) = random_sel::<UncheckedSignedAvailabilityBitfield, _>(
let (total_consumed, indices) = random_sel::<UncheckedSignedAvailabilityBitfield, _>(
rng,
bitfields.clone(),
vec![],
|_| <<T as Config>::WeightInfo as WeightInfo>::enter_bitfields(),
remaining_weight,
max_consumable_weight,
);
bitfields.indexed_retain(|idx, _bitfield| indices.binary_search(&idx).is_ok());
total
total_consumed
}
/// Filter bitfields based on freed core indices, validity, and other sanity checks.
@@ -910,6 +1037,61 @@ pub(crate) fn sanitize_bitfields<T: crate::inclusion::Config>(
bitfields
}
/// Strict counterpart of `sanitize_bitfields`: instead of silently filtering
/// out offending bitfields, fail on the *first* violation encountered.
///
/// Checks, in order:
/// * the disputed bitfield has exactly `expected_bits` bits,
/// * each bitfield payload has exactly `expected_bits` bits,
/// * bitfields are ordered by strictly ascending validator index
///   (which also rules out duplicates),
/// * every validator index is within bounds of `validators`,
/// * only when `full_check` is `FullCheck::Yes`: the bitfield signature is
///   valid under the `SigningContext` built from `parent_hash` and
///   `session_index`.
///
/// Returns the bitfields (signature-checked ones are converted back to their
/// unchecked representation) or the first `inclusion::Error` hit.
pub(crate) fn assure_sanity_bitfields<T: crate::inclusion::Config>(
    unchecked_bitfields: UncheckedSignedAvailabilityBitfields,
    disputed_bitfield: DisputedBitfield,
    expected_bits: usize,
    parent_hash: T::Hash,
    session_index: SessionIndex,
    validators: &[ValidatorId],
    full_check: FullCheck,
) -> Result<UncheckedSignedAvailabilityBitfields, crate::inclusion::Error<T>> {
    // Last validator index seen; used to enforce strict ascending order.
    let mut last_index: Option<ValidatorIndex> = None;

    use crate::inclusion::Error;

    ensure!(disputed_bitfield.0.len() == expected_bits, Error::<T>::WrongBitfieldSize);

    let mut bitfields = Vec::with_capacity(unchecked_bitfields.len());

    let signing_context = SigningContext { parent_hash, session_index };
    for unchecked_bitfield in unchecked_bitfields {
        // Unlike the sanitizing variant, a wrongly sized payload is a hard error here.
        ensure!(
            unchecked_bitfield.unchecked_payload().0.len() == expected_bits,
            Error::<T>::WrongBitfieldSize
        );

        let validator_index = unchecked_bitfield.unchecked_validator_index();

        // `None` (first iteration) passes; otherwise require strictly increasing indices.
        if !last_index.map_or(true, |last_index: ValidatorIndex| last_index < validator_index) {
            return Err(Error::<T>::UnsortedOrDuplicateValidatorIndices)
        }

        // Bounds check must precede the slice indexing below.
        if unchecked_bitfield.unchecked_validator_index().0 as usize >= validators.len() {
            return Err(Error::<T>::ValidatorIndexOutOfBounds)
        }

        let validator_public = &validators[validator_index.0 as usize];

        if let FullCheck::Yes = full_check {
            // Validate the bitfield signature; an invalid one aborts the whole batch.
            if let Ok(signed_bitfield) =
                unchecked_bitfield.try_into_checked(&signing_context, validator_public)
            {
                bitfields.push(signed_bitfield.into_unchecked());
            } else {
                return Err(Error::<T>::InvalidBitfieldSignature)
            }
        } else {
            // Signature verification skipped at the caller's request; all
            // structural checks above still apply.
            bitfields.push(unchecked_bitfield);
        }

        last_index = Some(validator_index);
    }
    Ok(bitfields)
}
/// Filter out any candidates that have a concluded invalid dispute.
///
/// `scheduled` follows the same naming scheme as provided in the
@@ -918,7 +1100,7 @@ pub(crate) fn sanitize_bitfields<T: crate::inclusion::Config>(
/// state.
///
/// `candidate_has_concluded_invalid_dispute` must return `true` if the candidate
/// is disputed, false otherwise
/// is disputed, false otherwise. The passed `usize` is the candidate index.
///
/// The returned `Vec` is sorted according to the occupied core index.
fn sanitize_backed_candidates<
@@ -932,8 +1114,8 @@ fn sanitize_backed_candidates<
) -> Vec<BackedCandidate<T::Hash>> {
// Remove any candidates that were concluded invalid.
// This does not assume sorting.
backed_candidates.indexed_retain(move |idx, backed_candidate| {
!candidate_has_concluded_invalid_dispute_or_is_invalid(idx, backed_candidate)
backed_candidates.indexed_retain(move |candidate_idx, backed_candidate| {
!candidate_has_concluded_invalid_dispute_or_is_invalid(candidate_idx, backed_candidate)
});
let scheduled_paras_to_core_idx = scheduled
@@ -965,6 +1147,46 @@ fn sanitize_backed_candidates<
backed_candidates
}
/// Strict counterpart of `sanitize_backed_candidates`: instead of filtering
/// out offending candidates, fail on the first violation.
///
/// Assumes sorted candidates.
///
/// Verifies that:
/// * no candidate is rejected by `candidate_has_concluded_invalid_dispute_or_is_invalid`
///   (the `usize` argument is the candidate's index in the slice),
/// * every candidate descriptor references the expected `relay_parent`,
/// * candidates are sorted by the core index their para is scheduled on.
pub(crate) fn assure_sanity_backed_candidates<
    T: crate::inclusion::Config,
    F: FnMut(usize, &BackedCandidate<T::Hash>) -> bool,
>(
    relay_parent: T::Hash,
    backed_candidates: &[BackedCandidate<T::Hash>],
    mut candidate_has_concluded_invalid_dispute_or_is_invalid: F,
    scheduled: &[CoreAssignment],
) -> Result<(), crate::inclusion::Error<T>> {
    use crate::inclusion::Error;

    for (idx, backed_candidate) in backed_candidates.iter().enumerate() {
        // NOTE(review): the error variant reused here describes sorting, not
        // dispute state — confirm whether a dedicated variant is warranted.
        if candidate_has_concluded_invalid_dispute_or_is_invalid(idx, backed_candidate) {
            return Err(Error::<T>::UnsortedOrDuplicateBackedCandidates)
        }
        // Assure the backed candidate's `ParaId`'s core is free.
        // This holds under the assumption that `Scheduler::schedule` is called _before_.
        // Also checks the candidate references the correct relay parent.
        let desc = backed_candidate.descriptor();
        if desc.relay_parent != relay_parent {
            return Err(Error::<T>::UnexpectedRelayParent)
        }
    }

    // Map each scheduled para to its assigned core, for the sortedness check below.
    let scheduled_paras_to_core_idx = scheduled
        .into_iter()
        .map(|core_assignment| (core_assignment.para_id, core_assignment.core))
        .collect::<BTreeMap<ParaId, CoreIndex>>();

    if !IsSortedBy::is_sorted_by(backed_candidates, |x, y| {
        // Never panics, since we would have early returned on those in the above loop.
        // NOTE(review): the loop above checks only the dispute closure and the
        // relay parent — it does not verify each candidate's para appears in
        // `scheduled`, so this indexing could panic for an unscheduled para.
        // Confirm the caller guarantees every backed candidate is scheduled.
        scheduled_paras_to_core_idx[&x.descriptor().para_id]
            .cmp(&scheduled_paras_to_core_idx[&y.descriptor().para_id])
    }) {
        return Err(Error::<T>::UnsortedOrDuplicateBackedCandidates)
    }

    Ok(())
}
/// Derive entropy from babe provided per block randomness.
///
/// In the odd case none is available, uses the `parent_hash` and
@@ -986,31 +1208,36 @@ fn compute_entropy<T: Config>(parent_hash: T::Hash) -> [u8; 32] {
/// Limit disputes in place.
///
/// Returns the unused weight of `remaining_weight`.
fn limit_disputes<T: Config>(
disputes: &mut MultiDisputeStatementSet,
remaining_weight: Weight,
/// Assumes ordering of disputes, retains sorting of the statement.
///
/// Prime source of overload safety for dispute votes:
/// 1. Check accumulated weight does not exceed the maximum block weight.
/// 2. If exceeded:
/// 1. Check validity of all dispute statements sequentially
/// 2. If not exceeded:
/// 1. Sort the disputes based on locality and age, locality first.
/// 1. Split the array
/// 1. Prefer local ones over remote disputes
/// 1. If weight is exceeded by locals, pick the older ones (lower indices)
/// until the weight limit is reached.
/// 1. If weight is exceeded by locals and remotes, pick remotes
/// randomly and check validity one by one.
///
/// Returns the consumed weight amount, that is guaranteed to be less than the provided `max_consumable_weight`.
fn limit_and_sanitize_disputes<
T: Config,
CheckValidityFn: FnMut(DisputeStatementSet) -> Option<CheckedDisputeStatementSet>,
>(
mut disputes: MultiDisputeStatementSet,
mut dispute_statement_set_valid: CheckValidityFn,
max_consumable_weight: Weight,
rng: &mut rand_chacha::ChaChaRng,
) -> Weight {
let mut remaining_weight = remaining_weight;
let disputes_weight = dispute_statements_weight::<T>(&disputes);
if disputes_weight > remaining_weight {
// Sort the dispute statements according to the following prioritization:
// 1. Prioritize local disputes over remote disputes.
// 2. Prioritize older disputes over newer disputes.
disputes.sort_by(|a, b| {
let a_local_block = T::DisputesHandler::included_state(a.session, a.candidate_hash);
let b_local_block = T::DisputesHandler::included_state(b.session, b.candidate_hash);
match (a_local_block, b_local_block) {
// Prioritize local disputes over remote disputes.
(None, Some(_)) => Ordering::Greater,
(Some(_), None) => Ordering::Less,
// For local disputes, prioritize those that occur at an earlier height.
(Some(a_height), Some(b_height)) => a_height.cmp(&b_height),
// Prioritize earlier remote disputes using session as rough proxy.
(None, None) => a.session.cmp(&b.session),
}
});
) -> (Vec<CheckedDisputeStatementSet>, Weight) {
// The total weight if all disputes would be included
let disputes_weight = multi_dispute_statement_sets_weight::<T, _, _>(&disputes);
if disputes_weight > max_consumable_weight {
let mut checked_acc = Vec::<CheckedDisputeStatementSet>::with_capacity(disputes.len());
// Since the disputes array is sorted, we may use binary search to find the beginning of
// remote disputes
@@ -1018,9 +1245,9 @@ fn limit_disputes<T: Config>(
.binary_search_by(|probe| {
if T::DisputesHandler::included_state(probe.session, probe.candidate_hash).is_some()
{
Ordering::Greater
} else {
Ordering::Less
} else {
Ordering::Greater
}
})
// The above predicate will never find an item and therefore we are guaranteed to obtain
@@ -1028,19 +1255,24 @@ fn limit_disputes<T: Config>(
.unwrap_err();
// Due to the binary search predicate above, the index computed will constitute the beginning
// of the remote disputes sub-array
// of the remote disputes sub-array `[Local, Local, Local, ^Remote, Remote]`.
let remote_disputes = disputes.split_off(idx);
// Accumualated weight of all disputes picked, that passed the checks.
let mut weight_acc = 0 as Weight;
// Select disputes in-order until the remaining weight is attained
disputes.retain(|d| {
disputes.iter().for_each(|dss| {
let dispute_weight = <<T as Config>::WeightInfo as WeightInfo>::enter_variable_disputes(
d.statements.len() as u32,
dss.statements.len() as u32,
);
if remaining_weight >= dispute_weight {
remaining_weight -= dispute_weight;
true
} else {
false
let updated = weight_acc.saturating_add(dispute_weight);
if max_consumable_weight >= updated {
// only apply the weight if the validity check passes
if let Some(checked) = dispute_statement_set_valid(dss.clone()) {
checked_acc.push(checked);
weight_acc = updated;
}
}
});
@@ -1048,24 +1280,38 @@ fn limit_disputes<T: Config>(
let d = remote_disputes.iter().map(|d| d.statements.len() as u32).collect::<Vec<u32>>();
// Select remote disputes at random until the block is full
let (acc_remote_disputes_weight, indices) = random_sel::<u32, _>(
let (_acc_remote_disputes_weight, mut indices) = random_sel::<u32, _>(
rng,
d,
vec![],
|v| <<T as Config>::WeightInfo as WeightInfo>::enter_variable_disputes(*v),
remaining_weight,
max_consumable_weight.saturating_sub(weight_acc),
);
// Collect all remote disputes
let mut remote_disputes =
indices.into_iter().map(|idx| disputes[idx].clone()).collect::<Vec<_>>();
// Sort the indices, to retain the same sorting as the input.
indices.sort();
// Construct the full list of selected disputes
disputes.append(&mut remote_disputes);
// Add the remote disputes after checking their validity.
checked_acc.extend(indices.into_iter().filter_map(|idx| {
dispute_statement_set_valid(remote_disputes[idx].clone()).map(|cdss| {
let weight = <<T as Config>::WeightInfo as WeightInfo>::enter_variable_disputes(
cdss.as_ref().statements.len() as u32,
);
weight_acc = weight_acc.saturating_add(weight);
cdss
})
}));
// Update the remaining weight
remaining_weight = remaining_weight.saturating_sub(acc_remote_disputes_weight);
(checked_acc, weight_acc)
} else {
// Go through all of them, and just apply the filter, they would all fit
let checked = disputes
.into_iter()
.filter_map(|dss| dispute_statement_set_valid(dss))
.collect::<Vec<CheckedDisputeStatementSet>>();
// some might have been filtered out, so re-calc the weight
let checked_disputes_weight = multi_dispute_statement_sets_weight::<T, _, _>(&checked);
(checked, checked_disputes_weight)
}
remaining_weight
}
@@ -26,6 +26,7 @@ mod enter {
builder::{Bench, BenchBuilder},
mock::{new_test_ext, MockGenesisConfig, Test},
};
use assert_matches::assert_matches;
use frame_support::assert_ok;
use sp_std::collections::btree_map::BTreeMap;
@@ -295,10 +296,10 @@ mod enter {
// The current schedule is empty prior to calling `create_inherent_enter`.
assert_eq!(<scheduler::Pallet<Test>>::scheduled(), vec![]);
assert_ok!(Pallet::<Test>::enter(
assert_matches!(Pallet::<Test>::enter(
frame_system::RawOrigin::None.into(),
expected_para_inherent_data,
));
), Err(e) => { dbg!(e) });
});
}
@@ -417,24 +418,26 @@ mod enter {
assert_eq!(<scheduler::Pallet<Test>>::scheduled(), vec![]);
// Ensure that calling enter with 3 disputes and 2 candidates is over weight
assert_ok!(Pallet::<Test>::enter(
assert_matches!(Pallet::<Test>::enter(
frame_system::RawOrigin::None.into(),
expected_para_inherent_data,
));
), Err(e) => {
dbg!(e)
});
assert_eq!(
// The length of this vec is equal to the number of candidates, so we know
// all of our candidates got filtered out
Pallet::<Test>::on_chain_votes().unwrap().backing_validators_per_candidate.len(),
0,
Pallet::<Test>::on_chain_votes(),
None,
);
});
}
#[test]
// Ensure that when a block is over weight due to disputes and bitfields, the bitfields are
// Ensure an overweight block with an excess amount of disputes and bitfields, the bitfields are
// filtered to accommodate the block size and no backed candidates are included.
fn limit_bitfields() {
fn limit_bitfields_some() {
new_test_ext(MockGenesisConfig::default()).execute_with(|| {
// Create the inherent data for this block
let mut dispute_statements = BTreeMap::new();
@@ -477,7 +480,12 @@ mod enter {
// Nothing is filtered out (including the backed candidates.)
let limit_inherent_data =
Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
assert!(limit_inherent_data != expected_para_inherent_data);
assert_ne!(limit_inherent_data, expected_para_inherent_data);
assert!(
inherent_data_weight(&limit_inherent_data) <=
inherent_data_weight(&expected_para_inherent_data)
);
assert!(inherent_data_weight(&limit_inherent_data) <= max_block_weight());
// Three disputes is over weight (see previous test), so we expect to only see 2 disputes
assert_eq!(limit_inherent_data.disputes.len(), 2);
@@ -551,20 +559,49 @@ mod enter {
// The current schedule is empty prior to calling `create_inherent_enter`.
assert_eq!(<scheduler::Pallet<Test>>::scheduled(), vec![]);
assert_ok!(Pallet::<Test>::enter(
assert_matches!(Pallet::<Test>::enter(
frame_system::RawOrigin::None.into(),
expected_para_inherent_data,
));
), Err(_e) => {
/* TODO */
});
assert_eq!(
// The length of this vec is equal to the number of candidates, so we know
// all of our candidates got filtered out
Pallet::<Test>::on_chain_votes().unwrap().backing_validators_per_candidate.len(),
0,
);
// The block was not included, as such, `on_chain_votes` _must_ return `None`.
assert_matches!(Pallet::<Test>::on_chain_votes(), None);
});
}
/// Maximum total weight a block may consume, as configured by the mock runtime.
fn max_block_weight() -> Weight {
    let block_weights = <Test as frame_system::Config>::BlockWeights::get();
    block_weights.max_block
}
/// Total weight of the given inherent data: dispute statement sets plus
/// bitfields plus backed candidates. Prints a per-component breakdown to aid
/// debugging failing weight-limit tests.
fn inherent_data_weight(inherent_data: &ParachainsInherentData) -> Weight {
    use thousands::Separable;

    let multi_dispute_statement_sets_weight =
        multi_dispute_statement_sets_weight::<Test, _, _>(&inherent_data.disputes);
    let signed_bitfields_weight = signed_bitfields_weight::<Test>(inherent_data.bitfields.len());
    let backed_candidates_weight =
        backed_candidates_weight::<Test>(&inherent_data.backed_candidates);

    // Plain `+` (not saturating): an overflow would panic in this debug/test
    // build, which is acceptable — weights here are far below the type's range.
    let sum = multi_dispute_statement_sets_weight +
        signed_bitfields_weight +
        backed_candidates_weight;

    println!(
        "disputes({})={} + bitfields({})={} + candidates({})={} -> {}",
        inherent_data.disputes.len(),
        multi_dispute_statement_sets_weight.separate_with_underscores(),
        inherent_data.bitfields.len(),
        signed_bitfields_weight.separate_with_underscores(),
        inherent_data.backed_candidates.len(),
        backed_candidates_weight.separate_with_underscores(),
        sum.separate_with_underscores()
    );
    sum
}
#[test]
// Ensure that when a block is over weight due to disputes and bitfields, we abort
fn limit_candidates_over_weight_1() {
@@ -591,6 +628,7 @@ mod enter {
});
let expected_para_inherent_data = scenario.data.clone();
assert!(max_block_weight() < inherent_data_weight(&expected_para_inherent_data));
// Check the para inherent data is as expected:
// * 1 bitfield per validator (5 validators per core, 2 backed candidates, 3 disputes => 5*5 = 25)
@@ -608,6 +646,12 @@ mod enter {
Pallet::<Test>::create_inherent_inner(&inherent_data.clone()).unwrap();
// Expect that inherent data is filtered to include only 1 backed candidate and 2 disputes
assert!(limit_inherent_data != expected_para_inherent_data);
assert!(
max_block_weight() >= inherent_data_weight(&limit_inherent_data),
"Post limiting exceeded block weight: max={} vs. inherent={}",
max_block_weight(),
inherent_data_weight(&limit_inherent_data)
);
// * 1 bitfields
assert_eq!(limit_inherent_data.bitfields.len(), 25);
@@ -668,17 +712,12 @@ mod enter {
// * 3 disputes.
assert_eq!(expected_para_inherent_data.disputes.len(), 3);
assert_ok!(Pallet::<Test>::enter(
assert_matches!(Pallet::<Test>::enter(
frame_system::RawOrigin::None.into(),
expected_para_inherent_data,
));
), Err(e) => { dbg!(e) });
assert_eq!(
// The length of this vec is equal to the number of candidates, so we know our 2
// backed candidates did not get filtered out
Pallet::<Test>::on_chain_votes().unwrap().backing_validators_per_candidate.len(),
0
);
assert_matches!(Pallet::<Test>::on_chain_votes(), None);
});
}
}
@@ -77,18 +77,29 @@ pub fn paras_inherent_total_weight<T: Config>(
) -> Weight {
backed_candidates_weight::<T>(backed_candidates)
.saturating_add(signed_bitfields_weight::<T>(bitfields.len()))
.saturating_add(dispute_statements_weight::<T>(disputes))
.saturating_add(multi_dispute_statement_sets_weight::<T, _, _>(disputes))
}
pub fn dispute_statements_weight<T: Config>(disputes: &[DisputeStatementSet]) -> Weight {
/// Weight contributed by a single dispute statement set, scaled by the number
/// of statements it carries, per the configured `WeightInfo`.
pub fn dispute_statement_set_weight<T: Config, S: AsRef<DisputeStatementSet>>(
    statement_set: S,
) -> Weight {
    let statement_count = statement_set.as_ref().statements.len() as u32;
    <<T as Config>::WeightInfo as WeightInfo>::enter_variable_disputes(statement_count)
}
pub fn multi_dispute_statement_sets_weight<
T: Config,
D: AsRef<[S]>,
S: AsRef<DisputeStatementSet>,
>(
disputes: D,
) -> Weight {
disputes
.as_ref()
.iter()
.map(|d| {
<<T as Config>::WeightInfo as WeightInfo>::enter_variable_disputes(
d.statements.len() as u32
)
})
.fold(0, |acc, x| acc.saturating_add(x))
.map(|d| dispute_statement_set_weight::<T, &S>(d))
.fold(0, |acc_weight, weight| acc_weight.saturating_add(weight))
}
pub fn signed_bitfields_weight<T: Config>(bitfields_len: usize) -> Weight {
+2
View File
@@ -319,10 +319,12 @@ struct MonitorConfig {
#[derive(Debug, Clone, StructOpt)]
struct EmergencySolutionConfig {
/// The block hash at which scraping happens. If none is provided, the latest head is used.
#[allow(dead_code)]
#[structopt(long)]
at: Option<Hash>,
/// The solver algorithm to use.
#[allow(dead_code)]
#[structopt(subcommand)]
solver: Solvers,