Add elastic scaling support in ParaInherent BenchBuilder (#3690)

Extracted BenchBuilder enhancements used in
https://github.com/paritytech/polkadot-sdk/pull/3644 . Might still
require some work to fully support all scenarios when disputing elastic
scaling parachains, but it should be useful in writing elastic scaling
runtime tests.

---------

Signed-off-by: Andrei Sandu <andrei-mihail@parity.io>
This commit is contained in:
Andrei Sandu
2024-03-15 13:42:58 +02:00
committed by GitHub
parent 7099f6e1b1
commit 4987d79824
3 changed files with 279 additions and 175 deletions
+275 -171
View File
@@ -39,7 +39,11 @@ use sp_runtime::{
traits::{Header as HeaderT, One, TrailingZeroInput, Zero},
RuntimeAppPublic,
};
use sp_std::{collections::btree_map::BTreeMap, prelude::Vec, vec};
use sp_std::{
collections::{btree_map::BTreeMap, vec_deque::VecDeque},
prelude::Vec,
vec,
};
fn mock_validation_code() -> ValidationCode {
ValidationCode(vec![1, 2, 3])
@@ -83,13 +87,18 @@ pub(crate) struct BenchBuilder<T: paras_inherent::Config> {
/// Optionally set the number of dispute statements for each candidate.
dispute_statements: BTreeMap<u32, u32>,
/// Session index for each dispute. Index of slice corresponds to a core,
/// which is offset by the number of entries for `backed_and_concluding_cores`. I.E. if
/// `backed_and_concluding_cores` has 3 entries, the first index of `dispute_sessions`
/// which is offset by the number of entries for `backed_and_concluding_paras`. I.E. if
/// `backed_and_concluding_paras` has 3 entries, the first index of `dispute_sessions`
/// will correspond to core index 3. There must be one entry for each core with a dispute
/// statement set.
dispute_sessions: Vec<u32>,
/// Map from core seed to number of validity votes.
backed_and_concluding_cores: BTreeMap<u32, u32>,
/// Map from para id to number of validity votes. Core indices are generated based on
/// `elastic_paras` configuration. Each para id in `elastic_paras` gets the
/// specified number of consecutive cores assigned to it. If a para id is not present
/// in `elastic_paras` it gets assigned to a single core.
backed_and_concluding_paras: BTreeMap<u32, u32>,
/// Map from para id (seed) to number of chained candidates.
elastic_paras: BTreeMap<u32, u8>,
/// Make every candidate include a code upgrade by setting this to `Some` where the interior
/// value is the byte length of the new code.
code_upgrade: Option<u32>,
@@ -106,6 +115,7 @@ pub(crate) struct Bench<T: paras_inherent::Config> {
pub(crate) _block_number: BlockNumberFor<T>,
}
#[allow(dead_code)]
impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Create a new `BenchBuilder` with some opinionated values that should work with the rest
/// of the functions in this implementation.
@@ -119,7 +129,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
max_validators: None,
dispute_statements: BTreeMap::new(),
dispute_sessions: Default::default(),
backed_and_concluding_cores: Default::default(),
backed_and_concluding_paras: Default::default(),
elastic_paras: Default::default(),
code_upgrade: None,
fill_claimqueue: true,
_phantom: sp_std::marker::PhantomData::<T>,
@@ -129,7 +140,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Set the session index for each dispute statement set (in other words, set the session
/// the dispute statement set's relay chain block is from). Indexes of `dispute_sessions`
/// correspond to a core, which is offset by the number of entries for
/// `backed_and_concluding_cores`. I.E. if `backed_and_concluding_cores` cores has 3 entries,
/// `backed_and_concluding_paras`. I.E. if `backed_and_concluding_paras` has 3 entries,
/// the first index of `dispute_sessions` will correspond to core index 3.
///
/// Note that there must be an entry for each core with a dispute statement set.
@@ -138,12 +149,19 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
self
}
/// Set a map from core/para id seed to number of validity votes.
pub(crate) fn set_backed_and_concluding_cores(
/// Set a map from para id seed to number of validity votes.
pub(crate) fn set_backed_and_concluding_paras(
mut self,
backed_and_concluding_cores: BTreeMap<u32, u32>,
backed_and_concluding_paras: BTreeMap<u32, u32>,
) -> Self {
self.backed_and_concluding_cores = backed_and_concluding_cores;
self.backed_and_concluding_paras = backed_and_concluding_paras;
self
}
/// Set a map from para id seed to number of cores assigned to it.
#[cfg(feature = "runtime-benchmarks")]
pub(crate) fn set_elastic_paras(mut self, elastic_paras: BTreeMap<u32, u8>) -> Self {
self.elastic_paras = elastic_paras;
self
}
@@ -241,16 +259,6 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
(Self::fallback_max_validators() / 2) + 1
}
/// Create para id, core index, and grab the associated group index from the scheduler pallet.
fn create_indexes(&self, seed: u32) -> (ParaId, CoreIndex, GroupIndex) {
// The seed maps 1:1 to both the para id and the core index.
let para_id = ParaId::from(seed);
let core_idx = CoreIndex(seed);
// Look up the validator group the scheduler assigned to this core at the
// builder's block height. The `unwrap` panics if no group is assigned,
// which would indicate a mis-configured benchmark scenario.
let group_idx =
scheduler::Pallet::<T>::group_assigned_to_core(core_idx, self.block_number).unwrap();
(para_id, core_idx, group_idx)
}
fn mock_head_data() -> HeadData {
let max_head_size = configuration::Pallet::<T>::config().max_head_data_size;
HeadData(vec![0xFF; max_head_size as usize])
@@ -321,7 +329,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Create an `AvailabilityBitfield` where `concluding` is a map where each key is a core index
/// that is concluding and `cores` is the total number of cores in the system.
fn availability_bitvec(concluding: &BTreeMap<u32, u32>, cores: u32) -> AvailabilityBitfield {
fn availability_bitvec(concluding: &BTreeMap<u32, u32>, cores: usize) -> AvailabilityBitfield {
let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0];
for i in 0..cores {
if concluding.get(&(i as u32)).is_some() {
@@ -352,7 +360,7 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
///
/// Note that this must be called at least 2 sessions before the target session as there is a
/// n+2 session delay for the scheduled actions to take effect.
fn setup_para_ids(cores: u32) {
fn setup_para_ids(cores: usize) {
// make sure parachains exist prior to session change.
for i in 0..cores {
let para_id = ParaId::from(i as u32);
@@ -407,7 +415,10 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
mut self,
target_session: SessionIndex,
validators: Vec<(T::AccountId, ValidatorId)>,
total_cores: u32,
// Total cores used in the scenario
total_cores: usize,
// Additional cores for elastic parachains
extra_cores: usize,
) -> Self {
let mut block = 1;
for session in 0..=target_session {
@@ -442,7 +453,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
self.validators = Some(validators_shuffled);
self.block_number = block_number;
self.session = target_session;
assert_eq!(paras::Pallet::<T>::parachains().len(), total_cores as usize);
assert_eq!(paras::Pallet::<T>::parachains().len(), total_cores - extra_cores);
self
}
@@ -453,13 +465,14 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// to the cores successfully being freed from the candidates being marked as available.
fn create_availability_bitfields(
&self,
concluding_cores: &BTreeMap<u32, u32>,
total_cores: u32,
concluding_paras: &BTreeMap<u32, u32>,
elastic_paras: &BTreeMap<u32, u8>,
total_cores: usize,
) -> Vec<UncheckedSigned<AvailabilityBitfield>> {
let validators =
self.validators.as_ref().expect("must have some validators prior to calling");
let availability_bitvec = Self::availability_bitvec(concluding_cores, total_cores);
let availability_bitvec = Self::availability_bitvec(concluding_paras, total_cores);
let bitfields: Vec<UncheckedSigned<AvailabilityBitfield>> = validators
.iter()
@@ -476,16 +489,27 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
})
.collect();
for (seed, _) in concluding_cores.iter() {
let mut current_core_idx = 0u32;
for (seed, _) in concluding_paras.iter() {
// make sure the candidates that will be concluding are marked as pending availability.
let (para_id, core_idx, group_idx) = self.create_indexes(*seed);
Self::add_availability(
para_id,
core_idx,
group_idx,
Self::validator_availability_votes_yes(validators.len()),
CandidateHash(H256::from(byte32_slice_from(*seed))),
);
let para_id = ParaId::from(*seed);
for _chain_idx in 0..elastic_paras.get(&seed).cloned().unwrap_or(1) {
let core_idx = CoreIndex::from(current_core_idx);
let group_idx =
scheduler::Pallet::<T>::group_assigned_to_core(core_idx, self.block_number)
.unwrap();
Self::add_availability(
para_id,
core_idx,
group_idx,
Self::validator_availability_votes_yes(validators.len()),
CandidateHash(H256::from(byte32_slice_from(current_core_idx))),
);
current_core_idx += 1;
}
}
bitfields
@@ -494,112 +518,143 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Create backed candidates for `cores_with_backed_candidates`. You need these cores to be
/// scheduled _within_ paras inherent, which requires marking the available bitfields as fully
/// available.
/// - `cores_with_backed_candidates` Mapping of `para_id`/`core_idx`/`group_idx` seed to number
/// of
/// - `cores_with_backed_candidates` Mapping of `para_id` seed to number of
/// validity votes.
fn create_backed_candidates(
&self,
cores_with_backed_candidates: &BTreeMap<u32, u32>,
elastic_paras: &BTreeMap<u32, u8>,
includes_code_upgrade: Option<u32>,
) -> Vec<BackedCandidate<T::Hash>> {
let validators =
self.validators.as_ref().expect("must have some validators prior to calling");
let config = configuration::Pallet::<T>::config();
let mut current_core_idx = 0u32;
cores_with_backed_candidates
.iter()
.map(|(seed, num_votes)| {
.flat_map(|(seed, num_votes)| {
assert!(*num_votes <= validators.len() as u32);
let (para_id, core_idx, group_idx) = self.create_indexes(*seed);
// This generates a pair and adds it to the keystore, returning just the public.
let collator_public = CollatorId::generate_pair(None);
let header = Self::header(self.block_number);
let relay_parent = header.hash();
let head_data = Self::mock_head_data();
let persisted_validation_data_hash = PersistedValidationData::<H256> {
parent_head: head_data.clone(),
relay_parent_number: self.relay_parent_number(),
relay_parent_storage_root: Default::default(),
max_pov_size: config.max_pov_size,
}
.hash();
let pov_hash = Default::default();
let validation_code_hash = mock_validation_code().hash();
let payload = collator_signature_payload(
&relay_parent,
&para_id,
&persisted_validation_data_hash,
&pov_hash,
&validation_code_hash,
);
let signature = collator_public.sign(&payload).unwrap();
// Set the head data so it can be used while validating the signatures on the
// candidate receipt.
paras::Pallet::<T>::heads_insert(&para_id, head_data.clone());
let mut past_code_meta = paras::ParaPastCodeMeta::<BlockNumberFor<T>>::default();
past_code_meta.note_replacement(0u32.into(), 0u32.into());
let group_validators = scheduler::Pallet::<T>::group_validators(group_idx).unwrap();
let candidate = CommittedCandidateReceipt::<T::Hash> {
descriptor: CandidateDescriptor::<T::Hash> {
para_id,
relay_parent,
collator: collator_public,
persisted_validation_data_hash,
pov_hash,
erasure_root: Default::default(),
signature,
para_head: head_data.hash(),
validation_code_hash,
},
commitments: CandidateCommitments::<u32> {
upward_messages: Default::default(),
horizontal_messages: Default::default(),
new_validation_code: includes_code_upgrade
.map(|v| ValidationCode(vec![42u8; v as usize])),
head_data,
processed_downward_messages: 0,
hrmp_watermark: self.relay_parent_number(),
},
};
let candidate_hash = candidate.hash();
let validity_votes: Vec<_> = group_validators
.iter()
.take(*num_votes as usize)
.map(|val_idx| {
let public = validators.get(*val_idx).unwrap();
let sig = UncheckedSigned::<CompactStatement>::benchmark_sign(
public,
CompactStatement::Valid(candidate_hash),
&self.signing_context(),
*val_idx,
let para_id = ParaId::from(*seed);
let mut prev_head = None;
// How many chained candidates do we want to build?
(0..elastic_paras.get(&seed).cloned().unwrap_or(1))
.map(|chain_idx| {
let core_idx = CoreIndex::from(current_core_idx);
// Advance core index.
current_core_idx += 1;
let group_idx = scheduler::Pallet::<T>::group_assigned_to_core(
core_idx,
self.block_number,
)
.benchmark_signature();
.unwrap();
ValidityAttestation::Explicit(sig.clone())
// This generates a pair and adds it to the keystore, returning just the
// public.
let collator_public = CollatorId::generate_pair(None);
let header = Self::header(self.block_number);
let relay_parent = header.hash();
// Set the head data so it can be used while validating the signatures on
// the candidate receipt.
let mut head_data = Self::mock_head_data();
if chain_idx == 0 {
// Only first parahead of the chain needs to be set in storage.
paras::Pallet::<T>::heads_insert(&para_id, head_data.clone());
} else {
// Make each candidate head data unique to avoid cycles.
head_data.0[0] = chain_idx;
}
let persisted_validation_data_hash = PersistedValidationData::<H256> {
// To form a chain we set parent head to previous block if any, or
// default to what is in storage already setup.
parent_head: prev_head.take().unwrap_or(head_data.clone()),
relay_parent_number: self.relay_parent_number(),
relay_parent_storage_root: Default::default(),
max_pov_size: config.max_pov_size,
}
.hash();
prev_head = Some(head_data.clone());
let pov_hash = Default::default();
let validation_code_hash = mock_validation_code().hash();
let payload = collator_signature_payload(
&relay_parent,
&para_id,
&persisted_validation_data_hash,
&pov_hash,
&validation_code_hash,
);
let signature = collator_public.sign(&payload).unwrap();
let mut past_code_meta =
paras::ParaPastCodeMeta::<BlockNumberFor<T>>::default();
past_code_meta.note_replacement(0u32.into(), 0u32.into());
let group_validators =
scheduler::Pallet::<T>::group_validators(group_idx).unwrap();
let candidate = CommittedCandidateReceipt::<T::Hash> {
descriptor: CandidateDescriptor::<T::Hash> {
para_id,
relay_parent,
collator: collator_public,
persisted_validation_data_hash,
pov_hash,
erasure_root: Default::default(),
signature,
para_head: head_data.hash(),
validation_code_hash,
},
commitments: CandidateCommitments::<u32> {
upward_messages: Default::default(),
horizontal_messages: Default::default(),
new_validation_code: includes_code_upgrade
.map(|v| ValidationCode(vec![42u8; v as usize])),
head_data,
processed_downward_messages: 0,
hrmp_watermark: self.relay_parent_number(),
},
};
let candidate_hash = candidate.hash();
let validity_votes: Vec<_> = group_validators
.iter()
.take(*num_votes as usize)
.map(|val_idx| {
let public = validators.get(*val_idx).unwrap();
let sig = UncheckedSigned::<CompactStatement>::benchmark_sign(
public,
CompactStatement::Valid(candidate_hash),
&self.signing_context(),
*val_idx,
)
.benchmark_signature();
ValidityAttestation::Explicit(sig.clone())
})
.collect();
// Check if the elastic scaling bit is set, if so we need to supply the core
// index in the generated candidate.
let core_idx = configuration::Pallet::<T>::config()
.node_features
.get(FeatureIndex::ElasticScalingMVP as usize)
.map(|_the_bit| core_idx);
BackedCandidate::<T::Hash>::new(
candidate,
validity_votes,
bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()],
core_idx,
)
})
.collect();
// Check if the elastic scaling bit is set, if so we need to supply the core index
// in the generated candidate.
let core_idx = configuration::Pallet::<T>::config()
.node_features
.get(FeatureIndex::ElasticScalingMVP as usize)
.map(|_the_bit| core_idx);
BackedCandidate::<T::Hash>::new(
candidate,
validity_votes,
bitvec::bitvec![u8, bitvec::order::Lsb0; 1; group_validators.len()],
core_idx,
)
.collect::<Vec<_>>()
})
.collect()
}
@@ -616,6 +671,8 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
self.validators.as_ref().expect("must have some validators prior to calling");
let dispute_sessions = dispute_sessions.as_ref();
let mut current_core_idx = start;
(start..last)
.map(|seed| {
let dispute_session_idx = (seed - start) as usize;
@@ -624,7 +681,14 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
.cloned()
.unwrap_or(self.target_session);
let (para_id, core_idx, group_idx) = self.create_indexes(seed);
let para_id = ParaId::from(seed);
let core_idx = CoreIndex::from(current_core_idx);
current_core_idx +=1;
let group_idx =
scheduler::Pallet::<T>::group_assigned_to_core(core_idx, self.block_number)
.unwrap();
let candidate_hash = CandidateHash(H256::from(byte32_slice_from(seed)));
let relay_parent = H256::from(byte32_slice_from(seed));
@@ -668,22 +732,29 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Build a scenario for testing or benchmarks.
///
/// Note that this API only allows building scenarios where the `backed_and_concluding_cores`
/// Note that this API only allows building scenarios where the `backed_and_concluding_paras`
/// are mutually exclusive with the cores for disputes. So
/// `backed_and_concluding_cores.len() + dispute_sessions.len()` must be less than the max
/// `backed_and_concluding_paras.len() + dispute_sessions.len()` must be less than the max
/// number of cores.
pub(crate) fn build(self) -> Bench<T> {
// Make sure relevant storage is cleared. This is just to get the asserts to work when
// running tests because it seems the storage is not cleared in between.
#[allow(deprecated)]
inclusion::PendingAvailabilityCommitments::<T>::remove_all(None);
#[allow(deprecated)]
inclusion::PendingAvailability::<T>::remove_all(None);
// We don't allow a core to have both disputes and be marked fully available at this block.
let max_cores = self.max_cores();
let max_cores = self.max_cores() as usize;
let extra_cores = self
.elastic_paras
.values()
.map(|count| *count as usize)
.sum::<usize>()
.saturating_sub(self.elastic_paras.len() as usize);
let used_cores =
(self.dispute_sessions.len() + self.backed_and_concluding_cores.len()) as u32;
self.dispute_sessions.len() + self.backed_and_concluding_paras.len() + extra_cores;
assert!(used_cores <= max_cores);
let fill_claimqueue = self.fill_claimqueue;
@@ -691,62 +762,95 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
// We are currently in Session 0, so these changes will take effect in Session 2.
Self::setup_para_ids(used_cores);
configuration::ActiveConfig::<T>::mutate(|c| {
c.scheduler_params.num_cores = used_cores;
c.scheduler_params.num_cores = used_cores as u32;
});
let validator_ids = Self::generate_validator_pairs(self.max_validators());
let target_session = SessionIndex::from(self.target_session);
let builder = self.setup_session(target_session, validator_ids, used_cores);
let builder = self.setup_session(target_session, validator_ids, used_cores, extra_cores);
let bitfields =
builder.create_availability_bitfields(&builder.backed_and_concluding_cores, used_cores);
let backed_candidates = builder
.create_backed_candidates(&builder.backed_and_concluding_cores, builder.code_upgrade);
let bitfields = builder.create_availability_bitfields(
&builder.backed_and_concluding_paras,
&builder.elastic_paras,
used_cores,
);
let backed_candidates = builder.create_backed_candidates(
&builder.backed_and_concluding_paras,
&builder.elastic_paras,
builder.code_upgrade,
);
let disputes = builder.create_disputes(
builder.backed_and_concluding_cores.len() as u32,
used_cores,
builder.backed_and_concluding_paras.len() as u32,
used_cores as u32,
builder.dispute_sessions.as_slice(),
);
let mut disputed_cores = (builder.backed_and_concluding_paras.len() as u32..
used_cores as u32)
.into_iter()
.map(|idx| (idx, 0))
.collect::<BTreeMap<_, _>>();
let mut all_cores = builder.backed_and_concluding_paras.clone();
all_cores.append(&mut disputed_cores);
assert_eq!(
inclusion::PendingAvailabilityCommitments::<T>::iter().count(),
used_cores as usize,
);
assert_eq!(inclusion::PendingAvailability::<T>::iter().count(), used_cores as usize,);
// Mark all the used cores as occupied. We expect that there are
// `backed_and_concluding_cores` that are pending availability and that there are
// `used_cores - backed_and_concluding_cores ` which are about to be disputed.
// `backed_and_concluding_paras` that are pending availability and that there are
// `used_cores - backed_and_concluding_paras ` which are about to be disputed.
let now = <frame_system::Pallet<T>>::block_number() + One::one();
let cores = (0..used_cores)
.into_iter()
.map(|i| {
let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
// Load an assignment into provider so that one is present to pop
let assignment = <T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
CoreIndex(i),
ParaId::from(i),
);
CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl))
let mut core_idx = 0u32;
let elastic_paras = &builder.elastic_paras;
// Assign potentially multiple cores to the same parachain.
let cores = all_cores
.iter()
.flat_map(|(para_id, _)| {
(0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
.map(|_para_local_core_idx| {
let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
// Load an assignment into provider so that one is present to pop
let assignment =
<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
CoreIndex(core_idx),
ParaId::from(*para_id),
);
core_idx += 1;
CoreOccupied::Paras(ParasEntry::new(assignment, now + ttl))
})
.collect::<Vec<CoreOccupied<_>>>()
})
.collect();
.collect::<Vec<CoreOccupied<_>>>();
scheduler::AvailabilityCores::<T>::set(cores);
core_idx = 0u32;
if fill_claimqueue {
// Add items to claim queue as well:
let cores = (0..used_cores)
.into_iter()
.map(|i| {
let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
// Load an assignment into provider so that one is present to pop
let assignment =
<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
CoreIndex(i),
ParaId::from(i),
);
(CoreIndex(i), [ParasEntry::new(assignment, now + ttl)].into())
let cores = all_cores
.keys()
.flat_map(|para_id| {
(0..elastic_paras.get(&para_id).cloned().unwrap_or(1))
.map(|_para_local_core_idx| {
let ttl = configuration::Pallet::<T>::config().scheduler_params.ttl;
// Load an assignment into provider so that one is present to pop
let assignment =
<T as scheduler::Config>::AssignmentProvider::get_mock_assignment(
CoreIndex(core_idx),
ParaId::from(*para_id),
);
let entry = (
CoreIndex(core_idx),
[ParasEntry::new(assignment, now + ttl)].into(),
);
core_idx += 1;
entry
})
.collect::<Vec<(CoreIndex, VecDeque<ParasEntry<_>>)>>()
})
.collect();
.collect::<BTreeMap<CoreIndex, VecDeque<ParasEntry<_>>>>();
scheduler::ClaimQueue::<T>::set(cores);
}
@@ -65,7 +65,7 @@ benchmarks! {
.collect();
let scenario = BenchBuilder::<T>::new()
.set_backed_and_concluding_cores(cores_with_backed)
.set_backed_and_concluding_paras(cores_with_backed)
.build();
let mut benchmark = scenario.data.clone();
@@ -110,7 +110,7 @@ benchmarks! {
.collect();
let scenario = BenchBuilder::<T>::new()
.set_backed_and_concluding_cores(cores_with_backed.clone())
.set_backed_and_concluding_paras(cores_with_backed.clone())
.build();
let mut benchmark = scenario.data.clone();
@@ -165,7 +165,7 @@ benchmarks! {
.collect();
let scenario = BenchBuilder::<T>::new()
.set_backed_and_concluding_cores(cores_with_backed.clone())
.set_backed_and_concluding_paras(cores_with_backed.clone())
.set_code_upgrade(v)
.build();
@@ -64,7 +64,7 @@ mod enter {
)
.set_max_validators_per_core(num_validators_per_core)
.set_dispute_statements(dispute_statements)
.set_backed_and_concluding_cores(backed_and_concluding)
.set_backed_and_concluding_paras(backed_and_concluding)
.set_dispute_sessions(&dispute_sessions[..])
.set_fill_claimqueue(fill_claimqueue);