Request based availability distribution (#2423)

* WIP

* availability distribution, still very wip.

Work on the requesting side of things.

* Some docs on what I intend to do.

* Checkpoint of session cache implementation

as I will likely replace it with something smarter.

* More work, mostly on cache

and getting things to type check.

* Only derive MallocSizeOf and Debug for std.

* availability-distribution: Cache feature complete.

* Sketch out logic in `FetchTask` for actual fetching.

- Compile fixes.
- Cleanup.

* Format cleanup.

* More format fixes.

* Almost feature complete `fetch_task`.

Missing:

- Check for cancel
- Actual querying of peer ids.

* Finish FetchTask so far.

* Directly use AuthorityDiscoveryId in protocol and cache.

* Resolve `AuthorityDiscoveryId` on sending requests.

* Rework fetch_task

- also make it impossible to check the wrong chunk index.
- Export needed function in validator_discovery.

* From<u32> implementation for `ValidatorIndex`.

* Fixes and more integration work.

* Make session cache proper lru cache.

* Use proper lru cache.

* Requester finished.

* ProtocolState -> Requester

Also make sure not to fetch our own chunk.

* Cleanup + fixes.

* Remove unused functions

- FetchTask::is_finished
- SessionCache::fetch_session_info

* availability-distribution responding side.

* Cleanup + Fixes.

* More fixes.

* More fixes.

adder-collator is running!

* Some docs.

* Docs.

* Fix reporting of bad guys.

* Fix tests

* Make all tests compile.

* Fix test.

* Cleanup + get rid of some warnings.

* state -> requester

* Mostly doc fixes.

* Fix test suite.

* Get rid of now redundant message types.

* WIP

* Rob's review remarks.

* Fix test suite.

* core.relay_parent -> leaf for session request.

* Style fix.

* Decrease request timeout.

* Cleanup obsolete errors.

* Metrics + don't fail on non fatal errors.

* requester.rs -> requester/mod.rs

* Panic on invalid BadValidator report.

* Fix indentation.

* Use typed default timeout constant.

* Make channel size 0, as each sender gets one slot anyway.

* Fix incorrect metrics initialization.

* Fix build after merge.

* More fixes.

* Hopefully valid metrics names.

* Better metrics names.

* Some tests that already work.

* Slightly better docs.

* Some more tests.

* Fix network bridge test.
This commit is contained in:
Robert Klotzner
2021-02-26 18:58:07 +01:00
committed by GitHub
parent 241b1f12a7
commit 48409e5548
45 changed files with 2037 additions and 1523 deletions
+4
View File
@@ -5087,15 +5087,19 @@ version = "0.1.0"
dependencies = [
"assert_matches",
"futures 0.3.12",
"lru",
"maplit",
"parity-scale-codec",
"polkadot-erasure-coding",
"polkadot-node-core-runtime-api",
"polkadot-node-network-protocol",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"rand 0.8.3",
"sc-keystore",
"sc-network",
"sp-application-crypto",
"sp-core",
"sp-keyring",
@@ -306,7 +306,7 @@ pub fn tranches_to_approve(
assignments.iter()
.map(|(v_index, tick)| (v_index, tick.saturating_sub(clock_drift) + no_show_duration))
.filter(|&(v_index, no_show_at)| {
let has_approved = approvals.get(*v_index as usize).map(|b| *b).unwrap_or(false);
let has_approved = approvals.get(v_index.0 as usize).map(|b| *b).unwrap_or(false);
let is_no_show = !has_approved && no_show_at <= drifted_tick_now;
@@ -348,7 +348,7 @@ pub fn tranches_to_approve(
mod tests {
use super::*;
use polkadot_primitives::v1::GroupIndex;
use polkadot_primitives::v1::{GroupIndex, ValidatorIndex};
use bitvec::bitvec;
use bitvec::order::Lsb0 as BitOrderLsb0;
@@ -393,7 +393,7 @@ mod tests {
}.into();
for i in 0..6 {
candidate.mark_approval(i);
candidate.mark_approval(ValidatorIndex(i));
}
let approval_entry = approval_db::v1::ApprovalEntry {
@@ -406,7 +406,7 @@ mod tests {
assert!(!check_approval(&candidate, &approval_entry, RequiredTranches::All));
candidate.mark_approval(6);
candidate.mark_approval(ValidatorIndex(6));
assert!(check_approval(&candidate, &approval_entry, RequiredTranches::All));
}
@@ -420,22 +420,22 @@ mod tests {
}.into();
for i in 0..6 {
candidate.mark_approval(i);
candidate.mark_approval(ValidatorIndex(i));
}
let approval_entry = approval_db::v1::ApprovalEntry {
tranches: vec![
approval_db::v1::TrancheEntry {
tranche: 0,
assignments: (0..4).map(|i| (i, 0.into())).collect(),
assignments: (0..4).map(|i| (ValidatorIndex(i), 0.into())).collect(),
},
approval_db::v1::TrancheEntry {
tranche: 1,
assignments: (4..6).map(|i| (i, 1.into())).collect(),
assignments: (4..6).map(|i| (ValidatorIndex(i), 1.into())).collect(),
},
approval_db::v1::TrancheEntry {
tranche: 2,
assignments: (6..10).map(|i| (i, 0.into())).collect(),
assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(),
},
],
assignments: bitvec![BitOrderLsb0, u8; 1; 10],
@@ -487,13 +487,13 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(0, 1, block_tick);
approval_entry.import_assignment(0,ValidatorIndex(0), block_tick);
approval_entry.import_assignment(0,ValidatorIndex(1), block_tick);
approval_entry.import_assignment(1, 2, block_tick + 1);
approval_entry.import_assignment(1, 3, block_tick + 1);
approval_entry.import_assignment(1,ValidatorIndex(2), block_tick + 1);
approval_entry.import_assignment(1,ValidatorIndex(3), block_tick + 1);
approval_entry.import_assignment(2, 4, block_tick + 2);
approval_entry.import_assignment(2,ValidatorIndex(4), block_tick + 2);
let approvals = bitvec![BitOrderLsb0, u8; 1; 5];
@@ -524,8 +524,8 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(1, 2, block_tick);
approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
let approvals = bitvec![BitOrderLsb0, u8; 0; 10];
@@ -562,10 +562,10 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(0, 1, block_tick);
approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
approval_entry.import_assignment(1, 2, block_tick);
approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
let mut approvals = bitvec![BitOrderLsb0, u8; 0; 10];
approvals.set(0, true);
@@ -605,11 +605,11 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(0, 1, block_tick);
approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
approval_entry.import_assignment(1, 2, block_tick);
approval_entry.import_assignment(1, 3, block_tick);
approval_entry.import_assignment(1, ValidatorIndex(2), block_tick);
approval_entry.import_assignment(1, ValidatorIndex(3), block_tick);
let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
approvals.set(0, true);
@@ -670,14 +670,14 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(0, 1, block_tick);
approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
approval_entry.import_assignment(1, 2, block_tick + 1);
approval_entry.import_assignment(1, 3, block_tick + 1);
approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1);
approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1);
approval_entry.import_assignment(2, 4, block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, 5, block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2);
let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
approvals.set(0, true);
@@ -757,14 +757,14 @@ mod tests {
approved: false,
}.into();
approval_entry.import_assignment(0, 0, block_tick);
approval_entry.import_assignment(0, 1, block_tick);
approval_entry.import_assignment(0, ValidatorIndex(0), block_tick);
approval_entry.import_assignment(0, ValidatorIndex(1), block_tick);
approval_entry.import_assignment(1, 2, block_tick + 1);
approval_entry.import_assignment(1, 3, block_tick + 1);
approval_entry.import_assignment(1, ValidatorIndex(2), block_tick + 1);
approval_entry.import_assignment(1, ValidatorIndex(3), block_tick + 1);
approval_entry.import_assignment(2, 4, block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, 5, block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, ValidatorIndex(4), block_tick + no_show_duration + 2);
approval_entry.import_assignment(2, ValidatorIndex(5), block_tick + no_show_duration + 2);
let mut approvals = bitvec![BitOrderLsb0, u8; 0; n_validators];
approvals.set(0, true);
@@ -813,7 +813,7 @@ mod tests {
},
);
approval_entry.import_assignment(3, 6, block_tick);
approval_entry.import_assignment(3, ValidatorIndex(6), block_tick);
approvals.set(6, true);
let tranche_now = no_show_duration as DelayTranche + 3;
@@ -253,7 +253,7 @@ pub(crate) fn compute_assignments(
let (index, assignments_key): (ValidatorIndex, AssignmentPair) = {
let key = config.assignment_keys.iter().enumerate()
.find_map(|(i, p)| match keystore.key_pair(p) {
Ok(Some(pair)) => Some((i as ValidatorIndex, pair)),
Ok(Some(pair)) => Some((ValidatorIndex(i as _), pair)),
Ok(None) => None,
Err(sc_keystore::Error::Unavailable) => None,
Err(sc_keystore::Error::Io(e)) if e.kind() == std::io::ErrorKind::NotFound => None,
@@ -422,7 +422,7 @@ pub(crate) fn check_assignment_cert(
backing_group: GroupIndex,
) -> Result<DelayTranche, InvalidAssignment> {
let validator_public = config.assignment_keys
.get(validator_index as usize)
.get(validator_index.0 as usize)
.ok_or(InvalidAssignment)?;
let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice())
@@ -540,7 +540,7 @@ mod tests {
(0..n_groups).map(|i| {
(i * size .. (i + 1) *size)
.chain(if i < big_groups { Some(scraps + i) } else { None })
.map(|j| j as ValidatorIndex)
.map(|j| ValidatorIndex(j as _))
.collect::<Vec<_>>()
}).collect()
}
@@ -570,7 +570,7 @@ mod tests {
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
]),
validator_groups: vec![vec![0], vec![1, 2]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1), ValidatorIndex(2)]],
n_cores: 2,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
@@ -601,7 +601,7 @@ mod tests {
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
]),
validator_groups: vec![vec![0], vec![1, 2]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1), ValidatorIndex(2)]],
n_cores: 2,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
@@ -693,7 +693,7 @@ mod tests {
group: group_for_core(core.0 as _),
cert: assignment.cert,
own_group: GroupIndex(0),
val_index: 0,
val_index: ValidatorIndex(0),
config: config.clone(),
};
@@ -743,7 +743,7 @@ mod tests {
#[test]
fn check_rejects_nonexistent_key() {
check_mutated_assignments(200, 100, 25, |m| {
m.val_index += 200;
m.val_index.0 += 200;
Some(false)
});
}
@@ -707,6 +707,7 @@ mod tests {
use super::*;
use polkadot_node_subsystem_test_helpers::make_subsystem_context;
use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
use polkadot_primitives::v1::ValidatorIndex;
use polkadot_subsystem::messages::AllMessages;
use sp_core::testing::TaskExecutor;
use sp_runtime::{Digest, DigestItem};
@@ -1561,7 +1562,7 @@ mod tests {
validators: vec![Sr25519Keyring::Alice.public().into(); 6],
discovery_keys: Vec::new(),
assignment_keys: Vec::new(),
validator_groups: vec![vec![0; 5], vec![0; 2]],
validator_groups: vec![vec![ValidatorIndex(0); 5], vec![ValidatorIndex(0); 2]],
n_cores: 6,
needed_approvals: 2,
zeroth_delay_tranche_width: irrelevant,
@@ -864,7 +864,7 @@ fn check_and_import_assignment(
tracing::trace!(
target: LOG_TARGET,
"Imported assignment from validator {} on candidate {:?}",
assignment.validator,
assignment.validator.0,
(assigned_candidate_hash, candidate_entry.candidate_receipt().descriptor.para_id),
);
@@ -928,7 +928,7 @@ fn check_and_import_approval<T>(
block_entry.session(),
);
let pubkey = match session_info.validators.get(approval.validator as usize) {
let pubkey = match session_info.validators.get(approval.validator.0 as usize) {
Some(k) => k,
None => respond_early!(ApprovalCheckResult::Bad)
};
@@ -1492,13 +1492,13 @@ async fn issue_approval(
}
};
let validator_pubkey = match session_info.validators.get(validator_index as usize) {
let validator_pubkey = match session_info.validators.get(validator_index.0 as usize) {
Some(p) => p,
None => {
tracing::warn!(
target: LOG_TARGET,
"Validator index {} out of bounds in session {}",
validator_index,
validator_index.0,
block_entry.session(),
);
@@ -1517,7 +1517,7 @@ async fn issue_approval(
tracing::warn!(
target: LOG_TARGET,
"Could not issue approval signature with validator index {} in session {}. Assignment key present but not validator key?",
validator_index,
validator_index.0,
block_entry.session(),
);
@@ -110,7 +110,7 @@ impl ApprovalEntry {
/// Whether a validator is already assigned.
pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool {
self.assignments.get(validator_index as usize).map(|b| *b).unwrap_or(false)
self.assignments.get(validator_index.0 as usize).map(|b| *b).unwrap_or(false)
}
/// Import an assignment. No-op if already assigned on the same tranche.
@@ -143,7 +143,7 @@ impl ApprovalEntry {
};
self.tranches[idx].assignments.push((validator_index, tick_now));
self.assignments.set(validator_index as _, true);
self.assignments.set(validator_index.0 as _, true);
}
// Produce a bitvec indicating the assignments of all validators up to and
@@ -153,7 +153,7 @@ impl ApprovalEntry {
.take_while(|e| e.tranche <= tranche)
.fold(bitvec::bitvec![BitOrderLsb0, u8; 0; self.assignments.len()], |mut a, e| {
for &(v, _) in &e.assignments {
a.set(v as _, true);
a.set(v.0 as _, true);
}
a
@@ -235,8 +235,8 @@ impl CandidateEntry {
/// Note that a given validator has approved. Return the previous approval state.
pub fn mark_approval(&mut self, validator: ValidatorIndex) -> bool {
let prev = self.approvals.get(validator as usize).map(|b| *b).unwrap_or(false);
self.approvals.set(validator as usize, true);
let prev = self.approvals.get(validator.0 as usize).map(|b| *b).unwrap_or(false);
self.approvals.set(validator.0 as usize, true);
prev
}
+52 -52
View File
@@ -243,7 +243,7 @@ impl Default for StateConfig {
slot: Slot::from(0),
tick: 0,
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
validator_groups: vec![vec![0], vec![1]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
needed_approvals: 1,
no_show_slots: 2,
}
@@ -364,7 +364,7 @@ fn rejects_bad_assignment() {
let block_hash = Hash::repeat_byte(0x01);
let assignment_good = IndirectAssignmentCert {
block_hash,
validator: 0,
validator: ValidatorIndex(0),
cert: garbage_assignment_cert(
AssignmentCertKind::RelayVRFModulo {
sample: 0,
@@ -386,7 +386,7 @@ fn rejects_bad_assignment() {
// unknown hash
let assignment = IndirectAssignmentCert {
block_hash: Hash::repeat_byte(0x02),
validator: 0,
validator: ValidatorIndex(0),
cert: garbage_assignment_cert(
AssignmentCertKind::RelayVRFModulo {
sample: 0,
@@ -423,7 +423,7 @@ fn rejects_assignment_in_future() {
let candidate_index = 0;
let assignment = IndirectAssignmentCert {
block_hash,
validator: 0,
validator: ValidatorIndex(0),
cert: garbage_assignment_cert(
AssignmentCertKind::RelayVRFModulo {
sample: 0,
@@ -467,7 +467,7 @@ fn rejects_assignment_with_unknown_candidate() {
let candidate_index = 1;
let assignment = IndirectAssignmentCert {
block_hash,
validator: 0,
validator: ValidatorIndex(0),
cert: garbage_assignment_cert(
AssignmentCertKind::RelayVRFModulo {
sample: 0,
@@ -493,7 +493,7 @@ fn assignment_import_updates_candidate_entry_and_schedules_wakeup() {
let candidate_index = 0;
let assignment = IndirectAssignmentCert {
block_hash,
validator: 0,
validator: ValidatorIndex(0),
cert: garbage_assignment_cert(
AssignmentCertKind::RelayVRFModulo {
sample: 0,
@@ -534,7 +534,7 @@ fn assignment_import_updates_candidate_entry_and_schedules_wakeup() {
actions.get(1).unwrap(),
Action::WriteCandidateEntry(c, e) => {
assert_eq!(c, &candidate_hash);
assert!(e.approval_entry(&block_hash).unwrap().is_assigned(0));
assert!(e.approval_entry(&block_hash).unwrap().is_assigned(ValidatorIndex(0)));
}
);
}
@@ -554,7 +554,7 @@ fn rejects_approval_before_assignment() {
let vote = IndirectSignedApprovalVote {
block_hash,
candidate_index: 0,
validator: 0,
validator: ValidatorIndex(0),
signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
};
@@ -583,7 +583,7 @@ fn rejects_approval_if_no_candidate_entry() {
let vote = IndirectSignedApprovalVote {
block_hash,
candidate_index: 0,
validator: 0,
validator: ValidatorIndex(0),
signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
};
@@ -603,7 +603,7 @@ fn rejects_approval_if_no_candidate_entry() {
fn rejects_approval_if_no_block_entry() {
let block_hash = Hash::repeat_byte(0x01);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index = 0;
let validator_index = ValidatorIndex(0);
let mut state = State {
assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
@@ -615,7 +615,7 @@ fn rejects_approval_if_no_block_entry() {
let vote = IndirectSignedApprovalVote {
block_hash,
candidate_index: 0,
validator: 0,
validator: ValidatorIndex(0),
signature: sign_approval(Sr25519Keyring::Alice, candidate_hash, 1),
};
@@ -640,7 +640,7 @@ fn rejects_approval_if_no_block_entry() {
fn accepts_and_imports_approval_after_assignment() {
let block_hash = Hash::repeat_byte(0x01);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index = 0;
let validator_index = ValidatorIndex(0);
let candidate_index = 0;
let mut state = State {
@@ -649,7 +649,7 @@ fn accepts_and_imports_approval_after_assignment() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
..Default::default()
})
@@ -680,7 +680,7 @@ fn accepts_and_imports_approval_after_assignment() {
actions.get(0).unwrap(),
Action::WriteCandidateEntry(c_hash, c_entry) => {
assert_eq!(c_hash, &candidate_hash);
assert!(c_entry.approvals().get(validator_index as usize).unwrap());
assert!(c_entry.approvals().get(validator_index.0 as usize).unwrap());
assert!(!c_entry.approval_entry(&block_hash).unwrap().is_approved());
}
);
@@ -690,7 +690,7 @@ fn accepts_and_imports_approval_after_assignment() {
fn second_approval_import_is_no_op() {
let block_hash = Hash::repeat_byte(0x01);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index = 0;
let validator_index = ValidatorIndex(0);
let candidate_index = 0;
let mut state = State {
@@ -699,7 +699,7 @@ fn second_approval_import_is_no_op() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
..Default::default()
})
@@ -734,8 +734,8 @@ fn second_approval_import_is_no_op() {
fn check_and_apply_full_approval_sets_flag_and_bit() {
let block_hash = Hash::repeat_byte(0x01);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index_a = 0;
let validator_index_b = 1;
let validator_index_a = ValidatorIndex(0);
let validator_index_b = ValidatorIndex(1);
let mut state = State {
assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
@@ -743,7 +743,7 @@ fn check_and_apply_full_approval_sets_flag_and_bit() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
..Default::default()
})
@@ -795,8 +795,8 @@ fn check_and_apply_full_approval_sets_flag_and_bit() {
fn check_and_apply_full_approval_does_not_load_cached_block_from_db() {
let block_hash = Hash::repeat_byte(0x01);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index_a = 0;
let validator_index_b = 1;
let validator_index_a = ValidatorIndex(0);
let validator_index_b = ValidatorIndex(1);
let mut state = State {
assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
@@ -804,7 +804,7 @@ fn check_and_apply_full_approval_does_not_load_cached_block_from_db() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
..Default::default()
})
@@ -867,7 +867,7 @@ fn assignment_triggered_by_all_with_less_than_supermajority() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 1,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -886,15 +886,15 @@ fn assignment_triggered_by_all_with_less_than_supermajority() {
candidate_entry
.approval_entry_mut(&block_hash)
.unwrap()
.import_assignment(0, 0, 0);
.import_assignment(0, ValidatorIndex(0), 0);
candidate_entry
.approval_entry_mut(&block_hash)
.unwrap()
.import_assignment(0, 1, 0);
.import_assignment(0, ValidatorIndex(1), 0);
candidate_entry.mark_approval(0);
candidate_entry.mark_approval(1);
candidate_entry.mark_approval(ValidatorIndex(0));
candidate_entry.mark_approval(ValidatorIndex(1));
let tranche_now = 1;
assert!(should_trigger_assignment(
@@ -918,7 +918,7 @@ fn assignment_not_triggered_by_all_with_supermajority() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 1,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -937,21 +937,21 @@ fn assignment_not_triggered_by_all_with_supermajority() {
candidate_entry
.approval_entry_mut(&block_hash)
.unwrap()
.import_assignment(0, 0, 0);
.import_assignment(0, ValidatorIndex(0), 0);
candidate_entry
.approval_entry_mut(&block_hash)
.unwrap()
.import_assignment(0, 1, 0);
.import_assignment(0, ValidatorIndex(1), 0);
candidate_entry
.approval_entry_mut(&block_hash)
.unwrap()
.import_assignment(0, 2, 0);
.import_assignment(0, ValidatorIndex(2), 0);
candidate_entry.mark_approval(0);
candidate_entry.mark_approval(1);
candidate_entry.mark_approval(2);
candidate_entry.mark_approval(ValidatorIndex(0));
candidate_entry.mark_approval(ValidatorIndex(1));
candidate_entry.mark_approval(ValidatorIndex(2));
let tranche_now = 1;
assert!(!should_trigger_assignment(
@@ -975,7 +975,7 @@ fn assignment_not_triggered_if_already_triggered() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 1,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: true,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1012,7 +1012,7 @@ fn assignment_not_triggered_by_exact() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 1,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1050,7 +1050,7 @@ fn assignment_not_triggered_more_than_maximum() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: maximum_broadcast + 1,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1093,7 +1093,7 @@ fn assignment_triggered_if_at_maximum() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: maximum_broadcast,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1136,7 +1136,7 @@ fn assignment_not_triggered_if_at_maximum_but_clock_is_before() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: maximum_broadcast,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1179,7 +1179,7 @@ fn assignment_not_triggered_if_at_maximum_but_clock_is_before_with_drift() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: maximum_broadcast,
validator_index: 4,
validator_index: ValidatorIndex(4),
triggered: false,
}),
assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; 4],
@@ -1277,8 +1277,8 @@ fn block_not_approved_until_all_candidates_approved() {
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let candidate_hash_2 = CandidateHash(Hash::repeat_byte(0xDD));
let validator_index_a = 0;
let validator_index_b = 1;
let validator_index_a = ValidatorIndex(0);
let validator_index_b = ValidatorIndex(1);
let mut state = State {
assignment_criteria: Box::new(MockAssignmentCriteria::check_only(|| {
@@ -1286,7 +1286,7 @@ fn block_not_approved_until_all_candidates_approved() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
..Default::default()
})
@@ -1359,8 +1359,8 @@ fn candidate_approval_applied_to_all_blocks() {
let block_hash = Hash::repeat_byte(0x01);
let block_hash_2 = Hash::repeat_byte(0x02);
let candidate_hash = CandidateHash(Hash::repeat_byte(0xCC));
let validator_index_a = 0;
let validator_index_b = 1;
let validator_index_a = ValidatorIndex(0);
let validator_index_b = ValidatorIndex(1);
let slot = Slot::from(1);
let session_index = 1;
@@ -1371,7 +1371,7 @@ fn candidate_approval_applied_to_all_blocks() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie],
validator_groups: vec![vec![0, 1], vec![2]],
validator_groups: vec![vec![ValidatorIndex(0), ValidatorIndex(1)], vec![ValidatorIndex(2)]],
needed_approvals: 2,
session_index,
slot,
@@ -1474,7 +1474,7 @@ fn approved_ancestor_all_approved() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
validator_groups: vec![vec![0], vec![1]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
needed_approvals: 2,
session_index,
slot,
@@ -1556,7 +1556,7 @@ fn approved_ancestor_missing_approval() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
validator_groups: vec![vec![0], vec![1]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
needed_approvals: 2,
session_index,
slot,
@@ -1633,7 +1633,7 @@ fn process_wakeup_trigger_assignment_launch_approval() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
validator_groups: vec![vec![0], vec![1]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
needed_approvals: 2,
session_index,
slot,
@@ -1660,7 +1660,7 @@ fn process_wakeup_trigger_assignment_launch_approval() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 0,
validator_index: 0,
validator_index: ValidatorIndex(0),
triggered: false,
}.into());
@@ -1720,7 +1720,7 @@ fn process_wakeup_schedules_wakeup() {
})),
..some_state(StateConfig {
validators: vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob],
validator_groups: vec![vec![0], vec![1]],
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
needed_approvals: 2,
session_index,
slot,
@@ -1738,7 +1738,7 @@ fn process_wakeup_schedules_wakeup() {
AssignmentCertKind::RelayVRFModulo { sample: 0 }
),
tranche: 10,
validator_index: 0,
validator_index: ValidatorIndex(0),
triggered: false,
}.into());
+5 -5
View File
@@ -968,7 +968,7 @@ fn process_message(
AvailabilityStoreMessage::QueryChunkAvailability(candidate, validator_index, tx) => {
let a = load_meta(&subsystem.db, &candidate)?
.map_or(false, |m|
*m.chunks_stored.get(validator_index as usize).as_deref().unwrap_or(&false)
*m.chunks_stored.get(validator_index.0 as usize).as_deref().unwrap_or(&false)
);
let _ = tx.send(a);
}
@@ -1034,10 +1034,10 @@ fn store_chunk(
None => return Ok(false), // we weren't informed of this candidate by import events.
};
match meta.chunks_stored.get(chunk.index as usize).map(|b| *b) {
match meta.chunks_stored.get(chunk.index.0 as usize).map(|b| *b) {
Some(true) => return Ok(true), // already stored.
Some(false) => {
meta.chunks_stored.set(chunk.index as usize, true);
meta.chunks_stored.set(chunk.index.0 as usize, true);
write_chunk(&mut tx, &candidate_hash, chunk.index, &chunk);
write_meta(&mut tx, &candidate_hash, &meta);
@@ -1090,7 +1090,7 @@ fn store_available_data(
.map(|(index, (chunk, proof))| ErasureChunk {
chunk: chunk.clone(),
proof,
index: index as u32,
index: ValidatorIndex(index as u32),
});
for chunk in erasure_chunks {
@@ -1142,7 +1142,7 @@ fn prune_all(db: &Arc<dyn KeyValueDB>, clock: &dyn Clock) -> Result<(), Error> {
// delete chunks.
for (i, b) in meta.chunks_stored.iter().enumerate() {
if *b {
delete_chunk(&mut tx, &candidate_hash, i as _);
delete_chunk(&mut tx, &candidate_hash, ValidatorIndex(i as _));
}
}
+13 -13
View File
@@ -260,7 +260,7 @@ fn runtime_api_error_does_not_stop_the_subsystem() {
// but that's fine, we're still alive
let (tx, rx) = oneshot::channel();
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let validator_index = ValidatorIndex(5);
let query_chunk = AvailabilityStoreMessage::QueryChunk(
candidate_hash,
validator_index,
@@ -281,7 +281,7 @@ fn store_chunk_works() {
let TestHarness { mut virtual_overseer } = test_harness;
let relay_parent = Hash::repeat_byte(32);
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let validator_index = ValidatorIndex(5);
let n_validators = 10;
let chunk = ErasureChunk {
@@ -333,7 +333,7 @@ fn store_chunk_does_nothing_if_no_entry_already() {
let TestHarness { mut virtual_overseer } = test_harness;
let relay_parent = Hash::repeat_byte(32);
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let validator_index = ValidatorIndex(5);
let chunk = ErasureChunk {
chunk: vec![1, 2, 3],
@@ -372,7 +372,7 @@ fn query_chunk_checks_meta() {
test_harness(TestState::default(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = CandidateHash(Hash::repeat_byte(33));
let validator_index = 5;
let validator_index = ValidatorIndex(5);
let n_validators = 10;
// Ensure an entry already exists. In reality this would come from watching
@@ -382,7 +382,7 @@ fn query_chunk_checks_meta() {
data_available: false,
chunks_stored: {
let mut v = bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators];
v.set(validator_index as usize, true);
v.set(validator_index.0 as usize, true);
v
},
state: State::Unavailable(BETimestamp(0)),
@@ -402,7 +402,7 @@ fn query_chunk_checks_meta() {
let (tx, rx) = oneshot::channel();
let query_chunk = AvailabilityStoreMessage::QueryChunkAvailability(
candidate_hash,
validator_index + 1,
ValidatorIndex(validator_index.0 + 1),
tx,
);
@@ -418,7 +418,7 @@ fn store_block_works() {
test_harness(test_state.clone(), store.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;
let candidate_hash = CandidateHash(Hash::repeat_byte(1));
let validator_index = 5;
let validator_index = ValidatorIndex(5);
let n_validators = 10;
let pov = PoV {
@@ -455,7 +455,7 @@ fn store_block_works() {
let branch = branches.nth(5).unwrap();
let expected_chunk = ErasureChunk {
chunk: branch.1.to_vec(),
index: 5,
index: ValidatorIndex(5),
proof: branch.0,
};
@@ -497,10 +497,10 @@ fn store_pov_and_query_chunk_works() {
assert_eq!(rx.await.unwrap(), Ok(()));
for validator_index in 0..n_validators {
let chunk = query_chunk(&mut virtual_overseer, candidate_hash, validator_index).await.unwrap();
for i in 0..n_validators {
let chunk = query_chunk(&mut virtual_overseer, candidate_hash, ValidatorIndex(i as _)).await.unwrap();
assert_eq!(chunk.chunk, chunks_expected[validator_index as usize]);
assert_eq!(chunk.chunk, chunks_expected[i as usize]);
}
});
}
@@ -842,7 +842,7 @@ async fn query_available_data(
async fn query_chunk(
virtual_overseer: &mut test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>,
candidate_hash: CandidateHash,
index: u32,
index: ValidatorIndex,
) -> Option<ErasureChunk> {
let (tx, rx) = oneshot::channel();
@@ -859,7 +859,7 @@ async fn query_all_chunks(
expect_present: bool,
) -> bool {
for i in 0..n_validators {
if query_chunk(virtual_overseer, candidate_hash, i).await.is_some() != expect_present {
if query_chunk(virtual_overseer, candidate_hash, ValidatorIndex(i)).await.is_some() != expect_present {
return false
}
}
+22 -21
View File
@@ -899,7 +899,7 @@ impl CandidateBackingJob {
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> {
let idx = statement.validator_index() as usize;
let idx = statement.validator_index().0 as usize;
if self.table_context.validators.len() > idx {
statement.check_signature(
@@ -1282,7 +1282,8 @@ mod tests {
let validator_public = validator_pubkeys(&validators);
let validator_groups = vec![vec![2, 0, 3, 5], vec![1], vec![4]];
let validator_groups = vec![vec![2, 0, 3, 5], vec![1], vec![4]]
.into_iter().map(|g| g.into_iter().map(ValidatorIndex).collect()).collect();
let group_rotation_info = GroupRotationInfo {
session_start_block: 0,
group_rotation_frequency: 100,
@@ -1599,7 +1600,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate_a.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -1607,7 +1608,7 @@ mod tests {
&test_state.keystore,
Statement::Valid(candidate_a_hash),
&test_state.signing_context,
5,
ValidatorIndex(5),
&public1.into(),
).await.ok().flatten().expect("should be signed");
@@ -1741,7 +1742,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate_a.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -1749,7 +1750,7 @@ mod tests {
&test_state.keystore,
Statement::Valid(candidate_a_hash),
&test_state.signing_context,
5,
ValidatorIndex(5),
&public1.into(),
).await.ok().flatten().expect("should be signed");
@@ -1757,7 +1758,7 @@ mod tests {
&test_state.keystore,
Statement::Valid(candidate_a_hash),
&test_state.signing_context,
3,
ValidatorIndex(3),
&public3.into(),
).await.ok().flatten().expect("should be signed");
@@ -1894,7 +1895,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate_a.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -1902,7 +1903,7 @@ mod tests {
&test_state.keystore,
Statement::Invalid(candidate_a_hash),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -1910,7 +1911,7 @@ mod tests {
&test_state.keystore,
Statement::Invalid(candidate_a_hash),
&test_state.signing_context,
0,
ValidatorIndex(0),
&public0.into(),
).await.ok().flatten().expect("should be signed");
@@ -2002,7 +2003,7 @@ mod tests {
validator_index,
s1,
&test_state.signing_context,
&test_state.validator_public[validator_index as usize],
&test_state.validator_public[validator_index.0 as usize],
).expect("signature must be valid");
SignedFullStatement::new(
@@ -2010,7 +2011,7 @@ mod tests {
validator_index,
s2,
&test_state.signing_context,
&test_state.validator_public[validator_index as usize],
&test_state.validator_public[validator_index.0 as usize],
).expect("signature must be valid");
}
);
@@ -2042,7 +2043,7 @@ mod tests {
validator_index,
s1,
&test_state.signing_context,
&test_state.validator_public[validator_index as usize],
&test_state.validator_public[validator_index.0 as usize],
).expect("signature must be valid");
SignedFullStatement::new(
@@ -2050,7 +2051,7 @@ mod tests {
validator_index,
s2,
&test_state.signing_context,
&test_state.validator_public[validator_index as usize],
&test_state.validator_public[validator_index.0 as usize],
).expect("signature must be valid");
}
);
@@ -2223,7 +2224,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&validator2.into(),
).await.ok().flatten().expect("should be signed");
@@ -2361,7 +2362,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -2503,7 +2504,7 @@ mod tests {
&test_state.keystore,
Statement::Seconded(candidate_a.clone()),
&test_state.signing_context,
2,
ValidatorIndex(2),
&public2.into(),
).await.ok().flatten().expect("should be signed");
@@ -2542,7 +2543,7 @@ mod tests {
let validator_public = validator_pubkeys(&validators);
let validator_groups = {
let mut validator_groups = HashMap::new();
validator_groups.insert(para_id, vec![0, 1, 2, 3, 4, 5]);
validator_groups.insert(para_id, vec![0, 1, 2, 3, 4, 5].into_iter().map(ValidatorIndex).collect());
validator_groups
};
@@ -2567,9 +2568,9 @@ mod tests {
let attested = TableAttestedCandidate {
candidate: Default::default(),
validity_votes: vec![
(5, fake_attestation(5)),
(3, fake_attestation(3)),
(1, fake_attestation(1)),
(ValidatorIndex(5), fake_attestation(5)),
(ValidatorIndex(3), fake_attestation(3)),
(ValidatorIndex(1), fake_attestation(1)),
],
group_id: para_id,
};
@@ -333,7 +333,7 @@ mod tests {
block_on(async move {
let (mut sender, mut receiver) = mpsc::channel(10);
let relay_parent = Hash::default();
let validator_index = 1u32;
let validator_index = ValidatorIndex(1u32);
let future = construct_availability_bitfield(
relay_parent,
+1 -1
View File
@@ -501,7 +501,7 @@ fn bitfields_indicate_availability(
let availability_len = availability.len();
for bitfield in bitfields {
let validator_idx = bitfield.validator_index() as usize;
let validator_idx = bitfield.validator_index().0 as usize;
match availability.get_mut(validator_idx) {
None => {
// in principle, this function might return a `Result<bool, Error>` so that we can more clearly express this error condition
+13 -13
View File
@@ -78,9 +78,9 @@ mod select_availability_bitfields {
// we pass in three bitfields with two validators
// this helps us check the postcondition that we get two bitfields back, for which the validators differ
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec.clone(), 0)),
block_on(signed_bitfield(&keystore, bitvec.clone(), 1)),
block_on(signed_bitfield(&keystore, bitvec, 1)),
block_on(signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(0))),
block_on(signed_bitfield(&keystore, bitvec.clone(), ValidatorIndex(1))),
block_on(signed_bitfield(&keystore, bitvec, ValidatorIndex(1))),
];
let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields);
@@ -116,9 +116,9 @@ mod select_availability_bitfields {
];
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec0, 0)),
block_on(signed_bitfield(&keystore, bitvec1, 1)),
block_on(signed_bitfield(&keystore, bitvec2.clone(), 2)),
block_on(signed_bitfield(&keystore, bitvec0, ValidatorIndex(0))),
block_on(signed_bitfield(&keystore, bitvec1, ValidatorIndex(1))),
block_on(signed_bitfield(&keystore, bitvec2.clone(), ValidatorIndex(2))),
];
let selected_bitfields = select_availability_bitfields(&cores, &bitfields);
@@ -140,8 +140,8 @@ mod select_availability_bitfields {
let cores = vec![occupied_core(0), occupied_core(1)];
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec, 1)),
block_on(signed_bitfield(&keystore, bitvec1.clone(), 1)),
block_on(signed_bitfield(&keystore, bitvec, ValidatorIndex(1))),
block_on(signed_bitfield(&keystore, bitvec1.clone(), ValidatorIndex(1))),
];
let selected_bitfields = select_availability_bitfields(&cores, &bitfields);
@@ -174,11 +174,11 @@ mod select_availability_bitfields {
// these are out of order but will be selected in order. The better
// bitfield for 3 will be selected.
let bitfields = vec![
block_on(signed_bitfield(&keystore, bitvec2.clone(), 3)),
block_on(signed_bitfield(&keystore, bitvec3.clone(), 3)),
block_on(signed_bitfield(&keystore, bitvec0.clone(), 0)),
block_on(signed_bitfield(&keystore, bitvec2.clone(), 2)),
block_on(signed_bitfield(&keystore, bitvec1.clone(), 1)),
block_on(signed_bitfield(&keystore, bitvec2.clone(), ValidatorIndex(3))),
block_on(signed_bitfield(&keystore, bitvec3.clone(), ValidatorIndex(3))),
block_on(signed_bitfield(&keystore, bitvec0.clone(), ValidatorIndex(0))),
block_on(signed_bitfield(&keystore, bitvec2.clone(), ValidatorIndex(2))),
block_on(signed_bitfield(&keystore, bitvec1.clone(), ValidatorIndex(1))),
];
let selected_bitfields = select_availability_bitfields(&cores, &bitfields);
+2 -2
View File
@@ -214,7 +214,7 @@ impl SpanBuilder {
#[inline(always)]
pub fn with_validator_index(mut self, validator: ValidatorIndex) -> Self {
self.span.add_string_tag("validator-index", &validator.to_string());
self.span.add_string_tag("validator-index", &validator.0.to_string());
self
}
@@ -234,7 +234,7 @@ impl SpanBuilder {
pub fn with_claimed_validator_index(mut self, claimed_validator_index: ValidatorIndex) -> Self {
self.span.add_string_tag(
"claimed-validator",
&claimed_validator_index.to_string(),
&claimed_validator_index.0.to_string(),
);
self
}
@@ -208,7 +208,7 @@ fn try_import_the_same_assignment() {
overseer_send(overseer, msg).await;
// send the assignment related to `hash`
let validator_index = 0u32;
let validator_index = ValidatorIndex(0);
let cert = fake_assignment_cert(hash, validator_index);
let assignments = vec![(cert.clone(), 0u32)];
@@ -299,7 +299,7 @@ fn spam_attack_results_in_negative_reputation_change() {
// to populate our knowledge
let assignments: Vec<_> = (0..candidates_count)
.map(|candidate_index| {
let validator_index = candidate_index as u32;
let validator_index = ValidatorIndex(candidate_index as u32);
let cert = fake_assignment_cert(hash_b, validator_index);
(cert, candidate_index as u32)
}).collect();
@@ -372,7 +372,7 @@ fn import_approval_happy_path() {
overseer_send(overseer, msg).await;
// import an assignment related to `hash` locally
let validator_index = 0u32;
let validator_index = ValidatorIndex(0);
let candidate_index = 0u32;
let cert = fake_assignment_cert(hash, validator_index);
overseer_send(
@@ -455,7 +455,7 @@ fn import_approval_bad() {
let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]);
overseer_send(overseer, msg).await;
let validator_index = 0u32;
let validator_index = ValidatorIndex(0);
let candidate_index = 0u32;
let cert = fake_assignment_cert(hash, validator_index);
@@ -616,8 +616,8 @@ fn update_peer_view() {
let msg = ApprovalDistributionMessage::NewBlocks(vec![meta_a, meta_b, meta_c]);
overseer_send(overseer, msg).await;
let cert_a = fake_assignment_cert(hash_a, 0);
let cert_b = fake_assignment_cert(hash_b, 0);
let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0));
let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0));
overseer_send(
overseer,
@@ -670,7 +670,7 @@ fn update_peer_view() {
)
).await;
let cert_c = fake_assignment_cert(hash_c, 0);
let cert_c = fake_assignment_cert(hash_c, ValidatorIndex(0));
overseer_send(
overseer,
@@ -753,7 +753,7 @@ fn import_remotely_then_locally() {
overseer_send(overseer, msg).await;
// import the assignment remotely first
let validator_index = 0u32;
let validator_index = ValidatorIndex(0);
let candidate_index = 0u32;
let cert = fake_assignment_cert(hash, validator_index);
let assignments = vec![(cert.clone(), candidate_index)];
@@ -14,16 +14,20 @@ polkadot-erasure-coding = { path = "../../../erasure-coding" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-network-protocol = { path = "../../network/protocol" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-node-core-runtime-api = { path = "../../core/runtime-api" }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.23"
rand = "0.8.3"
lru = "0.6.5"
[dev-dependencies]
polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
assert_matches = "1.4.0"
maplit = "1.0"
@@ -0,0 +1,89 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//
//! Error handling related code and Error/Result definitions.
use thiserror::Error;
use futures::channel::oneshot;
use polkadot_node_subsystem_util::Error as UtilError;
use polkadot_primitives::v1::SessionIndex;
use polkadot_subsystem::{errors::RuntimeApiError, SubsystemError};
/// Errors of this subsystem.
#[derive(Debug, Error)]
pub enum Error {
#[error("Response channel to obtain QueryChunk failed")]
QueryChunkResponseChannel(#[source] oneshot::Canceled),
#[error("Receive channel closed")]
IncomingMessageChannel(#[source] SubsystemError),
/// Some request to utility functions failed.
#[error("Runtime request failed")]
UtilRequest(#[source] UtilError),
/// Some request to the runtime failed.
#[error("Runtime request failed")]
RuntimeRequestCanceled(#[source] oneshot::Canceled),
/// Some request to the runtime failed.
#[error("Runtime request failed")]
RuntimeRequest(#[source] RuntimeApiError),
/// We tried fetching a session which was not available.
#[error("No such session")]
NoSuchSession(SessionIndex),
/// Spawning a running task failed.
#[error("Spawning subsystem task failed")]
SpawnTask(#[source] SubsystemError),
/// We tried accessing a session that was not cached.
#[error("Session is not cached.")]
NoSuchCachedSession,
/// Requester stream exhausted.
#[error("Erasure chunk requester stream exhausted")]
RequesterExhausted,
/// Sending response failed.
#[error("Sending a request's response failed.")]
SendResponse,
}
/// Convenience alias: this subsystem's functions all fail with [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
/// Allow `?` on `SubsystemContext` calls by folding channel failures into
/// [`Error::IncomingMessageChannel`].
impl From<SubsystemError> for Error {
	fn from(err: SubsystemError) -> Self {
		Self::IncomingMessageChannel(err)
	}
}
/// Receive a response from a runtime request and convert errors.
pub(crate) async fn recv_runtime<V>(
r: std::result::Result<
oneshot::Receiver<std::result::Result<V, RuntimeApiError>>,
UtilError,
>,
) -> Result<V> {
r.map_err(Error::UtilRequest)?
.await
.map_err(Error::RuntimeRequestCanceled)?
.map_err(Error::RuntimeRequest)
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,117 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use polkadot_node_subsystem_util::metrics::prometheus::{Counter, U64, Registry, PrometheusError, CounterVec, Opts};
use polkadot_node_subsystem_util::metrics::prometheus;
use polkadot_node_subsystem_util::metrics;
/// Label for success counters.
pub const SUCCEEDED: &'static str = "succeeded";
/// Label for fail counters.
pub const FAILED: &'static str = "failed";
/// Label for chunks that could not be served, because they were not available.
pub const NOT_FOUND: &'static str = "not-found";
/// Availability Distribution metrics.
///
/// Wraps `Option<MetricsInner>` so a `None` (no Prometheus registry configured)
/// turns every reporting call into a no-op.
#[derive(Clone, Default)]
pub struct Metrics(Option<MetricsInner>);
/// The actual Prometheus counters; only present when a registry was provided
/// at registration time (see `try_register`).
#[derive(Clone)]
struct MetricsInner {
	/// Number of chunks fetched.
	///
	/// Note: The failed count gets incremented, when we were not able to fetch the chunk at all.
	/// For times, where we failed downloading, but succeeded on the next try (with different
	/// backers), see `retries`.
	fetched_chunks: CounterVec<U64>,
	/// Number of chunks served.
	///
	/// Note: Right now, `Succeeded` gets incremented whenever we were able to successfully respond
	/// to a chunk request. This includes `NoSuchChunk` responses.
	served_chunks: CounterVec<U64>,
	/// Number of times our first set of validators did not provide the needed chunk and we had to
	/// query further validators.
	retries: Counter<U64>,
}
impl Metrics {
/// Create new dummy metrics, not reporting anything.
pub fn new_dummy() -> Self {
Metrics(None)
}
/// Increment counter on fetched labels.
pub fn on_fetch(&self, label: &'static str) {
if let Some(metrics) = &self.0 {
metrics.fetched_chunks.with_label_values(&[label]).inc()
}
}
/// Increment counter on served chunks.
pub fn on_served(&self, label: &'static str) {
if let Some(metrics) = &self.0 {
metrics.served_chunks.with_label_values(&[label]).inc()
}
}
/// Increment retry counter.
pub fn on_retry(&self) {
if let Some(metrics) = &self.0 {
metrics.retries.inc()
}
}
}
impl metrics::Metrics for Metrics {
	/// Register all counters with the given Prometheus `registry`.
	///
	/// On success the returned `Metrics` reports for real; on registration
	/// failure the caller typically falls back to `Metrics::default()` (no-op).
	fn try_register(registry: &Registry) -> Result<Self, PrometheusError> {
		let metrics = MetricsInner {
			fetched_chunks: prometheus::register(
				CounterVec::new(
					Opts::new(
						"parachain_fetched_chunks_total",
						"Total number of fetched chunks.",
					),
					// Single label key; values are SUCCEEDED/FAILED (see consts above).
					&["success"]
				)?,
				registry,
			)?,
			served_chunks: prometheus::register(
				CounterVec::new(
					Opts::new(
						"parachain_served_chunks_total",
						"Total number of chunks served by this backer.",
					),
					&["success"]
				)?,
				registry,
			)?,
			retries: prometheus::register(
				Counter::new(
					"parachain_fetch_retries_total",
					"Number of times we did not succeed in fetching a chunk and needed to try more backers.",
				)?,
				registry,
			)?,
		};
		Ok(Metrics(Some(metrics)))
	}
}
@@ -0,0 +1,421 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashSet;
use futures::channel::mpsc;
use futures::channel::oneshot;
use futures::future::select;
use futures::{FutureExt, SinkExt};
use polkadot_erasure_coding::branch_hash;
use polkadot_node_network_protocol::request_response::{
request::{OutgoingRequest, RequestError, Requests},
v1::{AvailabilityFetchingRequest, AvailabilityFetchingResponse},
};
use polkadot_primitives::v1::{
AuthorityDiscoveryId, BlakeTwo256, ErasureChunk, GroupIndex, Hash, HashT, OccupiedCore,
SessionIndex,
};
use polkadot_subsystem::messages::{
AllMessages, AvailabilityStoreMessage, NetworkBridgeMessage,
};
use polkadot_subsystem::SubsystemContext;
use crate::{
error::{Error, Result},
session_cache::{BadValidators, SessionInfo},
LOG_TARGET,
metrics::{Metrics, SUCCEEDED, FAILED},
};
#[cfg(test)]
mod tests;
/// Configuration for a `FetchTask`
///
/// This exists to separate preparation of a `FetchTask` from actually starting it, which is
/// beneficial, as it allows us to take session info by reference.
pub struct FetchTaskConfig {
	// The task to run, if any. `None` means our own backing group is responsible for the
	// candidate and there is nothing to fetch (see `FetchTaskConfig::new`).
	prepared_running: Option<RunningTask>,
	// Relay parents for which the candidate (and thus this fetch) is considered live.
	live_in: HashSet<Hash>,
}
/// Information about a task fetching an erasure chunk.
pub struct FetchTask {
	/// For what relay parents this task is relevant.
	///
	/// In other words, for which relay chain parents this candidate is considered live.
	/// This is updated on every `ActiveLeavesUpdate` and enables us to know when we can safely
	/// stop keeping track of that candidate/chunk.
	live_in: HashSet<Hash>,
	/// We keep the task around until `live_in` becomes empty, to make
	/// sure we won't re-fetch an already fetched candidate.
	state: FetchedState,
}
/// State of a particular candidate chunk fetching process.
enum FetchedState {
	/// Chunk fetch has started.
	///
	/// Once the contained `Sender` is dropped, any still running task will be canceled.
	Started(oneshot::Sender<()>),
	/// All relevant `live_in` have been removed, before we were able to get our chunk.
	Canceled,
}
/// Messages sent from `FetchTask`s to be handled/forwarded by the requester.
pub enum FromFetchTask {
	/// Message to another subsystem (network bridge, availability store).
	Message(AllMessages),
	/// Concluded with result.
	///
	/// In case of `None` everything was fine, in case of `Some`, some validators in the group
	/// did not serve us our chunk as expected.
	Concluded(Option<BadValidators>),
}
/// Information a running task needs.
struct RunningTask {
	/// For what session we have been spawned.
	session_index: SessionIndex,
	/// Index of validator group to fetch the chunk from.
	///
	/// Needed for reporting bad validators.
	group_index: GroupIndex,
	/// Validators to request the chunk from.
	///
	/// This vector gets drained during execution of the task (it will be empty afterwards).
	group: Vec<AuthorityDiscoveryId>,
	/// The request to send.
	request: AvailabilityFetchingRequest,
	/// Root hash, for verifying the chunks validity.
	erasure_root: Hash,
	/// Relay parent of the candidate to fetch.
	relay_parent: Hash,
	/// Sender for communicating with other subsystems and reporting results.
	sender: mpsc::Sender<FromFetchTask>,
	/// Prometheus metrics for reporting results.
	metrics: Metrics,
}
impl FetchTaskConfig {
	/// Create a new configuration for a [`FetchTask`].
	///
	/// The result of this function can be passed into [`FetchTask::start`].
	pub fn new(
		leaf: Hash,
		core: &OccupiedCore,
		sender: mpsc::Sender<FromFetchTask>,
		metrics: Metrics,
		session_info: &SessionInfo,
	) -> Self {
		let live_in = vec![leaf].into_iter().collect();
		// Don't run tasks for our backing group:
		// (Our chunk is provided to us via backing; `prepared_running: None` makes
		// `FetchTask::start` create the task in the `Canceled` state.)
		if session_info.our_group == core.group_responsible {
			return FetchTaskConfig {
				live_in,
				prepared_running: None,
			};
		}
		let prepared_running = RunningTask {
			session_index: session_info.session_index,
			group_index: core.group_responsible,
			// Panic here is deliberate: a missing group for an occupied core would be a
			// violated runtime invariant, not a recoverable error.
			group: session_info.validator_groups.get(core.group_responsible.0 as usize)
				.expect("The responsible group of a candidate should be available in the corresponding session. qed.")
				.clone(),
			request: AvailabilityFetchingRequest {
				candidate_hash: core.candidate_hash,
				// We always fetch the chunk belonging to our own validator index.
				index: session_info.our_index,
			},
			erasure_root: core.candidate_descriptor.erasure_root,
			relay_parent: core.candidate_descriptor.relay_parent,
			metrics,
			sender,
		};
		FetchTaskConfig {
			live_in,
			prepared_running: Some(prepared_running),
		}
	}
}
impl FetchTask {
/// Start fetching a chunk.
///
/// A task handling the fetching of the configured chunk will be spawned.
pub async fn start<Context>(config: FetchTaskConfig, ctx: &mut Context) -> Result<Self>
where
Context: SubsystemContext,
{
let FetchTaskConfig {
prepared_running,
live_in,
} = config;
if let Some(running) = prepared_running {
let (handle, kill) = oneshot::channel();
ctx.spawn("chunk-fetcher", running.run(kill).boxed())
.await
.map_err(|e| Error::SpawnTask(e))?;
Ok(FetchTask {
live_in,
state: FetchedState::Started(handle),
})
} else {
Ok(FetchTask {
live_in,
state: FetchedState::Canceled,
})
}
}
/// Add the given leaf to the relay parents which are making this task relevant.
///
/// This is for book keeping, so we know we are already fetching a given chunk.
pub fn add_leaf(&mut self, leaf: Hash) {
self.live_in.insert(leaf);
}
/// Remove leaves and cancel the task, if it was the last one and the task has still been
/// fetching.
pub fn remove_leaves(&mut self, leaves: &HashSet<Hash>) {
self.live_in.difference(leaves);
if self.live_in.is_empty() && !self.is_finished() {
self.state = FetchedState::Canceled
}
}
/// Whether or not there are still relay parents around with this candidate pending
/// availability.
pub fn is_live(&self) -> bool {
!self.live_in.is_empty()
}
/// Whether or not this task can be considered finished.
///
/// That is, it is either canceled, succeeded or failed.
pub fn is_finished(&self) -> bool {
match &self.state {
FetchedState::Canceled => true,
FetchedState::Started(sender) => sender.is_canceled(),
}
}
}
/// Things that can go wrong in task execution.
#[derive(Debug)]
enum TaskError {
	/// The peer failed to deliver a correct chunk for some reason (has been reported as
	/// appropriate). The task will move on to the next validator in the group.
	PeerError,
	/// This very node is seemingly shutting down (sending of message failed).
	/// The task gives up entirely in this case.
	ShuttingDown,
}
impl RunningTask {
	/// Drive the fetch to completion, or stop early if `kill` fires/is dropped.
	async fn run(self, kill: oneshot::Receiver<()>) {
		// Wait for completion/or cancel.
		let run_it = self.run_inner();
		futures::pin_mut!(run_it);
		let _ = select(run_it, kill).await;
	}
	/// Fetch and store chunk.
	///
	/// Try validators in backing group in order.
	async fn run_inner(mut self) {
		// Validators that failed to serve a valid chunk; reported via `conclude`.
		let mut bad_validators = Vec::new();
		let mut label = FAILED;
		let mut count: u32 = 0;
		// Try validators in reverse order:
		while let Some(validator) = self.group.pop() {
			// Report retries:
			// (Every iteration after the first means the previous validator failed us.)
			if count > 0 {
				self.metrics.on_retry();
			}
			count +=1;
			// Send request:
			let resp = match self.do_request(&validator).await {
				Ok(resp) => resp,
				Err(TaskError::ShuttingDown) => {
					tracing::info!(
						target: LOG_TARGET,
						"Node seems to be shutting down, canceling fetch task"
					);
					self.metrics.on_fetch(FAILED);
					// Note: no `conclude` here - the receiving end is gone anyway.
					return
				}
				Err(TaskError::PeerError) => {
					bad_validators.push(validator);
					continue
				}
			};
			let chunk = match resp {
				AvailabilityFetchingResponse::Chunk(resp) => {
					// Re-attach candidate hash and index from our request to the response.
					resp.recombine_into_chunk(&self.request)
				}
				AvailabilityFetchingResponse::NoSuchChunk => {
					tracing::debug!(
						target: LOG_TARGET,
						validator = ?validator,
						"Validator did not have our chunk"
					);
					bad_validators.push(validator);
					continue
				}
			};
			// Data genuine?
			// (Verify the chunk against the candidate's erasure root before storing.)
			if !self.validate_chunk(&validator, &chunk) {
				bad_validators.push(validator);
				continue;
			}
			// Ok, let's store it and be happy:
			self.store_chunk(chunk).await;
			label = SUCCEEDED;
			break;
		}
		self.metrics.on_fetch(label);
		self.conclude(bad_validators).await;
	}
	/// Do request and return response, if successful.
	///
	/// All request failures map to `TaskError::PeerError` (try next validator),
	/// except a failure to reach the network bridge, which means we are shutting down.
	async fn do_request(
		&mut self,
		validator: &AuthorityDiscoveryId,
	) -> std::result::Result<AvailabilityFetchingResponse, TaskError> {
		let (full_request, response_recv) =
			OutgoingRequest::new(validator.clone(), self.request);
		let requests = Requests::AvailabilityFetching(full_request);
		self.sender
			.send(FromFetchTask::Message(AllMessages::NetworkBridge(
				NetworkBridgeMessage::SendRequests(vec![requests]),
			)))
			.await
			.map_err(|_| TaskError::ShuttingDown)?;
		match response_recv.await {
			Ok(resp) => Ok(resp),
			Err(RequestError::InvalidResponse(err)) => {
				tracing::warn!(
					target: LOG_TARGET,
					origin= ?validator,
					err= ?err,
					"Peer sent us invalid erasure chunk data"
				);
				Err(TaskError::PeerError)
			}
			Err(RequestError::NetworkError(err)) => {
				tracing::warn!(
					target: LOG_TARGET,
					origin= ?validator,
					err= ?err,
					"Some network error occurred when fetching erasure chunk"
				);
				Err(TaskError::PeerError)
			}
			Err(RequestError::Canceled(oneshot::Canceled)) => {
				tracing::warn!(target: LOG_TARGET,
					origin= ?validator,
					"Erasure chunk request got canceled");
				Err(TaskError::PeerError)
			}
		}
	}
	/// Verify the received chunk against the candidate's `erasure_root`.
	///
	/// Returns `false` (and logs) if the merkle proof or the chunk hash do not check out.
	fn validate_chunk(&self, validator: &AuthorityDiscoveryId, chunk: &ErasureChunk) -> bool {
		let anticipated_hash =
			match branch_hash(&self.erasure_root, &chunk.proof, chunk.index.0 as usize) {
				Ok(hash) => hash,
				Err(e) => {
					tracing::warn!(
						target: LOG_TARGET,
						candidate_hash = ?self.request.candidate_hash,
						origin = ?validator,
						error = ?e,
						"Failed to calculate chunk merkle proof",
					);
					return false;
				}
			};
		let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
		if anticipated_hash != erasure_chunk_hash {
			tracing::warn!(target: LOG_TARGET, origin = ?validator, "Received chunk does not match merkle tree");
			return false;
		}
		true
	}
	/// Store given chunk and log any error.
	async fn store_chunk(&mut self, chunk: ErasureChunk) {
		let (tx, rx) = oneshot::channel();
		let r = self
			.sender
			.send(FromFetchTask::Message(AllMessages::AvailabilityStore(
				AvailabilityStoreMessage::StoreChunk {
					candidate_hash: self.request.candidate_hash,
					relay_parent: self.relay_parent,
					chunk,
					tx,
				},
			)))
			.await;
		if let Err(err) = r {
			tracing::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");
		}
		// Errors are only logged; storage failure is not propagated to the fetch result.
		if let Err(oneshot::Canceled) = rx.await {
			tracing::error!(target: LOG_TARGET, "Storing erasure chunk failed");
		}
	}
	/// Tell subsystem we are done.
	async fn conclude(&mut self, bad_validators: Vec<AuthorityDiscoveryId>) {
		// `None` payload means success; `Some` carries the validators to report.
		let payload = if bad_validators.is_empty() {
			None
		} else {
			Some(BadValidators {
				session_index: self.session_index,
				group_index: self.group_index,
				bad_validators,
			})
		};
		if let Err(err) = self.sender.send(FromFetchTask::Concluded(payload)).await {
			tracing::warn!(
				target: LOG_TARGET,
				err= ?err,
				"Sending concluded message for task failed"
			);
		}
	}
}
@@ -0,0 +1,315 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashMap;
use std::sync::Arc;
use parity_scale_codec::Encode;
use futures::channel::{mpsc, oneshot};
use futures::{executor, Future, FutureExt, StreamExt, select};
use futures::task::{Poll, Context, noop_waker};
use polkadot_erasure_coding::{obtain_chunks_v1 as obtain_chunks, branches};
use sc_network as network;
use sp_keyring::Sr25519Keyring;
use polkadot_primitives::v1::{AvailableData, BlockData, CandidateHash, HeadData, PersistedValidationData, PoV, ValidatorIndex};
use polkadot_node_network_protocol::request_response::v1;
use polkadot_subsystem::messages::AllMessages;
use crate::metrics::Metrics;
use super::*;
/// Dropping the kill handle must make the task future resolve immediately.
#[test]
fn task_can_be_canceled() {
	let (task, _rx) = get_test_running_task();
	let (kill_handle, kill) = oneshot::channel();
	// Dropping the sending side cancels the task right away.
	drop(kill_handle);
	let fut = task.run(kill);
	futures::pin_mut!(fut);
	let waker = noop_waker();
	let mut cx = Context::from_waker(&waker);
	assert!(fut.poll(&mut cx) == Poll::Ready(()), "Task is immediately finished");
}
/// Make sure the task won't accept a chunk with an invalid proof.
#[test]
fn task_does_not_accept_invalid_chunk() {
	let (mut task, rx) = get_test_running_task();
	task.group = vec![Sr25519Keyring::Alice.public().into()];
	// A chunk whose proof does not verify against the (dummy) erasure root:
	let bogus_chunk = AvailabilityFetchingResponse::Chunk(
		v1::ChunkResponse {
			chunk: vec![1,2,3],
			proof: vec![vec![9,8,2], vec![2,3,4]],
		}
	);
	let test = TestRun {
		chunk_responses: vec![(Sr25519Keyring::Alice.public().into(), bogus_chunk)]
			.into_iter()
			.collect(),
		// Nothing may be stored:
		valid_chunks: HashSet::new(),
	};
	test.run(task, rx);
}
/// A task should store a chunk that verifies against the erasure root.
#[test]
fn task_stores_valid_chunk() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	task.request.index = chunk.index;
	task.group = vec![Sr25519Keyring::Alice.public().into()];
	let response = AvailabilityFetchingResponse::Chunk(
		v1::ChunkResponse {
			chunk: chunk.chunk.clone(),
			proof: chunk.proof,
		}
	);
	let test = TestRun {
		chunk_responses: vec![(Sr25519Keyring::Alice.public().into(), response)]
			.into_iter()
			.collect(),
		// The served chunk is valid and must end up in the store:
		valid_chunks: vec![chunk.chunk].into_iter().collect(),
	};
	test.run(task, rx);
}
/// A task must not store an otherwise valid chunk that carries the wrong index.
#[test]
fn task_does_not_accept_wrongly_indexed_chunk() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	// Request a different index than the one the delivered chunk carries:
	task.request.index = ValidatorIndex(chunk.index.0 + 1);
	task.group = vec![Sr25519Keyring::Alice.public().into()];
	let response = AvailabilityFetchingResponse::Chunk(
		v1::ChunkResponse {
			chunk: chunk.chunk.clone(),
			proof: chunk.proof,
		}
	);
	let test = TestRun {
		chunk_responses: vec![(Sr25519Keyring::Alice.public().into(), response)]
			.into_iter()
			.collect(),
		// Nothing may be stored:
		valid_chunks: HashSet::new(),
	};
	test.run(task, rx);
}
/// Task stores a chunk as long as at least one validator serves a valid one.
#[test]
fn task_stores_valid_chunk_if_there_is_one() {
	let (mut task, rx) = get_test_running_task();
	let (root_hash, chunk) = get_valid_chunk_data();
	task.erasure_root = root_hash;
	task.request.index = chunk.index;
	// Only Alice has the valid chunk - fetching should succeed even though she is tried
	// last (validators are tried in reverse order).
	let validators = [
		Sr25519Keyring::Alice,
		Sr25519Keyring::Bob, Sr25519Keyring::Charlie,
		Sr25519Keyring::Dave, Sr25519Keyring::Eve,
	];
	task.group = validators.iter().map(|v| v.public().into()).collect();
	let valid_response = AvailabilityFetchingResponse::Chunk(
		v1::ChunkResponse {
			chunk: chunk.chunk.clone(),
			proof: chunk.proof,
		}
	);
	let garbage_response = AvailabilityFetchingResponse::Chunk(
		v1::ChunkResponse {
			chunk: vec![1,2,3],
			proof: vec![vec![9,8,2], vec![2,3,4]],
		}
	);
	let test = TestRun {
		chunk_responses: vec![
			(Sr25519Keyring::Alice.public().into(), valid_response),
			(Sr25519Keyring::Bob.public().into(), AvailabilityFetchingResponse::NoSuchChunk),
			(Sr25519Keyring::Charlie.public().into(), garbage_response),
		]
		.into_iter()
		.collect(),
		valid_chunks: vec![chunk.chunk].into_iter().collect(),
	};
	test.run(task, rx);
}
/// Test fixture describing how the mocked subsystem side should answer a `RunningTask`,
/// and which chunks it is allowed to store.
struct TestRun {
/// Response to deliver for a given validator index.
/// None means, answer with NetworkError.
chunk_responses: HashMap<AuthorityDiscoveryId, AvailabilityFetchingResponse>,
/// Set of chunks that should be considered valid:
/// (Storing any chunk outside of this set is a test failure.)
valid_chunks: HashSet<Vec<u8>>,
}
impl TestRun {
/// Run the given task against this fixture.
///
/// Drives the task and the mocked subsystem side concurrently until the task concludes,
/// panicking if the task ends before a valid chunk got stored (unless no valid chunks
/// were expected at all).
fn run(self, task: RunningTask, rx: mpsc::Receiver<FromFetchTask>) {
sp_tracing::try_init_simple();
let mut rx = rx.fuse();
let task = task.run_inner().fuse();
futures::pin_mut!(task);
executor::block_on(async {
// Whether it is ok for the task to end at this point:
let mut end_ok = false;
loop {
let msg = select!(
from_task = rx.next() => {
match from_task {
Some(msg) => msg,
None => break,
}
},
// Task finished without concluding - leave the loop:
() = task =>
break,
);
match msg {
FromFetchTask::Concluded(_) => break,
FromFetchTask::Message(msg) =>
end_ok = self.handle_message(msg).await,
}
}
if !end_ok {
panic!("Task ended prematurely (failed to store valid chunk)!");
}
});
}
/// Returns true, if after processing of the given message it would be ok for the stream to
/// end.
async fn handle_message(&self, msg: AllMessages) -> bool {
match msg {
AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(reqs)) => {
let mut valid_responses = 0;
for req in reqs {
let req = match req {
Requests::AvailabilityFetching(req) => req,
};
// Answer from our canned responses, or refuse if we have none for this peer:
let response = self.chunk_responses.get(&req.peer)
.ok_or(network::RequestFailure::Refused);
if let Ok(AvailabilityFetchingResponse::Chunk(resp)) = &response {
if self.valid_chunks.contains(&resp.chunk) {
valid_responses += 1;
}
}
req.pending_response.send(response.map(Encode::encode))
.expect("Sending response should succeed");
}
// Ending is only ok if we served no valid chunk and none was expected:
return (valid_responses == 0) && self.valid_chunks.is_empty()
}
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreChunk { chunk, tx, .. }
) => {
// Only chunks that verified against the erasure root may be stored:
assert!(self.valid_chunks.contains(&chunk.chunk));
tx.send(Ok(())).expect("Answering fetching task should work");
return true
}
_ => {
tracing::debug!(target: LOG_TARGET, "Unexpected message");
return false
}
}
}
}
/// Construct a `RunningTask` filled with dummy values, together with the receiving end of
/// its message channel.
fn get_test_running_task() -> (RunningTask, mpsc::Receiver<FromFetchTask>) {
	// Zero capacity - each sender still gets one guaranteed slot.
	let (tx, rx) = mpsc::channel(0);
	let task = RunningTask {
		session_index: 0,
		group_index: GroupIndex(0),
		group: Vec::new(),
		request: AvailabilityFetchingRequest {
			candidate_hash: CandidateHash([43u8; 32].into()),
			index: ValidatorIndex(0),
		},
		erasure_root: Hash::repeat_byte(99),
		relay_parent: Hash::repeat_byte(71),
		sender: tx,
		metrics: Metrics::new_dummy(),
	};
	(task, rx)
}
/// Build erasure-coded test data and return the erasure root together with the first chunk.
fn get_valid_chunk_data() -> (Hash, ErasureChunk) {
	let fake_validator_count = 10;
	let persisted = PersistedValidationData {
		parent_head: HeadData(vec![7, 8, 9]),
		relay_parent_number: Default::default(),
		max_pov_size: 1024,
		relay_parent_storage_root: Default::default(),
	};
	let pov_block = PoV {
		block_data: BlockData(vec![45, 46, 47]),
	};
	let available_data = AvailableData {
		validation_data: persisted,
		pov: Arc::new(pov_block),
	};
	let chunks = obtain_chunks(fake_validator_count, &available_data).unwrap();
	let branches = branches(chunks.as_ref());
	let root = branches.root();
	// We only need the very first chunk for the tests:
	let (index, (proof, chunk)) = branches
		.enumerate()
		.next()
		.expect("There really should be 10 chunks.");
	let chunk = ErasureChunk {
		chunk: chunk.to_vec(),
		index: ValidatorIndex(index as _),
		proof,
	};
	(root, chunk)
}
@@ -0,0 +1,236 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Requester takes care of requesting erasure chunks for candidates that are pending
//! availability.
use std::collections::{
hash_map::{Entry, HashMap},
hash_set::HashSet,
};
use std::iter::IntoIterator;
use std::pin::Pin;
use std::sync::Arc;
use futures::{
channel::mpsc,
task::{Context, Poll},
Stream,
};
use sp_keystore::SyncCryptoStorePtr;
use polkadot_node_subsystem_util::request_availability_cores_ctx;
use polkadot_primitives::v1::{CandidateHash, CoreState, Hash, OccupiedCore};
use polkadot_subsystem::{
messages::AllMessages, ActiveLeavesUpdate, jaeger, SubsystemContext,
};
use super::{error::recv_runtime, session_cache::SessionCache, Result, LOG_TARGET, Metrics};
/// A task fetching a particular chunk.
mod fetch_task;
use fetch_task::{FetchTask, FetchTaskConfig, FromFetchTask};
/// Requester takes care of requesting erasure chunks from backing groups and stores them in the
/// av store.
///
/// It implements a stream that needs to be advanced for it making progress.
pub struct Requester {
/// Candidates we need to fetch our chunk for.
///
/// We keep those around as long as a candidate is pending availability on some leaf, so we
/// won't fetch chunks multiple times.
fetches: HashMap<CandidateHash, FetchTask>,
/// Localized information about sessions we are currently interested in.
session_cache: SessionCache,
/// Sender to be cloned for `FetchTask`s, so they can report back to us.
tx: mpsc::Sender<FromFetchTask>,
/// Receive messages from `FetchTask`.
rx: mpsc::Receiver<FromFetchTask>,
/// Prometheus metrics, cloned into each spawned `FetchTask`.
metrics: Metrics,
}
impl Requester {
/// Create a new `Requester`.
///
/// You must feed it with `ActiveLeavesUpdate` via `update_fetching_heads` and make it progress
/// by advancing the stream.
pub fn new(keystore: SyncCryptoStorePtr, metrics: Metrics) -> Self {
// All we do is forwarding messages, no need to make this big.
// Each sender will get one slot, see
// [here](https://docs.rs/futures/0.3.13/futures/channel/mpsc/fn.channel.html).
let (tx, rx) = mpsc::channel(0);
Requester {
fetches: HashMap::new(),
session_cache: SessionCache::new(keystore),
tx,
rx,
metrics,
}
}
/// Update heads that need availability distribution.
///
/// For all active heads we will be fetching our chunks for availability distribution.
pub async fn update_fetching_heads<Context>(
&mut self,
ctx: &mut Context,
update: ActiveLeavesUpdate,
) -> Result<()>
where
Context: SubsystemContext,
{
let ActiveLeavesUpdate {
activated,
deactivated,
} = update;
// Order important! We need to handle activated, prior to deactivated, otherwise we might
// cancel still needed jobs.
self.start_requesting_chunks(ctx, activated.into_iter())
.await?;
self.stop_requesting_chunks(deactivated.into_iter());
Ok(())
}
/// Start requesting chunks for newly imported heads.
async fn start_requesting_chunks<Context>(
&mut self,
ctx: &mut Context,
new_heads: impl Iterator<Item = (Hash, Arc<jaeger::Span>)>,
) -> Result<()>
where
Context: SubsystemContext,
{
for (leaf, _) in new_heads {
let cores = query_occupied_cores(ctx, leaf).await?;
self.add_cores(ctx, leaf, cores).await?;
}
Ok(())
}
/// Stop requesting chunks for obsolete heads.
///
/// Tasks that are no longer live on any leaf get dropped.
fn stop_requesting_chunks(&mut self, obsolete_leaves: impl Iterator<Item = Hash>) {
let obsolete_leaves: HashSet<_> = obsolete_leaves.collect();
self.fetches.retain(|_, task| {
task.remove_leaves(&obsolete_leaves);
task.is_live()
})
}
/// Add candidates corresponding to a particular relay parent.
///
/// Starting requests where necessary.
///
/// Note: The passed in `leaf` is not the same as CandidateDescriptor::relay_parent in the
/// given cores. The latter is the relay_parent this candidate considers its parent, while the
/// passed in leaf might be some later block where the candidate is still pending availability.
async fn add_cores<Context>(
&mut self,
ctx: &mut Context,
leaf: Hash,
cores: impl IntoIterator<Item = OccupiedCore>,
) -> Result<()>
where
Context: SubsystemContext,
{
for core in cores {
match self.fetches.entry(core.candidate_hash) {
Entry::Occupied(mut e) =>
// Just book keeping - we are already requesting that chunk:
{
e.get_mut().add_leaf(leaf);
}
Entry::Vacant(e) => {
let tx = self.tx.clone();
let metrics = self.metrics.clone();
let task_cfg = self
.session_cache
.with_session_info(
ctx,
// We use leaf here, as relay_parent must be in the same session as the
// leaf. (Cores are dropped at session boundaries.) At the same time,
// only leaves are guaranteed to be fetchable by the state trie.
leaf,
|info| FetchTaskConfig::new(leaf, &core, tx, metrics, info),
)
.await?;
if let Some(task_cfg) = task_cfg {
e.insert(FetchTask::start(task_cfg, ctx).await?);
}
// If `task_cfg` was `None`, we are not a validator - nothing to do.
}
}
}
Ok(())
}
}
impl Stream for Requester {
type Item = AllMessages;
/// Drive overall fetching progress:
///
/// Messages coming from `FetchTask`s are forwarded to the caller, while `Concluded`
/// notifications are consumed here (reporting any misbehaving validators to the session
/// cache) and never surface as stream items.
fn poll_next(
mut self: Pin<&mut Self>,
ctx: &mut Context,
) -> Poll<Option<AllMessages>> {
loop {
match Pin::new(&mut self.rx).poll_next(ctx) {
Poll::Ready(Some(FromFetchTask::Message(m))) =>
return Poll::Ready(Some(m)),
Poll::Ready(Some(FromFetchTask::Concluded(Some(bad_boys)))) => {
self.session_cache.report_bad_log(bad_boys);
continue
}
Poll::Ready(Some(FromFetchTask::Concluded(None))) =>
continue,
Poll::Ready(None) =>
return Poll::Ready(None),
Poll::Pending =>
return Poll::Pending,
}
}
}
}
/// Query all hashes and descriptors of candidates pending availability at a particular block.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
async fn query_occupied_cores<Context>(
	ctx: &mut Context,
	relay_parent: Hash,
) -> Result<Vec<OccupiedCore>>
where
	Context: SubsystemContext,
{
	let cores = recv_runtime(request_availability_cores_ctx(relay_parent, ctx).await).await?;
	// We only care about occupied cores:
	let occupied = cores
		.into_iter()
		.filter_map(|core_state| match core_state {
			CoreState::Occupied(occupied) => Some(occupied),
			_ => None,
		})
		.collect();
	Ok(occupied)
}
@@ -0,0 +1,97 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Answer requests for availability chunks.
use futures::channel::oneshot;
use polkadot_node_network_protocol::request_response::{request::IncomingRequest, v1};
use polkadot_primitives::v1::{CandidateHash, ErasureChunk, ValidatorIndex};
use polkadot_subsystem::{
messages::{AllMessages, AvailabilityStoreMessage},
SubsystemContext,
};
use crate::error::{Error, Result};
use crate::{LOG_TARGET, metrics::{Metrics, SUCCEEDED, FAILED, NOT_FOUND}};
/// Variant of `answer_request` that does Prometheus metric and logging on errors.
///
/// Any errors of `answer_request` will simply be logged.
pub async fn answer_request_log<Context>(
	ctx: &mut Context,
	req: IncomingRequest<v1::AvailabilityFetchingRequest>,
	metrics: &Metrics,
) -> ()
where
	Context: SubsystemContext,
{
	match answer_request(ctx, req).await {
		// Count a successfully served chunk, or one we did not have:
		Ok(true) => metrics.on_served(SUCCEEDED),
		Ok(false) => metrics.on_served(NOT_FOUND),
		Err(err) => {
			tracing::warn!(
				target: LOG_TARGET,
				err= ?err,
				"Serving chunk failed with error"
			);
			metrics.on_served(FAILED);
		}
	}
}
/// Answer an incoming chunk request by querying the av store.
///
/// Returns: Ok(true) if chunk was found and served.
pub async fn answer_request<Context>(
ctx: &mut Context,
req: IncomingRequest<v1::AvailabilityFetchingRequest>,
) -> Result<bool>
where
Context: SubsystemContext,
{
let chunk = query_chunk(ctx, req.payload.candidate_hash, req.payload.index).await?;
let result = chunk.is_some();
let response = match chunk {
None => v1::AvailabilityFetchingResponse::NoSuchChunk,
Some(chunk) => v1::AvailabilityFetchingResponse::Chunk(chunk.into()),
};
req.send_response(response).map_err(|_| Error::SendResponse)?;
Ok(result)
}
/// Query chunk from the availability store.
#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))]
async fn query_chunk<Context>(
ctx: &mut Context,
candidate_hash: CandidateHash,
validator_index: ValidatorIndex,
) -> Result<Option<ErasureChunk>>
where
Context: SubsystemContext,
{
let (tx, rx) = oneshot::channel();
ctx.send_message(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx),
))
.await;
rx.await.map_err(|e| Error::QueryChunkResponseChannel(e))
}
@@ -0,0 +1,275 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashSet;
use lru::LruCache;
use rand::{seq::SliceRandom, thread_rng};
use sp_application_crypto::AppKey;
use sp_core::crypto::Public;
use sp_keystore::{CryptoStore, SyncCryptoStorePtr};
use polkadot_node_subsystem_util::{
request_session_index_for_child_ctx, request_session_info_ctx,
};
use polkadot_primitives::v1::SessionInfo as GlobalSessionInfo;
use polkadot_primitives::v1::{
AuthorityDiscoveryId, GroupIndex, Hash, SessionIndex, ValidatorId, ValidatorIndex,
};
use polkadot_subsystem::SubsystemContext;
use super::{
error::{recv_runtime, Result},
Error,
LOG_TARGET,
};
/// Caching of session info as needed by availability distribution.
///
/// It should be ensured that a cached session stays live in the cache as long as we might need it.
pub struct SessionCache {
/// Get the session index for a given relay parent.
///
/// We query this up to a 100 times per block, so caching it here without roundtrips over the
/// overseer seems sensible.
session_index_cache: LruCache<Hash, SessionIndex>,
/// Look up cached sessions by SessionIndex.
///
/// Note: Performance of fetching is really secondary here, but we need to ensure we are going
/// to get any existing cache entry, before fetching new information, as we should not mess up
/// the order of validators in `SessionInfo::validator_groups`. (We want live TCP connections
/// wherever possible.)
session_info_cache: LruCache<SessionIndex, SessionInfo>,
/// Key store for determining whether we are a validator and what `ValidatorIndex` we have.
keystore: SyncCryptoStorePtr,
}
/// Localized session information, tailored for the needs of availability distribution.
#[derive(Clone)]
pub struct SessionInfo {
/// The index of this session.
pub session_index: SessionIndex,
/// Validator groups of the current session.
///
/// Each group's order is randomized. This way we achieve load balancing when requesting
/// chunks, as the validators in a group will be tried in that randomized order. Each node
/// should arrive at a different order, therefore we distribute the load on individual
/// validators.
pub validator_groups: Vec<Vec<AuthorityDiscoveryId>>,
/// Information about ourself, i.e. our own `ValidatorIndex` in this session:
pub our_index: ValidatorIndex,
/// Remember to which group we belong, so we won't start fetching chunks for candidates with
/// our group being responsible. (We should have that chunk already.)
pub our_group: GroupIndex,
}
/// Report of bad validators.
///
/// Fetching tasks will report back validators that did not respond as expected, so we can re-order
/// them (they get tried last on subsequent fetches).
pub struct BadValidators {
/// The session index that was used.
pub session_index: SessionIndex,
/// The group, the not properly responding validators belong to.
pub group_index: GroupIndex,
/// The list of bad validators.
pub bad_validators: Vec<AuthorityDiscoveryId>,
}
impl SessionCache {
/// Create a new `SessionCache`.
pub fn new(keystore: SyncCryptoStorePtr) -> Self {
SessionCache {
// 5 is relatively conservative, 1 to 2 should suffice:
session_index_cache: LruCache::new(5),
// We need to cache the current and the last session the most:
session_info_cache: LruCache::new(2),
keystore,
}
}
/// Tries to retrieve `SessionInfo` and calls `with_info` if successful.
///
/// If this node is not a validator, the function will return `None`.
///
/// Use this function over any `fetch_session_info` if all you need is a reference to
/// `SessionInfo`, as it avoids an expensive clone.
pub async fn with_session_info<Context, F, R>(
&mut self,
ctx: &mut Context,
parent: Hash,
with_info: F,
) -> Result<Option<R>>
where
Context: SubsystemContext,
F: FnOnce(&SessionInfo) -> R,
{
// First resolve the session index, preferring the cache over a runtime request:
let session_index = match self.session_index_cache.get(&parent) {
Some(index) => *index,
None => {
let index =
recv_runtime(request_session_index_for_child_ctx(parent, ctx).await)
.await?;
self.session_index_cache.put(parent, index);
index
}
};
// Cached session info takes precedence, so reordering from `report_bad` survives:
if let Some(info) = self.session_info_cache.get(&session_index) {
return Ok(Some(with_info(info)));
}
if let Some(info) = self
.query_info_from_runtime(ctx, parent, session_index)
.await?
{
let r = with_info(&info);
self.session_info_cache.put(session_index, info);
return Ok(Some(r));
}
// `None` - we are not a validator in that session:
Ok(None)
}
/// Variant of `report_bad` that never fails, but just logs errors.
///
/// Not being able to report bad validators is not fatal, so we should not shutdown the
/// subsystem on this.
pub fn report_bad_log(&mut self, report: BadValidators) {
if let Err(err) = self.report_bad(report) {
tracing::warn!(
target: LOG_TARGET,
err= ?err,
"Reporting bad validators failed with error"
);
}
}
/// Make sure we try unresponsive or misbehaving validators last.
///
/// We assume validators in a group are tried in reverse order, so the reported bad validators
/// will be put at the beginning of the group.
#[tracing::instrument(level = "trace", skip(self, report), fields(subsystem = LOG_TARGET))]
pub fn report_bad(&mut self, report: BadValidators) -> Result<()> {
let session = self
.session_info_cache
.get_mut(&report.session_index)
.ok_or(Error::NoSuchCachedSession)?;
let group = session
.validator_groups
.get_mut(report.group_index.0 as usize)
.expect("A bad validator report must contain a valid group for the reported session. qed.");
let bad_set = report.bad_validators.iter().collect::<HashSet<_>>();
// Get rid of bad boys:
group.retain(|v| !bad_set.contains(v));
// We are trying validators in reverse order, so bad ones should be first:
let mut new_group = report.bad_validators;
new_group.append(group);
*group = new_group;
Ok(())
}
/// Query needed information from runtime.
///
/// We need to pass in the relay parent for our call to `request_session_info_ctx`. We should
/// not actually need it: I suppose it is used for internal caching based on relay parents,
/// which we don't use here. It should not do any harm though.
///
/// Returns `None` if we are not a validator in the given session.
async fn query_info_from_runtime<Context>(
&self,
ctx: &mut Context,
parent: Hash,
session_index: SessionIndex,
) -> Result<Option<SessionInfo>>
where
Context: SubsystemContext,
{
let GlobalSessionInfo {
validators,
discovery_keys,
mut validator_groups,
..
} = recv_runtime(request_session_info_ctx(parent, session_index, ctx).await)
.await?
.ok_or(Error::NoSuchSession(session_index))?;
if let Some(our_index) = self.get_our_index(validators).await {
// Get our group index:
let our_group = validator_groups
.iter()
.enumerate()
.find_map(|(i, g)| {
g.iter().find_map(|v| {
if *v == our_index {
Some(GroupIndex(i as u32))
} else {
None
}
})
})
.expect("Every validator should be in a validator group. qed.");
// Shuffle validators in groups (load balancing across nodes, see `SessionInfo` docs):
let mut rng = thread_rng();
for g in validator_groups.iter_mut() {
g.shuffle(&mut rng)
}
// Look up `AuthorityDiscoveryId`s right away:
let validator_groups: Vec<Vec<_>> = validator_groups
.into_iter()
.map(|group| {
group
.into_iter()
.map(|index| {
discovery_keys.get(index.0 as usize)
.expect("There should be a discovery key for each validator of each validator group. qed.")
.clone()
})
.collect()
})
.collect();
let info = SessionInfo {
validator_groups,
our_index,
session_index,
our_group,
};
return Ok(Some(info));
}
return Ok(None);
}
/// Get our `ValidatorIndex`.
///
/// Checks the keystore for each validator key of the session.
///
/// Returns: None if we are not a validator.
async fn get_our_index(&self, validators: Vec<ValidatorId>) -> Option<ValidatorIndex> {
for (i, v) in validators.iter().enumerate() {
if CryptoStore::has_keys(&*self.keystore, &[(v.to_raw_vec(), ValidatorId::ID)])
.await
{
return Some(ValidatorIndex(i as u32));
}
}
None
}
}
@@ -239,7 +239,7 @@ impl RequestFromBackersPhase {
// Request data.
to_state.send(FromInteraction::MakeFullDataRequest(
params.validator_authority_keys[validator_index as usize].clone(),
params.validator_authority_keys[validator_index.0 as usize].clone(),
params.candidate_hash.clone(),
validator_index,
tx,
@@ -279,8 +279,8 @@ impl RequestFromBackersPhase {
}
impl RequestChunksPhase {
fn new(n_validators: ValidatorIndex) -> Self {
let mut shuffling: Vec<_> = (0..n_validators).collect();
fn new(n_validators: u32) -> Self {
let mut shuffling: Vec<_> = (0..n_validators).map(ValidatorIndex).collect();
shuffling.shuffle(&mut rand::thread_rng());
RequestChunksPhase {
@@ -300,7 +300,7 @@ impl RequestChunksPhase {
let (tx, rx) = oneshot::channel();
to_state.send(FromInteraction::MakeChunkRequest(
params.validator_authority_keys[validator_index as usize].clone(),
params.validator_authority_keys[validator_index.0 as usize].clone(),
params.candidate_hash.clone(),
validator_index,
tx,
@@ -347,7 +347,7 @@ impl RequestChunksPhase {
if let Ok(anticipated_hash) = branch_hash(
&params.erasure_root,
&chunk.proof,
chunk.index as usize,
chunk.index.0 as usize,
) {
let erasure_chunk_hash = BlakeTwo256::hash(&chunk.chunk);
@@ -415,7 +415,7 @@ impl RequestChunksPhase {
if self.received_chunks.len() >= params.threshold {
let concluded = match polkadot_erasure_coding::reconstruct_v1(
params.validators.len(),
self.received_chunks.values().map(|c| (&c.chunk[..], c.index as usize)),
self.received_chunks.values().map(|c| (&c.chunk[..], c.index.0 as usize)),
) {
Ok(data) => {
if reconstructed_data_matches_root(params.validators.len(), &params.erasure_root, &data) {
@@ -852,7 +852,7 @@ async fn handle_network_update(
chunk.is_some(),
request_id,
candidate_hash,
validator_index,
validator_index.0,
);
// Whatever the result, issue an
@@ -882,7 +882,7 @@ async fn handle_network_update(
chunk.is_some(),
request_id,
awaited_chunk.candidate_hash,
awaited_chunk.validator_index,
awaited_chunk.validator_index.0,
);
// If there exists an entry under r_id, remove it.
@@ -1003,7 +1003,7 @@ async fn issue_request(
request_id,
peer_id,
awaited_chunk.candidate_hash,
awaited_chunk.validator_index,
awaited_chunk.validator_index.0,
);
protocol_v1::AvailabilityRecoveryMessage::RequestChunk(
@@ -1019,7 +1019,7 @@ async fn issue_request(
request_id,
peer_id,
awaited_data.candidate_hash,
awaited_data.validator_index,
awaited_data.validator_index.0,
);
protocol_v1::AvailabilityRecoveryMessage::RequestFullData(
@@ -184,7 +184,7 @@ impl TestState {
validators: self.validator_public.clone(),
discovery_keys: self.validator_authority_id.clone(),
// all validators in the same group.
validator_groups: vec![(0..self.validators.len()).map(|i| i as ValidatorIndex).collect()],
validator_groups: vec![(0..self.validators.len()).map(|i| ValidatorIndex(i as _)).collect()],
..Default::default()
}))).unwrap();
}
@@ -272,10 +272,10 @@ impl TestState {
virtual_overseer,
AvailabilityRecoveryMessage::NetworkBridgeUpdateV1(
NetworkBridgeEvent::PeerMessage(
self.validator_peer_id[validator_index as usize].clone(),
self.validator_peer_id[validator_index.0 as usize].clone(),
protocol_v1::AvailabilityRecoveryMessage::Chunk(
request_id,
Some(self.chunks[validator_index as usize].clone()),
Some(self.chunks[validator_index.0 as usize].clone()),
)
)
)
@@ -317,10 +317,10 @@ impl TestState {
virtual_overseer,
AvailabilityRecoveryMessage::NetworkBridgeUpdateV1(
NetworkBridgeEvent::PeerMessage(
self.validator_peer_id[validator_index as usize].clone(),
self.validator_peer_id[validator_index.0 as usize].clone(),
protocol_v1::AvailabilityRecoveryMessage::Chunk(
request_id,
Some(self.chunks[validator_index as usize].clone()),
Some(self.chunks[validator_index.0 as usize].clone()),
)
)
)
@@ -457,7 +457,7 @@ fn derive_erasure_chunks_with_proofs_and_root(
.enumerate()
.map(|(index, (proof, chunk))| ErasureChunk {
chunk: chunk.to_vec(),
index: index as _,
index: ValidatorIndex(index as _),
proof,
})
.collect::<Vec<ErasureChunk>>();
@@ -278,7 +278,7 @@ where
return;
}
let validator_index = signed_availability.validator_index() as usize;
let validator_index = signed_availability.validator_index().0 as usize;
let validator = if let Some(validator) = validator_set.get(validator_index) {
validator.clone()
} else {
@@ -420,7 +420,7 @@ where
// Use the (untrusted) validator index provided by the signed payload
// and see if that one actually signed the availability bitset.
let signing_context = job_data.signing_context.clone();
let validator_index = message.signed_availability.validator_index() as usize;
let validator_index = message.signed_availability.validator_index().0 as usize;
let validator = if let Some(validator) = validator_set.get(validator_index) {
validator.clone()
} else {
@@ -767,7 +767,7 @@ mod test {
use bitvec::bitvec;
use futures::executor;
use maplit::hashmap;
use polkadot_primitives::v1::{Signed, AvailabilityBitfield};
use polkadot_primitives::v1::{Signed, AvailabilityBitfield, ValidatorIndex};
use polkadot_node_subsystem_test_helpers::make_subsystem_context;
use polkadot_node_subsystem_util::TimeoutExt;
use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
@@ -882,7 +882,7 @@ mod test {
&keystore,
payload,
&signing_context,
0,
ValidatorIndex(0),
&malicious.into(),
)).ok().flatten().expect("should be signed");
@@ -947,7 +947,7 @@ mod test {
&keystore,
payload,
&signing_context,
42,
ValidatorIndex(42),
&validator,
)).ok().flatten().expect("should be signed");
@@ -1004,7 +1004,7 @@ mod test {
&keystore,
payload,
&signing_context,
0,
ValidatorIndex(0),
&validator,
)).ok().flatten().expect("should be signed");
@@ -1119,7 +1119,7 @@ mod test {
&keystore,
payload,
&signing_context,
0,
ValidatorIndex(0),
&validator,
)).ok().flatten().expect("should be signed");
@@ -1215,7 +1215,7 @@ mod test {
&keystore,
payload,
&signing_context,
0,
ValidatorIndex(0),
&validator,
)).ok().flatten().expect("should be signed");
@@ -1374,7 +1374,7 @@ mod test {
&keystore,
payload,
&signing_context,
0,
ValidatorIndex(0),
&validator,
)).ok().flatten().expect("should be signed");
+10 -13
View File
@@ -117,7 +117,7 @@ impl<N, AD> NetworkBridge<N, AD> {
impl<Net, AD, Context> Subsystem<Context> for NetworkBridge<Net, AD>
where
Net: Network + validator_discovery::Network,
Net: Network + validator_discovery::Network + Sync,
AD: validator_discovery::AuthorityDiscovery,
Context: SubsystemContext<Message=NetworkBridgeMessage>,
{
@@ -237,7 +237,10 @@ where
Action::SendRequests(reqs) => {
for req in reqs {
bridge.network_service.start_request(req);
bridge
.network_service
.start_request(&mut bridge.authority_discovery_service, req)
.await;
}
},
@@ -605,7 +608,6 @@ mod tests {
use polkadot_subsystem::{ActiveLeavesUpdate, FromOverseer, OverseerSignal};
use polkadot_subsystem::messages::{
AvailabilityDistributionMessage,
AvailabilityRecoveryMessage,
ApprovalDistributionMessage,
BitfieldDistributionMessage,
@@ -624,6 +626,7 @@ mod tests {
use polkadot_node_network_protocol::{ObservedRole, request_response::request::Requests};
use crate::network::{Network, NetworkAction};
use crate::validator_discovery::AuthorityDiscovery;
// The subsystem's view of the network - only supports a single call to `event_stream`.
struct TestNetwork {
@@ -663,6 +666,7 @@ mod tests {
)
}
#[async_trait]
impl Network for TestNetwork {
fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
self.net_events.lock()
@@ -677,7 +681,7 @@ mod tests {
Box::pin((&mut self.action_tx).sink_map_err(Into::into))
}
fn start_request(&self, _: Requests) {
async fn start_request<AD: AuthorityDiscovery>(&self, _: &mut AD, _: Requests) {
}
}
@@ -801,13 +805,6 @@ mod tests {
) if e == event.focus().expect("could not focus message")
);
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityDistribution(
AvailabilityDistributionMessage::NetworkBridgeUpdateV1(e)
) if e == event.focus().expect("could not focus message")
);
assert_matches!(
virtual_overseer.recv().await,
AllMessages::AvailabilityRecovery(
@@ -1527,7 +1524,7 @@ mod tests {
fn spread_event_to_subsystems_is_up_to_date() {
// Number of subsystems expected to be interested in a network event,
// and hence the network event broadcasted to.
const EXPECTED_COUNT: usize = 6;
const EXPECTED_COUNT: usize = 5;
let mut cnt = 0_usize;
for msg in AllMessages::dispatch_iter(NetworkBridgeEvent::PeerDisconnected(PeerId::random())) {
@@ -1538,7 +1535,7 @@ mod tests {
AllMessages::ChainApi(_) => unreachable!("Not interested in network events"),
AllMessages::CollatorProtocol(_) => unreachable!("Not interested in network events"),
AllMessages::StatementDistribution(_) => { cnt += 1; }
AllMessages::AvailabilityDistribution(_) => { cnt += 1; }
AllMessages::AvailabilityDistribution(_) => unreachable!("Not interested in network events"),
AllMessages::AvailabilityRecovery(_) => { cnt += 1; }
AllMessages::BitfieldDistribution(_) => { cnt += 1; }
AllMessages::BitfieldSigning(_) => unreachable!("Not interested in network events"),
+45 -5
View File
@@ -17,6 +17,7 @@
use std::pin::Pin;
use std::sync::Arc;
use async_trait::async_trait;
use futures::future::BoxFuture;
use futures::prelude::*;
use futures::stream::BoxStream;
@@ -24,7 +25,7 @@ use futures::stream::BoxStream;
use parity_scale_codec::Encode;
use sc_network::Event as NetworkEvent;
use sc_network::{NetworkService, IfDisconnected};
use sc_network::{IfDisconnected, NetworkService, OutboundFailure, RequestFailure};
use polkadot_node_network_protocol::{
peer_set::PeerSet,
@@ -34,6 +35,8 @@ use polkadot_node_network_protocol::{
use polkadot_primitives::v1::{Block, Hash};
use polkadot_subsystem::{SubsystemError, SubsystemResult};
use crate::validator_discovery::{peer_id_from_multiaddr, AuthorityDiscovery};
use super::LOG_TARGET;
/// Send a message to the network.
@@ -92,6 +95,7 @@ pub enum NetworkAction {
}
/// An abstraction over networking for the purposes of this subsystem.
#[async_trait]
pub trait Network: Send + 'static {
/// Get a stream of all events occurring on the network. This may include events unrelated
/// to the Polkadot protocol - the user of this function should filter only for events related
@@ -105,7 +109,11 @@ pub trait Network: Send + 'static {
) -> Pin<Box<dyn Sink<NetworkAction, Error = SubsystemError> + Send + 'a>>;
/// Send a request to a remote peer.
fn start_request(&self, req: Requests);
async fn start_request<AD: AuthorityDiscovery>(
&self,
authority_discovery: &mut AD,
req: Requests,
);
/// Report a given peer as either beneficial (+) or costly (-) according to the given scalar.
fn report_peer(
@@ -137,6 +145,7 @@ pub trait Network: Send + 'static {
}
}
#[async_trait]
impl Network for Arc<NetworkService<Block, Hash>> {
fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> {
NetworkService::event_stream(self, "polkadot-network-bridge").boxed()
@@ -189,7 +198,11 @@ impl Network for Arc<NetworkService<Block, Hash>> {
Box::pin(ActionSink(&**self))
}
fn start_request(&self, req: Requests) {
async fn start_request<AD: AuthorityDiscovery>(
&self,
authority_discovery: &mut AD,
req: Requests,
) {
let (
protocol,
OutgoingRequest {
@@ -199,8 +212,35 @@ impl Network for Arc<NetworkService<Block, Hash>> {
},
) = req.encode_request();
NetworkService::start_request(&*self,
peer,
let peer_id = authority_discovery
.get_addresses_by_authority_id(peer)
.await
.and_then(|addrs| {
addrs
.into_iter()
.find_map(|addr| peer_id_from_multiaddr(&addr))
});
let peer_id = match peer_id {
None => {
tracing::debug!(target: LOG_TARGET, "Discovering authority failed");
match pending_response
.send(Err(RequestFailure::Network(OutboundFailure::DialFailure)))
{
Err(_) => tracing::debug!(
target: LOG_TARGET,
"Sending failed request response failed."
),
Ok(_) => {}
}
return;
}
Some(peer_id) => peer_id,
};
NetworkService::start_request(
&*self,
peer_id,
protocol.into_protocol_name(),
payload,
pending_response,
@@ -131,7 +131,7 @@ fn on_revoke(map: &mut HashMap<AuthorityDiscoveryId, u64>, id: AuthorityDiscover
None
}
fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option<PeerId> {
pub(crate) fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option<PeerId> {
addr.iter().last().and_then(|protocol| if let Protocol::P2p(multihash) = protocol {
PeerId::from_multihash(multihash).ok()
} else {
@@ -379,8 +379,8 @@ async fn determine_our_validators(
let validators = request_validators_ctx(relay_parent, ctx).await?.await??;
let current_validators = current_validators.iter().map(|i| validators[*i as usize].clone()).collect();
let next_validators = next_validators.iter().map(|i| validators[*i as usize].clone()).collect();
let current_validators = current_validators.iter().map(|i| validators[i.0 as usize].clone()).collect();
let next_validators = next_validators.iter().map(|i| validators[i.0 as usize].clone()).collect();
Ok((current_validators, next_validators))
}
@@ -937,7 +937,8 @@ mod tests {
.take(validator_public.len())
.collect();
let validator_groups = vec![vec![2, 0, 4], vec![3, 2, 4]];
let validator_groups = vec![vec![2, 0, 4], vec![3, 2, 4]]
.into_iter().map(|g| g.into_iter().map(ValidatorIndex).collect()).collect();
let group_rotation_info = GroupRotationInfo {
session_start_block: 0,
group_rotation_frequency: 100,
@@ -979,20 +980,20 @@ mod tests {
}
fn current_group_validator_peer_ids(&self) -> Vec<PeerId> {
self.current_group_validator_indices().iter().map(|i| self.validator_peer_id[*i as usize].clone()).collect()
self.current_group_validator_indices().iter().map(|i| self.validator_peer_id[i.0 as usize].clone()).collect()
}
fn current_group_validator_authority_ids(&self) -> Vec<AuthorityDiscoveryId> {
self.current_group_validator_indices()
.iter()
.map(|i| self.validator_authority_id[*i as usize].clone())
.map(|i| self.validator_authority_id[i.0 as usize].clone())
.collect()
}
fn current_group_validator_ids(&self) -> Vec<ValidatorId> {
self.current_group_validator_indices()
.iter()
.map(|i| self.validator_public[*i as usize].clone())
.map(|i| self.validator_public[i.0 as usize].clone())
.collect()
}
@@ -1003,7 +1004,7 @@ mod tests {
fn next_group_validator_authority_ids(&self) -> Vec<AuthorityDiscoveryId> {
self.next_group_validator_indices()
.iter()
.map(|i| self.validator_authority_id[*i as usize].clone())
.map(|i| self.validator_authority_id[i.0 as usize].clone())
.collect()
}
@@ -378,7 +378,7 @@ async fn determine_validators_for_core(
let validators = connect_to_validators
.into_iter()
.map(|idx| validators[idx as usize].clone())
.map(|idx| validators[idx.0 as usize].clone())
.collect();
Ok(Some(validators))
@@ -174,7 +174,8 @@ impl Default for TestState {
.take(validator_public.len())
.collect();
let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]];
let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]
.into_iter().map(|g| g.into_iter().map(ValidatorIndex).collect()).collect();
let group_rotation_info = GroupRotationInfo {
session_start_block: 0,
group_rotation_frequency: 100,
@@ -238,11 +239,11 @@ async fn test_validator_discovery(
assert_eq!(index, session_index);
let validators = validator_group.iter()
.map(|idx| validator_ids[*idx as usize].clone())
.map(|idx| validator_ids[idx.0 as usize].clone())
.collect();
let discovery_keys = validator_group.iter()
.map(|idx| discovery_ids[*idx as usize].clone())
.map(|idx| discovery_ids[idx.0 as usize].clone())
.collect();
tx.send(Ok(Some(SessionInfo {
@@ -737,7 +738,8 @@ fn we_inform_peers_with_same_view_we_are_awaiting() {
.take(validators.len())
.collect();
let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]];
let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]
.into_iter().map(|g| g.into_iter().map(ValidatorIndex).collect()).collect();
let group_rotation_info = GroupRotationInfo {
session_start_block: 0,
group_rotation_frequency: 100,
-12
View File
@@ -300,14 +300,6 @@ pub mod v1 {
use super::RequestId;
use std::convert::TryFrom;
/// Network messages used by the availability distribution subsystem
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub enum AvailabilityDistributionMessage {
/// An erasure chunk for a given candidate hash.
#[codec(index = 0)]
Chunk(CandidateHash, ErasureChunk),
}
/// Network messages used by the availability recovery subsystem.
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub enum AvailabilityRecoveryMessage {
@@ -456,9 +448,6 @@ pub mod v1 {
/// All network messages on the validation peer-set.
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub enum ValidationProtocol {
/// Availability distribution messages
#[codec(index = 0)]
AvailabilityDistribution(AvailabilityDistributionMessage),
/// Bitfield distribution messages
#[codec(index = 1)]
BitfieldDistribution(BitfieldDistributionMessage),
@@ -476,7 +465,6 @@ pub mod v1 {
ApprovalDistribution(ApprovalDistributionMessage),
}
impl_try_from!(ValidationProtocol, AvailabilityDistribution, AvailabilityDistributionMessage);
impl_try_from!(ValidationProtocol, BitfieldDistribution, BitfieldDistributionMessage);
impl_try_from!(ValidationProtocol, PoVDistribution, PoVDistributionMessage);
impl_try_from!(ValidationProtocol, StatementDistribution, StatementDistributionMessage);
@@ -60,7 +60,11 @@ pub enum Protocol {
}
/// Default request timeout in seconds.
const DEFAULT_REQUEST_TIMEOUT: u64 = 8;
///
/// When decreasing this value, take into account that the very first request might need to open a
/// connection, which can be slow. If this causes problems, we should ensure connectivity via peer
/// sets.
const DEFAULT_REQUEST_TIMEOUT: Duration = Duration::from_secs(3);
impl Protocol {
/// Get a configuration for a given Request response protocol.
@@ -86,7 +90,7 @@ impl Protocol {
max_request_size: 10_000,
max_response_size: 1_000_000,
// Also just some relative conservative guess:
request_timeout: Duration::from_secs(DEFAULT_REQUEST_TIMEOUT),
request_timeout: DEFAULT_REQUEST_TIMEOUT,
inbound_queue: Some(tx),
},
};
@@ -22,6 +22,8 @@ use sc_network as network;
use sc_network::config as netconfig;
use sc_network::PeerId;
use polkadot_primitives::v1::AuthorityDiscoveryId;
use super::{v1, Protocol};
/// Common properties of any `Request`.
@@ -69,7 +71,7 @@ impl Requests {
#[derive(Debug)]
pub struct OutgoingRequest<Req> {
/// Intendent recipient of this request.
pub peer: PeerId,
pub peer: AuthorityDiscoveryId,
/// The actual request to send over the wire.
pub payload: Req,
/// Sender which is used by networking to get us back a response.
@@ -98,7 +100,7 @@ where
/// It will contain a sender that is used by the networking for sending back responses. The
/// connected receiver is returned as the second element in the returned tuple.
pub fn new(
peer: PeerId,
peer: AuthorityDiscoveryId,
payload: Req,
) -> (
Self,
@@ -24,18 +24,54 @@ use super::request::IsRequest;
use super::Protocol;
/// Request an availability chunk.
#[derive(Debug, Clone, Encode, Decode)]
#[derive(Debug, Copy, Clone, Encode, Decode)]
pub struct AvailabilityFetchingRequest {
candidate_hash: CandidateHash,
index: ValidatorIndex,
/// Hash of candidate we want a chunk for.
pub candidate_hash: CandidateHash,
/// The index of the chunk to fetch.
pub index: ValidatorIndex,
}
/// Receive a rqeuested erasure chunk.
#[derive(Debug, Clone, Encode, Decode)]
pub enum AvailabilityFetchingResponse {
/// The requested chunk.
/// The requested chunk data.
#[codec(index = 0)]
Chunk(ErasureChunk),
Chunk(ChunkResponse),
/// Node was not in possession of the requested chunk.
#[codec(index = 1)]
NoSuchChunk,
}
/// Skimmed down variant of `ErasureChunk`.
///
/// Instead of transmitting a full `ErasureChunk` we transmit `ChunkResponse` in
/// `AvailabilityFetchingResponse`, which omits the chunk's index. The index is already known by
/// the requester and by not transmitting it, we ensure the requester is going to use his index
/// value for validating the response, thus making sure he got what he requested.
#[derive(Debug, Clone, Encode, Decode)]
pub struct ChunkResponse {
/// The erasure-encoded chunk of data belonging to the candidate block.
pub chunk: Vec<u8>,
/// Proof for this chunk's branch in the Merkle tree.
pub proof: Vec<Vec<u8>>,
}
impl From<ErasureChunk> for ChunkResponse {
fn from(ErasureChunk {chunk, index: _, proof}: ErasureChunk) -> Self {
ChunkResponse {chunk, proof}
}
}
impl ChunkResponse {
/// Re-build an `ErasureChunk` from response and request.
pub fn recombine_into_chunk(self, req: &AvailabilityFetchingRequest) -> ErasureChunk {
ErasureChunk {
chunk: self.chunk,
proof: self.proof,
index: req.index,
}
}
}
impl IsRequest for AvailabilityFetchingRequest {
@@ -494,7 +494,7 @@ fn check_statement_signature(
parent_hash: relay_parent,
};
head.validators.get(statement.validator_index() as usize)
head.validators.get(statement.validator_index().0 as usize)
.ok_or(())
.and_then(|v| statement.check_signature(&signing_context, v))
}
@@ -1133,7 +1133,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate_a.clone()),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
)).ok().flatten().expect("should be signed");
let noted = head_data.note_statement(a_seconded_val_0.clone());
@@ -1150,7 +1150,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate_b.clone()),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1161,7 +1161,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate_c.clone()),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1172,7 +1172,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate_b.clone()),
&signing_context,
1,
ValidatorIndex(1),
&bob_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1183,7 +1183,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate_c.clone()),
&signing_context,
1,
ValidatorIndex(1),
&bob_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1233,7 +1233,7 @@ mod tests {
let hash_a = CandidateHash([1; 32].into());
// Sending an un-pinned statement should not work and should have no effect.
assert!(knowledge.send(&(CompactStatement::Valid(hash_a), 0)).is_none());
assert!(knowledge.send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))).is_none());
assert!(!knowledge.known_candidates.contains(&hash_a));
assert!(knowledge.sent_statements.is_empty());
assert!(knowledge.received_statements.is_empty());
@@ -1241,8 +1241,8 @@ mod tests {
assert!(knowledge.received_message_count.is_empty());
// Make the peer aware of the candidate.
assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), 0)), Some(true));
assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), 1)), Some(false));
assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), ValidatorIndex(0))), Some(true));
assert_eq!(knowledge.send(&(CompactStatement::Candidate(hash_a), ValidatorIndex(1))), Some(false));
assert!(knowledge.known_candidates.contains(&hash_a));
assert_eq!(knowledge.sent_statements.len(), 2);
assert!(knowledge.received_statements.is_empty());
@@ -1250,7 +1250,7 @@ mod tests {
assert!(knowledge.received_message_count.get(&hash_a).is_none());
// And now it should accept the dependent message.
assert_eq!(knowledge.send(&(CompactStatement::Valid(hash_a), 0)), Some(false));
assert_eq!(knowledge.send(&(CompactStatement::Valid(hash_a), ValidatorIndex(0))), Some(false));
assert!(knowledge.known_candidates.contains(&hash_a));
assert_eq!(knowledge.sent_statements.len(), 3);
assert!(knowledge.received_statements.is_empty());
@@ -1263,8 +1263,8 @@ mod tests {
let mut knowledge = PeerRelayParentKnowledge::default();
let hash_a = CandidateHash([1; 32].into());
assert!(knowledge.receive(&(CompactStatement::Candidate(hash_a), 0), 3).unwrap());
assert!(knowledge.send(&(CompactStatement::Candidate(hash_a), 0)).is_none());
assert!(knowledge.receive(&(CompactStatement::Candidate(hash_a), ValidatorIndex(0)), 3).unwrap());
assert!(knowledge.send(&(CompactStatement::Candidate(hash_a), ValidatorIndex(0))).is_none());
}
#[test]
@@ -1274,18 +1274,18 @@ mod tests {
let hash_a = CandidateHash([1; 32].into());
assert_eq!(
knowledge.receive(&(CompactStatement::Valid(hash_a), 0), 3),
knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(0)), 3),
Err(COST_UNEXPECTED_STATEMENT),
);
assert_eq!(
knowledge.receive(&(CompactStatement::Candidate(hash_a), 0), 3),
knowledge.receive(&(CompactStatement::Candidate(hash_a), ValidatorIndex(0)), 3),
Ok(true),
);
// Push statements up to the flood limit.
assert_eq!(
knowledge.receive(&(CompactStatement::Valid(hash_a), 1), 3),
knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(1)), 3),
Ok(false),
);
@@ -1293,14 +1293,14 @@ mod tests {
assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 2);
assert_eq!(
knowledge.receive(&(CompactStatement::Valid(hash_a), 2), 3),
knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
Ok(false),
);
assert_eq!(*knowledge.received_message_count.get(&hash_a).unwrap(), 3);
assert_eq!(
knowledge.receive(&(CompactStatement::Valid(hash_a), 7), 3),
knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(7)), 3),
Err(COST_APPARENT_FLOOD),
);
@@ -1312,23 +1312,23 @@ mod tests {
let hash_c = CandidateHash([3; 32].into());
assert_eq!(
knowledge.receive(&(CompactStatement::Candidate(hash_b), 0), 3),
knowledge.receive(&(CompactStatement::Candidate(hash_b), ValidatorIndex(0)), 3),
Ok(true),
);
assert_eq!(
knowledge.receive(&(CompactStatement::Candidate(hash_c), 0), 3),
knowledge.receive(&(CompactStatement::Candidate(hash_c), ValidatorIndex(0)), 3),
Err(COST_UNEXPECTED_STATEMENT),
);
// Last, make sure that already-known statements are disregarded.
assert_eq!(
knowledge.receive(&(CompactStatement::Valid(hash_a), 2), 3),
knowledge.receive(&(CompactStatement::Valid(hash_a), ValidatorIndex(2)), 3),
Err(COST_DUPLICATE_STATEMENT),
);
assert_eq!(
knowledge.receive(&(CompactStatement::Candidate(hash_b), 0), 3),
knowledge.receive(&(CompactStatement::Candidate(hash_b), ValidatorIndex(0)), 3),
Err(COST_DUPLICATE_STATEMENT),
);
}
@@ -1386,7 +1386,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate.clone()),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1396,7 +1396,7 @@ mod tests {
&keystore,
Statement::Valid(candidate_hash),
&signing_context,
1,
ValidatorIndex(1),
&bob_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1406,7 +1406,7 @@ mod tests {
&keystore,
Statement::Valid(candidate_hash),
&signing_context,
2,
ValidatorIndex(2),
&charlie_public.into(),
)).ok().flatten().expect("should be signed"));
@@ -1451,13 +1451,13 @@ mod tests {
assert!(c_knowledge.known_candidates.contains(&candidate_hash));
assert!(c_knowledge.sent_statements.contains(
&(CompactStatement::Candidate(candidate_hash), 0)
&(CompactStatement::Candidate(candidate_hash), ValidatorIndex(0))
));
assert!(c_knowledge.sent_statements.contains(
&(CompactStatement::Valid(candidate_hash), 1)
&(CompactStatement::Valid(candidate_hash), ValidatorIndex(1))
));
assert!(c_knowledge.sent_statements.contains(
&(CompactStatement::Valid(candidate_hash), 2)
&(CompactStatement::Valid(candidate_hash), ValidatorIndex(2))
));
// now see if we got the 3 messages from the active head data.
@@ -1538,14 +1538,14 @@ mod tests {
&keystore,
Statement::Seconded(candidate),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
).await.ok().flatten().expect("should be signed");
StoredStatement {
comparator: StoredStatementComparator {
compact: statement.payload().to_compact(),
validator_index: 0,
validator_index: ValidatorIndex(0),
signature: statement.signature().clone()
},
statement,
@@ -1565,7 +1565,7 @@ mod tests {
assert!(needs_dependents.contains(&peer_c));
}
let fingerprint = (statement.compact().clone(), 0);
let fingerprint = (statement.compact().clone(), ValidatorIndex(0));
assert!(
peer_data.get(&peer_b).unwrap()
@@ -1706,7 +1706,7 @@ mod tests {
&keystore,
Statement::Seconded(candidate),
&signing_context,
0,
ValidatorIndex(0),
&alice_public.into(),
).await.ok().flatten().expect("should be signed")
};
+2 -7
View File
@@ -2743,10 +2743,6 @@ mod tests {
StatementDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event())
}
fn test_availability_distribution_msg() -> AvailabilityDistributionMessage {
AvailabilityDistributionMessage::NetworkBridgeUpdateV1(test_network_bridge_event())
}
fn test_availability_recovery_msg() -> AvailabilityRecoveryMessage {
let (sender, _) = oneshot::channel();
AvailabilityRecoveryMessage::RecoverAvailableData(
@@ -2854,7 +2850,6 @@ mod tests {
handler.send_msg(AllMessages::CollationGeneration(test_collator_generation_msg())).await;
handler.send_msg(AllMessages::CollatorProtocol(test_collator_protocol_msg())).await;
handler.send_msg(AllMessages::StatementDistribution(test_statement_distribution_msg())).await;
handler.send_msg(AllMessages::AvailabilityDistribution(test_availability_distribution_msg())).await;
handler.send_msg(AllMessages::AvailabilityRecovery(test_availability_recovery_msg())).await;
// handler.send_msg(AllMessages::BitfieldSigning(test_bitfield_signing_msg())).await;
handler.send_msg(AllMessages::BitfieldDistribution(test_bitfield_distribution_msg())).await;
@@ -2877,8 +2872,8 @@ mod tests {
assert_eq!(stop_signals_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS);
// x2 because of broadcast_signal on startup
assert_eq!(signals_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS);
// -1 for BitfieldSigning
assert_eq!(msgs_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS - 1);
// -2 for BitfieldSigning and Availability distribution
assert_eq!(msgs_received.load(atomic::Ordering::SeqCst), NUM_SUBSYSTEMS - 2);
assert!(res.is_ok());
},
+1 -1
View File
@@ -321,7 +321,7 @@ impl Validator {
.iter()
.enumerate()
.find(|(_, k)| k == &&key)
.map(|(idx, _)| idx as ValidatorIndex)
.map(|(idx, _)| ValidatorIndex(idx as u32))
.expect("signing_key would have already returned NotAValidator if the item we're searching for isn't in this list; qed");
Ok(Validator {
+2 -6
View File
@@ -271,11 +271,7 @@ impl NetworkBridgeMessage {
/// Availability Distribution Message.
#[derive(Debug, derive_more::From)]
pub enum AvailabilityDistributionMessage {
/// Event from the network bridge.
#[from]
NetworkBridgeUpdateV1(NetworkBridgeEvent<protocol_v1::AvailabilityDistributionMessage>),
/// Incoming request for an availability chunk.
#[from]
/// Incoming network request for an availability chunk.
AvailabilityFetchingRequest(IncomingRequest<req_res_v1::AvailabilityFetchingRequest>)
}
@@ -298,7 +294,6 @@ impl AvailabilityDistributionMessage {
/// If the current variant contains the relay parent hash, return it.
pub fn relay_parent(&self) -> Option<Hash> {
match self {
Self::NetworkBridgeUpdateV1(_) => None,
Self::AvailabilityFetchingRequest(_) => None,
}
}
@@ -709,6 +704,7 @@ pub enum AllMessages {
/// Message for the statement distribution subsystem.
StatementDistribution(StatementDistributionMessage),
/// Message for the availability distribution subsystem.
#[skip]
AvailabilityDistribution(AvailabilityDistributionMessage),
/// Message for the availability recovery subsystem.
AvailabilityRecovery(AvailabilityRecoveryMessage),
+12 -3
View File
@@ -114,7 +114,16 @@ impl MallocSizeOf for ValidatorId {
}
/// Index of the validator is used as a lightweight replacement of the `ValidatorId` when appropriate.
pub type ValidatorIndex = u32;
#[derive(Eq, Ord, PartialEq, PartialOrd, Copy, Clone, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash, MallocSizeOf))]
pub struct ValidatorIndex(pub u32);
// We should really get https://github.com/paritytech/polkadot/issues/2403 going ..
impl From<u32> for ValidatorIndex {
fn from(n: u32) -> Self {
ValidatorIndex(n)
}
}
application_crypto::with_pair! {
/// A Parachain validator keypair.
@@ -657,13 +666,13 @@ pub struct AvailableData {
}
/// A chunk of erasure-encoded block data.
#[derive(PartialEq, Eq, Clone, Encode, Decode, Default)]
#[derive(PartialEq, Eq, Clone, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug, Hash))]
pub struct ErasureChunk {
/// The erasure-encoded chunk of data belonging to the candidate block.
pub chunk: Vec<u8>,
/// The index of this erasure-encoded chunk of data.
pub index: u32,
pub index: ValidatorIndex,
/// Proof for this chunk's branch in the Merkle tree.
pub proof: Vec<Vec<u8>>,
}
+37 -37
View File
@@ -281,7 +281,7 @@ impl<T: Config> Module<T> {
);
ensure!(
signed_bitfield.validator_index() < validators.len() as ValidatorIndex,
(signed_bitfield.validator_index().0 as usize) < validators.len(),
Error::<T>::ValidatorIndexOutOfBounds,
);
@@ -290,7 +290,7 @@ impl<T: Config> Module<T> {
Error::<T>::UnoccupiedBitInBitfield,
);
let validator_public = &validators[signed_bitfield.validator_index() as usize];
let validator_public = &validators[signed_bitfield.validator_index().0 as usize];
signed_bitfield.check_signature(
&signing_context,
@@ -312,7 +312,7 @@ impl<T: Config> Module<T> {
// defensive check - this is constructed by loading the availability bitfield record,
// which is always `Some` if the core is occupied - that's why we're here.
let val_idx = signed_bitfield.validator_index() as usize;
let val_idx = signed_bitfield.validator_index().0 as usize;
if let Some(mut bit) = pending_availability.as_mut()
.and_then(|r| r.availability_votes.get_mut(val_idx))
{
@@ -527,7 +527,7 @@ impl<T: Config> Module<T> {
&signing_context,
group_vals.len(),
|idx| group_vals.get(idx)
.and_then(|i| validators.get(*i as usize))
.and_then(|i| validators.get(i.0 as usize))
.map(|v| v.clone()),
);
@@ -546,7 +546,7 @@ impl<T: Config> Module<T> {
let val_idx = group_vals.get(bit_idx)
.expect("this query done above; qed");
backers.set(*val_idx as _, true);
backers.set(val_idx.0 as _, true);
}
}
@@ -658,12 +658,12 @@ impl<T: Config> Module<T> {
T::RewardValidators::reward_backing(backers.iter().enumerate()
.filter(|(_, backed)| **backed)
.map(|(i, _)| i as _)
.map(|(i, _)| ValidatorIndex(i as _))
);
T::RewardValidators::reward_bitfields(availability_votes.iter().enumerate()
.filter(|(_, voted)| **voted)
.map(|(i, _)| i as _)
.map(|(i, _)| ValidatorIndex(i as _))
);
// initial weight is config read.
@@ -988,7 +988,7 @@ mod tests {
let candidate_hash = candidate.hash();
for (idx_in_group, val_idx) in group.iter().enumerate().take(signing) {
let key: Sr25519Keyring = validators[*val_idx as usize];
let key: Sr25519Keyring = validators[val_idx.0 as usize];
*validator_indices.get_mut(idx_in_group).unwrap() = true;
let signature = SignedStatement::sign(
@@ -1017,7 +1017,7 @@ mod tests {
&backed,
signing_context,
group.len(),
|i| Some(validators[group[i] as usize].public().into()),
|i| Some(validators[group[i].0 as usize].public().into()),
).ok().unwrap_or(0) * 2 > group.len();
if should_pass {
@@ -1238,7 +1238,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1256,7 +1256,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1274,7 +1274,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1292,7 +1292,7 @@ mod tests {
let signed_0 = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield.clone(),
&signing_context,
));
@@ -1300,7 +1300,7 @@ mod tests {
let signed_1 = block_on(sign_bitfield(
&keystore,
&validators[1],
1,
ValidatorIndex(1),
bare_bitfield,
&signing_context,
));
@@ -1319,7 +1319,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1337,7 +1337,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1372,7 +1372,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1409,7 +1409,7 @@ mod tests {
let signed = block_on(sign_bitfield(
&keystore,
&validators[0],
0,
ValidatorIndex(0),
bare_bitfield,
&signing_context,
));
@@ -1534,7 +1534,7 @@ mod tests {
Some(block_on(sign_bitfield(
&keystore,
key,
i as ValidatorIndex,
ValidatorIndex(i as _),
to_sign,
&signing_context,
)))
@@ -1573,18 +1573,18 @@ mod tests {
let rewards = crate::mock::availability_rewards();
assert_eq!(rewards.len(), 4);
assert_eq!(rewards.get(&0).unwrap(), &1);
assert_eq!(rewards.get(&1).unwrap(), &1);
assert_eq!(rewards.get(&2).unwrap(), &1);
assert_eq!(rewards.get(&3).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(0)).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(1)).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(2)).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1);
}
{
let rewards = crate::mock::backing_rewards();
assert_eq!(rewards.len(), 2);
assert_eq!(rewards.get(&3).unwrap(), &1);
assert_eq!(rewards.get(&4).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(3)).unwrap(), &1);
assert_eq!(rewards.get(&ValidatorIndex(4)).unwrap(), &1);
}
});
}
@@ -1628,7 +1628,7 @@ mod tests {
group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]),
group_index if group_index == GroupIndex::from(2) => Some(vec![4]),
_ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"),
};
}.map(|m| m.into_iter().map(ValidatorIndex).collect::<Vec<_>>());
let thread_collator: CollatorId = Sr25519Keyring::Two.public().into();
@@ -2115,7 +2115,7 @@ mod tests {
group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]),
group_index if group_index == GroupIndex::from(2) => Some(vec![4]),
_ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"),
};
}.map(|vs| vs.into_iter().map(ValidatorIndex).collect::<Vec<_>>());
let thread_collator: CollatorId = Sr25519Keyring::Two.public().into();
@@ -2310,7 +2310,7 @@ mod tests {
let group_validators = |group_index: GroupIndex| match group_index {
group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1, 2, 3, 4]),
_ => panic!("Group index out of bounds for 1 parachain"),
};
}.map(|vs| vs.into_iter().map(ValidatorIndex).collect::<Vec<_>>());
let chain_a_assignment = CoreAssignment {
core: CoreIndex::from(0),
@@ -2408,7 +2408,7 @@ mod tests {
run_to_block(10, |_| None);
<AvailabilityBitfields<Test>>::insert(
&0,
&ValidatorIndex(0),
AvailabilityBitfieldRecord {
bitfield: default_bitfield(),
submitted_at: 9,
@@ -2416,7 +2416,7 @@ mod tests {
);
<AvailabilityBitfields<Test>>::insert(
&1,
&ValidatorIndex(1),
AvailabilityBitfieldRecord {
bitfield: default_bitfield(),
submitted_at: 9,
@@ -2424,7 +2424,7 @@ mod tests {
);
<AvailabilityBitfields<Test>>::insert(
&4,
&ValidatorIndex(4),
AvailabilityBitfieldRecord {
bitfield: default_bitfield(),
submitted_at: 9,
@@ -2461,9 +2461,9 @@ mod tests {
assert_eq!(Validators::get(), validator_public);
assert_eq!(shared::Module::<Test>::session_index(), 5);
assert!(<AvailabilityBitfields<Test>>::get(&0).is_some());
assert!(<AvailabilityBitfields<Test>>::get(&1).is_some());
assert!(<AvailabilityBitfields<Test>>::get(&4).is_some());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(0)).is_some());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(1)).is_some());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(4)).is_some());
assert!(<PendingAvailability<Test>>::get(&chain_a).is_some());
assert!(<PendingAvailability<Test>>::get(&chain_b).is_some());
@@ -2485,9 +2485,9 @@ mod tests {
assert_eq!(Validators::get(), validator_public_new);
assert_eq!(shared::Module::<Test>::session_index(), 6);
assert!(<AvailabilityBitfields<Test>>::get(&0).is_none());
assert!(<AvailabilityBitfields<Test>>::get(&1).is_none());
assert!(<AvailabilityBitfields<Test>>::get(&4).is_none());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(0)).is_none());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(1)).is_none());
assert!(<AvailabilityBitfields<Test>>::get(&ValidatorIndex(4)).is_none());
assert!(<PendingAvailability<Test>>::get(&chain_a).is_none());
assert!(<PendingAvailability<Test>>::get(&chain_b).is_none());
@@ -38,7 +38,7 @@ fn reward_by_indices<C, I>(points: u32, indices: I) where
// and we are rewarding for behavior in current session.
let validators = C::SessionInterface::validators();
let rewards = indices.into_iter()
.filter_map(|i| validators.get(i as usize).map(|v| v.clone()))
.filter_map(|i| validators.get(i.0 as usize).map(|v| v.clone()))
.map(|v| (v, points));
<pallet_staking::Module<C>>::reward_by_ids(rewards);
+1 -1
View File
@@ -263,7 +263,7 @@ impl<T: Config> Module<T> {
let mut shuffled_indices: Vec<_> = (0..validators.len())
.enumerate()
.map(|(i, _)| i as ValidatorIndex)
.map(|(i, _)| ValidatorIndex(i as _))
.collect();
shuffled_indices.shuffle(&mut rng);