feat: initialize Kurdistan SDK - independent fork of Polkadot SDK
This commit is contained in:
@@ -0,0 +1,177 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Assignment criteria VRF generation and checking interfaces.
|
||||
|
||||
use crate::approval::{
|
||||
v1::{DelayTranche, RelayVRFStory},
|
||||
v2::{AssignmentCertV2, CoreBitfield},
|
||||
};
|
||||
use codec::{Decode, Encode};
|
||||
use pezkuwi_primitives::{
|
||||
AssignmentId, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo, ValidatorIndex,
|
||||
};
|
||||
use sc_keystore::LocalKeystore;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Details pertaining to our assignment on a block.
#[derive(Debug, Clone, Encode, Decode, PartialEq)]
pub struct OurAssignment {
	// The certificate backing this assignment.
	cert: AssignmentCertV2,
	// The delay tranche at which the assignment applies.
	tranche: DelayTranche,
	// The index of the assigned validator.
	validator_index: ValidatorIndex,
	// Whether the assignment has been triggered already.
	triggered: bool,
}
|
||||
|
||||
impl OurAssignment {
|
||||
/// Create a new `OurAssignment`.
|
||||
pub fn new(
|
||||
cert: AssignmentCertV2,
|
||||
tranche: DelayTranche,
|
||||
validator_index: ValidatorIndex,
|
||||
triggered: bool,
|
||||
) -> Self {
|
||||
OurAssignment { cert, tranche, validator_index, triggered }
|
||||
}
|
||||
/// Returns a reference to the assignment cert.
|
||||
pub fn cert(&self) -> &AssignmentCertV2 {
|
||||
&self.cert
|
||||
}
|
||||
|
||||
/// Returns the assignment cert.
|
||||
pub fn into_cert(self) -> AssignmentCertV2 {
|
||||
self.cert
|
||||
}
|
||||
|
||||
/// Returns the delay tranche of the assignment.
|
||||
pub fn tranche(&self) -> DelayTranche {
|
||||
self.tranche
|
||||
}
|
||||
|
||||
/// Returns the validator index of the assignment.
|
||||
pub fn validator_index(&self) -> ValidatorIndex {
|
||||
self.validator_index
|
||||
}
|
||||
|
||||
/// Returns whether the assignment has been triggered.
|
||||
pub fn triggered(&self) -> bool {
|
||||
self.triggered
|
||||
}
|
||||
|
||||
/// Marks the assignment as triggered.
|
||||
pub fn mark_triggered(&mut self) {
|
||||
self.triggered = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Information about the world assignments are being produced in.
///
/// Constructed from a [`SessionInfo`] via its `From<&SessionInfo>` impl.
#[derive(Clone, Debug)]
pub struct Config {
	/// The assignment public keys for validators.
	pub assignment_keys: Vec<AssignmentId>,
	/// The groups of validators assigned to each core.
	pub validator_groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
	/// The number of availability cores used by the protocol during this session.
	pub n_cores: u32,
	/// The zeroth delay tranche width.
	pub zeroth_delay_tranche_width: u32,
	/// The number of samples we do of `relay_vrf_modulo`.
	pub relay_vrf_modulo_samples: u32,
	/// The number of delay tranches in total.
	pub n_delay_tranches: u32,
}
|
||||
|
||||
impl<'a> From<&'a SessionInfo> for Config {
	/// Extracts the assignment-relevant subset of the session info.
	fn from(s: &'a SessionInfo) -> Self {
		Self {
			assignment_keys: s.assignment_keys.clone(),
			validator_groups: s.validator_groups.clone(),
			n_cores: s.n_cores,
			zeroth_delay_tranche_width: s.zeroth_delay_tranche_width,
			relay_vrf_modulo_samples: s.relay_vrf_modulo_samples,
			n_delay_tranches: s.n_delay_tranches,
		}
	}
}
|
||||
|
||||
/// A trait for producing and checking assignments.
///
/// The approval voting subsystem provides the real implementation,
/// while tests use a mock implementation.
pub trait AssignmentCriteria {
	/// Compute the assignments for the given relay VRF story.
	///
	/// Returns our assignment (if any) per core among `leaving_cores`.
	fn compute_assignments(
		&self,
		keystore: &LocalKeystore,
		relay_vrf_story: RelayVRFStory,
		config: &Config,
		leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
		enable_v2_assignments: bool,
	) -> HashMap<CoreIndex, OurAssignment>;

	/// Check the assignment cert for the given relay VRF story and returns the delay tranche.
	fn check_assignment_cert(
		&self,
		claimed_core_bitfield: CoreBitfield,
		validator_index: ValidatorIndex,
		config: &Config,
		relay_vrf_story: RelayVRFStory,
		assignment: &AssignmentCertV2,
		// Backing groups for each "leaving core".
		backing_groups: Vec<GroupIndex>,
	) -> Result<DelayTranche, InvalidAssignment>;
}
|
||||
|
||||
/// Assignment invalid.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct InvalidAssignment(pub InvalidAssignmentReason);
|
||||
|
||||
impl std::fmt::Display for InvalidAssignment {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "Invalid Assignment: {:?}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for InvalidAssignment {}
|
||||
|
||||
/// Failure conditions when checking an assignment cert.
///
/// Returned wrapped in [`InvalidAssignment`] from
/// [`AssignmentCriteria::check_assignment_cert`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InvalidAssignmentReason {
	/// The validator index is out of bounds.
	ValidatorIndexOutOfBounds,
	/// Sample index is out of bounds.
	SampleOutOfBounds,
	/// Core index is out of bounds.
	CoreIndexOutOfBounds,
	/// Invalid assignment key.
	InvalidAssignmentKey,
	/// Node is in backing group.
	IsInBackingGroup,
	/// Modulo core index mismatch.
	VRFModuloCoreIndexMismatch,
	/// Modulo output mismatch.
	VRFModuloOutputMismatch,
	/// Delay core index mismatch.
	VRFDelayCoreIndexMismatch,
	/// Delay output mismatch.
	VRFDelayOutputMismatch,
	/// Invalid arguments.
	InvalidArguments,
	/// Assignment vrf check resulted in 0 assigned cores.
	NullAssignment,
}
|
||||
@@ -0,0 +1,590 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Types relevant for approval.
|
||||
|
||||
/// Criteria for assignment.
|
||||
pub mod criteria;
|
||||
|
||||
/// Time utilities for approval voting.
|
||||
pub mod time;
|
||||
|
||||
/// A list of primitives introduced in v1.
|
||||
pub mod v1 {
|
||||
use sp_consensus_babe as babe_primitives;
|
||||
pub use sp_consensus_babe::{
|
||||
Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript,
|
||||
};
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
use pezkuwi_primitives::{
|
||||
BlockNumber, CandidateHash, CandidateIndex, CoreIndex, GroupIndex, Hash, Header,
|
||||
SessionIndex, ValidatorIndex, ValidatorSignature,
|
||||
};
|
||||
use sp_application_crypto::ByteArray;
|
||||
|
||||
/// Validators assigning to check a particular candidate are split up into tranches.
|
||||
/// Earlier tranches of validators check first, with later tranches serving as backup.
|
||||
pub type DelayTranche = u32;
|
||||
|
||||
/// A static context used to compute the Relay VRF story based on the
|
||||
/// VRF output included in the header-chain.
|
||||
pub const RELAY_VRF_STORY_CONTEXT: &[u8] = b"A&V RC-VRF";
|
||||
|
||||
/// A static context used for all relay-vrf-modulo VRFs.
|
||||
pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD";
|
||||
|
||||
/// A static context used for all relay-vrf-modulo VRFs.
|
||||
pub const RELAY_VRF_DELAY_CONTEXT: &[u8] = b"A&V DELAY";
|
||||
|
||||
/// A static context used for transcripts indicating assigned availability core.
|
||||
pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED";
|
||||
|
||||
/// A static context associated with producing randomness for a core.
|
||||
pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE";
|
||||
|
||||
/// A static context associated with producing randomness for a tranche.
|
||||
pub const TRANCHE_RANDOMNESS_CONTEXT: &[u8] = b"A&V TRANCHE";
|
||||
|
||||
/// Random bytes derived from the VRF submitted within the block by the
/// block author as a credential and used as input to approval assignment criteria.
#[derive(Debug, Clone, Encode, Decode, PartialEq)]
pub struct RelayVRFStory(pub [u8; 32]);
|
||||
|
||||
/// Different kinds of input data or criteria that can prove a validator's assignment
/// to check a particular candidate.
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
pub enum AssignmentCertKind {
	/// An assignment story based on the VRF that authorized the relay-chain block where the
	/// candidate was included combined with a sample number.
	///
	/// The context used to produce bytes is [`RELAY_VRF_MODULO_CONTEXT`]
	RelayVRFModulo {
		/// The sample number used in this cert.
		sample: u32,
	},
	/// An assignment story based on the VRF that authorized the relay-chain block where the
	/// candidate was included combined with the index of a particular core.
	///
	/// The context is [`RELAY_VRF_DELAY_CONTEXT`]
	RelayVRFDelay {
		/// The core index chosen in this cert.
		core_index: CoreIndex,
	},
}
|
||||
|
||||
/// A certification of assignment.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct AssignmentCert {
|
||||
/// The criterion which is claimed to be met by this cert.
|
||||
pub kind: AssignmentCertKind,
|
||||
/// The VRF signature showing the criterion is met.
|
||||
pub vrf: VrfSignature,
|
||||
}
|
||||
|
||||
/// An assignment criterion which refers to the candidate under which the assignment is
|
||||
/// relevant by block hash.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct IndirectAssignmentCert {
|
||||
/// A block hash where the candidate appears.
|
||||
pub block_hash: Hash,
|
||||
/// The validator index.
|
||||
pub validator: ValidatorIndex,
|
||||
/// The cert itself.
|
||||
pub cert: AssignmentCert,
|
||||
}
|
||||
|
||||
/// A signed approval vote which references the candidate indirectly via the block.
|
||||
///
|
||||
/// In practice, we have a look-up from block hash and candidate index to candidate hash,
|
||||
/// so this can be transformed into a `SignedApprovalVote`.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct IndirectSignedApprovalVote {
|
||||
/// A block hash where the candidate appears.
|
||||
pub block_hash: Hash,
|
||||
/// The index of the candidate in the list of candidates fully included as-of the block.
|
||||
pub candidate_index: CandidateIndex,
|
||||
/// The validator index.
|
||||
pub validator: ValidatorIndex,
|
||||
/// The signature by the validator.
|
||||
pub signature: ValidatorSignature,
|
||||
}
|
||||
|
||||
/// Metadata about a block which is now live in the approval protocol.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BlockApprovalMeta {
|
||||
/// The hash of the block.
|
||||
pub hash: Hash,
|
||||
/// The number of the block.
|
||||
pub number: BlockNumber,
|
||||
/// The hash of the parent block.
|
||||
pub parent_hash: Hash,
|
||||
/// The candidates included by the block.
|
||||
/// Note that these are not the same as the candidates that appear within the block body.
|
||||
pub candidates: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
|
||||
/// The consensus slot of the block.
|
||||
pub slot: Slot,
|
||||
/// The session of the block.
|
||||
pub session: SessionIndex,
|
||||
/// The vrf story.
|
||||
pub vrf_story: RelayVRFStory,
|
||||
}
|
||||
|
||||
/// Errors that can occur during the approvals protocol.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum ApprovalError {
|
||||
#[error("Schnorrkel signature error")]
|
||||
SchnorrkelSignature(schnorrkel::errors::SignatureError),
|
||||
#[error("Authority index {0} out of bounds")]
|
||||
AuthorityOutOfBounds(usize),
|
||||
}
|
||||
|
||||
/// An unsafe VRF pre-output. Provide BABE Epoch info to create a `RelayVRFStory`.
pub struct UnsafeVRFPreOutput {
	// The raw VRF pre-output taken from the block's BABE pre-digest.
	vrf_pre_output: VrfPreOutput,
	// The slot claimed by the pre-digest.
	slot: Slot,
	// The authority index claimed by the pre-digest.
	authority_index: u32,
}
|
||||
|
||||
impl UnsafeVRFPreOutput {
	/// Get the slot.
	pub fn slot(&self) -> Slot {
		self.slot
	}

	/// Compute the randomness associated with this VRF output.
	///
	/// Looks up the claimed author's key, rebuilds the BABE VRF transcript from
	/// the epoch randomness, slot and epoch index, attaches it to the stored
	/// pre-output, and derives the relay VRF story bytes under
	/// [`super::v1::RELAY_VRF_STORY_CONTEXT`].
	///
	/// # Errors
	///
	/// - [`ApprovalError::AuthorityOutOfBounds`] if `self.authority_index` does not
	///   index into `authorities`.
	/// - [`ApprovalError::SchnorrkelSignature`] if the authority key cannot be parsed
	///   or attaching the transcript to the pre-output fails.
	pub fn compute_randomness(
		self,
		authorities: &[(babe_primitives::AuthorityId, babe_primitives::BabeAuthorityWeight)],
		randomness: &babe_primitives::Randomness,
		epoch_index: u64,
	) -> Result<RelayVRFStory, ApprovalError> {
		// Resolve the claimed authority index to its public key.
		let author = match authorities.get(self.authority_index as usize) {
			None => return Err(ApprovalError::AuthorityOutOfBounds(self.authority_index as _)),
			Some(x) => &x.0,
		};

		let pubkey = schnorrkel::PublicKey::from_bytes(author.as_slice())
			.map_err(ApprovalError::SchnorrkelSignature)?;

		let transcript =
			sp_consensus_babe::make_vrf_transcript(randomness, self.slot, epoch_index);

		// Pair the pre-output with its input transcript under `pubkey`; failure
		// here means the pre-output does not correspond to this transcript/key.
		let inout = self
			.vrf_pre_output
			.0
			.attach_input_hash(&pubkey, transcript.0)
			.map_err(ApprovalError::SchnorrkelSignature)?;
		Ok(RelayVRFStory(inout.make_bytes(super::v1::RELAY_VRF_STORY_CONTEXT)))
	}
}
|
||||
|
||||
/// Extract the slot number and relay VRF from a header.
|
||||
///
|
||||
/// This fails if either there is no BABE `PreRuntime` digest or
|
||||
/// the digest has type `SecondaryPlain`, which Substrate nodes do
|
||||
/// not produce or accept anymore.
|
||||
pub fn babe_unsafe_vrf_info(header: &Header) -> Option<UnsafeVRFPreOutput> {
|
||||
use babe_primitives::digests::CompatibleDigestItem;
|
||||
|
||||
for digest in &header.digest.logs {
|
||||
if let Some(pre) = digest.as_babe_pre_digest() {
|
||||
let slot = pre.slot();
|
||||
let authority_index = pre.authority_index();
|
||||
|
||||
return pre.vrf_signature().map(|sig| UnsafeVRFPreOutput {
|
||||
vrf_pre_output: sig.pre_output.clone(),
|
||||
slot,
|
||||
authority_index,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// A list of primitives introduced by v2.
|
||||
pub mod v2 {
|
||||
use codec::{Decode, Encode};
|
||||
pub use sp_consensus_babe::{
|
||||
Randomness, Slot, VrfPreOutput, VrfProof, VrfSignature, VrfTranscript,
|
||||
};
|
||||
use std::ops::BitOr;
|
||||
|
||||
use bitvec::{prelude::Lsb0, vec::BitVec};
|
||||
use pezkuwi_primitives::{CandidateIndex, CoreIndex, Hash, ValidatorIndex, ValidatorSignature};
|
||||
|
||||
/// A static context associated with producing randomness for a core.
|
||||
pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2";
|
||||
/// A static context associated with producing randomness for v2 multi-core assignments.
|
||||
pub const ASSIGNED_CORE_CONTEXT: &[u8] = b"A&V ASSIGNED v2";
|
||||
/// A static context used for all relay-vrf-modulo VRFs for v2 multi-core assignments.
|
||||
pub const RELAY_VRF_MODULO_CONTEXT: &[u8] = b"A&V MOD v2";
|
||||
/// A read-only bitvec wrapper
|
||||
#[derive(Clone, Debug, Encode, Decode, Hash, PartialEq, Eq)]
|
||||
pub struct Bitfield<T>(BitVec<u8, bitvec::order::Lsb0>, std::marker::PhantomData<T>);
|
||||
|
||||
/// A `read-only`, `non-zero` bitfield.
|
||||
/// Each 1 bit identifies a candidate by the bitfield bit index.
|
||||
pub type CandidateBitfield = Bitfield<CandidateIndex>;
|
||||
/// A bitfield of core assignments.
|
||||
pub type CoreBitfield = Bitfield<CoreIndex>;
|
||||
|
||||
/// Errors that can occur when creating and manipulating bitfields.
|
||||
#[derive(Debug)]
|
||||
pub enum BitfieldError {
|
||||
/// All bits are zero.
|
||||
NullAssignment,
|
||||
}
|
||||
|
||||
/// A bit index in `Bitfield`.
|
||||
#[cfg_attr(test, derive(PartialEq, Clone))]
|
||||
pub struct BitIndex(pub usize);
|
||||
|
||||
/// Helper trait to convert primitives to `BitIndex`.
|
||||
pub trait AsBitIndex {
|
||||
/// Returns the index of the corresponding bit in `Bitfield`.
|
||||
fn as_bit_index(&self) -> BitIndex;
|
||||
}
|
||||
|
||||
impl<T> Bitfield<T> {
|
||||
/// Returns the bit value at specified `index`. If `index` is greater than bitfield size,
|
||||
/// returns `false`.
|
||||
pub fn bit_at(&self, index: BitIndex) -> bool {
|
||||
if self.0.len() <= index.0 {
|
||||
false
|
||||
} else {
|
||||
self.0[index.0]
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns number of bits.
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
/// Returns the number of 1 bits.
|
||||
pub fn count_ones(&self) -> usize {
|
||||
self.0.count_ones()
|
||||
}
|
||||
|
||||
/// Returns the index of the first 1 bit.
|
||||
pub fn first_one(&self) -> Option<usize> {
|
||||
self.0.first_one()
|
||||
}
|
||||
|
||||
/// Returns an iterator over inner bits.
|
||||
pub fn iter_ones(&self) -> bitvec::slice::IterOnes<'_, u8, bitvec::order::Lsb0> {
|
||||
self.0.iter_ones()
|
||||
}
|
||||
|
||||
/// For testing purpose, we want a inner mutable ref.
|
||||
pub fn inner_mut(&mut self) -> &mut BitVec<u8, bitvec::order::Lsb0> {
|
||||
&mut self.0
|
||||
}
|
||||
|
||||
/// Returns the inner bitfield and consumes `self`.
|
||||
pub fn into_inner(self) -> BitVec<u8, bitvec::order::Lsb0> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsBitIndex for CandidateIndex {
|
||||
fn as_bit_index(&self) -> BitIndex {
|
||||
BitIndex(*self as usize)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsBitIndex for CoreIndex {
|
||||
fn as_bit_index(&self) -> BitIndex {
|
||||
BitIndex(self.0 as usize)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsBitIndex for usize {
|
||||
fn as_bit_index(&self) -> BitIndex {
|
||||
BitIndex(*self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<T> for Bitfield<T>
|
||||
where
|
||||
T: AsBitIndex,
|
||||
{
|
||||
fn from(value: T) -> Self {
|
||||
Self(
|
||||
{
|
||||
let mut bv = bitvec::bitvec![u8, Lsb0; 0; value.as_bit_index().0 + 1];
|
||||
bv.set(value.as_bit_index().0, true);
|
||||
bv
|
||||
},
|
||||
Default::default(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> TryFrom<Vec<T>> for Bitfield<T>
where
	T: Into<Bitfield<T>>,
{
	type Error = BitfieldError;

	/// Builds the union (bitwise OR) of the one-bit bitfields of all elements.
	///
	/// Fails with [`BitfieldError::NullAssignment`] when `value` is empty,
	/// upholding the non-zero invariant documented on [`Bitfield`].
	fn try_from(mut value: Vec<T>) -> Result<Self, Self::Error> {
		if value.is_empty() {
			return Err(BitfieldError::NullAssignment);
		}

		// Seed the accumulator with the last element's bitfield.
		let initial_bitfield =
			value.pop().expect("Just checked above it's not empty; qed").into();

		Ok(Self(
			value.into_iter().fold(initial_bitfield.0, |initial_bitfield, element| {
				let mut bitfield: Bitfield<T> = element.into();
				// Grow `bitfield` so it is at least as long as the accumulator
				// before OR-ing the two together.
				bitfield
					.0
					.resize(std::cmp::max(initial_bitfield.len(), bitfield.0.len()), false);
				bitfield.0.bitor(initial_bitfield)
			}),
			Default::default(),
		))
	}
}
|
||||
|
||||
/// Certificate is changed compared to `AssignmentCertKind`:
|
||||
/// - introduced RelayVRFModuloCompact
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub enum AssignmentCertKindV2 {
|
||||
/// Multiple assignment stories based on the VRF that authorized the relay-chain block
|
||||
/// where the candidates were included.
|
||||
///
|
||||
/// The context is [`super::v2::RELAY_VRF_MODULO_CONTEXT`]
|
||||
#[codec(index = 0)]
|
||||
RelayVRFModuloCompact {
|
||||
/// A bitfield representing the core indices claimed by this assignment.
|
||||
core_bitfield: CoreBitfield,
|
||||
},
|
||||
/// An assignment story based on the VRF that authorized the relay-chain block where the
|
||||
/// candidate was included combined with the index of a particular core.
|
||||
///
|
||||
/// The context is [`super::v1::RELAY_VRF_DELAY_CONTEXT`]
|
||||
#[codec(index = 1)]
|
||||
RelayVRFDelay {
|
||||
/// The core index chosen in this cert.
|
||||
core_index: CoreIndex,
|
||||
},
|
||||
/// Deprecated assignment. Soon to be removed.
|
||||
/// An assignment story based on the VRF that authorized the relay-chain block where the
|
||||
/// candidate was included combined with a sample number.
|
||||
///
|
||||
/// The context used to produce bytes is [`super::v1::RELAY_VRF_MODULO_CONTEXT`]
|
||||
#[codec(index = 2)]
|
||||
RelayVRFModulo {
|
||||
/// The sample number used in this cert.
|
||||
sample: u32,
|
||||
},
|
||||
}
|
||||
|
||||
/// A certification of assignment.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct AssignmentCertV2 {
|
||||
/// The criterion which is claimed to be met by this cert.
|
||||
pub kind: AssignmentCertKindV2,
|
||||
/// The VRF showing the criterion is met.
|
||||
pub vrf: VrfSignature,
|
||||
}
|
||||
|
||||
impl From<super::v1::AssignmentCert> for AssignmentCertV2 {
|
||||
fn from(cert: super::v1::AssignmentCert) -> Self {
|
||||
Self {
|
||||
kind: match cert.kind {
|
||||
super::v1::AssignmentCertKind::RelayVRFDelay { core_index } =>
|
||||
AssignmentCertKindV2::RelayVRFDelay { core_index },
|
||||
super::v1::AssignmentCertKind::RelayVRFModulo { sample } =>
|
||||
AssignmentCertKindV2::RelayVRFModulo { sample },
|
||||
},
|
||||
vrf: cert.vrf,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors that can occur when trying to convert to/from assignment v1/v2
|
||||
#[derive(Debug)]
|
||||
pub enum AssignmentConversionError {
|
||||
/// Assignment certificate is not supported in v1.
|
||||
CertificateNotSupported,
|
||||
}
|
||||
|
||||
impl TryFrom<AssignmentCertV2> for super::v1::AssignmentCert {
|
||||
type Error = AssignmentConversionError;
|
||||
fn try_from(cert: AssignmentCertV2) -> Result<Self, AssignmentConversionError> {
|
||||
Ok(Self {
|
||||
kind: match cert.kind {
|
||||
AssignmentCertKindV2::RelayVRFDelay { core_index } =>
|
||||
super::v1::AssignmentCertKind::RelayVRFDelay { core_index },
|
||||
AssignmentCertKindV2::RelayVRFModulo { sample } =>
|
||||
super::v1::AssignmentCertKind::RelayVRFModulo { sample },
|
||||
// Not supported
|
||||
_ => return Err(AssignmentConversionError::CertificateNotSupported),
|
||||
},
|
||||
vrf: cert.vrf,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// An assignment criterion which refers to the candidate under which the assignment is
|
||||
/// relevant by block hash.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct IndirectAssignmentCertV2 {
|
||||
/// A block hash where the candidate appears.
|
||||
pub block_hash: Hash,
|
||||
/// The validator index.
|
||||
pub validator: ValidatorIndex,
|
||||
/// The cert itself.
|
||||
pub cert: AssignmentCertV2,
|
||||
}
|
||||
|
||||
impl From<super::v1::IndirectAssignmentCert> for IndirectAssignmentCertV2 {
|
||||
fn from(indirect_cert: super::v1::IndirectAssignmentCert) -> Self {
|
||||
Self {
|
||||
block_hash: indirect_cert.block_hash,
|
||||
validator: indirect_cert.validator,
|
||||
cert: indirect_cert.cert.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<IndirectAssignmentCertV2> for super::v1::IndirectAssignmentCert {
|
||||
type Error = AssignmentConversionError;
|
||||
fn try_from(
|
||||
indirect_cert: IndirectAssignmentCertV2,
|
||||
) -> Result<Self, AssignmentConversionError> {
|
||||
Ok(Self {
|
||||
block_hash: indirect_cert.block_hash,
|
||||
validator: indirect_cert.validator,
|
||||
cert: indirect_cert.cert.try_into()?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<super::v1::IndirectSignedApprovalVote> for IndirectSignedApprovalVoteV2 {
	fn from(value: super::v1::IndirectSignedApprovalVote) -> Self {
		let super::v1::IndirectSignedApprovalVote {
			block_hash,
			validator,
			candidate_index,
			signature,
		} = value;
		Self {
			block_hash,
			validator,
			// A single candidate index becomes a one-bit bitfield.
			candidate_indices: candidate_index.into(),
			signature,
		}
	}
}
|
||||
|
||||
/// Errors that can occur when trying to convert to/from approvals v1/v2
|
||||
#[derive(Debug)]
|
||||
pub enum ApprovalConversionError {
|
||||
/// More than one candidate was signed.
|
||||
MoreThanOneCandidate(usize),
|
||||
}
|
||||
|
||||
impl TryFrom<IndirectSignedApprovalVoteV2> for super::v1::IndirectSignedApprovalVote {
|
||||
type Error = ApprovalConversionError;
|
||||
|
||||
fn try_from(value: IndirectSignedApprovalVoteV2) -> Result<Self, Self::Error> {
|
||||
if value.candidate_indices.count_ones() != 1 {
|
||||
return Err(ApprovalConversionError::MoreThanOneCandidate(
|
||||
value.candidate_indices.count_ones(),
|
||||
));
|
||||
}
|
||||
Ok(Self {
|
||||
block_hash: value.block_hash,
|
||||
validator: value.validator,
|
||||
candidate_index: value.candidate_indices.first_one().expect("Qed we checked above")
|
||||
as u32,
|
||||
signature: value.signature,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A signed approval vote which references the candidate indirectly via the block.
|
||||
///
|
||||
/// In practice, we have a look-up from block hash and candidate index to candidate hash,
|
||||
/// so this can be transformed into a `SignedApprovalVote`.
|
||||
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)]
|
||||
pub struct IndirectSignedApprovalVoteV2 {
|
||||
/// A block hash where the candidate appears.
|
||||
pub block_hash: Hash,
|
||||
/// The index of the candidate in the list of candidates fully included as-of the block.
|
||||
pub candidate_indices: CandidateBitfield,
|
||||
/// The validator index.
|
||||
pub validator: ValidatorIndex,
|
||||
/// The signature by the validator.
|
||||
pub signature: ValidatorSignature,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::v2::{BitIndex, Bitfield};
|
||||
|
||||
use pezkuwi_primitives::{CandidateIndex, CoreIndex};
|
||||
|
||||
#[test]
|
||||
fn test_assignment_bitfield_from_vec() {
|
||||
let candidate_indices = vec![1u32, 7, 3, 10, 45, 8, 200, 2];
|
||||
let max_index = *candidate_indices.iter().max().unwrap();
|
||||
let bitfield = Bitfield::try_from(candidate_indices.clone()).unwrap();
|
||||
let candidate_indices =
|
||||
candidate_indices.into_iter().map(|i| BitIndex(i as usize)).collect::<Vec<_>>();
|
||||
|
||||
// Test 1 bits.
|
||||
for index in candidate_indices.clone() {
|
||||
assert!(bitfield.bit_at(index));
|
||||
}
|
||||
|
||||
// Test 0 bits.
|
||||
for index in 0..max_index {
|
||||
if candidate_indices.contains(&BitIndex(index as usize)) {
|
||||
continue;
|
||||
}
|
||||
assert!(!bitfield.bit_at(BitIndex(index as usize)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_assignment_bitfield_invariant_msb() {
|
||||
let core_indices = vec![CoreIndex(1), CoreIndex(3), CoreIndex(10), CoreIndex(20)];
|
||||
let mut bitfield = Bitfield::try_from(core_indices.clone()).unwrap();
|
||||
assert!(bitfield.inner_mut().pop().unwrap());
|
||||
|
||||
for i in 0..1024 {
|
||||
assert!(Bitfield::try_from(CoreIndex(i)).unwrap().inner_mut().pop().unwrap());
|
||||
assert!(Bitfield::try_from(i).unwrap().inner_mut().pop().unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_assignment_bitfield_basic() {
|
||||
let bitfield = Bitfield::try_from(CoreIndex(0)).unwrap();
|
||||
assert!(bitfield.bit_at(BitIndex(0)));
|
||||
assert!(!bitfield.bit_at(BitIndex(1)));
|
||||
assert_eq!(bitfield.len(), 1);
|
||||
|
||||
let mut bitfield = Bitfield::try_from(20 as CandidateIndex).unwrap();
|
||||
assert!(bitfield.bit_at(BitIndex(20)));
|
||||
assert_eq!(bitfield.inner_mut().count_ones(), 1);
|
||||
assert_eq!(bitfield.len(), 21);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,270 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Time utilities for approval voting subsystems.
|
||||
|
||||
use futures::{
|
||||
future::BoxFuture,
|
||||
prelude::*,
|
||||
stream::{FusedStream, FuturesUnordered},
|
||||
Stream, StreamExt,
|
||||
};
|
||||
|
||||
use crate::approval::v1::DelayTranche;
|
||||
use sp_consensus_slots::Slot;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
pin::Pin,
|
||||
task::Poll,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use pezkuwi_primitives::{Hash, ValidatorIndex};
|
||||
/// The duration of a single tick in milliseconds.
|
||||
pub const TICK_DURATION_MILLIS: u64 = 500;
|
||||
|
||||
/// A base unit of time, starting from the Unix epoch, split into half-second intervals.
|
||||
pub type Tick = u64;
|
||||
|
||||
/// How far in the future a tick can be accepted.
|
||||
pub const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds.
|
||||
|
||||
/// A clock which allows querying of the current tick as well as
|
||||
/// waiting for a tick to be reached.
|
||||
pub trait Clock {
|
||||
/// Yields the current tick.
|
||||
fn tick_now(&self) -> Tick;
|
||||
|
||||
/// Yields a future which concludes when the given tick is reached.
|
||||
fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
|
||||
}
|
||||
|
||||
/// Extension methods for clocks.
|
||||
pub trait ClockExt {
|
||||
/// Returns the current tranche.
|
||||
fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche;
|
||||
}
|
||||
|
||||
impl<C: Clock + ?Sized> ClockExt for C {
|
||||
fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche {
|
||||
self.tick_now()
|
||||
.saturating_sub(slot_number_to_tick(slot_duration_millis, base_slot)) as u32
|
||||
}
|
||||
}
|
||||
|
||||
/// A clock which uses the actual underlying system clock.
|
||||
#[derive(Clone)]
|
||||
pub struct SystemClock;
|
||||
|
||||
impl Clock for SystemClock {
|
||||
/// Yields the current tick.
|
||||
fn tick_now(&self) -> Tick {
|
||||
match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
|
||||
Err(_) => 0,
|
||||
Ok(d) => d.as_millis() as u64 / TICK_DURATION_MILLIS,
|
||||
}
|
||||
}
|
||||
|
||||
/// Yields a future which concludes when the given tick is reached.
|
||||
fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
||||
let fut = async move {
|
||||
let now = SystemTime::now();
|
||||
let tick_onset = tick_to_time(tick);
|
||||
if now < tick_onset {
|
||||
if let Some(until) = tick_onset.duration_since(now).ok() {
|
||||
futures_timer::Delay::new(until).await;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Box::pin(fut)
|
||||
}
|
||||
}
|
||||
|
||||
fn tick_to_time(tick: Tick) -> SystemTime {
|
||||
SystemTime::UNIX_EPOCH + Duration::from_millis(TICK_DURATION_MILLIS * tick)
|
||||
}
|
||||
|
||||
/// assumes `slot_duration_millis` evenly divided by tick duration.
|
||||
pub fn slot_number_to_tick(slot_duration_millis: u64, slot: Slot) -> Tick {
|
||||
let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS;
|
||||
u64::from(slot) * ticks_per_slot
|
||||
}
|
||||
|
||||
/// Converts a tick to the slot number.
|
||||
pub fn tick_to_slot_number(slot_duration_millis: u64, tick: Tick) -> Slot {
|
||||
let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS;
|
||||
(tick / ticks_per_slot).into()
|
||||
}
|
||||
|
||||
/// Converts a tranche from a slot to the tick number.
|
||||
pub fn tranche_to_tick(slot_duration_millis: u64, slot: Slot, tranche: u32) -> Tick {
|
||||
slot_number_to_tick(slot_duration_millis, slot) + tranche as u64
|
||||
}
|
||||
|
||||
/// A list of delayed futures that gets triggered when the waiting time has expired and it is
/// time to sign the candidate.
/// We have a timer per relay-chain block.
#[derive(Default)]
pub struct DelayedApprovalTimer {
	// Pending timers; each resolves to the (block hash, validator index) it was armed with.
	timers: FuturesUnordered<BoxFuture<'static, (Hash, ValidatorIndex)>>,
	// Block hashes that currently have an armed timer — used to prevent arming a
	// second timer for the same block (see `maybe_arm_timer`).
	blocks: HashSet<Hash>,
}
|
||||
|
||||
impl DelayedApprovalTimer {
	/// Starts a single timer per block hash
	///
	/// Guarantees that if a timer already exists for the given block hash,
	/// no additional timer is started.
	pub fn maybe_arm_timer(
		&mut self,
		wait_until: Tick,
		clock: &dyn Clock,
		block_hash: Hash,
		validator_index: ValidatorIndex,
	) {
		// `HashSet::insert` returns `false` when the hash was already present,
		// i.e. a timer is already armed for this block — in that case do nothing.
		if self.blocks.insert(block_hash) {
			let clock_wait = clock.wait(wait_until);
			self.timers.push(Box::pin(async move {
				clock_wait.await;
				(block_hash, validator_index)
			}));
		}
	}
}
|
||||
|
||||
impl Stream for DelayedApprovalTimer {
|
||||
type Item = (Hash, ValidatorIndex);
|
||||
|
||||
fn poll_next(
|
||||
mut self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
let poll_result = self.timers.poll_next_unpin(cx);
|
||||
match poll_result {
|
||||
Poll::Ready(Some(result)) => {
|
||||
self.blocks.remove(&result.0);
|
||||
Poll::Ready(Some(result))
|
||||
},
|
||||
_ => poll_result,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FusedStream for DelayedApprovalTimer {
	// Terminated exactly when the underlying `FuturesUnordered` is terminated.
	fn is_terminated(&self) -> bool {
		self.timers.is_terminated()
	}
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use std::time::Duration;

	use futures::{executor::block_on, FutureExt, StreamExt};
	use futures_timer::Delay;
	use pezkuwi_primitives::{Hash, ValidatorIndex};

	use crate::approval::time::{Clock, SystemClock};

	use super::DelayedApprovalTimer;

	// An empty timer must never yield an item: the select should always fall
	// through to the 100ms delay arm.
	#[test]
	fn test_select_empty_timer() {
		block_on(async move {
			let mut timer = DelayedApprovalTimer::default();

			for _ in 1..10 {
				let result = futures::select!(
					_ = timer.select_next_some() => {
						0
					}
					// Only this arm should fire
					_ = Delay::new(Duration::from_millis(100)).fuse() => {
						1
					}
				);

				assert_eq!(result, 1);
			}
		});
	}

	// Arms two timers per block hash (the second must be a no-op), checks each
	// fires exactly once, then re-arms after firing to check timers can restart.
	#[test]
	fn test_timer_functionality() {
		block_on(async move {
			let mut timer = DelayedApprovalTimer::default();
			let test_hashes =
				vec![Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)];
			for (index, hash) in test_hashes.iter().enumerate() {
				timer.maybe_arm_timer(
					SystemClock.tick_now() + index as u64,
					&SystemClock,
					*hash,
					ValidatorIndex::from(2),
				);
				// Second arming for the same hash must be ignored (single timer per block).
				timer.maybe_arm_timer(
					SystemClock.tick_now() + index as u64,
					&SystemClock,
					*hash,
					ValidatorIndex::from(2),
				);
			}
			let timeout_hash = Hash::repeat_byte(0x02);
			for i in 0..test_hashes.len() * 2 {
				let result = futures::select!(
					(hash, _) = timer.select_next_some() => {
						hash
					}
					// Timers should fire only once, so for the rest of the iterations we should timeout through here.
					_ = Delay::new(Duration::from_secs(2)).fuse() => {
						timeout_hash
					}
				);
				assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result);
			}

			// Now check timer can be restarted if already fired
			for (index, hash) in test_hashes.iter().enumerate() {
				timer.maybe_arm_timer(
					SystemClock.tick_now() + index as u64,
					&SystemClock,
					*hash,
					ValidatorIndex::from(2),
				);
				timer.maybe_arm_timer(
					SystemClock.tick_now() + index as u64,
					&SystemClock,
					*hash,
					ValidatorIndex::from(2),
				);
			}

			for i in 0..test_hashes.len() * 2 {
				let result = futures::select!(
					(hash, _) = timer.select_next_some() => {
						hash
					}
					// Timers should fire only once, so for the rest of the iterations we should timeout through here.
					_ = Delay::new(Duration::from_secs(2)).fuse() => {
						timeout_hash
					}
				);
				assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result);
			}
		});
	}
}
|
||||
@@ -0,0 +1,268 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! `DisputeMessage` and associated types.
|
||||
//!
|
||||
//! A `DisputeMessage` is a message that indicates a node participating in a dispute and is used
|
||||
//! for interfacing with `DisputeDistribution` to send out our vote in a spam detectable way.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
|
||||
use super::{InvalidDisputeVote, SignedDisputeStatement, ValidDisputeVote};
|
||||
use pezkuwi_primitives::{
|
||||
CandidateReceiptV2 as CandidateReceipt, DisputeStatement, SessionIndex, SessionInfo,
|
||||
ValidatorIndex,
|
||||
};
|
||||
|
||||
/// A dispute initiating/participating message that has been built from signed
/// statements.
///
/// And most likely has been constructed correctly. This is used with
/// `DisputeDistributionMessage::SendDispute` for sending out votes.
///
/// NOTE: This is sent over the wire, any changes are a change in protocol and need to be
/// versioned.
// Newtype over `UncheckedDisputeMessage`: the inner field being private means a
// `DisputeMessage` can only be obtained via the checked constructor below.
#[derive(Debug, Clone)]
pub struct DisputeMessage(UncheckedDisputeMessage);
|
||||
|
||||
/// A `DisputeMessage` where signatures of statements have not yet been checked.
///
/// This is the form decoded straight off the wire (hence `Encode`/`Decode`); use
/// `try_into_signed_votes` to verify the signatures.
#[derive(Clone, Encode, Decode, Debug)]
pub struct UncheckedDisputeMessage {
	/// The candidate being disputed.
	pub candidate_receipt: CandidateReceipt,

	/// The session the candidate appears in.
	pub session_index: SessionIndex,

	/// The invalid vote data that makes up this dispute.
	pub invalid_vote: InvalidDisputeVote,

	/// The valid vote that makes this dispute request valid.
	pub valid_vote: ValidDisputeVote,
}
|
||||
|
||||
/// Things that can go wrong when constructing a `DisputeMessage`.
///
/// See `DisputeMessage::from_signed_statements` for where each variant is produced.
#[derive(Error, Debug)]
pub enum Error {
	/// The statements concerned different candidates.
	#[error("Candidate hashes of the two votes did not match up")]
	CandidateHashMismatch,

	/// The statements concerned different sessions.
	#[error("Session indices of the two votes did not match up")]
	SessionIndexMismatch,

	/// The valid statement validator key did not correspond to passed in `SessionInfo`.
	#[error("Valid statement validator key did not match session information")]
	InvalidValidKey,

	/// The invalid statement validator key did not correspond to passed in `SessionInfo`.
	#[error("Invalid statement validator key did not match session information")]
	InvalidInvalidKey,

	/// Provided receipt had different hash than the `CandidateHash` in the signed statements.
	#[error("Hash of candidate receipt did not match provided hash")]
	InvalidCandidateReceipt,

	/// Valid statement should have `ValidDisputeStatementKind`.
	#[error("Valid statement has kind `invalid`")]
	ValidStatementHasInvalidKind,

	/// Invalid statement should have `InvalidDisputeStatementKind`.
	#[error("Invalid statement has kind `valid`")]
	InvalidStatementHasValidKind,

	/// Provided index could not be found in `SessionInfo`.
	#[error("The valid statement had an invalid validator index")]
	ValidStatementInvalidValidatorIndex,

	/// Provided index could not be found in `SessionInfo`.
	#[error("The invalid statement had an invalid validator index")]
	InvalidStatementInvalidValidatorIndex,
}
|
||||
|
||||
impl DisputeMessage {
	/// Build a `SignedDisputeMessage` and check what can be checked.
	///
	/// This function checks that:
	///
	/// - both statements concern the same candidate
	/// - both statements concern the same session
	/// - the invalid statement is indeed an invalid one
	/// - the valid statement is indeed a valid one
	/// - The passed `CandidateReceipt` has the correct hash (as signed in the statements).
	/// - the given validator indices match with the given `ValidatorId`s in the statements, given a
	///   `SessionInfo`.
	///
	/// We don't check whether the given `SessionInfo` matches the `SessionIndex` in the
	/// statements, because we can't without doing a runtime query. Nevertheless this smart
	/// constructor gives relatively strong guarantees that the resulting `SignedDisputeStatement`
	/// is valid and good. Even the passed `SessionInfo` is most likely right if this function
	/// returns `Ok`, because otherwise the passed `ValidatorId`s in the `SessionInfo` at
	/// their given index would very likely not match the `ValidatorId`s in the statements.
	///
	/// So in summary, this smart constructor should be smart enough to prevent from almost all
	/// programming errors that one could realistically make here.
	///
	/// Note: signatures themselves are NOT re-verified here — the inputs are already
	/// `SignedDisputeStatement`s, whose construction implies a signature check.
	pub fn from_signed_statements(
		valid_statement: SignedDisputeStatement,
		valid_index: ValidatorIndex,
		invalid_statement: SignedDisputeStatement,
		invalid_index: ValidatorIndex,
		candidate_receipt: CandidateReceipt,
		session_info: &SessionInfo,
	) -> Result<Self, Error> {
		let candidate_hash = *valid_statement.candidate_hash();
		// Check statements concern same candidate:
		if candidate_hash != *invalid_statement.candidate_hash() {
			return Err(Error::CandidateHashMismatch);
		}

		// ... and the same session:
		let session_index = valid_statement.session_index();
		if session_index != invalid_statement.session_index() {
			return Err(Error::SessionIndexMismatch);
		}

		// Resolve both validator indices against the session's validator set.
		let valid_id = session_info
			.validators
			.get(valid_index)
			.ok_or(Error::ValidStatementInvalidValidatorIndex)?;
		let invalid_id = session_info
			.validators
			.get(invalid_index)
			.ok_or(Error::InvalidStatementInvalidValidatorIndex)?;

		if valid_id != valid_statement.validator_public() {
			return Err(Error::InvalidValidKey);
		}

		if invalid_id != invalid_statement.validator_public() {
			return Err(Error::InvalidInvalidKey);
		}

		if candidate_receipt.hash() != candidate_hash {
			return Err(Error::InvalidCandidateReceipt);
		}

		// Each statement must carry the matching kind of dispute statement.
		let valid_kind = match valid_statement.statement() {
			DisputeStatement::Valid(v) => v,
			_ => return Err(Error::ValidStatementHasInvalidKind),
		};

		let invalid_kind = match invalid_statement.statement() {
			DisputeStatement::Invalid(v) => v,
			_ => return Err(Error::InvalidStatementHasValidKind),
		};

		let valid_vote = ValidDisputeVote {
			validator_index: valid_index,
			signature: valid_statement.validator_signature().clone(),
			kind: valid_kind.clone(),
		};

		let invalid_vote = InvalidDisputeVote {
			validator_index: invalid_index,
			signature: invalid_statement.validator_signature().clone(),
			kind: *invalid_kind,
		};

		Ok(DisputeMessage(UncheckedDisputeMessage {
			candidate_receipt,
			session_index,
			valid_vote,
			invalid_vote,
		}))
	}

	/// Read only access to the candidate receipt.
	pub fn candidate_receipt(&self) -> &CandidateReceipt {
		&self.0.candidate_receipt
	}

	/// Read only access to the `SessionIndex`.
	pub fn session_index(&self) -> SessionIndex {
		self.0.session_index
	}

	/// Read only access to the invalid vote.
	pub fn invalid_vote(&self) -> &InvalidDisputeVote {
		&self.0.invalid_vote
	}

	/// Read only access to the valid vote.
	pub fn valid_vote(&self) -> &ValidDisputeVote {
		&self.0.valid_vote
	}
}
|
||||
|
||||
impl UncheckedDisputeMessage {
	/// Try to recover the two signed dispute votes from an `UncheckedDisputeMessage`.
	///
	/// Both signatures are verified here via `SignedDisputeStatement::new_checked`.
	///
	/// # Errors
	///
	/// Returns `Err(())` if either validator index is not present in the given
	/// `SessionInfo`, or if either signature fails verification.
	pub fn try_into_signed_votes(
		self,
		session_info: &SessionInfo,
	) -> Result<
		(
			CandidateReceipt,
			(SignedDisputeStatement, ValidatorIndex),
			(SignedDisputeStatement, ValidatorIndex),
		),
		(),
	> {
		let Self { candidate_receipt, session_index, valid_vote, invalid_vote } = self;
		let candidate_hash = candidate_receipt.hash();

		let vote_valid = {
			let ValidDisputeVote { validator_index, signature, kind } = valid_vote;
			// Resolve the claimed index to the session's validator key.
			let validator_public = session_info.validators.get(validator_index).ok_or(())?.clone();

			(
				SignedDisputeStatement::new_checked(
					DisputeStatement::Valid(kind),
					candidate_hash,
					session_index,
					validator_public,
					signature,
				)?,
				validator_index,
			)
		};

		let vote_invalid = {
			let InvalidDisputeVote { validator_index, signature, kind } = invalid_vote;
			let validator_public = session_info.validators.get(validator_index).ok_or(())?.clone();

			(
				SignedDisputeStatement::new_checked(
					DisputeStatement::Invalid(kind),
					candidate_hash,
					session_index,
					validator_public,
					signature,
				)?,
				validator_index,
			)
		};

		Ok((candidate_receipt, vote_valid, vote_invalid))
	}
}
|
||||
|
||||
impl From<DisputeMessage> for UncheckedDisputeMessage {
|
||||
fn from(message: DisputeMessage) -> Self {
|
||||
message.0
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,343 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::{
|
||||
btree_map::{Entry as Bentry, Keys as Bkeys},
|
||||
BTreeMap, BTreeSet,
|
||||
};
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
|
||||
use sp_application_crypto::AppCrypto;
|
||||
use sp_keystore::{Error as KeystoreError, KeystorePtr};
|
||||
|
||||
use pezkuwi_primitives::{
|
||||
CandidateHash, CandidateReceiptV2 as CandidateReceipt, CompactStatement, DisputeStatement,
|
||||
EncodeAs, InvalidDisputeStatementKind, SessionIndex, SigningContext, UncheckedSigned,
|
||||
ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature,
|
||||
};
|
||||
|
||||
/// `DisputeMessage` and related types.
|
||||
mod message;
|
||||
pub use message::{DisputeMessage, Error as DisputeMessageCheckError, UncheckedDisputeMessage};
|
||||
mod status;
|
||||
pub use status::{dispute_is_inactive, DisputeStatus, Timestamp, ACTIVE_DURATION_SECS};
|
||||
|
||||
/// A checked dispute statement from an associated validator.
///
/// Fields are private: a value of this type can only be obtained through the
/// constructors on the `impl` below (checked, or explicitly trusted).
#[derive(Debug, Clone)]
pub struct SignedDisputeStatement {
	// The statement (valid/invalid + kind) that was signed.
	dispute_statement: DisputeStatement,
	// The candidate the statement refers to.
	candidate_hash: CandidateHash,
	// Public key of the signing validator.
	validator_public: ValidatorId,
	// The signature over the statement payload.
	validator_signature: ValidatorSignature,
	// The session in which the statement was made.
	session_index: SessionIndex,
}
|
||||
|
||||
/// Errors encountered while signing a dispute statement.
#[derive(Debug)]
pub enum SignedDisputeStatementError {
	/// Encountered a keystore error while signing
	KeyStoreError(KeystoreError),
	/// Could not generate signing payload
	PayloadError,
}
|
||||
|
||||
/// Tracked votes on candidates, for the purposes of dispute resolution.
#[derive(Debug, Clone)]
pub struct CandidateVotes {
	/// The receipt of the candidate itself.
	pub candidate_receipt: CandidateReceipt,
	/// Votes of validity, sorted by validator index.
	pub valid: ValidCandidateVotes,
	/// Votes of invalidity, sorted by validator index.
	pub invalid: BTreeMap<ValidatorIndex, (InvalidDisputeStatementKind, ValidatorSignature)>,
}
|
||||
|
||||
/// Type alias for retrieving valid votes from `CandidateVotes`.
pub type ValidVoteData = (ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature));

/// Type alias for retrieving invalid votes from `CandidateVotes`.
pub type InvalidVoteData = (ValidatorIndex, (InvalidDisputeStatementKind, ValidatorSignature));
|
||||
|
||||
impl CandidateVotes {
|
||||
/// Get the set of all validators who have votes in the set, ascending.
|
||||
pub fn voted_indices(&self) -> BTreeSet<ValidatorIndex> {
|
||||
let mut keys: BTreeSet<_> = self.valid.keys().cloned().collect();
|
||||
keys.extend(self.invalid.keys().cloned());
|
||||
keys
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
/// Valid candidate votes.
///
/// Prefer backing votes over other votes.
pub struct ValidCandidateVotes {
	// Private so that all insertions go through `insert_vote`, which enforces the
	// "backing votes are never overridden" rule.
	votes: BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)>,
}
|
||||
|
||||
impl ValidCandidateVotes {
	/// Create new empty `ValidCandidateVotes`
	pub fn new() -> Self {
		Self { votes: BTreeMap::new() }
	}
	/// Insert a vote, replacing any already existing vote.
	///
	/// Except, for backing votes: Backing votes are always kept, and will never get overridden.
	/// Import of other kind of `valid` votes, will be ignored if a backing vote is already
	/// present. Any already existing `valid` vote, will be overridden by any given backing vote.
	///
	/// Returns: true, if the insert had any effect.
	pub fn insert_vote(
		&mut self,
		validator_index: ValidatorIndex,
		kind: ValidDisputeStatementKind,
		sig: ValidatorSignature,
	) -> bool {
		match self.votes.entry(validator_index) {
			Bentry::Vacant(vacant) => {
				vacant.insert((kind, sig));
				true
			},
			// Existing backing votes are never overridden.
			Bentry::Occupied(mut occupied) => match occupied.get().0 {
				ValidDisputeStatementKind::BackingValid(_) |
				ValidDisputeStatementKind::BackingSeconded(_) => false,
				ValidDisputeStatementKind::Explicit |
				ValidDisputeStatementKind::ApprovalChecking |
				ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(_) => {
					occupied.insert((kind.clone(), sig));
					// NOTE(review): `occupied.get()` now yields the value that was just
					// inserted, so this comparison appears to always evaluate to `false`.
					// Presumably the intent was to compare `kind` against the *previous*
					// kind (`occupied.insert` returns the old value) — TODO confirm
					// against callers relying on the return value.
					kind != occupied.get().0
				},
			},
		}
	}

	/// Retain any votes that match the given criteria.
	pub fn retain<F>(&mut self, f: F)
	where
		F: FnMut(&ValidatorIndex, &mut (ValidDisputeStatementKind, ValidatorSignature)) -> bool,
	{
		self.votes.retain(f)
	}

	/// Get all the validator indices we have votes for.
	pub fn keys(
		&self,
	) -> Bkeys<'_, ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> {
		self.votes.keys()
	}

	/// Get read only direct access to underlying map.
	pub fn raw(
		&self,
	) -> &BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> {
		&self.votes
	}
}
|
||||
|
||||
impl FromIterator<(ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature))>
|
||||
for ValidCandidateVotes
|
||||
{
|
||||
fn from_iter<T>(iter: T) -> Self
|
||||
where
|
||||
T: IntoIterator<Item = (ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature))>,
|
||||
{
|
||||
Self { votes: BTreeMap::from_iter(iter) }
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ValidCandidateVotes>
|
||||
for BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)>
|
||||
{
|
||||
fn from(wrapped: ValidCandidateVotes) -> Self {
|
||||
wrapped.votes
|
||||
}
|
||||
}
|
||||
impl IntoIterator for ValidCandidateVotes {
	type Item = (ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature));
	// Delegates to the underlying `BTreeMap` iterator (ascending validator index).
	type IntoIter = <BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)> as IntoIterator>::IntoIter;

	fn into_iter(self) -> Self::IntoIter {
		self.votes.into_iter()
	}
}
|
||||
|
||||
impl SignedDisputeStatement {
	/// Create a new `SignedDisputeStatement` from information
	/// that is available on-chain, and hence already can be trusted.
	///
	/// Attention: Not to be used other than with guaranteed fetches.
	// No signature check here — the caller vouches for the data's provenance.
	pub fn new_unchecked_from_trusted_source(
		dispute_statement: DisputeStatement,
		candidate_hash: CandidateHash,
		session_index: SessionIndex,
		validator_public: ValidatorId,
		validator_signature: ValidatorSignature,
	) -> Self {
		SignedDisputeStatement {
			dispute_statement,
			candidate_hash,
			validator_public,
			validator_signature,
			session_index,
		}
	}

	/// Create a new `SignedDisputeStatement`, which is only possible by checking the signature.
	///
	/// # Errors
	///
	/// Returns `Err(())` when the signature does not verify.
	pub fn new_checked(
		dispute_statement: DisputeStatement,
		candidate_hash: CandidateHash,
		session_index: SessionIndex,
		validator_public: ValidatorId,
		validator_signature: ValidatorSignature,
	) -> Result<Self, ()> {
		dispute_statement
			.check_signature(&validator_public, candidate_hash, session_index, &validator_signature)
			.map(|_| SignedDisputeStatement {
				dispute_statement,
				candidate_hash,
				validator_public,
				validator_signature,
				session_index,
			})
	}

	/// Sign this statement with the given keystore and key. Pass `valid = true` to
	/// indicate validity of the candidate, and `valid = false` to indicate invalidity.
	///
	/// Returns `Ok(None)` when the keystore does not hold the requested key.
	pub fn sign_explicit(
		keystore: &KeystorePtr,
		valid: bool,
		candidate_hash: CandidateHash,
		session_index: SessionIndex,
		validator_public: ValidatorId,
	) -> Result<Option<Self>, SignedDisputeStatementError> {
		let dispute_statement = if valid {
			DisputeStatement::Valid(ValidDisputeStatementKind::Explicit)
		} else {
			DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit)
		};

		let data = dispute_statement
			.payload_data(candidate_hash, session_index)
			.map_err(|_| SignedDisputeStatementError::PayloadError)?;
		// `sr25519_sign` yields `Ok(None)` if the key is not in the keystore.
		let signature = keystore
			.sr25519_sign(ValidatorId::ID, validator_public.as_ref(), &data)
			.map_err(SignedDisputeStatementError::KeyStoreError)?
			.map(|sig| Self {
				dispute_statement,
				candidate_hash,
				validator_public,
				validator_signature: sig.into(),
				session_index,
			});
		Ok(signature)
	}

	/// Access the underlying dispute statement
	pub fn statement(&self) -> &DisputeStatement {
		&self.dispute_statement
	}

	/// Access the underlying candidate hash.
	pub fn candidate_hash(&self) -> &CandidateHash {
		&self.candidate_hash
	}

	/// Access the underlying validator public key.
	pub fn validator_public(&self) -> &ValidatorId {
		&self.validator_public
	}

	/// Access the underlying validator signature.
	pub fn validator_signature(&self) -> &ValidatorSignature {
		&self.validator_signature
	}

	/// Consume self to return the signature.
	pub fn into_validator_signature(self) -> ValidatorSignature {
		self.validator_signature
	}

	/// Access the underlying session index.
	pub fn session_index(&self) -> SessionIndex {
		self.session_index
	}

	/// Convert an unchecked backing statement to a [`SignedDisputeStatement`]
	///
	/// As the unchecked backing statement contains only the validator index and
	/// not the validator public key, the public key must be passed as well,
	/// along with the signing context.
	///
	/// This does signature checks again with the data provided.
	pub fn from_backing_statement<T>(
		backing_statement: &UncheckedSigned<T, CompactStatement>,
		signing_context: SigningContext,
		validator_public: ValidatorId,
	) -> Result<Self, ()>
	where
		for<'a> &'a T: Into<CompactStatement>,
		T: EncodeAs<CompactStatement>,
	{
		// Map the backing statement kind onto the corresponding dispute statement kind.
		let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload().into() {
			CompactStatement::Seconded(candidate_hash) => (
				ValidDisputeStatementKind::BackingSeconded(signing_context.parent_hash),
				candidate_hash,
			),
			CompactStatement::Valid(candidate_hash) => (
				ValidDisputeStatementKind::BackingValid(signing_context.parent_hash),
				candidate_hash,
			),
		};

		let dispute_statement = DisputeStatement::Valid(statement_kind);
		Self::new_checked(
			dispute_statement,
			candidate_hash,
			signing_context.session_index,
			validator_public,
			backing_statement.unchecked_signature().clone(),
		)
	}
}
|
||||
|
||||
/// Any invalid vote (currently only explicit).
#[derive(Clone, Encode, Decode, Debug)]
pub struct InvalidDisputeVote {
	/// The voting validator index.
	pub validator_index: ValidatorIndex,

	/// The validator signature, that can be verified when constructing a
	/// `SignedDisputeStatement`.
	pub signature: ValidatorSignature,

	/// Kind of dispute statement.
	pub kind: InvalidDisputeStatementKind,
}
|
||||
|
||||
/// Any valid vote (backing, approval, explicit).
#[derive(Clone, Encode, Decode, Debug)]
pub struct ValidDisputeVote {
	/// The voting validator index.
	pub validator_index: ValidatorIndex,

	/// The validator signature, that can be verified when constructing a
	/// `SignedDisputeStatement`.
	pub signature: ValidatorSignature,

	/// Kind of dispute statement.
	pub kind: ValidDisputeStatementKind,
}
|
||||
@@ -0,0 +1,145 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use codec::{Decode, Encode};
|
||||
|
||||
/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS
/// reboots.
// NOTE(review): the unit (seconds vs milliseconds) is not established in this file —
// confirm against the code producing these timestamps before relying on it.
pub type Timestamp = u64;
|
||||
|
||||
/// The status of a dispute.
///
/// As managed by the dispute coordinator.
///
/// NOTE: This status is persisted to the database, any changes have to be versioned and a db
/// migration will be needed. The explicit `#[codec(index = ..)]` attributes pin the
/// on-disk encoding of each variant.
#[derive(Debug, Clone, Copy, Encode, Decode, PartialEq)]
pub enum DisputeStatus {
	/// The dispute is active and unconcluded.
	#[codec(index = 0)]
	Active,
	/// The dispute has been concluded in favor of the candidate
	/// since the given timestamp.
	#[codec(index = 1)]
	ConcludedFor(Timestamp),
	/// The dispute has been concluded against the candidate
	/// since the given timestamp.
	///
	/// This takes precedence over `ConcludedFor` in the case that
	/// both are true, which is impossible unless a large amount of
	/// validators are participating on both sides.
	#[codec(index = 2)]
	ConcludedAgainst(Timestamp),
	/// Dispute has been confirmed (more than `byzantine_threshold` have already participated/ or
	/// we have seen the candidate included already/participated successfully ourselves).
	#[codec(index = 3)]
	Confirmed,
}
|
||||
|
||||
impl DisputeStatus {
|
||||
/// Initialize the status to the active state.
|
||||
pub fn active() -> DisputeStatus {
|
||||
DisputeStatus::Active
|
||||
}
|
||||
|
||||
/// Move status to confirmed status, if not yet concluded/confirmed already.
|
||||
pub fn confirm(self) -> DisputeStatus {
|
||||
match self {
|
||||
DisputeStatus::Active => DisputeStatus::Confirmed,
|
||||
DisputeStatus::Confirmed => DisputeStatus::Confirmed,
|
||||
DisputeStatus::ConcludedFor(_) | DisputeStatus::ConcludedAgainst(_) => self,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether the dispute is not a spam dispute.
|
||||
pub fn is_confirmed_concluded(&self) -> bool {
|
||||
match self {
|
||||
&DisputeStatus::Confirmed |
|
||||
&DisputeStatus::ConcludedFor(_) |
|
||||
DisputeStatus::ConcludedAgainst(_) => true,
|
||||
&DisputeStatus::Active => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Concluded valid?
|
||||
pub fn has_concluded_for(&self) -> bool {
|
||||
match self {
|
||||
&DisputeStatus::ConcludedFor(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
/// Concluded invalid?
|
||||
pub fn has_concluded_against(&self) -> bool {
|
||||
match self {
|
||||
&DisputeStatus::ConcludedAgainst(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Transition the status to a new status after observing the dispute has concluded for the
|
||||
/// candidate. This may be a no-op if the status was already concluded.
|
||||
pub fn conclude_for(self, now: Timestamp) -> DisputeStatus {
|
||||
match self {
|
||||
DisputeStatus::Active | DisputeStatus::Confirmed => DisputeStatus::ConcludedFor(now),
|
||||
DisputeStatus::ConcludedFor(at) => DisputeStatus::ConcludedFor(std::cmp::min(at, now)),
|
||||
against => against,
|
||||
}
|
||||
}
|
||||
|
||||
/// Transition the status to a new status after observing the dispute has concluded against the
|
||||
/// candidate. This may be a no-op if the status was already concluded.
|
||||
pub fn conclude_against(self, now: Timestamp) -> DisputeStatus {
|
||||
match self {
|
||||
DisputeStatus::Active | DisputeStatus::Confirmed =>
|
||||
DisputeStatus::ConcludedAgainst(now),
|
||||
DisputeStatus::ConcludedFor(at) =>
|
||||
DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
|
||||
DisputeStatus::ConcludedAgainst(at) =>
|
||||
DisputeStatus::ConcludedAgainst(std::cmp::min(at, now)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the disputed candidate is possibly invalid.
|
||||
pub fn is_possibly_invalid(&self) -> bool {
|
||||
match self {
|
||||
DisputeStatus::Active |
|
||||
DisputeStatus::Confirmed |
|
||||
DisputeStatus::ConcludedAgainst(_) => true,
|
||||
DisputeStatus::ConcludedFor(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Yields the timestamp this dispute concluded at, if any.
|
||||
pub fn concluded_at(&self) -> Option<Timestamp> {
|
||||
match self {
|
||||
DisputeStatus::Active | DisputeStatus::Confirmed => None,
|
||||
DisputeStatus::ConcludedFor(at) | DisputeStatus::ConcludedAgainst(at) => Some(*at),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The choice here is fairly arbitrary. But any dispute that concluded more than a few minutes ago
/// is not worth considering anymore. Changing this value has little to no bearing on consensus,
/// and really only affects the work that the node might do on startup during periods of many
/// disputes.
///
/// Used by [`dispute_is_inactive`] as the retention window after conclusion.
pub const ACTIVE_DURATION_SECS: Timestamp = 180;
|
||||
|
||||
/// Returns true if the dispute has concluded for longer than [`ACTIVE_DURATION_SECS`].
|
||||
pub fn dispute_is_inactive(status: &DisputeStatus, now: &Timestamp) -> bool {
|
||||
let at = status.concluded_at();
|
||||
|
||||
at.is_some() && at.unwrap() + ACTIVE_DURATION_SECS < *now
|
||||
}
|
||||
@@ -0,0 +1,671 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Primitive types used on the node-side.
|
||||
//!
|
||||
//! Unlike the `pezkuwi-primitives` crate, these primitives are only used on the node-side,
|
||||
//! not shared between the node and the runtime. This crate builds on top of the primitives defined
|
||||
//! there.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use std::pin::Pin;
|
||||
|
||||
use bounded_vec::BoundedVec;
|
||||
use codec::{Decode, Encode, Error as CodecError, Input};
|
||||
use futures::Future;
|
||||
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use pezkuwi_primitives::{
|
||||
BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, ChunkIndex, CollatorPair,
|
||||
CommittedCandidateReceiptError, CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
|
||||
CompactStatement, CoreIndex, EncodeAs, Hash, HashT, HeadData, Id as ParaId,
|
||||
PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode,
|
||||
ValidationCodeHash, MAX_CODE_SIZE, MAX_POV_SIZE,
|
||||
};
|
||||
pub use sp_consensus_babe::{
|
||||
AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
|
||||
Randomness as BabeRandomness,
|
||||
};
|
||||
|
||||
pub use pezkuwi_teyrchain_primitives::primitives::{BlockData, HorizontalMessages, UpwardMessages};
|
||||
|
||||
pub mod approval;
|
||||
|
||||
/// Disputes related types.
|
||||
pub mod disputes;
|
||||
pub use disputes::{
|
||||
dispute_is_inactive, CandidateVotes, DisputeMessage, DisputeMessageCheckError, DisputeStatus,
|
||||
InvalidDisputeVote, SignedDisputeStatement, Timestamp, UncheckedDisputeMessage,
|
||||
ValidDisputeVote, ACTIVE_DURATION_SECS,
|
||||
};
|
||||
|
||||
/// The current node version, which takes the basic SemVer form `<major>.<minor>.<patch>`.
/// In general, minor should be bumped on every release while major or patch releases are
/// relatively rare.
///
/// The associated worker binaries should use the same version as the node that spawns them.
// `'static` is implied for a `const` string slice (clippy: `redundant_static_lifetimes`).
pub const NODE_VERSION: &str = "1.20.2";
|
||||
|
||||
// For a 16-ary Merkle Prefix Trie, we can expect at most 16 32-byte hashes per node
// plus some overhead:
// header 1 + bitmap 2 + max partial_key 8 + children 16 * (32 + len 1) + value 32 + value len 1
const MERKLE_NODE_MAX_SIZE: usize = 512 + 100;
// 16-ary Merkle Prefix Trie for 32-bit ValidatorIndex has depth at most 8.
const MERKLE_PROOF_MAX_DEPTH: usize = 8;

/// The bomb limit for decompressing code blobs.
///
/// Computed as 4 × `MAX_CODE_SIZE`.
#[deprecated(
    note = "`VALIDATION_CODE_BOMB_LIMIT` will be removed. Use `validation_code_bomb_limit`
	runtime API to retrieve the value from the runtime"
)]
pub const VALIDATION_CODE_BOMB_LIMIT: usize = (MAX_CODE_SIZE * 4u32) as usize;

/// The bomb limit for decompressing PoV blobs.
///
/// Computed as 4 × `MAX_POV_SIZE`; also used by [`maybe_compress_pov`] as the compression bound.
pub const POV_BOMB_LIMIT: usize = (MAX_POV_SIZE * 4u32) as usize;
|
||||
|
||||
/// How many blocks after finalization an information about backed/included candidate should be
/// pre-loaded (when scraping onchain votes) and kept locally (when pruning).
///
/// We don't want to remove scraped candidates on finalization because we want to
/// be sure that disputes will conclude on abandoned forks.
/// Removing the candidate on finalization creates a possibility for an attacker to
/// avoid slashing. If a bad fork is abandoned too quickly because another
/// better one gets finalized the entries for the bad fork will be pruned and we
/// might never participate in a dispute for it.
///
/// Why pre-load finalized blocks? A dispute might be raised against a finalized candidate. In most
/// of the cases it will conclude valid (otherwise we are in big trouble) but nevertheless the
/// node must participate. It's possible to see a vote for such dispute onchain before we have it
/// imported by `dispute-distribution`. In this case we won't have `CandidateReceipt` and the import
/// will fail unless we keep them preloaded.
///
/// This value should consider the timeout we allow for participation in approval-voting. In
/// particular, the following condition should hold:
///
/// slot time * `DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION` > `APPROVAL_EXECUTION_TIMEOUT`
/// + slot time
pub const DISPUTE_CANDIDATE_LIFETIME_AFTER_FINALIZATION: BlockNumber = 10;

/// Linked to `MAX_FINALITY_LAG` in relay chain selection,
/// `MAX_HEADS_LOOK_BACK` in `approval-voting` and
/// `MAX_BATCH_SCRAPE_ANCESTORS` in `dispute-coordinator`.
pub const MAX_FINALITY_LAG: u32 = 500;
|
||||
|
||||
/// Type of a session window size.
///
/// We are not using `NonZeroU32` here because `expect` and `unwrap` are not yet const, so global
/// constants of `SessionWindowSize` would require `LazyLock` in that case.
///
/// See: <https://github.com/rust-lang/rust/issues/67441>
// The non-zero invariant is enforced at construction sites via the
// `new_session_window_size!` macro, which rejects literal zero at compile time.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct SessionWindowSize(SessionIndex);
|
||||
|
||||
#[macro_export]
/// Create a new checked `SessionWindowSize` which cannot be 0.
macro_rules! new_session_window_size {
    // Reject the zero literal in each of its common spellings at compile time.
    (0) => {
        compile_error!("Must be non zero");
    };
    (0_u32) => {
        compile_error!("Must be non zero");
    };
    (0 as u32) => {
        compile_error!("Must be non zero");
    };
    (0 as _) => {
        compile_error!("Must be non zero");
    };
    // Any other literal is accepted as-is.
    ($l:literal) => {
        SessionWindowSize::unchecked_new($l as _)
    };
}
|
||||
|
||||
/// Number of sessions we want to consider in disputes.
///
/// It would be nice to draw this from the chain state, but we have no tools for it right now.
/// On Pezkuwi this is 1 day, and on Kusama it's 6 hours.
pub const DISPUTE_WINDOW: SessionWindowSize = new_session_window_size!(6);
|
||||
|
||||
impl SessionWindowSize {
|
||||
/// Get the value as `SessionIndex` for doing comparisons with those.
|
||||
pub fn get(self) -> SessionIndex {
|
||||
self.0
|
||||
}
|
||||
|
||||
/// Helper function for `new_session_window_size`.
|
||||
///
|
||||
/// Don't use it. The only reason it is public, is because otherwise the
|
||||
/// `new_session_window_size` macro would not work outside of this module.
|
||||
#[doc(hidden)]
|
||||
pub const fn unchecked_new(size: SessionIndex) -> Self {
|
||||
Self(size)
|
||||
}
|
||||
}
|
||||
|
||||
/// The cumulative weight of a block in a fork-choice rule.
pub type BlockWeight = u32;

/// A statement, where the candidate receipt is included in the `Seconded` variant.
///
/// This is the committed candidate receipt instead of the bare candidate receipt. As such,
/// it gives access to the commitments to validators who have not executed the candidate. This
/// is necessary to allow a block-producing validator to include candidates from outside the para
/// it is assigned to.
// NOTE: `Debug` is implemented manually below (prints only the receipt descriptor for
// `Seconded`). The explicit codec indexes pin the wire encoding of each variant.
#[derive(Clone, PartialEq, Eq, Encode, Decode)]
pub enum Statement {
    /// A statement that a validator seconds a candidate.
    #[codec(index = 1)]
    Seconded(CommittedCandidateReceipt),
    /// A statement that a validator has deemed a candidate valid.
    #[codec(index = 2)]
    Valid(CandidateHash),
}
|
||||
|
||||
impl std::fmt::Debug for Statement {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Statement::Seconded(seconded) => write!(f, "Seconded: {:?}", seconded.descriptor),
|
||||
Statement::Valid(hash) => write!(f, "Valid: {:?}", hash),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Statement {
|
||||
/// Get the candidate hash referenced by this statement.
|
||||
///
|
||||
/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be
|
||||
/// expensive for large candidates.
|
||||
pub fn candidate_hash(&self) -> CandidateHash {
|
||||
match *self {
|
||||
Statement::Valid(ref h) => *h,
|
||||
Statement::Seconded(ref c) => c.hash(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Transform this statement into its compact version, which references only the hash
|
||||
/// of the candidate.
|
||||
pub fn to_compact(&self) -> CompactStatement {
|
||||
match *self {
|
||||
Statement::Seconded(ref c) => CompactStatement::Seconded(c.hash()),
|
||||
Statement::Valid(hash) => CompactStatement::Valid(hash),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add the [`PersistedValidationData`] to the statement, if seconded.
|
||||
pub fn supply_pvd(self, pvd: PersistedValidationData) -> StatementWithPVD {
|
||||
match self {
|
||||
Statement::Seconded(c) => StatementWithPVD::Seconded(c, pvd),
|
||||
Statement::Valid(hash) => StatementWithPVD::Valid(hash),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'_ Statement> for CompactStatement {
|
||||
fn from(stmt: &Statement) -> Self {
|
||||
stmt.to_compact()
|
||||
}
|
||||
}
|
||||
|
||||
impl EncodeAs<CompactStatement> for Statement {
    // Sign/verify over the compact form, not the full receipt.
    fn encode_as(&self) -> Vec<u8> {
        let compact = self.to_compact();
        compact.encode()
    }
}
|
||||
|
||||
/// A statement, exactly the same as [`Statement`] but where seconded messages carry
/// the [`PersistedValidationData`].
// NOTE: deliberately not `Encode`/`Decode` — the PVD is stripped (see `drop_pvd`) before
// anything is put on the wire; signing uses the compact form via `EncodeAs` below.
#[derive(Clone, PartialEq, Eq)]
pub enum StatementWithPVD {
    /// A statement that a validator seconds a candidate.
    Seconded(CommittedCandidateReceipt, PersistedValidationData),
    /// A statement that a validator has deemed a candidate valid.
    Valid(CandidateHash),
}
|
||||
|
||||
impl std::fmt::Debug for StatementWithPVD {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
StatementWithPVD::Seconded(seconded, _) => {
|
||||
write!(f, "Seconded: {:?}", seconded.descriptor)
|
||||
},
|
||||
StatementWithPVD::Valid(hash) => write!(f, "Valid: {:?}", hash),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl StatementWithPVD {
|
||||
/// Get the candidate hash referenced by this statement.
|
||||
///
|
||||
/// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be
|
||||
/// expensive for large candidates.
|
||||
pub fn candidate_hash(&self) -> CandidateHash {
|
||||
match *self {
|
||||
StatementWithPVD::Valid(ref h) => *h,
|
||||
StatementWithPVD::Seconded(ref c, _) => c.hash(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Transform this statement into its compact version, which references only the hash
|
||||
/// of the candidate.
|
||||
pub fn to_compact(&self) -> CompactStatement {
|
||||
match *self {
|
||||
StatementWithPVD::Seconded(ref c, _) => CompactStatement::Seconded(c.hash()),
|
||||
StatementWithPVD::Valid(hash) => CompactStatement::Valid(hash),
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop the [`PersistedValidationData`] from the statement.
|
||||
pub fn drop_pvd(self) -> Statement {
|
||||
match self {
|
||||
StatementWithPVD::Seconded(c, _) => Statement::Seconded(c),
|
||||
StatementWithPVD::Valid(c_h) => Statement::Valid(c_h),
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop the [`PersistedValidationData`] from the statement in a signed
|
||||
/// variant.
|
||||
pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement {
|
||||
signed
|
||||
.convert_to_superpayload_with(|s| s.drop_pvd())
|
||||
.expect("persisted_validation_data doesn't affect encode_as; qed")
|
||||
}
|
||||
|
||||
/// Converts the statement to a compact signed statement by dropping the
|
||||
/// [`CommittedCandidateReceipt`] and the [`PersistedValidationData`].
|
||||
pub fn signed_to_compact(signed: SignedFullStatementWithPVD) -> Signed<CompactStatement> {
|
||||
signed
|
||||
.convert_to_superpayload_with(|s| s.to_compact())
|
||||
.expect("doesn't affect encode_as; qed")
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&'_ StatementWithPVD> for CompactStatement {
|
||||
fn from(stmt: &StatementWithPVD) -> Self {
|
||||
stmt.to_compact()
|
||||
}
|
||||
}
|
||||
|
||||
impl EncodeAs<CompactStatement> for StatementWithPVD {
    // Sign/verify over the compact form; neither the receipt nor the PVD is encoded.
    fn encode_as(&self) -> Vec<u8> {
        let compact = self.to_compact();
        compact.encode()
    }
}
|
||||
|
||||
/// A statement, the corresponding signature, and the index of the sender.
///
/// Signing context and validator set should be apparent from context.
///
/// This statement is "full" in the sense that the `Seconded` variant includes the candidate
/// receipt. Only the compact `SignedStatement` is suitable for submission to the chain.
pub type SignedFullStatement = Signed<Statement, CompactStatement>;

/// Variant of `SignedFullStatement` where the signature has not yet been verified.
pub type UncheckedSignedFullStatement = UncheckedSigned<Statement, CompactStatement>;

/// A statement, the corresponding signature, and the index of the sender.
///
/// Seconded statements are accompanied by the [`PersistedValidationData`].
///
/// Signing context and validator set should be apparent from context.
pub type SignedFullStatementWithPVD = Signed<StatementWithPVD, CompactStatement>;
|
||||
|
||||
/// Candidate invalidity details
#[derive(Debug)]
pub enum InvalidCandidate {
    /// Failed to execute `validate_block`. This includes function panicking.
    ExecutionError(String),
    /// Validation outputs check doesn't pass.
    InvalidOutputs,
    /// Execution timeout.
    Timeout,
    /// Validation input is over the limit.
    ParamsTooLarge(u64),
    /// Code size is over the limit.
    CodeTooLarge(u64),
    /// PoV does not decompress correctly.
    PoVDecompressionFailure,
    /// Validation function returned invalid data.
    BadReturn,
    /// Invalid relay chain parent.
    BadParent,
    /// POV hash does not match.
    PoVHashMismatch,
    /// Bad collator signature.
    BadSignature,
    /// Para head hash does not match.
    ParaHeadHashMismatch,
    /// Validation code hash does not match.
    CodeHashMismatch,
    /// Validation has generated different candidate commitments.
    CommitmentsHashMismatch,
    /// The candidate receipt contains an invalid session index.
    InvalidSessionIndex,
    /// The candidate receipt contains invalid UMP signals.
    InvalidUMPSignals(CommittedCandidateReceiptError),
}
|
||||
|
||||
/// Result of the validation of the candidate.
#[derive(Debug)]
pub enum ValidationResult {
    /// Candidate is valid. The validation process yields these outputs and the persisted
    /// validation data used to form inputs.
    Valid(CandidateCommitments, PersistedValidationData),
    /// Candidate is invalid, with the reason given by [`InvalidCandidate`].
    Invalid(InvalidCandidate),
}
|
||||
|
||||
/// A Proof-of-Validity
#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
pub struct PoV {
    /// The block witness data.
    pub block_data: BlockData,
}
|
||||
|
||||
impl PoV {
    /// Get the blake2-256 hash of the PoV.
    ///
    /// Hashes the SCALE encoding of `self` (i.e. of the wrapped `BlockData`).
    pub fn hash(&self) -> Hash {
        BlakeTwo256::hash_of(self)
    }
}
|
||||
|
||||
/// A type that represents a maybe compressed [`PoV`].
#[derive(Clone, Encode, Decode)]
#[cfg(not(target_os = "unknown"))]
pub enum MaybeCompressedPoV {
    /// A raw [`PoV`], aka not compressed.
    Raw(PoV),
    /// The given [`PoV`] is already compressed.
    Compressed(PoV),
}
|
||||
|
||||
#[cfg(not(target_os = "unknown"))]
|
||||
impl std::fmt::Debug for MaybeCompressedPoV {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
let (variant, size) = match self {
|
||||
MaybeCompressedPoV::Raw(pov) => ("Raw", pov.block_data.0.len()),
|
||||
MaybeCompressedPoV::Compressed(pov) => ("Compressed", pov.block_data.0.len()),
|
||||
};
|
||||
|
||||
write!(f, "{} PoV ({} bytes)", variant, size)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "unknown"))]
|
||||
impl MaybeCompressedPoV {
|
||||
/// Convert into a compressed [`PoV`].
|
||||
///
|
||||
/// If `self == Raw` it is compressed using [`maybe_compress_pov`].
|
||||
pub fn into_compressed(self) -> PoV {
|
||||
match self {
|
||||
Self::Raw(raw) => maybe_compress_pov(raw),
|
||||
Self::Compressed(compressed) => compressed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The output of a collator.
///
/// This differs from `CandidateCommitments` in two ways:
///
/// - does not contain the erasure root; that's computed at the Pezkuwi level, not at Cumulus
/// - contains a proof of validity.
#[derive(Debug, Clone, Encode, Decode)]
#[cfg(not(target_os = "unknown"))]
pub struct Collation<BlockNumber = pezkuwi_primitives::BlockNumber> {
    /// Messages destined to be interpreted by the Relay chain itself.
    pub upward_messages: UpwardMessages,
    /// The horizontal messages sent by the teyrchain.
    pub horizontal_messages: HorizontalMessages,
    /// New validation code.
    pub new_validation_code: Option<ValidationCode>,
    /// The head-data produced as a result of execution.
    pub head_data: HeadData,
    /// Proof to verify the state transition of the teyrchain.
    pub proof_of_validity: MaybeCompressedPoV,
    /// The number of messages processed from the DMQ.
    pub processed_downward_messages: u32,
    /// The mark which specifies the block number up to which all inbound HRMP messages are
    /// processed.
    pub hrmp_watermark: BlockNumber,
}
|
||||
|
||||
/// Signal that is being returned when a collation was seconded by a validator.
#[derive(Debug)]
#[cfg(not(target_os = "unknown"))]
pub struct CollationSecondedSignal {
    /// The hash of the relay chain block that was used as context to sign [`Self::statement`].
    pub relay_parent: Hash,
    /// The statement about seconding the collation.
    ///
    /// Anything else than [`Statement::Seconded`] is forbidden here.
    pub statement: SignedFullStatement,
}
|
||||
|
||||
/// Result of the [`CollatorFn`] invocation.
#[cfg(not(target_os = "unknown"))]
pub struct CollationResult {
    /// The collation that was built.
    pub collation: Collation,
    /// An optional result sender that should be informed about a successfully seconded collation.
    ///
    /// There is no guarantee that this sender is informed ever about any result, it is completely
    /// okay to just drop it. However, if it is called, it should be called with the signed
    /// statement of a teyrchain validator seconding the collation.
    pub result_sender: Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>,
}
|
||||
|
||||
#[cfg(not(target_os = "unknown"))]
|
||||
impl CollationResult {
|
||||
/// Convert into the inner values.
|
||||
pub fn into_inner(
|
||||
self,
|
||||
) -> (Collation, Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>) {
|
||||
(self.collation, self.result_sender)
|
||||
}
|
||||
}
|
||||
|
||||
/// Collation function.
///
/// Will be called with the hash of the relay chain block the teyrchain block should be built on
/// and the [`PersistedValidationData`] that provides information about the state of the teyrchain
/// on the relay chain.
///
/// Returns an optional [`CollationResult`].
#[cfg(not(target_os = "unknown"))]
pub type CollatorFn = Box<
    dyn Fn(
            Hash,
            &PersistedValidationData,
        ) -> Pin<Box<dyn Future<Output = Option<CollationResult>> + Send>>
        + Send
        + Sync,
>;
|
||||
|
||||
/// Configuration for the collation generator
#[cfg(not(target_os = "unknown"))]
pub struct CollationGenerationConfig {
    /// Collator's authentication key, so it can sign things.
    pub key: CollatorPair,
    /// Collation function. See [`CollatorFn`] for more details.
    ///
    /// If this is `None`, it implies that collations are intended to be submitted
    /// out-of-band and not pulled out of the function.
    pub collator: Option<CollatorFn>,
    /// The teyrchain that this collator collates for
    pub para_id: ParaId,
}
|
||||
|
||||
#[cfg(not(target_os = "unknown"))]
impl std::fmt::Debug for CollationGenerationConfig {
    // Manual impl: `collator` is an opaque boxed closure and cannot derive `Debug`,
    // so the whole config is rendered as a placeholder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "CollationGenerationConfig {{ ... }}")
    }
}
|
||||
|
||||
/// Parameters for `CollationGenerationMessage::SubmitCollation`.
#[derive(Debug)]
pub struct SubmitCollationParams {
    /// The relay-parent the collation is built against.
    pub relay_parent: Hash,
    /// The collation itself (PoV and commitments)
    pub collation: Collation,
    /// The parent block's head-data.
    pub parent_head: HeadData,
    /// The hash of the validation code the collation was created against.
    pub validation_code_hash: ValidationCodeHash,
    /// An optional result sender that should be informed about a successfully seconded collation.
    ///
    /// There is no guarantee that this sender is informed ever about any result, it is completely
    /// okay to just drop it. However, if it is called, it should be called with the signed
    /// statement of a teyrchain validator seconding the collation.
    pub result_sender: Option<futures::channel::oneshot::Sender<CollationSecondedSignal>>,
    /// The core index on which the resulting candidate should be backed
    pub core_index: CoreIndex,
}
|
||||
|
||||
/// This is the data we keep available for each candidate included in the relay chain.
#[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)]
pub struct AvailableData {
    /// The Proof-of-Validation of the candidate.
    // `Arc` keeps clones of this potentially large blob cheap (refcount bump only).
    pub pov: std::sync::Arc<PoV>,
    /// The persisted validation data needed for approval checks.
    pub validation_data: PersistedValidationData,
}
|
||||
|
||||
/// This is a convenience type to allow the Erasure chunk proof to Decode into a nested BoundedVec
// Outer bound: proof depth (`MERKLE_PROOF_MAX_DEPTH`); inner bound: node size
// (`MERKLE_NODE_MAX_SIZE`). Both are re-checked by the manual `Decode` impl below.
#[derive(PartialEq, Eq, Clone, Debug, Hash)]
pub struct Proof(BoundedVec<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, 1, MERKLE_PROOF_MAX_DEPTH>);
|
||||
|
||||
impl Proof {
|
||||
/// This function allows to convert back to the standard nested Vec format
|
||||
pub fn iter(&self) -> impl Iterator<Item = &[u8]> {
|
||||
self.0.iter().map(|v| v.as_slice())
|
||||
}
|
||||
|
||||
/// Construct an invalid dummy proof
|
||||
///
|
||||
/// Useful for testing, should absolutely not be used in production.
|
||||
pub fn dummy_proof() -> Proof {
|
||||
Proof(BoundedVec::from_vec(vec![BoundedVec::from_vec(vec![0]).unwrap()]).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
/// Possible errors when converting from `Vec<Vec<u8>>` into [`Proof`].
#[derive(thiserror::Error, Debug)]
pub enum MerkleProofError {
    #[error("Merkle max proof depth exceeded {0} > {} .", MERKLE_PROOF_MAX_DEPTH)]
    /// This error signifies that the Proof length exceeds the trie's max depth
    MerkleProofDepthExceeded(usize),

    #[error("Merkle node max size exceeded {0} > {} .", MERKLE_NODE_MAX_SIZE)]
    /// This error signifies that a Proof node exceeds the 16-ary max node size
    MerkleProofNodeSizeExceeded(usize),
}
|
||||
|
||||
impl TryFrom<Vec<Vec<u8>>> for Proof {
|
||||
type Error = MerkleProofError;
|
||||
|
||||
fn try_from(input: Vec<Vec<u8>>) -> Result<Self, Self::Error> {
|
||||
if input.len() > MERKLE_PROOF_MAX_DEPTH {
|
||||
return Err(Self::Error::MerkleProofDepthExceeded(input.len()));
|
||||
}
|
||||
let mut out = Vec::new();
|
||||
for element in input.into_iter() {
|
||||
let length = element.len();
|
||||
let data: BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE> = BoundedVec::from_vec(element)
|
||||
.map_err(|_| Self::Error::MerkleProofNodeSizeExceeded(length))?;
|
||||
out.push(data);
|
||||
}
|
||||
Ok(Proof(BoundedVec::from_vec(out).expect("Buffer size is deterined above. qed")))
|
||||
}
|
||||
}
|
||||
|
||||
impl Decode for Proof {
    fn decode<I: Input>(value: &mut I) -> Result<Self, CodecError> {
        // Decode as a plain nested `Vec` first (matching the `Encode` impl below),
        // then re-establish the bounds `Proof` guarantees.
        let temp: Vec<Vec<u8>> = Decode::decode(value)?;
        let mut out = Vec::new();
        for element in temp.into_iter() {
            // Inner bound: each node must fit `MERKLE_NODE_MAX_SIZE`.
            let bounded_temp: Result<BoundedVec<u8, 1, MERKLE_NODE_MAX_SIZE>, CodecError> =
                BoundedVec::from_vec(element)
                    .map_err(|_| "Inner node exceeds maximum node size.".into());
            out.push(bounded_temp?);
        }
        // Outer bound: the proof must fit `MERKLE_PROOF_MAX_DEPTH` nodes.
        BoundedVec::from_vec(out)
            .map(Self)
            .map_err(|_| "Merkle proof depth exceeds maximum trie depth".into())
    }
}
|
||||
|
||||
impl Encode for Proof {
|
||||
fn size_hint(&self) -> usize {
|
||||
MERKLE_NODE_MAX_SIZE * MERKLE_PROOF_MAX_DEPTH
|
||||
}
|
||||
|
||||
fn using_encoded<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
|
||||
let temp = self.0.iter().map(|v| v.as_vec()).collect::<Vec<_>>();
|
||||
temp.using_encoded(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for Proof {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_bytes(&self.encode())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Proof {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
// Deserialize the string and get individual components
|
||||
let s = Vec::<u8>::deserialize(deserializer)?;
|
||||
let mut slice = s.as_slice();
|
||||
Decode::decode(&mut slice).map_err(de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
/// A chunk of erasure-encoded block data.
#[derive(PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize, Debug, Hash)]
pub struct ErasureChunk {
    /// The erasure-encoded chunk of data belonging to the candidate block.
    pub chunk: Vec<u8>,
    /// The index of this erasure-encoded chunk of data.
    pub index: ChunkIndex,
    /// Proof for this chunk's branch in the Merkle tree.
    pub proof: Proof,
}
|
||||
|
||||
impl ErasureChunk {
    /// Returns a reference to this chunk's Merkle branch [`Proof`].
    ///
    /// (Use [`Proof::iter`] on the result to view the nodes as plain byte slices.)
    pub fn proof(&self) -> &Proof {
        &self.proof
    }
}
|
||||
|
||||
/// Compress a PoV, unless it exceeds the [`POV_BOMB_LIMIT`].
|
||||
#[cfg(not(target_os = "unknown"))]
|
||||
pub fn maybe_compress_pov(pov: PoV) -> PoV {
|
||||
let PoV { block_data: BlockData(raw) } = pov;
|
||||
let raw = sp_maybe_compressed_blob::compress_weakly(&raw, POV_BOMB_LIMIT).unwrap_or(raw);
|
||||
|
||||
let pov = PoV { block_data: BlockData(raw) };
|
||||
pov
|
||||
}
|
||||
Reference in New Issue
Block a user