babe: only process vrf on module finalization (#11113)

* babe: only process vrf on block execution finalization

* babe: rename CurrentBlockRandomness to PreviousBlockRandomness

* babe: add test for initialization ordering

* babe: rename PreviousBlockRandomness to ParentBlockRandomness

* babe: re-add CurrentBlockRandomness with deprecation notice

* babe: export CurrentBlockRandomness

* babe: silence deprecation warning when exporting CurrentBlockRandomness

* babe: suggestion from code review

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* babe: flatten nested option

* babe: rustfmt

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
This commit is contained in:
André Silva
2022-05-04 17:14:58 +01:00
committed by GitHub
parent 2dbdafb437
commit d6d4068ccc
7 changed files with 230 additions and 134 deletions
+1
View File
@@ -10232,6 +10232,7 @@ name = "sp-consensus-vrf"
version = "0.10.0-dev"
dependencies = [
"parity-scale-codec",
"scale-info",
"schnorrkel",
"sp-core",
"sp-runtime",
+96 -87
View File
@@ -61,8 +61,10 @@ mod mock;
mod tests;
pub use equivocation::{BabeEquivocationOffence, EquivocationHandler, HandleEquivocation};
#[allow(deprecated)]
pub use randomness::CurrentBlockRandomness;
pub use randomness::{
CurrentBlockRandomness, RandomnessFromOneEpochAgo, RandomnessFromTwoEpochsAgo,
ParentBlockRandomness, RandomnessFromOneEpochAgo, RandomnessFromTwoEpochsAgo,
};
pub use pallet::*;
@@ -104,8 +106,6 @@ impl EpochChangeTrigger for SameAuthoritiesForever {
const UNDER_CONSTRUCTION_SEGMENT_LENGTH: u32 = 256;
type MaybeRandomness = Option<schnorrkel::Randomness>;
#[frame_support::pallet]
pub mod pallet {
use super::*;
@@ -271,15 +271,16 @@ pub mod pallet {
/// if per-block initialization has already been called for current block.
#[pallet::storage]
#[pallet::getter(fn initialized)]
pub(super) type Initialized<T> = StorageValue<_, MaybeRandomness>;
pub(super) type Initialized<T> = StorageValue<_, Option<PreDigest>>;
/// This field should always be populated during block processing unless
/// secondary plain slots are enabled (which don't contain a VRF output).
///
/// It is set in `on_initialize`, before it will contain the value from the last block.
/// It is set in `on_finalize`, before it will contain the value from the last block.
#[pallet::storage]
#[pallet::getter(fn author_vrf_randomness)]
pub(super) type AuthorVrfRandomness<T> = StorageValue<_, MaybeRandomness, ValueQuery>;
pub(super) type AuthorVrfRandomness<T> =
StorageValue<_, Option<schnorrkel::Randomness>, ValueQuery>;
/// The block numbers when the last and current epoch have started, respectively `N-1` and
/// `N`.
@@ -320,7 +321,7 @@ pub mod pallet {
impl<T: Config> GenesisBuild<T> for GenesisConfig {
fn build(&self) {
SegmentIndex::<T>::put(0);
Pallet::<T>::initialize_authorities(&self.authorities);
Pallet::<T>::initialize_genesis_authorities(&self.authorities);
EpochConfig::<T>::put(
self.epoch_config.clone().expect("epoch_config must not be None"),
);
@@ -331,19 +332,60 @@ pub mod pallet {
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
/// Initialization
fn on_initialize(now: BlockNumberFor<T>) -> Weight {
Self::do_initialize(now);
Self::initialize(now);
0
}
/// Block finalization
fn on_finalize(_n: BlockNumberFor<T>) {
fn on_finalize(_now: BlockNumberFor<T>) {
// at the end of the block, we can safely include the new VRF output
// from this block into the under-construction randomness. If we've determined
// that this block was the first in a new epoch, the changeover logic has
// already occurred at this point, so the under-construction randomness
// will only contain outputs from the right epoch.
if let Some(Some(randomness)) = Initialized::<T>::take() {
Self::deposit_randomness(&randomness);
if let Some(pre_digest) = Initialized::<T>::take().flatten() {
let authority_index = pre_digest.authority_index();
if T::DisabledValidators::is_disabled(authority_index) {
panic!(
"Validator with index {:?} is disabled and should not be attempting to author blocks.",
authority_index,
);
}
if let Some((vrf_output, vrf_proof)) = pre_digest.vrf() {
let randomness: Option<schnorrkel::Randomness> = Authorities::<T>::get()
.get(authority_index as usize)
.and_then(|(authority, _)| {
schnorrkel::PublicKey::from_bytes(authority.as_slice()).ok()
})
.and_then(|pubkey| {
let current_slot = CurrentSlot::<T>::get();
let transcript = sp_consensus_babe::make_transcript(
&Self::randomness(),
current_slot,
EpochIndex::<T>::get(),
);
// NOTE: this is verified by the client when importing the block, before
// execution. we don't run the verification again here to avoid slowing
// down the runtime.
debug_assert!(pubkey
.vrf_verify(transcript.clone(), vrf_output, vrf_proof)
.is_ok());
vrf_output.0.attach_input_hash(&pubkey, transcript).ok()
})
.map(|inout| inout.make_bytes(sp_consensus_babe::BABE_VRF_INOUT_CONTEXT));
if let Some(randomness) = pre_digest.is_primary().then(|| randomness).flatten()
{
Self::deposit_randomness(&randomness);
}
AuthorVrfRandomness::<T>::put(randomness);
}
}
// remove temporary "environment" entry from storage
@@ -451,11 +493,10 @@ impl<T: Config> IsMember<AuthorityId> for Pallet<T> {
impl<T: Config> pallet_session::ShouldEndSession<T::BlockNumber> for Pallet<T> {
fn should_end_session(now: T::BlockNumber) -> bool {
// it might be (and it is in current implementation) that session module is calling
// should_end_session() from it's own on_initialize() handler
// => because pallet_session on_initialize() is called earlier than ours, let's ensure
// that we have synced with digest before checking if session should be ended.
Self::do_initialize(now);
// `should_end_session` from its own `on_initialize` handler, in which case it's
// possible that babe's own `on_initialize` has not run yet, so let's ensure that we
// have initialized the pallet and updated the current slot.
Self::initialize(now);
Self::should_epoch_change(now)
}
}
@@ -573,7 +614,7 @@ impl<T: Config> Pallet<T> {
}
/// Finds the start slot of the current epoch. only guaranteed to
/// give correct results after `do_initialize` of the first block
/// give correct results after `initialize` of the first block
/// in the chain (as its result is based off of `GenesisSlot`).
pub fn current_epoch_start() -> Slot {
Self::epoch_start(EpochIndex::<T>::get())
@@ -649,15 +690,41 @@ impl<T: Config> Pallet<T> {
}
}
fn do_initialize(now: T::BlockNumber) {
// since do_initialize can be called twice (if session module is present)
// => let's ensure that we only modify the storage once per block
fn initialize_genesis_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) {
if !authorities.is_empty() {
assert!(Authorities::<T>::get().is_empty(), "Authorities are already initialized!");
let bounded_authorities =
WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec())
.expect("Initial number of authorities should be lower than T::MaxAuthorities");
Authorities::<T>::put(&bounded_authorities);
NextAuthorities::<T>::put(&bounded_authorities);
}
}
fn initialize_genesis_epoch(genesis_slot: Slot) {
GenesisSlot::<T>::put(genesis_slot);
debug_assert_ne!(*GenesisSlot::<T>::get(), 0);
// deposit a log because this is the first block in epoch #0
// we use the same values as genesis because we haven't collected any
// randomness yet.
let next = NextEpochDescriptor {
authorities: Self::authorities().to_vec(),
randomness: Self::randomness(),
};
Self::deposit_consensus(ConsensusLog::NextEpochData(next));
}
fn initialize(now: T::BlockNumber) {
// since `initialize` can be called twice (e.g. if session module is present)
// let's ensure that we only do the initialization once per block
let initialized = Self::initialized().is_some();
if initialized {
return
}
let maybe_pre_digest: Option<PreDigest> =
let pre_digest =
<frame_system::Pallet<T>>::digest()
.logs
.iter()
@@ -671,76 +738,29 @@ impl<T: Config> Pallet<T> {
})
.next();
let is_primary = matches!(maybe_pre_digest, Some(PreDigest::Primary(..)));
if let Some(ref pre_digest) = pre_digest {
// the slot number of the current block being initialized
let current_slot = pre_digest.slot();
let maybe_randomness: MaybeRandomness = maybe_pre_digest.and_then(|digest| {
// on the first non-zero block (i.e. block #1)
// this is where the first epoch (epoch #0) actually starts.
// we need to adjust internal storage accordingly.
if *GenesisSlot::<T>::get() == 0 {
GenesisSlot::<T>::put(digest.slot());
debug_assert_ne!(*GenesisSlot::<T>::get(), 0);
// deposit a log because this is the first block in epoch #0
// we use the same values as genesis because we haven't collected any
// randomness yet.
let next = NextEpochDescriptor {
authorities: Self::authorities().to_vec(),
randomness: Self::randomness(),
};
Self::deposit_consensus(ConsensusLog::NextEpochData(next))
Self::initialize_genesis_epoch(current_slot)
}
// the slot number of the current block being initialized
let current_slot = digest.slot();
// how many slots were skipped between current and last block
let lateness = current_slot.saturating_sub(CurrentSlot::<T>::get() + 1);
let lateness = T::BlockNumber::from(*lateness as u32);
Lateness::<T>::put(lateness);
CurrentSlot::<T>::put(current_slot);
}
let authority_index = digest.authority_index();
if T::DisabledValidators::is_disabled(authority_index) {
panic!(
"Validator with index {:?} is disabled and should not be attempting to author blocks.",
authority_index,
);
}
// Extract out the VRF output if we have it
digest.vrf_output().and_then(|vrf_output| {
// Reconstruct the bytes of VRFInOut using the authority id.
Authorities::<T>::get()
.get(authority_index as usize)
.and_then(|author| schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok())
.and_then(|pubkey| {
let transcript = sp_consensus_babe::make_transcript(
&Self::randomness(),
current_slot,
EpochIndex::<T>::get(),
);
vrf_output.0.attach_input_hash(&pubkey, transcript).ok()
})
.map(|inout| inout.make_bytes(sp_consensus_babe::BABE_VRF_INOUT_CONTEXT))
})
});
// For primary VRF output we place it in the `Initialized` storage
// item and it'll be put onto the under-construction randomness later,
// once we've decided which epoch this block is in.
Initialized::<T>::put(if is_primary { maybe_randomness } else { None });
// Place either the primary or secondary VRF output into the
// `AuthorVrfRandomness` storage item.
AuthorVrfRandomness::<T>::put(maybe_randomness);
Initialized::<T>::put(pre_digest);
// enact epoch change, if necessary.
T::EpochChangeTrigger::trigger::<T>(now)
T::EpochChangeTrigger::trigger::<T>(now);
}
/// Call this function exactly once when an epoch changes, to update the
@@ -762,17 +782,6 @@ impl<T: Config> Pallet<T> {
this_randomness
}
fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) {
if !authorities.is_empty() {
assert!(Authorities::<T>::get().is_empty(), "Authorities are already initialized!");
let bounded_authorities =
WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec())
.expect("Initial number of authorities should be lower than T::MaxAuthorities");
Authorities::<T>::put(&bounded_authorities);
NextAuthorities::<T>::put(&bounded_authorities);
}
}
fn do_report_equivocation(
reporter: Option<T::AccountId>,
equivocation_proof: EquivocationProof<T::Header>,
@@ -891,7 +900,7 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
I: Iterator<Item = (&'a T::AccountId, AuthorityId)>,
{
let authorities = validators.map(|(_, k)| (k, 1)).collect::<Vec<_>>();
Self::initialize_authorities(&authorities);
Self::initialize_genesis_authorities(&authorities);
}
fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I)
+37 -13
View File
@@ -22,7 +22,7 @@ use super::{
AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, VRF_OUTPUT_LENGTH,
};
use frame_support::traits::Randomness as RandomnessT;
use sp_runtime::traits::Hash;
use sp_runtime::traits::{Hash, One, Saturating};
/// Randomness usable by consensus protocols that **depend** upon finality and take action
/// based upon on-chain commitments made during the epoch before the previous epoch.
@@ -38,7 +38,7 @@ use sp_runtime::traits::Hash;
///
/// All input commitments used with `RandomnessFromTwoEpochsAgo` should come from at least
/// three epochs ago. We require BABE session keys be registered at least three epochs
/// before being used to derive `CurrentBlockRandomness` for example.
/// before being used to derive `ParentBlockRandomness` for example.
///
/// All users learn `RandomnessFromTwoEpochsAgo` when epoch `current_epoch - 1` starts,
/// although some learn it a few blocks earlier inside epoch `current_epoch - 2`.
@@ -76,7 +76,7 @@ pub struct RandomnessFromTwoEpochsAgo<T>(sp_std::marker::PhantomData<T>);
/// end of every epoch, but they possess some influence over when they possess more slots.
///
/// As an example usage, we determine parachain auctions ending times in Polkadot using
/// `RandomnessFromOneEpochAgo` because it reduces bias from `CurrentBlockRandomness` and
/// `RandomnessFromOneEpochAgo` because it reduces bias from `ParentBlockRandomness` and
/// does not require the extra finality delay of `RandomnessFromTwoEpochsAgo`.
pub struct RandomnessFromOneEpochAgo<T>(sp_std::marker::PhantomData<T>);
@@ -89,28 +89,44 @@ pub struct RandomnessFromOneEpochAgo<T>(sp_std::marker::PhantomData<T>);
/// wins whatever game they play.
///
/// As with `RandomnessFromTwoEpochsAgo`, all input commitments combined with
/// `CurrentBlockRandomness` should come from at least two epoch ago, except preferably
/// `ParentBlockRandomness` should come from at least two epochs ago, except preferably
/// not near epoch ending, and thus ideally three epochs ago.
///
/// Almost all users learn this randomness for a block when the block producer announces
/// the block, which makes this randomness appear quite fresh. Yet, the block producer
/// Almost all users learn this randomness for a given block by the time they receive its
/// parent block, which makes this randomness appear fresh enough. Yet, the block producer
/// themselves learned this randomness at the beginning of epoch `current_epoch - 2`, at
/// the same time as they learn `RandomnessFromTwoEpochsAgo`.
///
/// Aside from just biasing `RandomnessFromTwoEpochsAgo`, adversaries could also bias
/// `CurrentBlockRandomness` by never announcing their block if doing so yields an
/// unfavorable randomness. As such, `CurrentBlockRandomness` should be considered weaker
/// than both other randomness sources provided by BABE, but `CurrentBlockRandomness`
/// `ParentBlockRandomness` by never announcing their block if doing so yields an
/// unfavorable randomness. As such, `ParentBlockRandomness` should be considered weaker
/// than both other randomness sources provided by BABE, but `ParentBlockRandomness`
/// remains constrained by declared staking, while a randomness source like block hash is
/// only constrained by adversaries' unknowable computational power.
///
/// As an example use, parachains could assign block production slots based upon the
/// `CurrentBlockRandomness` of their relay parent or relay parent's parent, provided the
/// `ParentBlockRandomness` of their relay parent or relay parent's parent, provided the
/// parachain registers collators but avoids censorship sensitive functionality like
/// slashing. Any parachain with slashing could operate BABE itself or perhaps better yet
/// a BABE-like approach that derives its `CurrentBlockRandomness`, and authorizes block
/// production, based upon the relay parent's `CurrentBlockRandomness` or more likely the
/// a BABE-like approach that derives its `ParentBlockRandomness`, and authorizes block
/// production, based upon the relay parent's `ParentBlockRandomness` or more likely the
/// relay parent's `RandomnessFromTwoEpochsAgo`.
///
/// NOTE: there is some nuance here regarding what is current and parent randomness. If
/// you are using this trait from within the runtime (i.e. as part of block execution)
/// then the randomness provided here will always be generated from the parent block. If
/// instead you are using this randomness externally, i.e. after block execution, then
/// this randomness will be provided by the "current" block (this stems from the fact that
/// we process VRF outputs on block execution finalization, i.e. `on_finalize`).
pub struct ParentBlockRandomness<T>(sp_std::marker::PhantomData<T>);
/// Randomness produced semi-freshly with each block, but inherits limitations of
/// `RandomnessFromTwoEpochsAgo` from which it derives.
///
/// See [`ParentBlockRandomness`].
#[deprecated(note = "Should not be relied upon for correctness, \
will not provide fresh randomness for the current block. \
Please use `ParentBlockRandomness` instead.")]
pub struct CurrentBlockRandomness<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> RandomnessT<T::Hash, T::BlockNumber> for RandomnessFromTwoEpochsAgo<T> {
@@ -133,7 +149,7 @@ impl<T: Config> RandomnessT<T::Hash, T::BlockNumber> for RandomnessFromOneEpochA
}
}
impl<T: Config> RandomnessT<Option<T::Hash>, T::BlockNumber> for CurrentBlockRandomness<T> {
impl<T: Config> RandomnessT<Option<T::Hash>, T::BlockNumber> for ParentBlockRandomness<T> {
fn random(subject: &[u8]) -> (Option<T::Hash>, T::BlockNumber) {
let random = AuthorVrfRandomness::<T>::get().map(|random| {
let mut subject = subject.to_vec();
@@ -143,6 +159,14 @@ impl<T: Config> RandomnessT<Option<T::Hash>, T::BlockNumber> for CurrentBlockRan
T::Hashing::hash(&subject[..])
});
(random, <frame_system::Pallet<T>>::block_number().saturating_sub(One::one()))
}
}
#[allow(deprecated)]
impl<T: Config> RandomnessT<Option<T::Hash>, T::BlockNumber> for CurrentBlockRandomness<T> {
fn random(subject: &[u8]) -> (Option<T::Hash>, T::BlockNumber) {
let (random, _) = ParentBlockRandomness::<T>::random(subject);
(random, <frame_system::Pallet<T>>::block_number())
}
}
+49 -25
View File
@@ -26,6 +26,7 @@ use frame_support::{
use mock::*;
use pallet_session::ShouldEndSession;
use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot};
use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof};
use sp_core::crypto::Pair;
const EMPTY_RANDOMNESS: [u8; 32] = [
@@ -76,11 +77,11 @@ fn first_block_epoch_zero_start() {
assert_eq!(Babe::genesis_slot(), genesis_slot);
assert_eq!(Babe::current_slot(), genesis_slot);
assert_eq!(Babe::epoch_index(), 0);
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
Babe::on_finalize(1);
let header = System::finalize();
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
assert_eq!(SegmentIndex::<Test>::get(), 0);
assert_eq!(UnderConstruction::<Test>::get(0), vec![vrf_randomness]);
assert_eq!(Babe::randomness(), [0; 32]);
@@ -105,46 +106,69 @@ fn first_block_epoch_zero_start() {
}
#[test]
fn author_vrf_output_for_primary() {
fn current_slot_is_processed_on_initialization() {
let (pairs, mut ext) = new_test_ext_with_pairs(1);
ext.execute_with(|| {
let genesis_slot = Slot::from(10);
let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]);
let primary_pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof);
let pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof);
System::reset_events();
System::initialize(&1, &Default::default(), &primary_pre_digest);
System::initialize(&1, &Default::default(), &pre_digest);
assert_eq!(Babe::current_slot(), Slot::from(0));
assert!(Babe::initialized().is_none());
Babe::do_initialize(1);
// current slot is updated on initialization
Babe::initialize(1);
assert_eq!(Babe::current_slot(), genesis_slot);
assert!(Babe::initialized().is_some());
// but author vrf randomness isn't
assert_eq!(Babe::author_vrf_randomness(), None);
// instead it is updated on block finalization
Babe::on_finalize(1);
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
})
}
fn test_author_vrf_output<F>(make_pre_digest: F)
where
F: Fn(sp_consensus_babe::AuthorityIndex, Slot, VRFOutput, VRFProof) -> sp_runtime::Digest,
{
let (pairs, mut ext) = new_test_ext_with_pairs(1);
ext.execute_with(|| {
let genesis_slot = Slot::from(10);
let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]);
let pre_digest = make_pre_digest(0, genesis_slot, vrf_output, vrf_proof);
System::reset_events();
System::initialize(&1, &Default::default(), &pre_digest);
// author vrf randomness is not updated on initialization
Babe::initialize(1);
assert_eq!(Babe::author_vrf_randomness(), None);
// instead it is updated on block finalization to account for any
// epoch changes that might happen during the block
Babe::on_finalize(1);
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
Babe::on_finalize(1);
// and it is kept after finalizing the block
System::finalize();
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
})
}
#[test]
fn author_vrf_output_for_primary() {
test_author_vrf_output(make_primary_pre_digest);
}
#[test]
fn author_vrf_output_for_secondary_vrf() {
let (pairs, mut ext) = new_test_ext_with_pairs(1);
ext.execute_with(|| {
let genesis_slot = Slot::from(10);
let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]);
let secondary_vrf_pre_digest =
make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof);
System::reset_events();
System::initialize(&1, &Default::default(), &secondary_vrf_pre_digest);
Babe::do_initialize(1);
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
Babe::on_finalize(1);
System::finalize();
assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness));
})
test_author_vrf_output(make_secondary_vrf_pre_digest);
}
#[test]
@@ -157,7 +181,7 @@ fn no_author_vrf_output_for_secondary_plain() {
System::initialize(&1, &Default::default(), &secondary_plain_pre_digest);
assert_eq!(Babe::author_vrf_randomness(), None);
Babe::do_initialize(1);
Babe::initialize(1);
assert_eq!(Babe::author_vrf_randomness(), None);
Babe::on_finalize(1);
@@ -22,13 +22,14 @@ use super::{
BabeEpochConfiguration, Slot, BABE_ENGINE_ID,
};
use codec::{Decode, Encode, MaxEncodedLen};
use scale_info::TypeInfo;
use sp_runtime::{DigestItem, RuntimeDebug};
use sp_std::vec::Vec;
use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof};
/// Raw BABE primary slot assignment pre-digest.
#[derive(Clone, RuntimeDebug, Encode, Decode)]
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
pub struct PrimaryPreDigest {
/// Authority index
pub authority_index: super::AuthorityIndex,
@@ -41,7 +42,7 @@ pub struct PrimaryPreDigest {
}
/// BABE secondary slot assignment pre-digest.
#[derive(Clone, RuntimeDebug, Encode, Decode)]
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
pub struct SecondaryPlainPreDigest {
/// Authority index
///
@@ -55,7 +56,7 @@ pub struct SecondaryPlainPreDigest {
}
/// BABE secondary deterministic slot assignment with VRF outputs.
#[derive(Clone, RuntimeDebug, Encode, Decode)]
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
pub struct SecondaryVRFPreDigest {
/// Authority index
pub authority_index: super::AuthorityIndex,
@@ -70,7 +71,7 @@ pub struct SecondaryVRFPreDigest {
/// A BABE pre-runtime digest. This contains all data required to validate a
/// block and for the BABE runtime module. Slots can be assigned to a primary
/// (VRF based) and to a secondary (slot number based).
#[derive(Clone, RuntimeDebug, Encode, Decode)]
#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
pub enum PreDigest {
/// A primary VRF-based slot assignment.
#[codec(index = 1)]
@@ -102,6 +103,11 @@ impl PreDigest {
}
}
/// Returns true if this pre-digest is for a primary slot assignment.
pub fn is_primary(&self) -> bool {
matches!(self, PreDigest::Primary(..))
}
/// Returns the weight _added_ by this digest, not the cumulative weight
/// of the chain.
pub fn added_weight(&self) -> crate::BabeBlockWeight {
@@ -111,11 +117,12 @@ impl PreDigest {
}
}
/// Returns the VRF output, if it exists.
pub fn vrf_output(&self) -> Option<&VRFOutput> {
/// Returns the VRF output and proof, if they exist.
pub fn vrf(&self) -> Option<(&VRFOutput, &VRFProof)> {
match self {
PreDigest::Primary(primary) => Some(&primary.vrf_output),
PreDigest::SecondaryVRF(secondary) => Some(&secondary.vrf_output),
PreDigest::Primary(primary) => Some((&primary.vrf_output, &primary.vrf_proof)),
PreDigest::SecondaryVRF(secondary) =>
Some((&secondary.vrf_output, &secondary.vrf_proof)),
PreDigest::SecondaryPlain(_) => None,
}
}
@@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false }
scale-info = { version = "2.0.1", default-features = false }
schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] }
sp-core = { version = "6.0.0", default-features = false, path = "../../core" }
sp-runtime = { version = "6.0.0", default-features = false, path = "../../runtime" }
@@ -23,6 +24,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../std" }
default = ["std"]
std = [
"codec/std",
"scale-info/std",
"schnorrkel/std",
"sp-core/std",
"sp-runtime/std",
@@ -17,7 +17,8 @@
//! Schnorrkel-based VRF.
use codec::{Decode, Encode, EncodeLike};
use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
use scale_info::TypeInfo;
use schnorrkel::errors::MultiSignatureStage;
use sp_core::U512;
use sp_std::{
@@ -65,6 +66,20 @@ impl Decode for VRFOutput {
}
}
impl MaxEncodedLen for VRFOutput {
fn max_encoded_len() -> usize {
<[u8; VRF_OUTPUT_LENGTH]>::max_encoded_len()
}
}
impl TypeInfo for VRFOutput {
type Identity = [u8; VRF_OUTPUT_LENGTH];
fn type_info() -> scale_info::Type {
Self::Identity::type_info()
}
}
impl TryFrom<[u8; VRF_OUTPUT_LENGTH]> for VRFOutput {
type Error = SignatureError;
@@ -117,6 +132,20 @@ impl Decode for VRFProof {
}
}
impl MaxEncodedLen for VRFProof {
fn max_encoded_len() -> usize {
<[u8; VRF_PROOF_LENGTH]>::max_encoded_len()
}
}
impl TypeInfo for VRFProof {
type Identity = [u8; VRF_PROOF_LENGTH];
fn type_info() -> scale_info::Type {
Self::Identity::type_info()
}
}
impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof {
type Error = SignatureError;