feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,40 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use pezkuwi_node_primitives::approval::{
v1::{AssignmentCert, AssignmentCertKind, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT},
v2::VrfPreOutput,
};
pub fn make_bitvec(len: usize) -> BitVec<u8, BitOrderLsb0> {
bitvec::bitvec![u8, BitOrderLsb0; 0; len]
}
/// Produce a throwaway [`AssignmentCert`] of the given `kind`, signed with a
/// freshly generated keypair over a fixed garbage message. Only suitable for
/// tests/migrations — the certificate carries no real assignment semantics.
pub fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert {
	let signing_ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT);
	let mut rng = rand_core::OsRng;
	let keypair = schnorrkel::Keypair::generate_with(&mut rng);
	// `_batchable` (the batch-verification half) is intentionally discarded.
	let (in_out, proof, _batchable) = keypair.vrf_sign(signing_ctx.bytes(b"test-garbage"));
	AssignmentCert {
		kind,
		vrf: VrfSignature {
			pre_output: VrfPreOutput(in_out.to_preout()),
			proof: VrfProof(proof),
		},
	}
}
@@ -0,0 +1,293 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Common helper functions for all versions of approval-voting database.
use std::sync::Arc;
use codec::{Decode, Encode};
use pezkuwi_node_subsystem::{SubsystemError, SubsystemResult};
use pezkuwi_node_subsystem_util::database::{DBTransaction, Database};
use pezkuwi_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash};
use crate::{
backend::{Backend, BackendWriteOp, V1ReadBackend, V2ReadBackend},
persisted_entries,
};
use super::{
v2::{load_block_entry_v1, load_candidate_entry_v1},
v3::{load_block_entry_v2, load_candidate_entry_v2, BlockEntry, CandidateEntry},
};
pub mod migration_helpers;
const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks";
/// A range from earliest..last block number stored within the DB.
///
/// Treated as half-open by readers: `load_all_blocks` iterates
/// `self.0..self.1`, i.e. the second field is exclusive.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber);
/// The database config for the approval-voting DB backend.
#[derive(Debug, Clone, Copy)]
pub struct Config {
	/// The column family in the database where data is stored.
	/// All keys used by this module live in this single column.
	pub col_approval_data: u32,
}
/// `DbBackend` is a concrete implementation of the higher-level Backend trait
pub struct DbBackend {
	// Shared handle to the underlying key-value store.
	inner: Arc<dyn Database>,
	// Column configuration used for every read/write.
	config: Config,
}
impl DbBackend {
	/// Create a new [`DbBackend`] with the supplied key-value store and
	/// config.
	pub fn new(db: Arc<dyn Database>, config: Config) -> Self {
		Self { inner: db, config }
	}
}
/// Errors while accessing things from the DB.
#[derive(Debug, derive_more::From, derive_more::Display)]
pub enum Error {
	/// A low-level I/O error surfaced by the database.
	Io(std::io::Error),
	/// A stored value was present but failed to decode (via `codec::Decode`).
	InvalidDecoding(codec::Error),
	/// An error propagated from the subsystem layer.
	InternalError(SubsystemError),
}

impl std::error::Error for Error {}

/// Result alias for DB errors.
pub type Result<T> = std::result::Result<T, Error>;
impl Backend for DbBackend {
	fn load_block_entry(
		&self,
		block_hash: &Hash,
	) -> SubsystemResult<Option<persisted_entries::BlockEntry>> {
		// Delegate to the free function, lifting the on-disk type into the
		// in-memory `persisted_entries` representation.
		load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into))
	}

	fn load_candidate_entry(
		&self,
		candidate_hash: &CandidateHash,
	) -> SubsystemResult<Option<persisted_entries::CandidateEntry>> {
		load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into))
	}

	fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult<Vec<Hash>> {
		load_blocks_at_height(&*self.inner, &self.config, block_height)
	}

	fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>> {
		load_all_blocks(&*self.inner, &self.config)
	}

	fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>> {
		load_stored_blocks(&*self.inner, &self.config)
	}

	/// Atomically write the list of operations, with later operations taking precedence over prior.
	fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
	where
		I: IntoIterator<Item = BackendWriteOp>,
	{
		// Accumulate all operations into one transaction so the batch lands
		// atomically; within a transaction, later writes to the same key win.
		let mut transaction = DBTransaction::new();
		for write_op in ops {
			match write_op {
				BackendWriteOp::WriteStoredBlockRange(range) => transaction.put_vec(
					self.config.col_approval_data,
					&STORED_BLOCKS_KEY,
					range.encode(),
				),
				BackendWriteOp::DeleteStoredBlockRange =>
					transaction.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY),
				BackendWriteOp::WriteBlocksAtHeight(height, blocks) => transaction.put_vec(
					self.config.col_approval_data,
					&blocks_at_height_key(height),
					blocks.encode(),
				),
				BackendWriteOp::DeleteBlocksAtHeight(height) =>
					transaction.delete(self.config.col_approval_data, &blocks_at_height_key(height)),
				BackendWriteOp::WriteBlockEntry(entry) => {
					// Convert to the current on-disk schema before encoding.
					let db_entry: BlockEntry = entry.into();
					transaction.put_vec(
						self.config.col_approval_data,
						&block_entry_key(&db_entry.block_hash),
						db_entry.encode(),
					);
				},
				BackendWriteOp::DeleteBlockEntry(hash) =>
					transaction.delete(self.config.col_approval_data, &block_entry_key(&hash)),
				BackendWriteOp::WriteCandidateEntry(entry) => {
					let db_entry: CandidateEntry = entry.into();
					transaction.put_vec(
						self.config.col_approval_data,
						&candidate_entry_key(&db_entry.candidate.hash()),
						db_entry.encode(),
					);
				},
				BackendWriteOp::DeleteCandidateEntry(hash) =>
					transaction.delete(self.config.col_approval_data, &candidate_entry_key(&hash)),
			}
		}

		self.inner.write(transaction).map_err(Into::into)
	}
}
impl V1ReadBackend for DbBackend {
	fn load_candidate_entry_v1(
		&self,
		candidate_hash: &CandidateHash,
		candidate_index: CandidateIndex,
	) -> SubsystemResult<Option<persisted_entries::CandidateEntry>> {
		// Read in the v1 schema, then upgrade to the in-memory representation,
		// injecting the candidate index which v1 did not store.
		let maybe_entry = load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash)?;
		Ok(maybe_entry
			.map(|entry| persisted_entries::CandidateEntry::from_v1(entry, candidate_index)))
	}

	fn load_block_entry_v1(
		&self,
		block_hash: &Hash,
	) -> SubsystemResult<Option<persisted_entries::BlockEntry>> {
		let maybe_entry = load_block_entry_v1(&*self.inner, &self.config, block_hash)?;
		Ok(maybe_entry.map(Into::into))
	}
}
impl V2ReadBackend for DbBackend {
	fn load_candidate_entry_v2(
		&self,
		candidate_hash: &CandidateHash,
		candidate_index: CandidateIndex,
	) -> SubsystemResult<Option<persisted_entries::CandidateEntry>> {
		// Read in the v2 schema and upgrade, injecting the candidate index
		// which v2 did not store.
		let maybe_entry = load_candidate_entry_v2(&*self.inner, &self.config, candidate_hash)?;
		Ok(maybe_entry
			.map(|entry| persisted_entries::CandidateEntry::from_v2(entry, candidate_index)))
	}

	fn load_block_entry_v2(
		&self,
		block_hash: &Hash,
	) -> SubsystemResult<Option<persisted_entries::BlockEntry>> {
		let maybe_entry = load_block_entry_v2(&*self.inner, &self.config, block_hash)?;
		Ok(maybe_entry.map(Into::into))
	}
}
/// Fetch `key` from `col_approval_data` and SCALE-decode it into `D`.
///
/// Returns `Ok(None)` when the key is absent; decode failures surface as
/// [`Error::InvalidDecoding`].
pub(crate) fn load_decode<D: Decode>(
	store: &dyn Database,
	col_approval_data: u32,
	key: &[u8],
) -> Result<Option<D>> {
	let raw = match store.get(col_approval_data, key)? {
		Some(raw) => raw,
		None => return Ok(None),
	};
	D::decode(&mut &raw[..]).map(Some).map_err(Into::into)
}
/// The key a given block entry is stored under: 14-byte ASCII prefix
/// `"Approvals_blck"` followed by the 32-byte block hash.
pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] {
	let mut key = [0u8; 46];
	key[..14].copy_from_slice(b"Approvals_blck");
	key[14..].copy_from_slice(block_hash.as_ref());
	key
}
/// The key a given candidate entry is stored under: 14-byte ASCII prefix
/// `"Approvals_cand"` followed by the 32-byte candidate hash.
pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] {
	let mut key = [0u8; 46];
	key[..14].copy_from_slice(b"Approvals_cand");
	key[14..].copy_from_slice(candidate_hash.0.as_ref());
	key
}
/// The key a set of block hashes corresponding to a block number is stored
/// under: 12-byte ASCII prefix `"Approvals_at"` followed by the SCALE-encoded
/// block number (4 bytes).
pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] {
	let mut key = [0u8; 16];
	key[..12].copy_from_slice(b"Approvals_at");
	block_number.using_encoded(|encoded| key[12..].copy_from_slice(encoded));
	key
}
/// Return all blocks which have entries in the DB, ascending, by height.
///
/// Within a single height, hashes come back in the order stored by
/// [`load_blocks_at_height`].
pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult<Vec<Hash>> {
	// No stored range means no blocks at all.
	let range = match load_stored_blocks(store, config)? {
		Some(range) => range,
		None => return Ok(Vec::new()),
	};

	let mut hashes = Vec::new();
	for height in range.0..range.1 {
		hashes.extend(load_blocks_at_height(store, config, &height)?);
	}

	Ok(hashes)
}
/// Load the stored-blocks key from the state.
///
/// `None` when the range has never been written.
pub fn load_stored_blocks(
	store: &dyn Database,
	config: &Config,
) -> SubsystemResult<Option<StoredBlockRange>> {
	let decoded = load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY);
	decoded.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
/// Load a blocks-at-height entry for a given block number.
///
/// A missing entry is reported as an empty `Vec`, not an error.
pub fn load_blocks_at_height(
	store: &dyn Database,
	config: &Config,
	block_number: &BlockNumber,
) -> SubsystemResult<Vec<Hash>> {
	match load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) {
		Ok(maybe_blocks) => Ok(maybe_blocks.unwrap_or_default()),
		Err(e) => Err(SubsystemError::with_origin("approval-voting", e)),
	}
}
/// Load a block entry from the aux store.
///
/// Returns `Ok(None)` when no entry exists for `block_hash`.
pub fn load_block_entry(
	store: &dyn Database,
	config: &Config,
	block_hash: &Hash,
) -> SubsystemResult<Option<BlockEntry>> {
	// `load_decode` already yields `Option<BlockEntry>` here; the previous
	// `.map(|u| u.map(|v| v.into()))` was an identity conversion and is gone.
	load_decode(store, config.col_approval_data, &block_entry_key(block_hash))
		.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
/// Load a candidate entry from the aux store in current version format.
///
/// Returns `Ok(None)` when no entry exists for `candidate_hash`.
pub fn load_candidate_entry(
	store: &dyn Database,
	config: &Config,
	candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<CandidateEntry>> {
	// `load_decode` already yields `Option<CandidateEntry>` here; the previous
	// `.map(|u| u.map(|v| v.into()))` was an identity conversion and is gone.
	load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash))
		.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
@@ -0,0 +1,36 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Approval DB accessors and writers for on-disk persisted approval storage
//! data.
//!
//! We persist data to disk although it is not intended to be used across runs of the
//! program. This is because under medium to long periods of finality stalling, for whatever
//! reason that may be, the amount of data we'd need to keep would be potentially too large
//! for memory.
//!
//! With tens or hundreds of teyrchains, hundreds of validators, and parablocks
//! in every relay chain block, there can be a humongous amount of information to reference
//! at any given time.
//!
//! As such, we provide a function from this module to clear the database on start-up.
//! In the future, we may use a temporary DB which doesn't need to be wiped, but for the
//! time being we share the same DB with the rest of Substrate.
pub mod common;
pub mod v1;
pub mod v2;
pub mod v3;
@@ -0,0 +1,91 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Version 1 of the DB schema.
//!
//! Note that the version here differs from the actual version of the teyrchains
//! database (check `CURRENT_VERSION` in `node/service/src/teyrchains_db/upgrade.rs`).
//! The code in this module implements the way approval voting works with
//! its data in the database. Any breaking changes here will still
//! require a db migration (check `node/service/src/teyrchains_db/upgrade.rs`).
use codec::{Decode, Encode};
use pezkuwi_node_primitives::approval::v1::{AssignmentCert, DelayTranche};
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CoreIndex, GroupIndex,
Hash, SessionIndex, ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use std::collections::BTreeMap;
use super::v2::Bitfield;
/// Details pertaining to our assignment on a block.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct OurAssignment {
	/// The assignment certificate backing this assignment.
	pub cert: AssignmentCert,
	/// The delay tranche this assignment falls into.
	pub tranche: DelayTranche,
	/// Our validator index within the session.
	pub validator_index: ValidatorIndex,
	// Whether the assignment has been triggered already.
	pub triggered: bool,
}
use super::v2::TrancheEntry;
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
	/// Assignments observed so far, grouped by tranche.
	pub tranches: Vec<TrancheEntry>,
	/// The group which backed the candidate.
	pub backing_group: GroupIndex,
	/// Our own assignment, if we have one for this (block, candidate) pair.
	pub our_assignment: Option<OurAssignment>,
	/// Our approval signature, if we have issued one.
	pub our_approval_sig: Option<ValidatorSignature>,
	// `n_validators` bits.
	pub assignments: Bitfield,
	// Whether the candidate is approved in the context of this block.
	pub approved: bool,
}
/// Metadata regarding approval of a particular candidate.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct CandidateEntry {
	/// The receipt of the candidate itself.
	pub candidate: CandidateReceipt,
	/// The session the candidate was included in.
	pub session: SessionIndex,
	// Assignments are based on blocks, so we need to track assignments separately
	// based on the block we are looking at.
	pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
	/// Approval votes seen for this candidate, one bit per validator.
	pub approvals: Bitfield,
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct BlockEntry {
	/// The hash of this block.
	pub block_hash: Hash,
	/// The height of this block.
	pub block_number: BlockNumber,
	/// The hash of this block's parent.
	pub parent_hash: Hash,
	/// The session the block belongs to.
	pub session: SessionIndex,
	/// The slot the block was produced in.
	pub slot: Slot,
	/// Random bytes derived from the VRF submitted within the block by the block
	/// author as a credential and used as input to approval assignment criteria.
	pub relay_vrf_story: [u8; 32],
	// The candidates included as-of this block and the index of the core they are
	// leaving. Sorted ascending by core index.
	pub candidates: Vec<(CoreIndex, CandidateHash)>,
	// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
	// The i'th bit is `true` iff the candidate has been approved in the context of this
	// block. The block can be considered approved if the bitfield has all bits set to `true`.
	pub approved_bitfield: Bitfield,
	/// Hashes of the children of this block, filled in as children are imported.
	pub children: Vec<Hash>,
}
@@ -0,0 +1,567 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the aux-schema of approval voting.
use super::{DbBackend, StoredBlockRange, *};
use crate::{
backend::{Backend, OverlayedBackend},
ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo},
};
use pezkuwi_node_subsystem_util::database::Database;
use pezkuwi_primitives::Id as ParaId;
use std::{collections::HashMap, sync::Arc};
use pezkuwi_primitives_test_helpers::{
dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash,
};
const DATA_COL: u32 = 0;
const NUM_COLUMNS: u32 = 1;
const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL };
/// Build a fresh in-memory DB, returning both the backend under test and a
/// raw handle for reading back what was written.
fn make_db() -> (DbBackend, Arc<dyn Database>) {
	let memdb = kvdb_memorydb::create(NUM_COLUMNS);
	let adapter = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(memdb, &[]);
	let writer: Arc<dyn Database> = Arc::new(adapter);
	(DbBackend::new(Arc::clone(&writer), TEST_CONFIG), writer)
}
/// Build a `BlockEntry` for session 1 / slot 1 with an all-zero VRF story,
/// an empty approval bitfield sized to `candidates`, and no children.
fn make_block_entry(
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	candidates: Vec<(CoreIndex, CandidateHash)>,
) -> BlockEntry {
	// Size the bitfield before `candidates` is moved into the struct.
	let approved_bitfield = make_bitvec(candidates.len());
	BlockEntry {
		block_hash,
		parent_hash,
		block_number,
		session: 1,
		slot: Slot::from(1),
		relay_vrf_story: [0u8; 32],
		candidates,
		approved_bitfield,
		children: Vec::new(),
	}
}
/// Build a dummy candidate receipt with the given para id and relay parent.
fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt {
	let mut receipt = dummy_candidate_receipt(dummy_hash());
	receipt.descriptor.para_id = para_id;
	receipt.descriptor.relay_parent = relay_parent;
	receipt
}
// Round-trip test: every entry type written through the overlay must read
// back equal, and every delete must take effect.
#[test]
fn read_write() {
	let (mut db, store) = make_db();

	let hash_a = Hash::repeat_byte(1);
	let hash_b = Hash::repeat_byte(2);
	let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash();

	let range = StoredBlockRange(10, 20);
	let at_height = vec![hash_a, hash_b];

	let block_entry =
		make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]);
	// One approval entry keyed by `hash_a`, with empty tranches/bitfields.
	let candidate_entry = CandidateEntry {
		candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None),
		session: 5,
		block_assignments: vec![(
			hash_a,
			ApprovalEntry {
				tranches: Vec::new(),
				backing_group: GroupIndex(1),
				our_assignment: None,
				our_approval_sig: None,
				assignments: Default::default(),
				approved: false,
			},
		)]
		.into_iter()
		.collect(),
		approvals: Default::default(),
	};

	// Stage all writes in the overlay, then flush them atomically.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(range.clone());
	overlay_db.write_blocks_at_height(1, at_height.clone());
	overlay_db.write_block_entry(block_entry.clone().into());
	overlay_db.write_candidate_entry(candidate_entry.clone().into());

	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Everything written must read back unchanged through the raw store.
	assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range));
	assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(),
		Some(block_entry.into())
	);
	assert_eq!(
		load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(),
		Some(candidate_entry.into()),
	);

	// Now delete the same entries and verify the deletions took effect.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.delete_blocks_at_height(1);
	overlay_db.delete_block_entry(&hash_a);
	overlay_db.delete_candidate_entry(&candidate_hash);

	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none());
	assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash)
		.unwrap()
		.is_none());
}
// Importing two blocks at the same height, where candidate A appears in both,
// must accumulate both blocks in A's `block_assignments` while B only records
// the second block.
#[test]
fn add_block_entry_works() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);

	let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash);
	let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash);
	let candidate_hash_a = candidate_receipt_a.hash();
	let candidate_hash_b = candidate_receipt_b.hash();

	let block_number = 10;

	let block_entry_a = make_block_entry(
		block_hash_a,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a)],
	);
	let block_entry_b = make_block_entry(
		block_hash_b,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)],
	);

	let n_validators = 10;

	let mut new_candidate_info = HashMap::new();
	new_candidate_info
		.insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None));

	// Import block A, which includes candidate A only.
	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| {
		// `.cloned()` over `.map(|x| x.clone())` (clippy::map_clone).
		new_candidate_info.get(h).cloned()
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Import block B, which includes candidates A and B.
	new_candidate_info
		.insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None));

	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| {
		new_candidate_info.get(h).cloned()
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
		Some(block_entry_a.into())
	);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
		Some(block_entry_b.into())
	);

	// Candidate A was included by both blocks; candidate B only by block B.
	let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a)
		.unwrap()
		.unwrap();
	assert_eq!(
		candidate_entry_a.block_assignments.keys().collect::<Vec<_>>(),
		vec![&block_hash_a, &block_hash_b]
	);

	let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b)
		.unwrap()
		.unwrap();
	assert_eq!(candidate_entry_b.block_assignments.keys().collect::<Vec<_>>(), vec![&block_hash_b]);
}
// Importing a block must append its hash to the parent's `children` list.
#[test]
fn add_block_entry_adds_child() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);

	let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new());
	let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new());

	let n_validators = 10;

	// Import both blocks in one overlay session, then flush.
	let mut overlay = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
	add_block_entry(&mut overlay, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
	db.write(overlay.into_write_ops()).unwrap();

	// Importing B (child of A) should have registered B under A's children.
	block_entry_a.children.push(block_hash_b);

	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
		Some(block_entry_a.into())
	);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
		Some(block_entry_b.into())
	);
}
// Canonicalizing up to a block must prune every competing fork and every
// block/candidate at or below the canonicalized height, keeping only
// descendants of the canonical chain.
#[test]
fn canonicalize_works() {
	let (mut db, store) = make_db();

	// -> B1 -> C1 -> D1
	// A -> B2 -> C2 -> D2
	//
	// We'll canonicalize C1. Everything except D1 should disappear.
	//
	// Candidates:
	// Cand1 in B2
	// Cand2 in C2
	// Cand3 in C2 and D1
	// Cand4 in D1
	// Cand5 in D2
	// Only Cand3 and Cand4 should remain after canonicalize.
	let n_validators = 10;

	// Seed the stored range covering heights 1..5.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 5));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	let genesis = Hash::repeat_byte(0);
	let block_hash_a = Hash::repeat_byte(1);
	let block_hash_b1 = Hash::repeat_byte(2);
	let block_hash_b2 = Hash::repeat_byte(3);
	let block_hash_c1 = Hash::repeat_byte(4);
	let block_hash_c2 = Hash::repeat_byte(5);
	let block_hash_d1 = Hash::repeat_byte(6);
	let block_hash_d2 = Hash::repeat_byte(7);

	let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis);
	let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a);
	let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a);
	let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1);
	let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1);

	let cand_hash_1 = candidate_receipt_genesis.hash();
	let cand_hash_2 = candidate_receipt_a.hash();
	let cand_hash_3 = candidate_receipt_b.hash();
	let cand_hash_4 = candidate_receipt_b1.hash();
	let cand_hash_5 = candidate_receipt_c1.hash();

	// Build the two forks described in the diagram above.
	let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new());
	let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new());
	let block_entry_b2 =
		make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]);
	let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new());
	let block_entry_c2 = make_block_entry(
		block_hash_c2,
		block_hash_b2,
		3,
		vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)],
	);
	let block_entry_d1 = make_block_entry(
		block_hash_d1,
		block_hash_c1,
		4,
		vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)],
	);
	let block_entry_d2 =
		make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]);

	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			cand_hash_1,
			NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None),
		);

		candidate_info
			.insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None));

		candidate_info
			.insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None));

		candidate_info
			.insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None));

		candidate_info
			.insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None));

		candidate_info
	};

	// now insert all the blocks.
	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b1.clone(),
		block_entry_b2.clone(),
		block_entry_c1.clone(),
		block_entry_c2.clone(),
		block_entry_d1.clone(),
		block_entry_d2.clone(),
	];

	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Checker: for each candidate, either it is absent (`None`) or its
	// `block_assignments` keys match exactly the given block set.
	let check_candidates_in_store = |expected: Vec<(CandidateHash, Option<Vec<_>>)>| {
		for (c_hash, in_blocks) in expected {
			let (entry, in_blocks) = match in_blocks {
				None => {
					assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash)
						.unwrap()
						.is_none());
					continue
				},
				Some(i) => (
					load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(),
					i,
				),
			};

			assert_eq!(entry.block_assignments.len(), in_blocks.len());

			for x in in_blocks {
				assert!(entry.block_assignments.contains_key(&x));
			}
		}
	};

	// Checker: for each block, either it is absent (`None`) or its candidate
	// list matches exactly the given candidate set.
	let check_blocks_in_store = |expected: Vec<(Hash, Option<Vec<_>>)>| {
		for (hash, with_candidates) in expected {
			let (entry, with_candidates) = match with_candidates {
				None => {
					assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash)
						.unwrap()
						.is_none());
					continue
				},
				Some(i) =>
					(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i),
			};

			assert_eq!(entry.candidates.len(), with_candidates.len());

			for x in with_candidates {
				assert!(entry.candidates.iter().any(|(_, c)| c == &x));
			}
		}
	};

	// Pre-canonicalize state: everything present.
	check_candidates_in_store(vec![
		(cand_hash_1, Some(vec![block_hash_b2])),
		(cand_hash_2, Some(vec![block_hash_c2])),
		(cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, Some(vec![block_hash_d2])),
	]);

	check_blocks_in_store(vec![
		(block_hash_a, Some(vec![])),
		(block_hash_b1, Some(vec![])),
		(block_hash_b2, Some(vec![cand_hash_1])),
		(block_hash_c1, Some(vec![])),
		(block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_d2, Some(vec![cand_hash_5])),
	]);

	// Canonicalize C1 at height 3.
	let mut overlay_db = OverlayedBackend::new(&db);
	canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Stored range now starts past the canonicalized height.
	assert_eq!(
		load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(),
		StoredBlockRange(4, 5)
	);

	// Only D1 and its candidates (3 and 4) survive.
	check_candidates_in_store(vec![
		(cand_hash_1, None),
		(cand_hash_2, None),
		(cand_hash_3, Some(vec![block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, None),
	]);

	check_blocks_in_store(vec![
		(block_hash_a, None),
		(block_hash_b1, None),
		(block_hash_b2, None),
		(block_hash_c1, None),
		(block_hash_c2, None),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_d2, None),
	]);
}
// Force-approving up to height 2 must fully set the approval bitfields of the
// ancestors at heights 1 and 2 and leave the higher blocks untouched, and
// must return the approved hashes child-to-parent.
#[test]
fn force_approve_works() {
	let (mut db, store) = make_db();
	let n_validators = 10;

	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 4));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	let candidate_hash = CandidateHash(Hash::repeat_byte(42));
	let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)];
	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			candidate_hash,
			NewCandidateInfo::new(
				make_candidate(ParaId::from(1_u32), Default::default()),
				GroupIndex(1),
				None,
			),
		);

		candidate_info
	};

	// A chain A -> B -> C -> D at heights 1..=4, all including one candidate.
	let block_hash_a = Hash::repeat_byte(1); // 1
	let block_hash_b = Hash::repeat_byte(2);
	let block_hash_c = Hash::repeat_byte(3);
	let block_hash_d = Hash::repeat_byte(4); // 4

	let block_entry_a =
		make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone());
	let block_entry_b =
		make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone());
	let block_entry_c =
		make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone());
	let block_entry_d =
		make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone());

	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b.clone(),
		block_entry_c.clone(),
		block_entry_d.clone(),
	];

	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	// Walk back from D and force-approve everything up to height 2.
	let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// A and B (heights 1 and 2) are fully approved ...
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	// ... while C and D (heights 3 and 4) remain untouched.
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());
	// Approved hashes are reported child-first.
	assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]);
}
// `load_all_blocks` must return hashes ordered by height, independently of
// the order in which the blocks were imported.
#[test]
fn load_all_blocks_works() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);
	let block_hash_c = Hash::repeat_byte(42);

	let block_number = 10;
	let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]);
	let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]);
	let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]);

	let n_validators = 10;

	// Insert C (the higher block) before B to exercise height-based sorting.
	let mut overlay = OverlayedBackend::new(&db);
	for entry in [block_entry_a.clone(), block_entry_c.clone(), block_entry_b.clone()] {
		add_block_entry(&mut overlay, entry.into(), n_validators, |_| None).unwrap();
	}
	db.write(overlay.into_write_ops()).unwrap();

	assert_eq!(
		load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(),
		vec![block_hash_a, block_hash_b, block_hash_c],
	)
}
@@ -0,0 +1,202 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Approval DB migration helpers.
use super::*;
use crate::{
approval_db::common::{
migration_helpers::{dummy_assignment_cert, make_bitvec},
Error, Result, StoredBlockRange,
},
backend::Backend,
};
use pezkuwi_node_primitives::approval::v1::AssignmentCertKind;
use pezkuwi_node_subsystem_util::database::Database;
use sp_application_crypto::sp_core::H256;
use std::{collections::HashSet, sync::Arc};
/// Builds a v1-scheme block entry for tests: fixed session/slot, zeroed VRF
/// story, no children, and an all-zero approval bitfield sized to `candidates`.
fn make_block_entry_v1(
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	candidates: Vec<(CoreIndex, CandidateHash)>,
) -> crate::approval_db::v1::BlockEntry {
	let approved_bitfield = make_bitvec(candidates.len());
	crate::approval_db::v1::BlockEntry {
		block_hash,
		parent_hash,
		block_number,
		session: 1,
		slot: Slot::from(1),
		relay_vrf_story: [0u8; 32],
		approved_bitfield,
		candidates,
		children: Vec::new(),
	}
}
/// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2.
/// Returns on any error.
/// Must only be used in teyrchains DB migration code - `pezkuwi-service` crate.
pub fn v1_to_latest(db: Arc<dyn Database>, config: Config) -> Result<()> {
	let mut backend = crate::DbBackend::new(db, config);
	// Collect every known block, loading each entry in the v1 format.
	// NOTE(review): `.ok()?` inside `filter_map` silently skips blocks whose
	// entry fails to load rather than aborting the whole migration — confirm
	// this best-effort behaviour is intended (the doc comment above says
	// "returns on any error").
	let all_blocks = backend
		.load_all_blocks()
		.map_err(|e| Error::InternalError(e))?
		.iter()
		.filter_map(|block_hash| {
			backend
				.load_block_entry_v1(block_hash)
				.map_err(|e| Error::InternalError(e))
				.ok()?
		})
		.collect::<Vec<_>>();
	gum::info!(
		target: crate::LOG_TARGET,
		"Migrating candidate entries on top of {} blocks",
		all_blocks.len()
	);
	// Stage all conversions in an overlay so the commit below is atomic.
	let mut overlay = crate::OverlayedBackend::new(&backend);
	let mut counter = 0;
	// Get all candidate entries, approval entries and convert each of them.
	for block in all_blocks {
		for (candidate_index, (_core_index, candidate_hash)) in
			block.candidates().iter().enumerate()
		{
			// Loading the candidate will also perform the conversion to the updated format and
			// return that representation.
			if let Some(candidate_entry) = backend
				.load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex)
				.map_err(|e| Error::InternalError(e))?
			{
				// Write the updated representation.
				overlay.write_candidate_entry(candidate_entry);
				counter += 1;
			}
		}
		// Re-write the block entry itself in the latest format.
		overlay.write_block_entry(block);
	}
	gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter);
	// Commit all changes to DB.
	let write_ops = overlay.into_write_ops();
	backend.write(write_ops).unwrap();
	Ok(())
}
/// Fills the db with dummy data in v1 scheme.
///
/// Writes `RELAY_BLOCK_COUNT` relay blocks (heights 1..=10), each carrying one
/// dummy candidate produced by `dummy_candidate_create`, and returns the set of
/// candidate hashes written so a later sanity check can compare against it.
pub fn v1_fill_test_data<F>(
	db: Arc<dyn Database>,
	config: Config,
	dummy_candidate_create: F,
) -> Result<HashSet<CandidateHash>>
where
	F: Fn(H256) -> CandidateReceipt<H256>,
{
	let mut backend = crate::DbBackend::new(db.clone(), config);
	let mut overlay_db = crate::OverlayedBackend::new(&backend);
	let mut expected_candidates = HashSet::new();
	const RELAY_BLOCK_COUNT: u32 = 10;
	// Stored range is half-open-ish per `StoredBlockRange` convention: blocks 1..11.
	let range = StoredBlockRange(1, 11);
	overlay_db.write_stored_block_range(range.clone());
	for relay_number in 1..=RELAY_BLOCK_COUNT {
		let relay_hash = Hash::repeat_byte(relay_number as u8);
		let assignment_core_index = CoreIndex(relay_number);
		let candidate = dummy_candidate_create(relay_hash);
		let candidate_hash = candidate.hash();
		let at_height = vec![relay_hash];
		let block_entry = make_block_entry_v1(
			relay_hash,
			Default::default(),
			relay_number,
			vec![(assignment_core_index, candidate_hash)],
		);
		// An untriggered tranche-0 assignment for validator 0.
		let dummy_assignment = crate::approval_db::v1::OurAssignment {
			cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(),
			tranche: 0,
			validator_index: ValidatorIndex(0),
			triggered: false,
		};
		let candidate_entry = crate::approval_db::v1::CandidateEntry {
			candidate,
			session: 123,
			block_assignments: vec![(
				relay_hash,
				crate::approval_db::v1::ApprovalEntry {
					tranches: Vec::new(),
					backing_group: GroupIndex(1),
					our_assignment: Some(dummy_assignment),
					our_approval_sig: None,
					assignments: Default::default(),
					approved: false,
				},
			)]
			.into_iter()
			.collect(),
			approvals: Default::default(),
		};
		overlay_db.write_blocks_at_height(relay_number, at_height.clone());
		expected_candidates.insert(candidate_entry.candidate.hash());
		// Candidate/block entries bypass the overlay on purpose: they must land
		// on disk in raw v1 encoding, which the overlay (latest format) would
		// not produce.
		db.write(write_candidate_entry_v1(candidate_entry, config)).unwrap();
		db.write(write_block_entry_v1(block_entry, config)).unwrap();
	}
	// Commit the stored range and blocks-at-height index written via the overlay.
	let write_ops = overlay_db.into_write_ops();
	backend.write(write_ops).unwrap();
	Ok(expected_candidates)
}
/// Low level DB helper: builds a transaction that stores a candidate entry
/// in the v1 encoding, keyed by the candidate's hash.
fn write_candidate_entry_v1(
	candidate_entry: crate::approval_db::v1::CandidateEntry,
	config: Config,
) -> DBTransaction {
	let key = candidate_entry_key(&candidate_entry.candidate.hash());
	let value = candidate_entry.encode();

	let mut tx = DBTransaction::new();
	tx.put_vec(config.col_approval_data, &key, value);
	tx
}
/// Low level DB helper: builds a transaction that stores a block entry
/// in the v1 encoding, keyed by the block hash.
fn write_block_entry_v1(
	block_entry: crate::approval_db::v1::BlockEntry,
	config: Config,
) -> DBTransaction {
	let key = block_entry_key(&block_entry.block_hash);
	let value = block_entry.encode();

	let mut tx = DBTransaction::new();
	tx.put_vec(config.col_approval_data, &key, value);
	tx
}
@@ -0,0 +1,153 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Version 2 of the DB schema.
use codec::{Decode, Encode};
use pezkuwi_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2};
use pezkuwi_node_subsystem::{SubsystemError, SubsystemResult};
use pezkuwi_node_subsystem_util::database::{DBTransaction, Database};
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateIndex, CandidateReceiptV2 as CandidateReceipt, CoreIndex,
GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use std::collections::BTreeMap;
use crate::backend::V1ReadBackend;
use super::common::{block_entry_key, candidate_entry_key, load_decode, Config};
pub mod migration_helpers;
#[cfg(test)]
pub mod tests;
// slot_duration * 2 + DelayTranche gives the number of delay tranches since the
// unix epoch.
/// On-disk representation of a tick (the raw tranche count described above).
/// NOTE: the SCALE encoding is the wrapped `u64`; do not change the layout.
#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)]
pub struct Tick(u64);
/// Convenience type definition for a `u8`-backed, LSB-first bit vector.
pub type Bitfield = BitVec<u8, BitOrderLsb0>;
/// Details pertaining to our assignment on a block.
///
/// NOTE: field order defines the SCALE wire format — do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct OurAssignment {
	/// Our assignment certificate.
	pub cert: AssignmentCertV2,
	/// The tranche for which the assignment refers to.
	pub tranche: DelayTranche,
	/// Our validator index for the session in which the candidates were included.
	pub validator_index: ValidatorIndex,
	/// Whether the assignment has been triggered already.
	pub triggered: bool,
}
/// Metadata regarding a specific tranche of assignments for a specific candidate.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct TrancheEntry {
	/// The delay tranche this entry describes.
	pub tranche: DelayTranche,
	// Assigned validators, and the instant we received their assignment, rounded
	// to the nearest tick.
	pub assignments: Vec<(ValidatorIndex, Tick)>,
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
///
/// NOTE: field order defines the SCALE wire format — do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
	/// Assignment tranches observed so far, in tranche order.
	pub tranches: Vec<TrancheEntry>,
	/// The backing group responsible for this candidate.
	pub backing_group: GroupIndex,
	/// Our own assignment for this candidate at this block, if any.
	pub our_assignment: Option<OurAssignment>,
	/// Our own approval signature, if we have approved.
	pub our_approval_sig: Option<ValidatorSignature>,
	// `n_validators` bits.
	pub assigned_validators: Bitfield,
	/// Whether the candidate is approved in this block's context.
	pub approved: bool,
}
/// Metadata regarding approval of a particular candidate.
///
/// NOTE: field order defines the SCALE wire format — do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct CandidateEntry {
	/// The candidate receipt this entry tracks.
	pub candidate: CandidateReceipt,
	/// The session the candidate was included in.
	pub session: SessionIndex,
	// Assignments are based on blocks, so we need to track assignments separately
	// based on the block we are looking at.
	pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
	/// One bit per validator: whether that validator has approved the candidate.
	pub approvals: Bitfield,
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
///
/// NOTE: field order defines the SCALE wire format — do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct BlockEntry {
	/// The hash of this block.
	pub block_hash: Hash,
	/// The number (height) of this block.
	pub block_number: BlockNumber,
	/// The hash of this block's parent.
	pub parent_hash: Hash,
	/// The session this block belongs to.
	pub session: SessionIndex,
	/// The slot the block was produced in.
	pub slot: Slot,
	/// Random bytes derived from the VRF submitted within the block by the block
	/// author as a credential and used as input to approval assignment criteria.
	pub relay_vrf_story: [u8; 32],
	// The candidates included as-of this block and the index of the core they are
	// leaving. Sorted ascending by core index.
	pub candidates: Vec<(CoreIndex, CandidateHash)>,
	// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
	// The i'th bit is `true` iff the candidate has been approved in the context of this
	// block. The block can be considered approved if the bitfield has all bits set to `true`.
	pub approved_bitfield: Bitfield,
	/// Hashes of direct descendants of this block.
	pub children: Vec<Hash>,
	// Assignments we already distributed. A 1 bit means the candidate index for which
	// we already have sent out an assignment. We need this to avoid distributing
	// multiple core assignments more than once.
	pub distributed_assignments: Bitfield,
}
impl From<crate::Tick> for Tick {
fn from(tick: crate::Tick) -> Tick {
Tick(tick)
}
}
impl From<Tick> for crate::Tick {
fn from(tick: Tick) -> crate::Tick {
tick.0
}
}
/// Load a candidate entry from the aux store in v1 format.
pub fn load_candidate_entry_v1(
	store: &dyn Database,
	config: &Config,
	candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<super::v1::CandidateEntry>> {
	// Decode straight into the v1 type; the original's `.map(|v| v.into())`
	// was an identity conversion and is dropped here.
	let entry: Option<super::v1::CandidateEntry> =
		load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash))
			.map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
	Ok(entry)
}
/// Load a block entry from the aux store in v1 format.
pub fn load_block_entry_v1(
	store: &dyn Database,
	config: &Config,
	block_hash: &Hash,
) -> SubsystemResult<Option<super::v1::BlockEntry>> {
	// Decode straight into the v1 type; the original's `.map(|v| v.into())`
	// was an identity conversion and is dropped here.
	let entry: Option<super::v1::BlockEntry> =
		load_decode(store, config.col_approval_data, &block_entry_key(block_hash))
			.map_err(|e| SubsystemError::with_origin("approval-voting", e))?;
	Ok(entry)
}
@@ -0,0 +1,586 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the aux-schema of approval voting.
use crate::{
approval_db::{
common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *},
v2::*,
v3::{load_block_entry_v2, load_candidate_entry_v2},
},
backend::{Backend, OverlayedBackend},
ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo},
};
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CoreIndex, GroupIndex,
Hash, MutateDescriptorV2,
};
use pezkuwi_node_subsystem_util::database::Database;
use pezkuwi_primitives::Id as ParaId;
use sp_consensus_slots::Slot;
use std::{collections::HashMap, sync::Arc};
use pezkuwi_primitives_test_helpers::{
dummy_candidate_receipt_bad_sig, dummy_candidate_receipt_v2,
dummy_candidate_receipt_v2_bad_sig, dummy_hash,
};
/// Column index used for approval data in the in-memory test database.
const DATA_COL: u32 = 0;
/// Total number of columns the in-memory test database is created with.
const NUM_COLUMNS: u32 = 1;
/// Database configuration shared by every test in this module.
const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL };
/// Creates an in-memory kvdb wrapped in the subsystem database adapter and
/// returns both the typed backend and the raw writer handle.
fn make_db() -> (DbBackend, Arc<dyn Database>) {
	let kvdb = kvdb_memorydb::create(NUM_COLUMNS);
	let adapter = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(kvdb, &[]);
	let writer: Arc<dyn Database> = Arc::new(adapter);
	let backend = DbBackend::new(writer.clone(), TEST_CONFIG);
	(backend, writer)
}
/// Builds a fresh v2 block entry for tests: fixed session/slot, zeroed VRF
/// story, nothing approved, no children, no distributed assignments.
fn make_block_entry(
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	candidates: Vec<(CoreIndex, CandidateHash)>,
) -> BlockEntry {
	let approved_bitfield = make_bitvec(candidates.len());
	BlockEntry {
		block_hash,
		parent_hash,
		block_number,
		session: 1,
		slot: Slot::from(1),
		relay_vrf_story: [0u8; 32],
		approved_bitfield,
		candidates,
		children: Vec::new(),
		distributed_assignments: Default::default(),
	}
}
/// Produces a dummy candidate receipt with the para id and relay parent overridden.
fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt {
	let mut receipt = dummy_candidate_receipt_v2(dummy_hash());
	receipt.descriptor.set_para_id(para_id);
	receipt.descriptor.set_relay_parent(relay_parent);
	receipt
}
#[test]
fn read_write() {
	// Round-trip test: write one of each entry type, read it back, delete it,
	// and confirm the deletes took effect.
	let (mut db, store) = make_db();
	let hash_a = Hash::repeat_byte(1);
	let hash_b = Hash::repeat_byte(2);
	// NOTE(review): the lookup key comes from the v1 dummy receipt while the
	// stored candidate is the v2 dummy — they are expected to produce the same
	// hash; confirm if the dummies ever diverge.
	let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash();
	let range = StoredBlockRange(10, 20);
	let at_height = vec![hash_a, hash_b];
	let block_entry =
		make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]);
	let candidate_entry = CandidateEntry {
		candidate: dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None),
		session: 5,
		block_assignments: vec![(
			hash_a,
			ApprovalEntry {
				tranches: Vec::new(),
				backing_group: GroupIndex(1),
				our_assignment: None,
				our_approval_sig: None,
				assigned_validators: Default::default(),
				approved: false,
			},
		)]
		.into_iter()
		.collect(),
		approvals: Default::default(),
	};
	// Stage all writes in an overlay, then flush them in one transaction.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(range.clone());
	overlay_db.write_blocks_at_height(1, at_height.clone());
	overlay_db.write_block_entry(block_entry.clone().into());
	overlay_db.write_candidate_entry(crate::persisted_entries::CandidateEntry::from_v2(
		candidate_entry.clone(),
		0,
	));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// Everything written must read back equal.
	assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range));
	assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height);
	assert_eq!(
		load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(),
		Some(block_entry.into())
	);
	assert_eq!(
		load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(),
		Some(candidate_entry.into()),
	);
	// Delete everything through a fresh overlay and flush again.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.delete_blocks_at_height(1);
	overlay_db.delete_block_entry(&hash_a);
	overlay_db.delete_candidate_entry(&candidate_hash);
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// All deleted entries must now be gone.
	assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty());
	assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none());
	assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash)
		.unwrap()
		.is_none());
}
#[test]
fn add_block_entry_works() {
	// Two blocks at the same height share candidate A; block B additionally
	// carries candidate B. The candidate entries must accumulate one
	// block-assignment per containing block.
	let (mut db, store) = make_db();
	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);
	let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash);
	let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash);
	let candidate_hash_a = candidate_receipt_a.hash();
	let candidate_hash_b = candidate_receipt_b.hash();
	let block_number = 10;
	let block_entry_a = make_block_entry(
		block_hash_a,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a)],
	);
	let block_entry_b = make_block_entry(
		block_hash_b,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)],
	);
	let n_validators = 10;
	let mut new_candidate_info = HashMap::new();
	new_candidate_info
		.insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None));
	// Insert block A first, flushing to disk before B is added.
	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| {
		new_candidate_info.get(h).map(|x| x.clone())
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// Candidate B info only becomes available for the second insertion.
	new_candidate_info
		.insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None));
	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| {
		new_candidate_info.get(h).map(|x| x.clone())
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// Both block entries must round-trip unchanged.
	assert_eq!(
		load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
		Some(block_entry_a.into())
	);
	assert_eq!(
		load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
		Some(block_entry_b.into())
	);
	// Candidate A is assigned in both blocks, candidate B only in block B.
	let candidate_entry_a =
		load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_a)
			.unwrap()
			.unwrap();
	assert_eq!(
		candidate_entry_a.block_assignments.keys().collect::<Vec<_>>(),
		vec![&block_hash_a, &block_hash_b]
	);
	let candidate_entry_b =
		load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_b)
			.unwrap()
			.unwrap();
	assert_eq!(candidate_entry_b.block_assignments.keys().collect::<Vec<_>>(), vec![&block_hash_b]);
}
#[test]
fn add_block_entry_adds_child() {
	// Adding a block whose parent is already stored must register the new
	// block in the parent's `children` list.
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let hash_a = Hash::repeat_byte(2);
	let hash_b = Hash::repeat_byte(69);

	let mut entry_a = make_block_entry(hash_a, parent_hash, 1, Vec::new());
	let entry_b = make_block_entry(hash_b, hash_a, 2, Vec::new());

	let n_validators = 10;
	let mut overlay = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay, entry_a.clone().into(), n_validators, |_| None).unwrap();
	add_block_entry(&mut overlay, entry_b.clone().into(), n_validators, |_| None).unwrap();
	db.write(overlay.into_write_ops()).unwrap();

	// The stored version of A should now list B as its child.
	entry_a.children.push(hash_b);

	assert_eq!(
		load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(),
		Some(entry_a.into())
	);
	assert_eq!(
		load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_b).unwrap(),
		Some(entry_b.into())
	);
}
#[test]
fn canonicalize_works() {
	// Builds two competing forks, canonicalizes one branch at height 3, and
	// checks exactly which block/candidate entries survive pruning.
	let (mut db, store) = make_db();
	// -> B1 -> C1 -> D1
	// A -> B2 -> C2 -> D2
	//
	// We'll canonicalize C1. Everything except D1 should disappear.
	//
	// Candidates:
	// Cand1 in B2
	// Cand2 in C2
	// Cand3 in C2 and D1
	// Cand4 in D1
	// Cand5 in D2
	// Only Cand3 and Cand4 should remain after canonicalize.
	let n_validators = 10;
	// Pre-populate the stored block range covering heights 1..5.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 5));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	let genesis = Hash::repeat_byte(0);
	let block_hash_a = Hash::repeat_byte(1);
	let block_hash_b1 = Hash::repeat_byte(2);
	let block_hash_b2 = Hash::repeat_byte(3);
	let block_hash_c1 = Hash::repeat_byte(4);
	let block_hash_c2 = Hash::repeat_byte(5);
	let block_hash_d1 = Hash::repeat_byte(6);
	let block_hash_d2 = Hash::repeat_byte(7);
	let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis);
	let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a);
	let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a);
	let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1);
	let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1);
	let cand_hash_1 = candidate_receipt_genesis.hash();
	let cand_hash_2 = candidate_receipt_a.hash();
	let cand_hash_3 = candidate_receipt_b.hash();
	let cand_hash_4 = candidate_receipt_b1.hash();
	let cand_hash_5 = candidate_receipt_c1.hash();
	let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new());
	let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new());
	let block_entry_b2 =
		make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]);
	let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new());
	let block_entry_c2 = make_block_entry(
		block_hash_c2,
		block_hash_b2,
		3,
		vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)],
	);
	let block_entry_d1 = make_block_entry(
		block_hash_d1,
		block_hash_c1,
		4,
		vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)],
	);
	let block_entry_d2 =
		make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]);
	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			cand_hash_1,
			NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None),
		);
		candidate_info
			.insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None));
		candidate_info
			.insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None));
		candidate_info
			.insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None));
		candidate_info
			.insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None));
		candidate_info
	};
	// now insert all the blocks.
	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b1.clone(),
		block_entry_b2.clone(),
		block_entry_c1.clone(),
		block_entry_c2.clone(),
		block_entry_d1.clone(),
		block_entry_d2.clone(),
	];
	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// Asserts that each candidate either is absent (`None`) or carries exactly
	// the given set of block assignments.
	let check_candidates_in_store = |expected: Vec<(CandidateHash, Option<Vec<_>>)>| {
		for (c_hash, in_blocks) in expected {
			let (entry, in_blocks) = match in_blocks {
				None => {
					assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash)
						.unwrap()
						.is_none());
					continue;
				},
				Some(i) => (
					load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash)
						.unwrap()
						.unwrap(),
					i,
				),
			};
			assert_eq!(entry.block_assignments.len(), in_blocks.len());
			for x in in_blocks {
				assert!(entry.block_assignments.contains_key(&x));
			}
		}
	};
	// Asserts that each block either is absent (`None`) or contains exactly the
	// given candidates.
	let check_blocks_in_store = |expected: Vec<(Hash, Option<Vec<_>>)>| {
		for (hash, with_candidates) in expected {
			let (entry, with_candidates) = match with_candidates {
				None => {
					assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash)
						.unwrap()
						.is_none());
					continue;
				},
				Some(i) =>
					(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i),
			};
			assert_eq!(entry.candidates.len(), with_candidates.len());
			for x in with_candidates {
				assert!(entry.candidates.iter().any(|(_, c)| c == &x));
			}
		}
	};
	// Sanity check the state before canonicalization.
	check_candidates_in_store(vec![
		(cand_hash_1, Some(vec![block_hash_b2])),
		(cand_hash_2, Some(vec![block_hash_c2])),
		(cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, Some(vec![block_hash_d2])),
	]);
	check_blocks_in_store(vec![
		(block_hash_a, Some(vec![])),
		(block_hash_b1, Some(vec![])),
		(block_hash_b2, Some(vec![cand_hash_1])),
		(block_hash_c1, Some(vec![])),
		(block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_d2, Some(vec![cand_hash_5])),
	]);
	// Canonicalize C1 at height 3: everything at heights <= 3 plus the
	// competing D2 branch must be pruned.
	let mut overlay_db = OverlayedBackend::new(&db);
	canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// The stored range now starts past the canonicalized height.
	assert_eq!(
		load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(),
		StoredBlockRange(4, 5)
	);
	// Only D1 and the candidates it contains survive.
	check_candidates_in_store(vec![
		(cand_hash_1, None),
		(cand_hash_2, None),
		(cand_hash_3, Some(vec![block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, None),
	]);
	check_blocks_in_store(vec![
		(block_hash_a, None),
		(block_hash_b1, None),
		(block_hash_b2, None),
		(block_hash_c1, None),
		(block_hash_c2, None),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_d2, None),
	]);
}
#[test]
fn force_approve_works() {
	// A chain A(1) -> B(2) -> C(3) -> D(4); force-approving up to height 2
	// must set the full approval bitfield on A and B only, and report the
	// newly-approved hashes newest-first.
	let (mut db, store) = make_db();
	let n_validators = 10;
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 4));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// Every block carries the same single candidate.
	let candidate_hash = CandidateHash(Hash::repeat_byte(42));
	let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)];
	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			candidate_hash,
			NewCandidateInfo::new(
				make_candidate(ParaId::from(1_u32), Default::default()),
				GroupIndex(1),
				None,
			),
		);
		candidate_info
	};
	let block_hash_a = Hash::repeat_byte(1); // 1
	let block_hash_b = Hash::repeat_byte(2);
	let block_hash_c = Hash::repeat_byte(3);
	let block_hash_d = Hash::repeat_byte(4); // 4
	let block_entry_a =
		make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone());
	let block_entry_b =
		make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone());
	let block_entry_c =
		make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone());
	let block_entry_d =
		make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone());
	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b.clone(),
		block_entry_c.clone(),
		block_entry_d.clone(),
	];
	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	// Force-approve the chain ending at D, up to (and including) height 2.
	let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();
	// A and B (heights 1 and 2) are fully approved...
	assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	// ...while C and D (heights 3 and 4) remain untouched.
	assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_c,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());
	assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_d,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());
	// Returned in descending-height order.
	assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]);
}
#[test]
fn load_all_blocks_works() {
	// `load_all_blocks` must return hashes ordered by block height (and by
	// insertion order within a height), regardless of insertion order.
	let (mut db, store) = make_db();

	let parent = Hash::repeat_byte(1);
	let hash_a = Hash::repeat_byte(2);
	let hash_b = Hash::repeat_byte(69);
	let hash_c = Hash::repeat_byte(42);
	let number = 10;

	let entry_a = make_block_entry(hash_a, parent, number, vec![]);
	let entry_b = make_block_entry(hash_b, parent, number, vec![]);
	let entry_c = make_block_entry(hash_c, hash_a, number + 1, vec![]);

	let n_validators = 10;
	let mut overlay = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay, entry_a.clone().into(), n_validators, |_| None).unwrap();
	// Insert C ahead of B so the sort is actually exercised.
	add_block_entry(&mut overlay, entry_c.clone().into(), n_validators, |_| None).unwrap();
	add_block_entry(&mut overlay, entry_b.clone().into(), n_validators, |_| None).unwrap();

	db.write(overlay.into_write_ops()).unwrap();

	assert_eq!(
		load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(),
		vec![hash_a, hash_b, hash_c],
	)
}
@@ -0,0 +1,237 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Approval DB migration helpers.
use super::*;
use crate::{
approval_db::common::{
block_entry_key, candidate_entry_key,
migration_helpers::{dummy_assignment_cert, make_bitvec},
Config, Error, Result, StoredBlockRange,
},
backend::{Backend, V2ReadBackend},
};
use pezkuwi_node_primitives::approval::v1::AssignmentCertKind;
use pezkuwi_node_subsystem_util::database::Database;
use sp_application_crypto::sp_core::H256;
use std::{collections::HashSet, sync::Arc};
/// Migrates `BlockEntry`, `CandidateEntry`, `ApprovalEntry` and `OurApproval` to version 3.
/// Returns on any error.
/// Must only be used in teyrchains DB migration code - `pezkuwi-service` crate.
pub fn v2_to_latest(db: Arc<dyn Database>, config: Config) -> Result<()> {
	let mut backend = crate::DbBackend::new(db, config);
	// Collect every known block, loading each entry in the v2 format.
	// NOTE(review): `.ok()?` inside `filter_map` silently skips blocks whose
	// entry fails to load rather than aborting the whole migration — confirm
	// this best-effort behaviour is intended (the doc comment above says
	// "returns on any error").
	let all_blocks = backend
		.load_all_blocks()
		.map_err(|e| Error::InternalError(e))?
		.iter()
		.filter_map(|block_hash| {
			backend
				.load_block_entry_v2(block_hash)
				.map_err(|e| Error::InternalError(e))
				.ok()?
		})
		.collect::<Vec<_>>();
	gum::info!(
		target: crate::LOG_TARGET,
		"Migrating candidate entries on top of {} blocks",
		all_blocks.len()
	);
	// Stage all conversions in an overlay so the commit below is atomic.
	let mut overlay = crate::OverlayedBackend::new(&backend);
	let mut counter = 0;
	// Get all candidate entries, approval entries and convert each of them.
	for block in all_blocks {
		for (candidate_index, (_core_index, candidate_hash)) in
			block.candidates().iter().enumerate()
		{
			// Loading the candidate will also perform the conversion to the updated format and
			// return that representation.
			if let Some(candidate_entry) = backend
				.load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex)
				.map_err(|e| Error::InternalError(e))?
			{
				// Write the updated representation.
				overlay.write_candidate_entry(candidate_entry);
				counter += 1;
			}
		}
		// Re-write the block entry itself in the latest format.
		overlay.write_block_entry(block);
	}
	gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter);
	// Commit all changes to DB.
	let write_ops = overlay.into_write_ops();
	backend.write(write_ops).unwrap();
	Ok(())
}
// Checks that the migration doesn't leave the DB in an inconsistent state.
// This function is to be used in tests.
//
// Loads every block and candidate in the latest format and asserts that the
// set of candidate hashes on disk matches `expected_candidates`. Panics (via
// `unwrap`/`assert_eq!`) on any load failure or mismatch; the `Result` return
// is always `Ok(())` and exists only for signature symmetry with the other
// migration helpers.
pub fn v1_to_latest_sanity_check(
	db: Arc<dyn Database>,
	config: Config,
	expected_candidates: HashSet<CandidateHash>,
) -> Result<()> {
	let backend = crate::DbBackend::new(db, config);
	// Load every block entry in the latest format; failures panic (test-only code).
	let all_blocks = backend
		.load_all_blocks()
		.unwrap()
		.iter()
		.map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap())
		.collect::<Vec<_>>();
	let mut candidates = HashSet::new();
	// Iterate all blocks and approval entries.
	for block in all_blocks {
		for (_core_index, candidate_hash) in block.candidates() {
			// Loading the candidate will also perform the conversion to the updated format and
			// return that representation.
			if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() {
				candidates.insert(candidate_entry.candidate.hash());
			}
		}
	}
	assert_eq!(candidates, expected_candidates);
	Ok(())
}
// Fills the db with dummy data in v2 scheme.
//
// Writes `RELAY_BLOCK_COUNT` relay blocks (heights 1..=10), each with a single
// dummy candidate, and returns the set of candidate hashes that were written,
// for later comparison by the sanity-check helpers.
pub fn v2_fill_test_data<F>(
	db: Arc<dyn Database>,
	config: Config,
	dummy_candidate_create: F,
) -> Result<HashSet<CandidateHash>>
where
	F: Fn(H256) -> CandidateReceipt<H256>,
{
	let mut backend = crate::DbBackend::new(db.clone(), config);
	let mut overlay_db = crate::OverlayedBackend::new(&backend);
	let mut expected_candidates = HashSet::new();

	const RELAY_BLOCK_COUNT: u32 = 10;

	// Heights 1..=10, so the stored range is [1, 11).
	let range = StoredBlockRange(1, 11);
	overlay_db.write_stored_block_range(range.clone());

	for relay_number in 1..=RELAY_BLOCK_COUNT {
		// One relay block per height, carrying one candidate each.
		let relay_hash = Hash::repeat_byte(relay_number as u8);
		let assignment_core_index = CoreIndex(relay_number);
		let candidate = dummy_candidate_create(relay_hash);
		let candidate_hash = candidate.hash();

		let at_height = vec![relay_hash];

		let block_entry = make_block_entry_v2(
			relay_hash,
			Default::default(),
			relay_number,
			vec![(assignment_core_index, candidate_hash)],
		);

		// An untriggered dummy assignment for validator 0 at tranche 0.
		let dummy_assignment = crate::approval_db::v2::OurAssignment {
			cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(),
			tranche: 0,
			validator_index: ValidatorIndex(0),
			triggered: false,
		};

		let candidate_entry = crate::approval_db::v2::CandidateEntry {
			candidate,
			session: 123,
			block_assignments: vec![(
				relay_hash,
				crate::approval_db::v2::ApprovalEntry {
					tranches: Vec::new(),
					backing_group: GroupIndex(1),
					our_assignment: Some(dummy_assignment),
					our_approval_sig: None,
					approved: false,
					assigned_validators: make_bitvec(1),
				},
			)]
			.into_iter()
			.collect(),
			approvals: Default::default(),
		};

		overlay_db.write_blocks_at_height(relay_number, at_height.clone());
		expected_candidates.insert(candidate_entry.candidate.hash());

		// The entries themselves are written directly to the DB in the v2
		// encoding, bypassing the overlay (which always writes the latest
		// scheme) — this is the whole point of the helper.
		db.write(write_candidate_entry_v2(candidate_entry, config)).unwrap();
		db.write(write_block_entry_v2(block_entry, config)).unwrap();
	}

	let write_ops = overlay_db.into_write_ops();
	backend.write(write_ops).unwrap();

	Ok(expected_candidates)
}
/// Builds a v2 `BlockEntry` for test data, with fixed session/slot values and
/// empty bitfields sized to the number of candidates.
fn make_block_entry_v2(
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	candidates: Vec<(CoreIndex, CandidateHash)>,
) -> crate::approval_db::v2::BlockEntry {
	let n_candidates = candidates.len();
	crate::approval_db::v2::BlockEntry {
		candidates,
		block_hash,
		parent_hash,
		block_number,
		session: 1,
		slot: Slot::from(1),
		relay_vrf_story: [0u8; 32],
		approved_bitfield: make_bitvec(n_candidates),
		distributed_assignments: make_bitvec(n_candidates),
		children: Vec::new(),
	}
}
// Low level DB helper to write a candidate entry in v2 scheme.
//
// Bypasses the overlay (which always writes the latest scheme) so test setup
// can seed the DB with entries in the old v2 encoding.
fn write_candidate_entry_v2(
	candidate_entry: crate::approval_db::v2::CandidateEntry,
	config: Config,
) -> DBTransaction {
	let mut tx = DBTransaction::new();
	tx.put_vec(
		config.col_approval_data,
		&candidate_entry_key(&candidate_entry.candidate.hash()),
		candidate_entry.encode(),
	);
	tx
}
// Low level DB helper to write a block entry in v2 scheme.
//
// Bypasses the overlay (which always writes the latest scheme) so test setup
// can seed the DB with entries in the old v2 encoding.
fn write_block_entry_v2(
	block_entry: crate::approval_db::v2::BlockEntry,
	config: Config,
) -> DBTransaction {
	let mut tx = DBTransaction::new();
	tx.put_vec(
		config.col_approval_data,
		&block_entry_key(&block_entry.block_hash),
		block_entry.encode(),
	);
	tx
}
@@ -0,0 +1,137 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Version 3 of the DB schema.
//!
//! Version 3 modifies the `our_approval` format of `ApprovalEntry`
//! and adds a new field `pending_signatures` for `BlockEntry`
use codec::{Decode, Encode};
use pezkuwi_node_primitives::approval::v2::CandidateBitfield;
use pezkuwi_node_subsystem::SubsystemResult;
use pezkuwi_node_subsystem_util::database::{DBTransaction, Database};
use pezkuwi_overseer::SubsystemError;
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateIndex, CandidateReceiptV2 as CandidateReceipt, CoreIndex,
GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use std::collections::BTreeMap;
use super::common::{block_entry_key, candidate_entry_key, load_decode, Config};
/// Re-export this structs as v3 since they did not change between v2 and v3.
pub use super::v2::{Bitfield, OurAssignment, Tick, TrancheEntry};
pub mod migration_helpers;
#[cfg(test)]
pub mod tests;
/// Metadata about our approval signature.
///
/// Introduced by the v3 schema, which changed the `our_approval` format of
/// `ApprovalEntry` (see module docs): a single signature may cover several
/// candidates, identified by `signed_candidates_indices`.
// NOTE: field order is part of the derived SCALE encoding; do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct OurApproval {
	/// The signature for the candidates hashes pointed by indices.
	pub signature: ValidatorSignature,
	/// The indices of the candidates signed in this approval.
	pub signed_candidates_indices: CandidateBitfield,
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
// NOTE: field order is part of the derived SCALE encoding; do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
	/// The tranche entries recorded for this candidate.
	pub tranches: Vec<TrancheEntry>,
	/// The index of the candidate's backing group.
	pub backing_group: GroupIndex,
	/// Our own assignment, if any.
	pub our_assignment: Option<OurAssignment>,
	/// Our own approval signature metadata, if we signed.
	pub our_approval_sig: Option<OurApproval>,
	// `n_validators` bits.
	pub assigned_validators: Bitfield,
	/// Whether the candidate is approved in the context of this block.
	pub approved: bool,
}
/// Metadata regarding approval of a particular candidate.
// NOTE: field order is part of the derived SCALE encoding; do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct CandidateEntry {
	/// The candidate receipt itself.
	pub candidate: CandidateReceipt,
	/// The session index this entry belongs to.
	pub session: SessionIndex,
	// Assignments are based on blocks, so we need to track assignments separately
	// based on the block we are looking at.
	pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
	/// Bitfield of approvals recorded for this candidate.
	pub approvals: Bitfield,
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
// NOTE: field order is part of the derived SCALE encoding; do not reorder.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct BlockEntry {
	/// The hash of this block.
	pub block_hash: Hash,
	/// The number (height) of this block.
	pub block_number: BlockNumber,
	/// The hash of this block's parent.
	pub parent_hash: Hash,
	/// The session index this block belongs to.
	pub session: SessionIndex,
	/// The slot of this block.
	pub slot: Slot,
	/// Random bytes derived from the VRF submitted within the block by the block
	/// author as a credential and used as input to approval assignment criteria.
	pub relay_vrf_story: [u8; 32],
	// The candidates included as-of this block and the index of the core they are
	// leaving. Sorted ascending by core index.
	pub candidates: Vec<(CoreIndex, CandidateHash)>,
	// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
	// The i'th bit is `true` iff the candidate has been approved in the context of this
	// block. The block can be considered approved if the bitfield has all bits set to `true`.
	pub approved_bitfield: Bitfield,
	/// The hashes of the known children of this block.
	pub children: Vec<Hash>,
	// A list of candidates we have checked, but didn't sign and
	// advertise the vote yet. New in the v3 schema (see module docs).
	pub candidates_pending_signature: BTreeMap<CandidateIndex, CandidateSigningContext>,
	// Assignments we already distributed. A 1 bit means the candidate index for which
	// we already have sent out an assignment. We need this to avoid distributing
	// multiple core assignments more than once.
	pub distributed_assignments: Bitfield,
}
/// Context needed for creating an approval signature for a given candidate.
///
/// Stored per pending candidate in `BlockEntry::candidates_pending_signature`.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct CandidateSigningContext {
	/// The candidate hash, to be included in the signature.
	pub candidate_hash: CandidateHash,
	/// The latest tick we have to create and send the approval.
	pub sign_no_later_than_tick: Tick,
}
/// Load a candidate entry from the aux store in v2 format.
///
/// Returns `Ok(None)` when no entry exists under the candidate's key.
pub fn load_candidate_entry_v2(
	store: &dyn Database,
	config: &Config,
	candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<super::v2::CandidateEntry>> {
	let key = candidate_entry_key(candidate_hash);
	match load_decode(store, config.col_approval_data, &key) {
		Ok(maybe_entry) => Ok(maybe_entry.map(|e: super::v2::CandidateEntry| e.into())),
		Err(e) => Err(SubsystemError::with_origin("approval-voting", e)),
	}
}
/// Load a block entry from the aux store in v2 format.
///
/// Returns `Ok(None)` when no entry exists under the block's key.
pub fn load_block_entry_v2(
	store: &dyn Database,
	config: &Config,
	block_hash: &Hash,
) -> SubsystemResult<Option<super::v2::BlockEntry>> {
	let key = block_entry_key(block_hash);
	match load_decode(store, config.col_approval_data, &key) {
		Ok(maybe_entry) => Ok(maybe_entry.map(|e: super::v2::BlockEntry| e.into())),
		Err(e) => Err(SubsystemError::with_origin("approval-voting", e)),
	}
}
@@ -0,0 +1,624 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the aux-schema of approval voting.
use crate::{
approval_db::{
common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *},
v3::*,
},
backend::{Backend, OverlayedBackend},
ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo},
};
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, CoreIndex, GroupIndex,
Hash, MutateDescriptorV2,
};
use pezkuwi_node_subsystem_util::database::Database;
use pezkuwi_primitives::Id as ParaId;
use sp_consensus_slots::Slot;
use std::{collections::HashMap, sync::Arc};
use pezkuwi_primitives_test_helpers::{
dummy_candidate_receipt_v2, dummy_candidate_receipt_v2_bad_sig, dummy_hash,
};
// Test DB layout: a single column (index 0) holding all approval data.
const DATA_COL: u32 = 0;
const NUM_COLUMNS: u32 = 1;

const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL };
/// Creates an in-memory test database, returning a backend plus a second
/// handle onto the same underlying store for direct reads.
fn make_db() -> (DbBackend, Arc<dyn Database>) {
	let mem_db = kvdb_memorydb::create(NUM_COLUMNS);
	let adapter = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(mem_db, &[]);
	let db_writer: Arc<dyn Database> = Arc::new(adapter);
	let backend = DbBackend::new(db_writer.clone(), TEST_CONFIG);
	(backend, db_writer)
}
/// Builds a v3 `BlockEntry` for tests, with fixed session/slot values and an
/// all-zero approval bitfield sized to the number of candidates.
fn make_block_entry(
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	candidates: Vec<(CoreIndex, CandidateHash)>,
) -> BlockEntry {
	let n_candidates = candidates.len();
	BlockEntry {
		candidates,
		block_hash,
		parent_hash,
		block_number,
		session: 1,
		slot: Slot::from(1),
		relay_vrf_story: [0u8; 32],
		approved_bitfield: make_bitvec(n_candidates),
		candidates_pending_signature: Default::default(),
		distributed_assignments: Default::default(),
		children: Vec::new(),
	}
}
/// Returns a dummy candidate receipt for the given para id and relay parent.
fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt {
	let mut receipt = dummy_candidate_receipt_v2(dummy_hash());
	receipt.descriptor.set_para_id(para_id);
	receipt.descriptor.set_relay_parent(relay_parent);
	receipt.into()
}
// Round-trips all entry kinds through the overlay, then deletes them again.
#[test]
fn read_write() {
	let (mut db, store) = make_db();

	let hash_a = Hash::repeat_byte(1);
	let hash_b = Hash::repeat_byte(2);
	let candidate_hash = dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None).hash();

	let range = StoredBlockRange(10, 20);
	let at_height = vec![hash_a, hash_b];

	let block_entry =
		make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]);

	let candidate_entry = CandidateEntry {
		candidate: dummy_candidate_receipt_v2_bad_sig(dummy_hash(), None),
		session: 5,
		block_assignments: vec![(
			hash_a,
			ApprovalEntry {
				tranches: Vec::new(),
				backing_group: GroupIndex(1),
				our_assignment: None,
				our_approval_sig: None,
				assigned_validators: Default::default(),
				approved: false,
			},
		)]
		.into_iter()
		.collect(),
		approvals: Default::default(),
	};

	// Write everything through the overlay and commit.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(range.clone());
	overlay_db.write_blocks_at_height(1, at_height.clone());
	overlay_db.write_block_entry(block_entry.clone().into());
	overlay_db.write_candidate_entry(candidate_entry.clone().into());

	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Everything just written must read back identically.
	assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range));
	assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(),
		Some(block_entry.into())
	);
	assert_eq!(
		load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(),
		Some(candidate_entry.into()),
	);

	// Now delete the entries again and verify the deletions take effect.
	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.delete_blocks_at_height(1);
	overlay_db.delete_block_entry(&hash_a);
	overlay_db.delete_candidate_entry(&candidate_hash);

	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none());
	assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash)
		.unwrap()
		.is_none());
}
// Adding block entries also creates/extends the candidate entries for the
// candidates they include.
#[test]
fn add_block_entry_works() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);

	let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash);
	let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash);
	let candidate_hash_a = candidate_receipt_a.hash();
	let candidate_hash_b = candidate_receipt_b.hash();

	let block_number = 10;

	// Block A includes only candidate A; block B includes both A and B.
	let block_entry_a = make_block_entry(
		block_hash_a,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a)],
	);
	let block_entry_b = make_block_entry(
		block_hash_b,
		parent_hash,
		block_number,
		vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)],
	);

	let n_validators = 10;

	let mut new_candidate_info = HashMap::new();
	new_candidate_info
		.insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None));

	// Add block A first and commit.
	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| {
		new_candidate_info.get(h).map(|x| x.clone())
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Then add block B, which additionally introduces candidate B.
	new_candidate_info
		.insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None));

	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| {
		new_candidate_info.get(h).map(|x| x.clone())
	})
	.unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Both block entries must be stored as-is.
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
		Some(block_entry_a.into())
	);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
		Some(block_entry_b.into())
	);

	// Candidate A has assignments under both blocks, candidate B only under B.
	let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a)
		.unwrap()
		.unwrap();
	assert_eq!(
		candidate_entry_a.block_assignments.keys().collect::<Vec<_>>(),
		vec![&block_hash_a, &block_hash_b]
	);

	let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b)
		.unwrap()
		.unwrap();
	assert_eq!(candidate_entry_b.block_assignments.keys().collect::<Vec<_>>(), vec![&block_hash_b]);
}
// Adding a block whose parent is already known must append it to the parent's
// `children` list.
#[test]
fn add_block_entry_adds_child() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);

	let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new());
	let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new());

	let n_validators = 10;

	// Insert A, then its child B, and commit both in one batch.
	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
	add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
	db.write(overlay_db.into_write_ops()).unwrap();

	// The stored entry for A must now record B as a child.
	block_entry_a.children.push(block_hash_b);

	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
		Some(block_entry_a.into())
	);
	assert_eq!(
		load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
		Some(block_entry_b.into())
	);
}
#[test]
fn canonicalize_works() {
	let (mut db, store) = make_db();

	// -> B1 -> C1 -> D1 -> E1
	// A -> B2 -> C2 -> D2 -> E2
	//
	// We'll canonicalize C1. Everything except D1 should disappear.
	//
	// Candidates:
	// Cand1 in B2
	// Cand2 in C2
	// Cand3 in C2 and D1
	// Cand4 in D1
	// Cand5 in D2
	// Only Cand3 and Cand4 should remain after canonicalize.

	let n_validators = 10;

	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 5));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	let genesis = Hash::repeat_byte(0);
	let block_hash_a = Hash::repeat_byte(1);
	let block_hash_b1 = Hash::repeat_byte(2);
	let block_hash_b2 = Hash::repeat_byte(3);
	let block_hash_c1 = Hash::repeat_byte(4);
	let block_hash_c2 = Hash::repeat_byte(5);
	let block_hash_d1 = Hash::repeat_byte(6);
	let block_hash_d2 = Hash::repeat_byte(7);
	let block_hash_e1 = Hash::repeat_byte(8);
	let block_hash_e2 = Hash::repeat_byte(9);

	let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis);
	let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a);
	let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a);
	let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1);
	let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1);
	let candidate_receipt_e1 = make_candidate(ParaId::from(6_u32), block_hash_e1);

	let cand_hash_1 = candidate_receipt_genesis.hash();
	let cand_hash_2 = candidate_receipt_a.hash();
	let cand_hash_3 = candidate_receipt_b.hash();
	let cand_hash_4 = candidate_receipt_b1.hash();
	let cand_hash_5 = candidate_receipt_c1.hash();
	let cand_hash_6 = candidate_receipt_e1.hash();

	// Build the two forks from the diagram above.
	let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new());
	let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new());
	let block_entry_b2 =
		make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]);
	let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new());
	let block_entry_c2 = make_block_entry(
		block_hash_c2,
		block_hash_b2,
		3,
		vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)],
	);
	let block_entry_d1 = make_block_entry(
		block_hash_d1,
		block_hash_c1,
		4,
		vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)],
	);
	let block_entry_d2 =
		make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]);
	let block_entry_e1 =
		make_block_entry(block_hash_e1, block_hash_d1, 5, vec![(CoreIndex(0), cand_hash_6)]);
	let block_entry_e2 =
		make_block_entry(block_hash_e2, block_hash_d2, 5, vec![(CoreIndex(0), cand_hash_6)]);

	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			cand_hash_1,
			NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None),
		);

		candidate_info
			.insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None));

		candidate_info
			.insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None));

		candidate_info
			.insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None));

		candidate_info
			.insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None));

		candidate_info
			.insert(cand_hash_6, NewCandidateInfo::new(candidate_receipt_e1, GroupIndex(6), None));

		candidate_info
	};

	// now insert all the blocks.
	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b1.clone(),
		block_entry_b2.clone(),
		block_entry_c1.clone(),
		block_entry_c2.clone(),
		block_entry_d1.clone(),
		block_entry_d2.clone(),
		block_entry_e1.clone(),
		block_entry_e2.clone(),
	];

	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Helper: asserts which blocks each candidate has assignments under, or
	// that the candidate entry is absent (`None`).
	let check_candidates_in_store = |expected: Vec<(CandidateHash, Option<Vec<_>>)>| {
		for (c_hash, in_blocks) in expected {
			let (entry, in_blocks) = match in_blocks {
				None => {
					assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash)
						.unwrap()
						.is_none());
					continue;
				},
				Some(i) => (
					load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(),
					i,
				),
			};

			assert_eq!(entry.block_assignments.len(), in_blocks.len());

			for x in in_blocks {
				assert!(entry.block_assignments.contains_key(&x));
			}
		}
	};

	// Helper: asserts which candidates each block contains, or that the block
	// entry is absent (`None`).
	let check_blocks_in_store = |expected: Vec<(Hash, Option<Vec<_>>)>| {
		for (hash, with_candidates) in expected {
			let (entry, with_candidates) = match with_candidates {
				None => {
					assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash)
						.unwrap()
						.is_none());
					continue;
				},
				Some(i) =>
					(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i),
			};

			assert_eq!(entry.candidates.len(), with_candidates.len());

			for x in with_candidates {
				assert!(entry.candidates.iter().any(|(_, c)| c == &x));
			}
		}
	};

	// Sanity-check the state before canonicalization.
	check_candidates_in_store(vec![
		(cand_hash_1, Some(vec![block_hash_b2])),
		(cand_hash_2, Some(vec![block_hash_c2])),
		(cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, Some(vec![block_hash_d2])),
	]);

	check_blocks_in_store(vec![
		(block_hash_a, Some(vec![])),
		(block_hash_b1, Some(vec![])),
		(block_hash_b2, Some(vec![cand_hash_1])),
		(block_hash_c1, Some(vec![])),
		(block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_d2, Some(vec![cand_hash_5])),
	]);

	// Canonicalize at C1 (height 3): the stored range now starts at 4, and
	// everything not descending from C1 is pruned.
	let mut overlay_db = OverlayedBackend::new(&db);
	canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	assert_eq!(
		load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(),
		StoredBlockRange(4, 6)
	);

	check_candidates_in_store(vec![
		(cand_hash_1, None),
		(cand_hash_2, None),
		(cand_hash_3, Some(vec![block_hash_d1])),
		(cand_hash_4, Some(vec![block_hash_d1])),
		(cand_hash_5, None),
		(cand_hash_6, Some(vec![block_hash_e1])),
	]);

	check_blocks_in_store(vec![
		(block_hash_a, None),
		(block_hash_b1, None),
		(block_hash_b2, None),
		(block_hash_c1, None),
		(block_hash_c2, None),
		(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
		(block_hash_e1, Some(vec![cand_hash_6])),
		(block_hash_d2, None),
	]);

	// Canonicalize once more at D1 (height 4): only E1 and its candidate remain.
	let mut overlay_db = OverlayedBackend::new(&db);
	canonicalize(&mut overlay_db, 4, block_hash_d1).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	assert_eq!(
		load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(),
		StoredBlockRange(5, 6)
	);

	check_candidates_in_store(vec![
		(cand_hash_1, None),
		(cand_hash_2, None),
		(cand_hash_3, None),
		(cand_hash_4, None),
		(cand_hash_5, None),
		(cand_hash_6, Some(vec![block_hash_e1])),
	]);

	check_blocks_in_store(vec![
		(block_hash_a, None),
		(block_hash_b1, None),
		(block_hash_b2, None),
		(block_hash_c1, None),
		(block_hash_c2, None),
		(block_hash_d1, None),
		(block_hash_e1, Some(vec![cand_hash_6])),
		(block_hash_d2, None),
	]);
}
// Force-approving at a height marks all ancestors up to (and including) that
// height as approved, leaving higher blocks untouched.
#[test]
fn force_approve_works() {
	let (mut db, store) = make_db();
	let n_validators = 10;

	let mut overlay_db = OverlayedBackend::new(&db);
	overlay_db.write_stored_block_range(StoredBlockRange(1, 4));
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	let candidate_hash = CandidateHash(Hash::repeat_byte(42));
	let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)];
	let candidate_info = {
		let mut candidate_info = HashMap::new();
		candidate_info.insert(
			candidate_hash,
			NewCandidateInfo::new(
				make_candidate(ParaId::from(1_u32), Default::default()),
				GroupIndex(1),
				None,
			),
		);

		candidate_info
	};

	// A single chain A (height 1) -> B -> C -> D (height 4), each block
	// including the same candidate.
	let block_hash_a = Hash::repeat_byte(1); // 1
	let block_hash_b = Hash::repeat_byte(2);
	let block_hash_c = Hash::repeat_byte(3);
	let block_hash_d = Hash::repeat_byte(4); // 4

	let block_entry_a =
		make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone());
	let block_entry_b =
		make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone());
	let block_entry_c =
		make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone());
	let block_entry_d =
		make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone());

	let blocks = vec![
		block_entry_a.clone(),
		block_entry_b.clone(),
		block_entry_c.clone(),
		block_entry_d.clone(),
	];

	let mut overlay_db = OverlayedBackend::new(&db);
	for block_entry in blocks {
		add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
			candidate_info.get(h).map(|x| x.clone())
		})
		.unwrap();
	}
	// Force-approve up to height 2 within the chain ending at D.
	let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap();
	let write_ops = overlay_db.into_write_ops();
	db.write(write_ops).unwrap();

	// Blocks at heights 1 and 2 are now fully approved...
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.all());
	// ...while the blocks above the target height are untouched.
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());
	assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,)
		.unwrap()
		.unwrap()
		.approved_bitfield
		.not_any());

	// The approved hashes are reported in descending height order.
	assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]);
}
// `load_all_blocks` must return the stored hashes sorted by block number,
// regardless of insertion order.
#[test]
fn load_all_blocks_works() {
	let (mut db, store) = make_db();

	let parent_hash = Hash::repeat_byte(1);
	let block_hash_a = Hash::repeat_byte(2);
	let block_hash_b = Hash::repeat_byte(69);
	let block_hash_c = Hash::repeat_byte(42);

	let block_number = 10;
	let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]);
	let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]);
	let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]);

	let n_validators = 10;

	let mut overlay_db = OverlayedBackend::new(&db);
	add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
	// add C before B to test sorting.
	add_block_entry(&mut overlay_db, block_entry_c.clone().into(), n_validators, |_| None).unwrap();
	add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
	db.write(overlay_db.into_write_ops()).unwrap();

	// A and B (height 10) come before C (height 11) despite C being added first.
	assert_eq!(
		load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(),
		vec![block_hash_a, block_hash_b, block_hash_c],
	)
}
@@ -0,0 +1,249 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! An abstraction over storage used by the chain selection subsystem.
//!
//! This provides both a [`Backend`] trait and an [`OverlayedBackend`]
//! struct which allows in-memory changes to be applied on top of a
//! [`Backend`], maintaining consistency between queries and temporary writes,
//! before any commit to the underlying storage is made.
use pezkuwi_node_subsystem::SubsystemResult;
use pezkuwi_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash};
use std::collections::HashMap;
use super::{
approval_db::common::StoredBlockRange,
persisted_entries::{BlockEntry, CandidateEntry},
};
/// A single write operation to be applied to the backend storage.
#[derive(Debug)]
pub enum BackendWriteOp {
	/// Replace the stored block range.
	WriteStoredBlockRange(StoredBlockRange),
	/// Replace the set of block hashes known at the given height.
	WriteBlocksAtHeight(BlockNumber, Vec<Hash>),
	/// Insert or replace a block entry.
	WriteBlockEntry(BlockEntry),
	/// Insert or replace a candidate entry.
	WriteCandidateEntry(CandidateEntry),
	/// Delete the stored block range.
	DeleteStoredBlockRange,
	/// Delete the block hashes stored at the given height.
	DeleteBlocksAtHeight(BlockNumber),
	/// Delete the block entry with the given hash.
	DeleteBlockEntry(Hash),
	/// Delete the candidate entry with the given hash.
	DeleteCandidateEntry(CandidateHash),
}
/// An abstraction over backend storage for the logic of this subsystem.
/// Implementation must always target latest storage version.
pub trait Backend {
	/// Load a block entry from the DB.
	fn load_block_entry(&self, hash: &Hash) -> SubsystemResult<Option<BlockEntry>>;
	/// Load a candidate entry from the DB.
	fn load_candidate_entry(
		&self,
		candidate_hash: &CandidateHash,
	) -> SubsystemResult<Option<CandidateEntry>>;
	/// Load all blocks at a specific height.
	fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult<Vec<Hash>>;
	/// Load all blocks from the DB.
	fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>>;
	/// Load the stored block range from the DB.
	fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>>;

	/// Atomically write the list of operations, with later operations taking precedence over prior.
	fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
	where
		I: IntoIterator<Item = BackendWriteOp>;
}
/// A read only backend to enable db migration from version 1 of DB.
///
/// The loaders return the latest in-memory types: implementations convert the
/// legacy on-disk representation when reading.
pub trait V1ReadBackend: Backend {
	/// Load a candidate entry from the DB with scheme version 1.
	fn load_candidate_entry_v1(
		&self,
		candidate_hash: &CandidateHash,
		candidate_index: CandidateIndex,
	) -> SubsystemResult<Option<CandidateEntry>>;

	/// Load a block entry from the DB with scheme version 1.
	fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult<Option<BlockEntry>>;
}
/// A read only backend to enable db migration from version 2 of DB.
///
/// The loaders return the latest in-memory types: implementations convert the
/// legacy on-disk representation when reading.
pub trait V2ReadBackend: Backend {
	/// Load a candidate entry from the DB with scheme version 2.
	fn load_candidate_entry_v2(
		&self,
		candidate_hash: &CandidateHash,
		candidate_index: CandidateIndex,
	) -> SubsystemResult<Option<CandidateEntry>>;

	/// Load a block entry from the DB with scheme version 2.
	fn load_block_entry_v2(&self, block_hash: &Hash) -> SubsystemResult<Option<BlockEntry>>;
}
// Status of the stored block range in the `OverlayedBackend`.
#[derive(PartialEq)]
enum BlockRangeStatus {
	// Value has not been modified; reads fall through to the inner backend.
	NotModified,
	// Value has been deleted.
	Deleted,
	// Value has been updated.
	Inserted(StoredBlockRange),
}
/// An in-memory overlay over the backend.
///
/// This maintains read-only access to the underlying backend, but can be
/// converted into a set of write operations which will, when written to
/// the underlying backend, give the same view as the state of the overlay.
pub struct OverlayedBackend<'a, B: 'a> {
	inner: &'a B,

	// Pending change to the stored block range; `NotModified` means query inner.
	stored_block_range: BlockRangeStatus,
	// `None` means 'deleted', missing means query inner.
	blocks_at_height: HashMap<BlockNumber, Option<Vec<Hash>>>,
	// `None` means 'deleted', missing means query inner.
	block_entries: HashMap<Hash, Option<BlockEntry>>,
	// `None` means 'deleted', missing means query inner.
	candidate_entries: HashMap<CandidateHash, Option<CandidateEntry>>,
}
impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
	/// Create a fresh overlay over the given backend, with no pending changes.
	pub fn new(backend: &'a B) -> Self {
		OverlayedBackend {
			inner: backend,
			stored_block_range: BlockRangeStatus::NotModified,
			blocks_at_height: HashMap::new(),
			block_entries: HashMap::new(),
			candidate_entries: HashMap::new(),
		}
	}

	/// Whether the overlay contains no pending writes or deletions.
	pub fn is_empty(&self) -> bool {
		self.block_entries.is_empty() &&
			self.candidate_entries.is_empty() &&
			self.blocks_at_height.is_empty() &&
			self.stored_block_range == BlockRangeStatus::NotModified
	}

	/// Collect the hashes of all known blocks, overlay included.
	///
	/// Walks every height in the stored block range `[range.0, range.1)` and
	/// gathers the block hashes recorded at each height.
	pub fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>> {
		let mut hashes = Vec::new();
		if let Some(stored_blocks) = self.load_stored_blocks()? {
			for height in stored_blocks.0..stored_blocks.1 {
				hashes.extend(self.load_blocks_at_height(&height)?);
			}
		}

		Ok(hashes)
	}

	/// Load the stored block range, consulting the overlay first and falling
	/// back to the inner backend when unmodified.
	pub fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>> {
		match self.stored_block_range {
			BlockRangeStatus::Inserted(ref value) => Ok(Some(value.clone())),
			BlockRangeStatus::Deleted => Ok(None),
			BlockRangeStatus::NotModified => self.inner.load_stored_blocks(),
		}
	}

	/// Load the block hashes stored at the given height. An overlay deletion
	/// yields an empty `Vec`; a missing overlay entry queries the inner backend.
	pub fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult<Vec<Hash>> {
		// `height` is already a reference — no extra borrow needed (clippy: needless_borrow).
		if let Some(val) = self.blocks_at_height.get(height) {
			return Ok(val.clone().unwrap_or_default());
		}

		self.inner.load_blocks_at_height(height)
	}

	/// Load a block entry, consulting the overlay first. An overlay deletion
	/// yields `None`.
	pub fn load_block_entry(&self, hash: &Hash) -> SubsystemResult<Option<BlockEntry>> {
		if let Some(val) = self.block_entries.get(hash) {
			return Ok(val.clone());
		}

		self.inner.load_block_entry(hash)
	}

	/// Load a candidate entry, consulting the overlay first. An overlay
	/// deletion yields `None`.
	pub fn load_candidate_entry(
		&self,
		candidate_hash: &CandidateHash,
	) -> SubsystemResult<Option<CandidateEntry>> {
		if let Some(val) = self.candidate_entries.get(candidate_hash) {
			return Ok(val.clone());
		}

		self.inner.load_candidate_entry(candidate_hash)
	}

	/// Buffer a write of the stored block range.
	pub fn write_stored_block_range(&mut self, range: StoredBlockRange) {
		self.stored_block_range = BlockRangeStatus::Inserted(range);
	}

	/// Buffer a deletion of the stored block range.
	pub fn delete_stored_block_range(&mut self) {
		self.stored_block_range = BlockRangeStatus::Deleted;
	}

	/// Buffer a write of the block hashes at a given height.
	pub fn write_blocks_at_height(&mut self, height: BlockNumber, blocks: Vec<Hash>) {
		self.blocks_at_height.insert(height, Some(blocks));
	}

	/// Buffer a deletion of the block hashes at a given height.
	pub fn delete_blocks_at_height(&mut self, height: BlockNumber) {
		self.blocks_at_height.insert(height, None);
	}

	/// Buffer a write of a block entry, keyed by its block hash.
	pub fn write_block_entry(&mut self, entry: BlockEntry) {
		self.block_entries.insert(entry.block_hash(), Some(entry));
	}

	/// Buffer a deletion of the block entry with the given hash.
	pub fn delete_block_entry(&mut self, hash: &Hash) {
		self.block_entries.insert(*hash, None);
	}

	/// Buffer a write of a candidate entry, keyed by its candidate hash.
	pub fn write_candidate_entry(&mut self, entry: CandidateEntry) {
		self.candidate_entries.insert(entry.candidate_receipt().hash(), Some(entry));
	}

	/// Buffer a deletion of the candidate entry with the given hash.
	pub fn delete_candidate_entry(&mut self, hash: &CandidateHash) {
		self.candidate_entries.insert(*hash, None);
	}

	/// Transform this backend into a set of write-ops to be written to the
	/// inner backend.
	///
	/// The stored-block-range op (if any) comes first, followed by the
	/// per-height, per-block and per-candidate ops.
	pub fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> {
		let blocks_at_height_ops = self.blocks_at_height.into_iter().map(|(h, v)| match v {
			Some(v) => BackendWriteOp::WriteBlocksAtHeight(h, v),
			None => BackendWriteOp::DeleteBlocksAtHeight(h),
		});

		let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v {
			Some(v) => BackendWriteOp::WriteBlockEntry(v),
			None => BackendWriteOp::DeleteBlockEntry(h),
		});

		let candidate_entry_ops = self.candidate_entries.into_iter().map(|(h, v)| match v {
			Some(v) => BackendWriteOp::WriteCandidateEntry(v),
			None => BackendWriteOp::DeleteCandidateEntry(h),
		});

		let stored_block_range_ops = match self.stored_block_range {
			BlockRangeStatus::Inserted(val) => Some(BackendWriteOp::WriteStoredBlockRange(val)),
			BlockRangeStatus::Deleted => Some(BackendWriteOp::DeleteStoredBlockRange),
			BlockRangeStatus::NotModified => None,
		};

		stored_block_range_ops
			.into_iter()
			.chain(blocks_at_height_ops)
			.chain(block_entry_ops)
			.chain(candidate_entry_ops)
	}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,428 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Middleware interface that leverages low-level database operations
//! to provide a clean API for processing block and candidate imports.
use pezkuwi_node_subsystem::{SubsystemError, SubsystemResult};
use bitvec::order::Lsb0 as BitOrderLsb0;
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateReceiptV2 as CandidateReceipt, GroupIndex, Hash,
};
use std::collections::{hash_map::Entry, BTreeMap, HashMap};
use super::{
approval_db::{common::StoredBlockRange, v2::OurAssignment},
backend::{Backend, OverlayedBackend},
persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry},
LOG_TARGET,
};
/// Information about a new candidate necessary to instantiate the requisite
/// candidate and approval entries.
#[derive(Clone)]
pub struct NewCandidateInfo {
	// The receipt of the candidate itself.
	candidate: CandidateReceipt,
	// The index of the group that backed the candidate.
	backing_group: GroupIndex,
	// Our own assignment for checking this candidate, if we have one.
	our_assignment: Option<OurAssignment>,
}
impl NewCandidateInfo {
/// Convenience constructor
pub fn new(
candidate: CandidateReceipt,
backing_group: GroupIndex,
our_assignment: Option<OurAssignment>,
) -> Self {
Self { candidate, backing_group, our_assignment }
}
}
/// Remove the block entry for `block_hash` and strip this block from the
/// `block_assignments` of every candidate it references.
///
/// Candidate entries are loaded through — and cached in — `visited_candidates`,
/// so a candidate shared between several pruned blocks is only read from the DB
/// once; the caller is responsible for writing back or deleting the modified
/// entries afterwards.
///
/// Returns the children of the removed block, or an empty `Vec` if no block
/// entry exists for `block_hash`.
fn visit_and_remove_block_entry(
	block_hash: Hash,
	overlayed_db: &mut OverlayedBackend<'_, impl Backend>,
	visited_candidates: &mut HashMap<CandidateHash, CandidateEntry>,
) -> SubsystemResult<Vec<Hash>> {
	let block_entry = match overlayed_db.load_block_entry(&block_hash)? {
		None => return Ok(Vec::new()),
		Some(b) => b,
	};

	overlayed_db.delete_block_entry(&block_hash);
	for (_, candidate_hash) in block_entry.candidates() {
		let candidate = match visited_candidates.entry(*candidate_hash) {
			Entry::Occupied(e) => e.into_mut(),
			Entry::Vacant(e) => {
				e.insert(match overlayed_db.load_candidate_entry(candidate_hash)? {
					// The `continue` jumps the outer `for` loop entirely, so
					// nothing is inserted into the vacant map entry.
					// Should not happen except for corrupt DB.
					None => continue,
					Some(c) => c,
				})
			},
		};

		candidate.block_assignments.remove(&block_hash);
	}

	Ok(block_entry.children)
}
/// Canonicalize some particular block, pruning everything before it and
/// pruning any competing branches at the same height.
///
/// Deletes all block entries strictly below `canon_number`, all blocks at
/// `canon_number` other than `canon_hash`, and every descendant of those pruned
/// siblings. Affected candidate entries lose the pruned blocks from their
/// `block_assignments` and are deleted once empty. Finally the stored block
/// range is advanced to start at `canon_number + 1`.
pub fn canonicalize(
	overlay_db: &mut OverlayedBackend<'_, impl Backend>,
	canon_number: BlockNumber,
	canon_hash: Hash,
) -> SubsystemResult<()> {
	// Nothing stored, or the stored range already starts above the canonicalized height.
	let range = match overlay_db.load_stored_blocks()? {
		None => return Ok(()),
		Some(range) if range.0 > canon_number => return Ok(()),
		Some(range) => range,
	};

	// Storing all candidates in memory is potentially heavy, but should be fine
	// as long as finality doesn't stall for a long while. We could optimize this
	// by keeping only the metadata about which blocks reference each candidate.
	let mut visited_candidates = HashMap::new();

	// All the block heights we visited but didn't necessarily delete everything from.
	let mut visited_heights = HashMap::new();

	// First visit everything before the height.
	for i in range.0..canon_number {
		let at_height = overlay_db.load_blocks_at_height(&i)?;
		overlay_db.delete_blocks_at_height(i);

		for b in at_height {
			visit_and_remove_block_entry(b, overlay_db, &mut visited_candidates)?;
		}
	}

	// Then visit everything at the height.
	let pruned_branches = {
		let at_height = overlay_db.load_blocks_at_height(&canon_number)?;
		overlay_db.delete_blocks_at_height(canon_number);

		// Note that while there may be branches descending from blocks at earlier heights,
		// we have already covered them by removing everything at earlier heights.
		let mut pruned_branches = Vec::new();

		for b in at_height {
			let children = visit_and_remove_block_entry(b, overlay_db, &mut visited_candidates)?;

			// Only the children of non-canonical siblings are followed below;
			// the canonical block's own children survive.
			if b != canon_hash {
				pruned_branches.extend(children);
			}
		}

		pruned_branches
	};

	// Follow all children of non-canonicalized blocks.
	{
		let mut frontier: Vec<(BlockNumber, Hash)> =
			pruned_branches.into_iter().map(|h| (canon_number + 1, h)).collect();

		while let Some((height, next_child)) = frontier.pop() {
			let children =
				visit_and_remove_block_entry(next_child, overlay_db, &mut visited_candidates)?;

			// extend the frontier of branches to include the given height.
			frontier.extend(children.into_iter().map(|h| (height + 1, h)));

			// visit the at-height key for this deleted block's height,
			// loading it from the DB only the first time we see the height.
			let at_height = match visited_heights.entry(height) {
				Entry::Occupied(e) => e.into_mut(),
				Entry::Vacant(e) => e.insert(overlay_db.load_blocks_at_height(&height)?),
			};

			if let Some(i) = at_height.iter().position(|x| x == &next_child) {
				at_height.remove(i);
			}
		}
	}

	// Update all `CandidateEntry`s, deleting all those which now have empty `block_assignments`.
	for (candidate_hash, candidate) in visited_candidates.into_iter() {
		if candidate.block_assignments.is_empty() {
			overlay_db.delete_candidate_entry(&candidate_hash);
		} else {
			overlay_db.write_candidate_entry(candidate);
		}
	}

	// Update all blocks-at-height keys, deleting all those which are now empty.
	for (h, at) in visited_heights.into_iter() {
		if at.is_empty() {
			overlay_db.delete_blocks_at_height(h);
		} else {
			overlay_db.write_blocks_at_height(h, at);
		}
	}

	// due to the fork pruning, this range actually might go too far above where our actual highest
	// block is, if a relatively short fork is canonicalized.
	// TODO https://github.com/paritytech/polkadot/issues/3389
	let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2));

	overlay_db.write_stored_block_range(new_range);

	Ok(())
}
/// Record a new block entry.
///
/// This will update the blocks-at-height mapping, the stored block range, if necessary,
/// and add block and candidate entries. It will also add approval entries to existing
/// candidate entries and add this as a child of any block entry corresponding to the
/// parent hash.
///
/// Has no effect if there is already an entry for the block or `candidate_info` returns
/// `None` for any of the candidates referenced by the block entry. In these cases,
/// no information about new candidates will be referred to by this function.
///
/// NOTE(review): if `candidate_info` returns `None` partway through the candidate
/// loop, entries buffered by earlier iterations remain in the overlay — confirm
/// that callers discard the overlay on an empty return.
pub fn add_block_entry(
	store: &mut OverlayedBackend<'_, impl Backend>,
	entry: BlockEntry,
	n_validators: usize,
	candidate_info: impl Fn(&CandidateHash) -> Option<NewCandidateInfo>,
) -> SubsystemResult<Vec<(CandidateHash, CandidateEntry)>> {
	let session = entry.session();
	let parent_hash = entry.parent_hash();
	let number = entry.block_number();

	// Update the stored block range so that it covers this block's height.
	{
		let new_range = match store.load_stored_blocks()? {
			None => Some(StoredBlockRange(number, number + 1)),
			Some(range) if range.1 <= number => Some(StoredBlockRange(range.0, number + 1)),
			Some(_) => None,
		};

		// `if let` rather than `Option::map` for the unit-returning side effect.
		if let Some(new_range) = new_range {
			store.write_stored_block_range(new_range);
		}
	};

	// Update the blocks at height meta key.
	{
		let mut blocks_at_height = store.load_blocks_at_height(&number)?;
		if blocks_at_height.contains(&entry.block_hash()) {
			// seems we already have a block entry for this block. nothing to do here.
			return Ok(Vec::new());
		}

		blocks_at_height.push(entry.block_hash());
		store.write_blocks_at_height(number, blocks_at_height)
	};

	let mut candidate_entries = Vec::with_capacity(entry.candidates().len());

	// Read and write all updated candidate entries, creating fresh ones with an
	// all-zero approvals bitfield where none exist yet.
	{
		for (_, candidate_hash) in entry.candidates() {
			let NewCandidateInfo { candidate, backing_group, our_assignment } =
				match candidate_info(candidate_hash) {
					None => return Ok(Vec::new()),
					Some(info) => info,
				};

			// `candidate_hash` is already a reference; no extra borrow needed.
			let mut candidate_entry =
				store.load_candidate_entry(candidate_hash)?.unwrap_or_else(move || {
					CandidateEntry {
						candidate,
						session,
						block_assignments: BTreeMap::new(),
						approvals: bitvec::bitvec![u8, BitOrderLsb0; 0; n_validators],
					}
				});

			// Attach a fresh, empty approval entry for this block.
			candidate_entry.block_assignments.insert(
				entry.block_hash(),
				ApprovalEntry::new(
					Vec::new(),
					backing_group,
					our_assignment.map(|v| v.into()),
					None,
					bitvec::bitvec![u8, BitOrderLsb0; 0; n_validators],
					false,
				),
			);

			store.write_candidate_entry(candidate_entry.clone());
			candidate_entries.push((*candidate_hash, candidate_entry));
		}
	};

	// Add the new block as a child of its parent, if the parent entry is known.
	if let Some(mut parent_entry) = store.load_block_entry(&parent_hash)? {
		parent_entry.children.push(entry.block_hash());
		store.write_block_entry(parent_entry);
	}

	// Put the new block entry in.
	store.write_block_entry(entry);

	Ok(candidate_entries)
}
/// Forcibly approve all candidates included at up to the given relay-chain height in the indicated
/// chain.
///
/// Walks backwards from `chain_head` via parent hashes. Blocks above `up_to`
/// are merely traversed; from the first block at or below `up_to`, every
/// remaining ancestor with a stored entry has its `approved_bitfield` fully
/// set. Returns the hashes of all blocks that were force-approved.
pub fn force_approve(
	store: &mut OverlayedBackend<'_, impl Backend>,
	chain_head: Hash,
	up_to: BlockNumber,
) -> SubsystemResult<Vec<Hash>> {
	// Two-phase walk: `WalkTo` until the height drops to `up_to`, then `Approving`.
	#[derive(PartialEq, Eq)]
	enum State {
		WalkTo,
		Approving,
	}

	let mut approved_hashes = Vec::new();

	let mut cur_hash = chain_head;
	let mut state = State::WalkTo;
	let mut cur_block_number: BlockNumber = 0;

	// iterate back to the `up_to` block, and then iterate backwards until all blocks
	// are updated.
	while let Some(mut entry) = store.load_block_entry(&cur_hash)? {
		cur_block_number = entry.block_number();
		if cur_block_number <= up_to {
			if state == State::WalkTo {
				gum::debug!(
					target: LOG_TARGET,
					block_hash = ?chain_head,
					?cur_hash,
					?cur_block_number,
					"Start forced approval from block",
				);
			}
			state = State::Approving;
		}

		// Advance to the parent before (possibly) consuming `entry` below.
		cur_hash = entry.parent_hash();

		match state {
			State::WalkTo => {},
			State::Approving => {
				// Set every bit: all candidates in this block count as approved.
				// (`mut b` binds the bit proxy so it can be written through.)
				entry.approved_bitfield.iter_mut().for_each(|mut b| *b = true);
				approved_hashes.push(entry.block_hash());
				store.write_block_entry(entry);
			},
		}
	}

	// We never reached a block at or below `up_to`: the walk hit a missing
	// block entry first.
	if state == State::WalkTo {
		gum::warn!(
			target: LOG_TARGET,
			?chain_head,
			?cur_hash,
			?cur_block_number,
			?up_to,
			"Missing block in the chain, cannot start force approval"
		);
	}

	Ok(approved_hashes)
}
/// Revert to the block corresponding to the specified `hash`.
/// The operation is not allowed for blocks older than the last finalized one.
///
/// Deletes every descendant of the revert point, strips those blocks from
/// their candidates' `block_assignments` (deleting candidate entries left
/// empty), and shrinks — or deletes — the stored block range accordingly.
pub fn revert_to(
	overlay: &mut OverlayedBackend<'_, impl Backend>,
	hash: Hash,
) -> SubsystemResult<()> {
	let mut stored_range = overlay.load_stored_blocks()?.ok_or_else(|| {
		SubsystemError::Context("no available blocks to infer revert point height".to_string())
	})?;

	let (children, children_height) = match overlay.load_block_entry(&hash)? {
		Some(mut entry) => {
			let children_height = entry.block_number() + 1;
			let children = std::mem::take(&mut entry.children);
			// Write revert point block entry without the children.
			overlay.write_block_entry(entry);
			(children, children_height)
		},
		None => {
			// The revert point has no stored entry; treat the first stored
			// height as the height of its children and validate via parentage.
			let children_height = stored_range.0;
			let children = overlay.load_blocks_at_height(&children_height)?;

			let child_entry = children
				.first()
				.and_then(|hash| overlay.load_block_entry(hash).ok())
				.flatten()
				.ok_or_else(|| {
					SubsystemError::Context("lookup failure for first block".to_string())
				})?;

			// The parent is expected to be the revert point
			if child_entry.parent_hash() != hash {
				return Err(SubsystemError::Context(
					"revert below last finalized block or corrupted storage".to_string(),
				));
			}

			(children, children_height)
		},
	};

	// Depth-first walk over everything below the revert point.
	let mut stack: Vec<_> = children.into_iter().map(|h| (h, children_height)).collect();

	// Candidate new exclusive upper bound for the stored block range.
	let mut range_end = stored_range.1;

	while let Some((hash, number)) = stack.pop() {
		let mut blocks_at_height = overlay.load_blocks_at_height(&number)?;
		blocks_at_height.retain(|h| h != &hash);

		// Check if we need to update the range top
		if blocks_at_height.is_empty() && number < range_end {
			range_end = number;
		}

		overlay.write_blocks_at_height(number, blocks_at_height);

		if let Some(entry) = overlay.load_block_entry(&hash)? {
			overlay.delete_block_entry(&hash);

			// Cleanup the candidate entries by removing any reference to the
			// removed block. If a candidate entry's `block_assignments` map
			// drops to zero entries then we remove the entry.
			for (_, candidate_hash) in entry.candidates() {
				if let Some(mut candidate_entry) = overlay.load_candidate_entry(candidate_hash)? {
					candidate_entry.block_assignments.remove(&hash);
					if candidate_entry.block_assignments.is_empty() {
						overlay.delete_candidate_entry(candidate_hash);
					} else {
						overlay.write_candidate_entry(candidate_entry);
					}
				}
			}

			stack.extend(entry.children.into_iter().map(|h| (h, number + 1)));
		}
	}

	// Check if our modifications to the dag have reduced the range top
	if range_end != stored_range.1 {
		if stored_range.0 < range_end {
			stored_range.1 = range_end;
			overlay.write_stored_block_range(stored_range);
		} else {
			// Everything at or above the range start was pruned away.
			overlay.delete_stored_block_range();
		}
	}

	Ok(())
}
@@ -0,0 +1,769 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Entries pertaining to approval which need to be persisted.
//!
//! The actual persisting of data is handled by the `approval_db` module.
//! Within that context, things are plain-old-data. Within this module,
//! data and logic are intertwined.
use itertools::Itertools;
use pezkuwi_node_primitives::approval::{
v1::{DelayTranche, RelayVRFStory},
v2::{AssignmentCertV2, CandidateBitfield},
};
use pezkuwi_primitives::{
BlockNumber, CandidateHash, CandidateIndex, CandidateReceiptV2 as CandidateReceipt, CoreIndex,
GroupIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice};
use std::collections::BTreeMap;
use crate::approval_db::v2::Bitfield;
use super::criteria::OurAssignment;
use pezkuwi_node_primitives::approval::time::Tick;
/// Metadata regarding a specific tranche of assignments for a specific candidate.
#[derive(Debug, Clone, PartialEq)]
pub struct TrancheEntry {
	// The delay tranche this entry corresponds to.
	tranche: DelayTranche,
	// Assigned validators, and the instant we received their assignment, rounded
	// to the nearest tick.
	assignments: Vec<(ValidatorIndex, Tick)>,
}
impl TrancheEntry {
	/// The delay tranche this entry corresponds to.
	pub fn tranche(&self) -> DelayTranche {
		self.tranche
	}

	/// The recorded `(validator, receipt-tick)` pairs for this tranche.
	pub fn assignments(&self) -> &[(ValidatorIndex, Tick)] {
		self.assignments.as_slice()
	}
}
impl From<crate::approval_db::v2::TrancheEntry> for TrancheEntry {
	fn from(entry: crate::approval_db::v2::TrancheEntry) -> Self {
		// Convert each stored tick into the in-memory `Tick` representation.
		let assignments = entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect();
		TrancheEntry { tranche: entry.tranche, assignments }
	}
}
impl From<TrancheEntry> for crate::approval_db::v2::TrancheEntry {
fn from(entry: TrancheEntry) -> Self {
Self {
tranche: entry.tranche,
assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(),
}
}
}
impl From<crate::approval_db::v3::OurApproval> for OurApproval {
fn from(approval: crate::approval_db::v3::OurApproval) -> Self {
Self {
signature: approval.signature,
signed_candidates_indices: approval.signed_candidates_indices,
}
}
}
impl From<OurApproval> for crate::approval_db::v3::OurApproval {
fn from(approval: OurApproval) -> Self {
Self {
signature: approval.signature,
signed_candidates_indices: approval.signed_candidates_indices,
}
}
}
/// Metadata about our own approval signature.
#[derive(Debug, Clone, PartialEq)]
pub struct OurApproval {
	/// The signature covering the hashes of the candidates pointed to by the indices.
	pub signature: ValidatorSignature,
	/// The indices of the candidates signed in this approval.
	pub signed_candidates_indices: CandidateBitfield,
}
impl OurApproval {
	/// Converts a `ValidatorSignature` to an `OurApproval` covering a single candidate.
	/// Used when converting the database from v1 to the latest version.
	pub fn from_v1(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self {
		Self { signature: value, signed_candidates_indices: candidate_index.into() }
	}

	/// Converts a `ValidatorSignature` to an `OurApproval` covering a single candidate.
	/// Used when converting the database from v2 to the latest version; the
	/// conversion is identical to the v1 one, so it simply delegates.
	pub fn from_v2(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self {
		Self::from_v1(value, candidate_index)
	}
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
#[derive(Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
	// Assignments received so far, grouped by delay tranche.
	tranches: Vec<TrancheEntry>,
	// The index of the group that backed the candidate.
	backing_group: GroupIndex,
	// Our own assignment for this candidate, if any.
	our_assignment: Option<OurAssignment>,
	// Our own approval signature, once produced.
	our_approval_sig: Option<OurApproval>,
	// `n_validators` bits; a bit is set once that validator's assignment is imported.
	assigned_validators: Bitfield,
	// Whether this approval entry has been marked approved.
	approved: bool,
}
impl ApprovalEntry {
	/// Convenience constructor building an entry from all of its parts.
	pub fn new(
		tranches: Vec<TrancheEntry>,
		backing_group: GroupIndex,
		our_assignment: Option<OurAssignment>,
		our_approval_sig: Option<OurApproval>,
		// `n_validators` bits.
		assigned_validators: Bitfield,
		approved: bool,
	) -> Self {
		Self {
			tranches,
			backing_group,
			our_assignment,
			our_approval_sig,
			assigned_validators,
			approved,
		}
	}

	/// Access our assignment for this approval entry.
	pub fn our_assignment(&self) -> Option<&OurAssignment> {
		self.our_assignment.as_ref()
	}

	/// Note that our assignment is triggered. No-op if already triggered.
	///
	/// On the first trigger this also imports our own assignment into the
	/// tranche data and returns the certificate, our validator index, and the
	/// tranche. Returns `None` if we have no assignment or it was already
	/// triggered before.
	pub fn trigger_our_assignment(
		&mut self,
		tick_now: Tick,
	) -> Option<(AssignmentCertV2, ValidatorIndex, DelayTranche)> {
		let our = self.our_assignment.as_mut().and_then(|a| {
			if a.triggered() {
				return None;
			}
			a.mark_triggered();

			Some(a.clone())
		});

		our.map(|a| {
			// `is_duplicate = false`: this is the first time our own assignment
			// is recorded here.
			self.import_assignment(a.tranche(), a.validator_index(), tick_now, false);

			(a.cert().clone(), a.validator_index(), a.tranche())
		})
	}

	/// Import our local approval vote signature for this candidate.
	pub fn import_approval_sig(&mut self, approval_sig: OurApproval) {
		self.our_approval_sig = Some(approval_sig);
	}

	/// Whether a validator is already assigned.
	/// Out-of-range validator indices yield `false`.
	pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool {
		self.assigned_validators
			.get(validator_index.0 as usize)
			.map(|b| *b)
			.unwrap_or(false)
	}

	/// Import an assignment. No-op if already assigned on the same tranche.
	///
	/// Tranches are kept sorted by tranche number; a new `TrancheEntry` is
	/// spliced in at the right position when this is the first assignment seen
	/// for its tranche.
	pub fn import_assignment(
		&mut self,
		tranche: DelayTranche,
		validator_index: ValidatorIndex,
		tick_now: Tick,
		is_duplicate: bool,
	) {
		// linear search probably faster than binary. not many tranches typically.
		let idx = match self.tranches.iter().position(|t| t.tranche >= tranche) {
			Some(pos) => {
				if self.tranches[pos].tranche > tranche {
					self.tranches.insert(pos, TrancheEntry { tranche, assignments: Vec::new() });
				}
				pos
			},
			None => {
				self.tranches.push(TrancheEntry { tranche, assignments: Vec::new() });
				self.tranches.len() - 1
			},
		};

		// At restart we might see duplicate assignments because approval-distribution is
		// not persistent across restarts. The caller already knows whether this validator
		// was seen before and passes that in as `is_duplicate`; since this function is on
		// the hot path, trusting the flag avoids scanning the tranche vector for an
		// existing entry.
		if !is_duplicate {
			self.tranches[idx].assignments.push((validator_index, tick_now));
		}
		self.assigned_validators.set(validator_index.0 as _, true);
	}

	/// Produce a bitvec indicating the assignments of all validators up to and
	/// including `tranche`.
	pub fn assignments_up_to(&self, tranche: DelayTranche) -> Bitfield {
		self.tranches.iter().take_while(|e| e.tranche <= tranche).fold(
			bitvec::bitvec![u8, BitOrderLsb0; 0; self.assigned_validators.len()],
			|mut a, e| {
				for &(v, _) in &e.assignments {
					a.set(v.0 as _, true);
				}

				a
			},
		)
	}

	/// Whether the approval entry is approved
	pub fn is_approved(&self) -> bool {
		self.approved
	}

	/// Mark the approval entry as approved.
	pub fn mark_approved(&mut self) {
		self.approved = true;
	}

	/// Access the tranches.
	pub fn tranches(&self) -> &[TrancheEntry] {
		&self.tranches
	}

	/// Get the number of validators in this approval entry.
	pub fn n_validators(&self) -> usize {
		self.assigned_validators.len()
	}

	/// Get the number of assignments by validators, including the local validator.
	pub fn n_assignments(&self) -> usize {
		self.assigned_validators.count_ones()
	}

	/// Get the backing group index of the approval entry.
	pub fn backing_group(&self) -> GroupIndex {
		self.backing_group
	}

	/// Get the assignment cert & approval signature.
	///
	/// The approval signature will only be `Some` if the assignment is too:
	/// an untriggered or missing assignment yields `(None, None)`.
	pub fn local_statements(&self) -> (Option<OurAssignment>, Option<OurApproval>) {
		let approval_sig = self.our_approval_sig.clone();
		if let Some(our_assignment) = self.our_assignment.as_ref().filter(|a| a.triggered()) {
			(Some(our_assignment.clone()), approval_sig)
		} else {
			(None, None)
		}
	}

	/// Convert an `ApprovalEntry` from the v1 version to the latest version.
	pub fn from_v1(
		value: crate::approval_db::v1::ApprovalEntry,
		candidate_index: CandidateIndex,
	) -> Self {
		ApprovalEntry {
			tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(),
			backing_group: value.backing_group,
			our_assignment: value.our_assignment.map(|assignment| assignment.into()),
			our_approval_sig: value
				.our_approval_sig
				.map(|sig| OurApproval::from_v1(sig, candidate_index)),
			assigned_validators: value.assignments,
			approved: value.approved,
		}
	}

	/// Convert an `ApprovalEntry` from the v2 version to the latest version.
	pub fn from_v2(
		value: crate::approval_db::v2::ApprovalEntry,
		candidate_index: CandidateIndex,
	) -> Self {
		ApprovalEntry {
			tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(),
			backing_group: value.backing_group,
			our_assignment: value.our_assignment.map(|assignment| assignment.into()),
			our_approval_sig: value
				.our_approval_sig
				.map(|sig| OurApproval::from_v2(sig, candidate_index)),
			assigned_validators: value.assigned_validators,
			approved: value.approved,
		}
	}
}
impl From<crate::approval_db::v3::ApprovalEntry> for ApprovalEntry {
	fn from(entry: crate::approval_db::v3::ApprovalEntry) -> Self {
		// Convert the nested collections first, then assemble the entry.
		let tranches = entry.tranches.into_iter().map(Into::into).collect();
		let our_assignment = entry.our_assignment.map(Into::into);
		let our_approval_sig = entry.our_approval_sig.map(Into::into);

		ApprovalEntry {
			tranches,
			backing_group: entry.backing_group,
			our_assignment,
			our_approval_sig,
			assigned_validators: entry.assigned_validators,
			approved: entry.approved,
		}
	}
}
impl From<ApprovalEntry> for crate::approval_db::v3::ApprovalEntry {
fn from(entry: ApprovalEntry) -> Self {
Self {
tranches: entry.tranches.into_iter().map(Into::into).collect(),
backing_group: entry.backing_group,
our_assignment: entry.our_assignment.map(Into::into),
our_approval_sig: entry.our_approval_sig.map(Into::into),
assigned_validators: entry.assigned_validators,
approved: entry.approved,
}
}
}
/// Metadata regarding approval of a particular candidate.
#[derive(Debug, Clone, PartialEq)]
pub struct CandidateEntry {
	/// The receipt of the candidate itself.
	pub candidate: CandidateReceipt,
	/// The session the candidate belongs to.
	pub session: SessionIndex,
	// Assignments are based on blocks, so we need to track assignments separately
	// based on the block we are looking at.
	pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
	/// One bit per validator; set when that validator has approved the candidate.
	pub approvals: Bitfield,
}
impl CandidateEntry {
	/// Access the bit-vec of approvals, one bit per validator.
	pub fn approvals(&self) -> &BitSlice<u8, BitOrderLsb0> {
		&self.approvals
	}

	/// Note that a given validator has approved. Return the previous approval state.
	///
	/// NOTE(review): `set` would panic on an out-of-range validator index —
	/// callers appear to guarantee the index is within `n_validators`; verify
	/// at the call sites.
	pub fn mark_approval(&mut self, validator: ValidatorIndex) -> bool {
		let prev = self.has_approved(validator);
		self.approvals.set(validator.0 as usize, true);
		prev
	}

	/// Query whether a given validator has approved the candidate.
	/// Out-of-range validator indices yield `false`.
	pub fn has_approved(&self, validator: ValidatorIndex) -> bool {
		self.approvals.get(validator.0 as usize).map(|b| *b).unwrap_or(false)
	}

	/// Get the candidate receipt.
	pub fn candidate_receipt(&self) -> &CandidateReceipt {
		&self.candidate
	}

	/// Get the approval entry, mutably, for this candidate under a specific block.
	pub fn approval_entry_mut(&mut self, block_hash: &Hash) -> Option<&mut ApprovalEntry> {
		self.block_assignments.get_mut(block_hash)
	}

	/// Get the approval entry for this candidate under a specific block.
	pub fn approval_entry(&self, block_hash: &Hash) -> Option<&ApprovalEntry> {
		self.block_assignments.get(block_hash)
	}

	/// Convert a `CandidateEntry` from v1 to its latest equivalent.
	///
	/// The `candidate_index` is forwarded into each approval entry conversion.
	pub fn from_v1(
		value: crate::approval_db::v1::CandidateEntry,
		candidate_index: CandidateIndex,
	) -> Self {
		Self {
			approvals: value.approvals,
			block_assignments: value
				.block_assignments
				.into_iter()
				.map(|(h, ae)| (h, ApprovalEntry::from_v1(ae, candidate_index)))
				.collect(),
			candidate: value.candidate,
			session: value.session,
		}
	}

	/// Convert a `CandidateEntry` from v2 to its latest equivalent.
	///
	/// The `candidate_index` is forwarded into each approval entry conversion.
	pub fn from_v2(
		value: crate::approval_db::v2::CandidateEntry,
		candidate_index: CandidateIndex,
	) -> Self {
		Self {
			approvals: value.approvals,
			block_assignments: value
				.block_assignments
				.into_iter()
				.map(|(h, ae)| (h, ApprovalEntry::from_v2(ae, candidate_index)))
				.collect(),
			candidate: value.candidate,
			session: value.session,
		}
	}
}
impl From<crate::approval_db::v3::CandidateEntry> for CandidateEntry {
	fn from(entry: crate::approval_db::v3::CandidateEntry) -> Self {
		// Convert every per-block approval entry into its in-memory form.
		let block_assignments =
			entry.block_assignments.into_iter().map(|(h, ae)| (h, ae.into())).collect();

		CandidateEntry {
			candidate: entry.candidate,
			session: entry.session,
			block_assignments,
			approvals: entry.approvals,
		}
	}
}
impl From<CandidateEntry> for crate::approval_db::v3::CandidateEntry {
fn from(entry: CandidateEntry) -> Self {
Self {
candidate: entry.candidate,
session: entry.session,
block_assignments: entry
.block_assignments
.into_iter()
.map(|(h, ae)| (h, ae.into()))
.collect(),
approvals: entry.approvals,
}
}
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
#[derive(Debug, Clone, PartialEq)]
pub struct BlockEntry {
	block_hash: Hash,
	parent_hash: Hash,
	block_number: BlockNumber,
	session: SessionIndex,
	slot: Slot,
	relay_vrf_story: RelayVRFStory,
	// The candidates included as-of this block and the index of the core they are
	// leaving.
	candidates: Vec<(CoreIndex, CandidateHash)>,
	// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
	// The i'th bit is `true` iff the candidate has been approved in the context of this
	// block. The block can be considered approved if the bitfield has all bits set to `true`.
	pub approved_bitfield: Bitfield,
	// The hashes of this block's children.
	pub children: Vec<Hash>,
	// Candidates we have checked but did not yet sign and advertise the vote for.
	candidates_pending_signature: BTreeMap<CandidateIndex, CandidateSigningContext>,
	// A list of assignments for which we already distributed the assignment.
	// We use this to ensure we don't distribute the same core assignment twice, as we
	// track individual wakeups for each core.
	distributed_assignments: Bitfield,
}
/// The information needed to later sign and issue an approval vote for a candidate.
#[derive(Debug, Clone, PartialEq)]
pub struct CandidateSigningContext {
	/// The hash of the candidate the deferred signature is for.
	pub candidate_hash: CandidateHash,
	/// The latest tick by which the signature should be produced.
	pub sign_no_later_than_tick: Tick,
}
impl BlockEntry {
/// Mark a candidate as fully approved in the bitfield.
pub fn mark_approved_by_hash(&mut self, candidate_hash: &CandidateHash) {
if let Some(p) = self.candidates.iter().position(|(_, h)| h == candidate_hash) {
self.approved_bitfield.set(p, true);
}
}
/// Whether a candidate is approved in the bitfield.
pub fn is_candidate_approved(&self, candidate_hash: &CandidateHash) -> bool {
self.candidates
.iter()
.position(|(_, h)| h == candidate_hash)
.and_then(|p| self.approved_bitfield.get(p).map(|b| *b))
.unwrap_or(false)
}
	/// Whether the block entry is fully approved, i.e. every bit in the
	/// approved bitfield is set.
	pub fn is_fully_approved(&self) -> bool {
		self.approved_bitfield.all()
	}
/// Iterate over all unapproved candidates.
pub fn unapproved_candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ {
self.approved_bitfield.iter().enumerate().filter_map(move |(i, a)| {
if !*a {
Some(self.candidates[i].1)
} else {
None
}
})
}
	/// Get the slot of the block.
	pub fn slot(&self) -> Slot {
		self.slot
	}

	/// Get the relay-vrf-story of the block (cloned).
	pub fn relay_vrf_story(&self) -> RelayVRFStory {
		self.relay_vrf_story.clone()
	}

	/// Get the session index of the block.
	pub fn session(&self) -> SessionIndex {
		self.session
	}

	/// Get the i'th candidate, together with the core it is leaving.
	pub fn candidate(&self, i: usize) -> Option<&(CoreIndex, CandidateHash)> {
		self.candidates.get(i)
	}

	/// Access the underlying candidates as a slice.
	pub fn candidates(&self) -> &[(CoreIndex, CandidateHash)] {
		&self.candidates
	}

	/// Access the block number of the block entry.
	pub fn block_number(&self) -> BlockNumber {
		self.block_number
	}

	/// Access the block hash of the block entry.
	pub fn block_hash(&self) -> Hash {
		self.block_hash
	}

	/// Access the parent hash of the block entry.
	pub fn parent_hash(&self) -> Hash {
		self.parent_hash
	}
/// Mark distributed assignment for many candidate indices.
/// Returns `true` if an assignment was already distributed for the `candidates`.
pub fn mark_assignment_distributed(&mut self, candidates: CandidateBitfield) -> bool {
let bitfield = candidates.into_inner();
let total_one_bits = self.distributed_assignments.count_ones();
let new_len = std::cmp::max(self.distributed_assignments.len(), bitfield.len());
self.distributed_assignments.resize(new_len, false);
self.distributed_assignments |= bitfield;
// If an operation did not change our current bitfield, we return true.
let distributed = total_one_bits == self.distributed_assignments.count_ones();
distributed
}
/// Defer signing and issuing an approval for a candidate no later than the specified tick
pub fn defer_candidate_signature(
&mut self,
candidate_index: CandidateIndex,
candidate_hash: CandidateHash,
sign_no_later_than_tick: Tick,
) -> Option<CandidateSigningContext> {
self.candidates_pending_signature.insert(
candidate_index,
CandidateSigningContext { candidate_hash, sign_no_later_than_tick },
)
}
/// Returns the number of candidates waiting for an approval to be issued.
pub fn num_candidates_pending_signature(&self) -> usize {
self.candidates_pending_signature.len()
}
/// Return if we have candidates waiting for signature to be issued
pub fn has_candidates_pending_signature(&self) -> bool {
!self.candidates_pending_signature.is_empty()
}
/// Returns true if candidate hash is in the queue for a signature.
pub fn candidate_is_pending_signature(&self, candidate_hash: CandidateHash) -> bool {
self.candidates_pending_signature
.values()
.any(|context| context.candidate_hash == candidate_hash)
}
/// Candidate hashes for candidates pending signatures
fn candidate_hashes_pending_signature(&self) -> Vec<CandidateHash> {
self.candidates_pending_signature
.values()
.map(|unsigned_approval| unsigned_approval.candidate_hash)
.collect()
}
/// Candidate indices for candidates pending signature
fn candidate_indices_pending_signature(&self) -> Option<CandidateBitfield> {
self.candidates_pending_signature
.keys()
.map(|val| *val)
.collect_vec()
.try_into()
.ok()
}
/// Returns a list of candidates hashes that need need signature created at the current tick:
/// This might happen in other of the two reasons:
/// 1. We queued more than max_approval_coalesce_count candidates.
/// 2. We have candidates that waiting in the queue past their `sign_no_later_than_tick`
///
/// Additionally, we also return the first tick when we will have to create a signature,
/// so that the caller can arm the timer if it is not already armed.
pub fn get_candidates_that_need_signature(
&self,
tick_now: Tick,
max_approval_coalesce_count: u32,
) -> (Option<(Vec<CandidateHash>, CandidateBitfield)>, Option<Tick>) {
let sign_no_later_than_tick = self
.candidates_pending_signature
.values()
.min_by(|a, b| a.sign_no_later_than_tick.cmp(&b.sign_no_later_than_tick))
.map(|val| val.sign_no_later_than_tick);
if let Some(sign_no_later_than_tick) = sign_no_later_than_tick {
if sign_no_later_than_tick <= tick_now ||
self.num_candidates_pending_signature() >= max_approval_coalesce_count as usize
{
(
self.candidate_indices_pending_signature().and_then(|candidate_indices| {
Some((self.candidate_hashes_pending_signature(), candidate_indices))
}),
Some(sign_no_later_than_tick),
)
} else {
// We can still wait for other candidates to queue in, so just make sure
// we wake up at the tick we have to sign the longest waiting candidate.
(Default::default(), Some(sign_no_later_than_tick))
}
} else {
// No cached candidates, nothing to do here, this just means the timer fired,
// but the signatures were already sent because we gathered more than
// max_approval_coalesce_count.
(Default::default(), sign_no_later_than_tick)
}
}
/// Clears the candidates pending signature because the approval was issued.
pub fn issued_approval(&mut self) {
self.candidates_pending_signature.clear();
}
}
impl From<crate::approval_db::v3::BlockEntry> for BlockEntry {
	fn from(value: crate::approval_db::v3::BlockEntry) -> Self {
		// v3 carries every field over, with each stored signing context
		// converted into its in-memory representation on load.
		let candidates_pending_signature = value
			.candidates_pending_signature
			.into_iter()
			.map(|(index, ctx)| (index, ctx.into()))
			.collect();
		Self {
			block_hash: value.block_hash,
			parent_hash: value.parent_hash,
			block_number: value.block_number,
			session: value.session,
			slot: value.slot,
			relay_vrf_story: RelayVRFStory(value.relay_vrf_story),
			candidates: value.candidates,
			approved_bitfield: value.approved_bitfield,
			children: value.children,
			candidates_pending_signature,
			distributed_assignments: value.distributed_assignments,
		}
	}
}
impl From<crate::approval_db::v1::BlockEntry> for BlockEntry {
	fn from(value: crate::approval_db::v1::BlockEntry) -> Self {
		// v1 entries predate both assignment-distribution tracking and the
		// pending-signature queue, so those two fields start out empty.
		Self {
			block_hash: value.block_hash,
			parent_hash: value.parent_hash,
			block_number: value.block_number,
			session: value.session,
			slot: value.slot,
			relay_vrf_story: RelayVRFStory(value.relay_vrf_story),
			candidates: value.candidates,
			approved_bitfield: value.approved_bitfield,
			children: value.children,
			candidates_pending_signature: Default::default(),
			distributed_assignments: Default::default(),
		}
	}
}
impl From<crate::approval_db::v2::BlockEntry> for BlockEntry {
	fn from(value: crate::approval_db::v2::BlockEntry) -> Self {
		// v2 already tracks distributed assignments but has no stored
		// pending-signature queue, so that field starts out empty.
		Self {
			block_hash: value.block_hash,
			parent_hash: value.parent_hash,
			block_number: value.block_number,
			session: value.session,
			slot: value.slot,
			relay_vrf_story: RelayVRFStory(value.relay_vrf_story),
			candidates: value.candidates,
			approved_bitfield: value.approved_bitfield,
			children: value.children,
			candidates_pending_signature: Default::default(),
			distributed_assignments: value.distributed_assignments,
		}
	}
}
impl From<BlockEntry> for crate::approval_db::v3::BlockEntry {
	fn from(value: BlockEntry) -> Self {
		// Convert the in-memory entry back into the v3 database schema,
		// serializing each pending signing context along the way.
		let candidates_pending_signature = value
			.candidates_pending_signature
			.into_iter()
			.map(|(index, ctx)| (index, ctx.into()))
			.collect();
		Self {
			block_hash: value.block_hash,
			parent_hash: value.parent_hash,
			block_number: value.block_number,
			session: value.session,
			slot: value.slot,
			relay_vrf_story: value.relay_vrf_story.0,
			candidates: value.candidates,
			approved_bitfield: value.approved_bitfield,
			children: value.children,
			candidates_pending_signature,
			distributed_assignments: value.distributed_assignments,
		}
	}
}
impl From<crate::approval_db::v3::CandidateSigningContext> for CandidateSigningContext {
	fn from(value: crate::approval_db::v3::CandidateSigningContext) -> Self {
		// The deadline tick is stored in a DB-specific representation;
		// convert it on load via `Into`.
		let sign_no_later_than_tick = value.sign_no_later_than_tick.into();
		Self { candidate_hash: value.candidate_hash, sign_no_later_than_tick }
	}
}
impl From<CandidateSigningContext> for crate::approval_db::v3::CandidateSigningContext {
fn from(signing_context: CandidateSigningContext) -> Self {
Self {
candidate_hash: signing_context.candidate_hash,
sign_no_later_than_tick: signing_context.sign_no_later_than_tick.into(),
}
}
}
File diff suppressed because it is too large Load Diff