Light GRANDPA import handler (#1669)

* GrandpaLightBlockImport

* extract authorities in AuraVerifier

* post-merge fix

* restore authorities cache

* license

* new finality proof draft

* generalized PendingJustifications

* finality proof messages

* fixed compilation

* pass verifier to import_finality_proof

* do not fetch remote proof from light import directly

* FinalityProofProvider

* fixed authorities cache test

* restored finality proof tests

* finality_proof docs

* use DB backend in test client

* justification_is_fetched_by_light_client_when_consensus_data_changes

* restore justification_is_fetched_by_light_client_when_consensus_data_changes

* some more tests

* added authorities-related TODO

* removed unneeded clear_finality_proof_requests field

* truncated some long lines

* more granular light import tests

* only provide finality proof if it is generated by the requested set

* post-merge fix

* finality_proof_is_none_if_first_justification_is_generated_by_unknown_set

* make light+grandpa test rely on finality proofs (instead of simple justifications)

* empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different

* missing trait method impl

* fixed proof-of-finality docs

* one more doc fix

* fix docs

* initialize authorities cache (post-merge fix)

* fixed cache initialization (post-merge fix)

* post-fix merge: fix light + GRANDPA tests (bad way)

* proper fix of empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different

* fixed easy grumbles

* import finality proofs in BlockImportWorker thread

* allow import of finality proofs for non-requested blocks

* limit number of fragments in finality proof

* GRANDPA post-merge fix

* BABE: post-merge fix
This commit is contained in:
Svyatoslav Nikolsky
2019-05-13 12:36:52 +03:00
committed by Gavin Wood
parent 258f0835e4
commit 22586113ea
36 changed files with 3320 additions and 803 deletions
+32 -16
View File
@@ -46,7 +46,7 @@ use log::warn;
use client::error::{Error as ClientError, Result as ClientResult};
use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
use crate::cache::{CacheItemT, ComplexBlockId};
use crate::cache::{CacheItemT, ComplexBlockId, EntryType};
use crate::cache::list_entry::{Entry, StorageEntry};
use crate::cache::list_storage::{Storage, StorageTransaction, Metadata};
@@ -174,10 +174,10 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
parent: ComplexBlockId<Block>,
block: ComplexBlockId<Block>,
value: Option<T>,
is_final: bool,
entry_type: EntryType,
) -> ClientResult<Option<CommitOperation<Block, T>>> {
// this guarantee is currently provided by LightStorage && we're relying on it here
debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash);
// we do not store any values behind finalized
if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
@@ -185,6 +185,7 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
}
// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
if !is_final {
let mut fork_and_action = None;
@@ -831,12 +832,27 @@ pub mod tests {
#[test]
fn list_on_block_insert_works() {
let nfin = EntryType::NonFinal;
let fin = EntryType::Final;
// when trying to insert block < finalized number
assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
.on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none());
.on_block_insert(
&mut DummyTransaction::new(),
test_id(49),
test_id(50),
Some(50),
nfin,
).unwrap().is_none());
// when trying to insert block @ finalized number
assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
.on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
.on_block_insert(
&mut DummyTransaction::new(),
test_id(99),
test_id(100),
Some(100),
nfin,
).unwrap().is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork's best block
@@ -848,7 +864,7 @@ pub mod tests {
);
cache.unfinalized[0].best_block = Some(test_id(4));
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin).unwrap(),
Some(CommitOperation::AppendNewBlock(0, test_id(5))));
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
@@ -856,7 +872,7 @@ pub mod tests {
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value differs from the value in the fork's best block
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(),
Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) })));
assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -872,7 +888,7 @@ pub mod tests {
1024, test_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(),
Some(CommitOperation::AppendNewBlock(0, correct_id(5))));
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
@@ -880,7 +896,7 @@ pub mod tests {
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
// AND new value differs from the value in the fork's best block
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(),
Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) })));
assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -898,7 +914,7 @@ pub mod tests {
1024, correct_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(),
Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) })));
assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -913,7 +929,7 @@ pub mod tests {
1024, correct_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None);
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), None);
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
@@ -926,7 +942,7 @@ pub mod tests {
1024, correct_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(),
Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) })));
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -935,7 +951,7 @@ pub mod tests {
// when inserting finalized entry AND there are no previous finalized entries
let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2));
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -948,14 +964,14 @@ pub mod tests {
1024, correct_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
assert!(tx.inserted_entries().is_empty());
assert!(tx.removed_entries().is_empty());
assert!(tx.updated_meta().is_none());
// when inserting finalized entry AND value differs from previous finalized
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
assert!(tx.removed_entries().is_empty());
@@ -970,7 +986,7 @@ pub mod tests {
1024, correct_id(2)
);
let mut tx = DummyTransaction::new();
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
}
+46 -6
View File
@@ -25,9 +25,9 @@ use client::blockchain::Cache as BlockchainCache;
use client::error::Result as ClientResult;
use parity_codec::{Encode, Decode};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As, Zero};
use consensus_common::well_known_cache_keys::Id as CacheKeyId;
use crate::utils::{self, COLUMN_META};
use crate::utils::{self, COLUMN_META, db_err};
use self::list_cache::ListCache;
@@ -38,6 +38,17 @@ mod list_storage;
/// Minimal post-finalization age of finalized blocks before they'll be pruned.
const PRUNE_DEPTH: u64 = 1024;
/// The type of entry that is inserted to the cache.
///
/// Controls how the cache treats the inserted value: non-final entries may
/// belong to (or start) an unfinalized fork, while final/genesis entries
/// advance the cache's best finalized block.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EntryType {
/// Non-final entry: the block is part of a possibly-forking unfinalized chain.
NonFinal,
/// Final entry: the block is finalized and extends the best finalized block.
Final,
/// Genesis entry (inserted during cache initialization).
Genesis,
}
/// Block identifier that holds both hash and number.
#[derive(Clone, Debug, Encode, Decode, PartialEq)]
pub struct ComplexBlockId<Block: BlockT> {
@@ -70,6 +81,7 @@ pub struct DbCache<Block: BlockT> {
key_lookup_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>,
genesis_hash: Block::Hash,
best_finalized_block: ComplexBlockId<Block>,
}
@@ -80,6 +92,7 @@ impl<Block: BlockT> DbCache<Block> {
key_lookup_column: Option<u32>,
header_column: Option<u32>,
authorities_column: Option<u32>,
genesis_hash: Block::Hash,
best_finalized_block: ComplexBlockId<Block>,
) -> Self {
Self {
@@ -88,10 +101,16 @@ impl<Block: BlockT> DbCache<Block> {
key_lookup_column,
header_column,
authorities_column,
genesis_hash,
best_finalized_block,
}
}
/// Set genesis block hash.
///
/// Called when the genesis block is inserted into storage, so that a later
/// cache `initialize` call can anchor its genesis entry at this hash.
pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) {
self.genesis_hash = genesis_hash;
}
/// Begin cache transaction.
pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> {
DbCacheTransaction {
@@ -182,7 +201,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
parent: ComplexBlockId<Block>,
block: ComplexBlockId<Block>,
data_at: HashMap<CacheKeyId, Vec<u8>>,
is_final: bool,
entry_type: EntryType,
) -> ClientResult<Self> {
assert!(self.cache_at_op.is_empty());
@@ -203,7 +222,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
parent.clone(),
block.clone(),
value.or(cache.value_at_block(&parent)?),
is_final,
entry_type,
)?;
if let Some(op) = op {
self.cache_at_op.insert(name, op);
@@ -214,8 +233,10 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?;
missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?;
if is_final {
self.best_finalized_block = Some(block);
match entry_type {
EntryType::Final | EntryType::Genesis =>
self.best_finalized_block = Some(block),
EntryType::NonFinal => (),
}
Ok(self)
@@ -254,6 +275,25 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
pub struct DbCacheSync<Block: BlockT>(pub RwLock<DbCache<Block>>);
impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
fn initialize(&self, key: &CacheKeyId, data: Vec<u8>) -> ClientResult<()> {
	// Store the initial cache value as a genesis-anchored entry: the value
	// becomes valid from block #0 (the genesis block) onwards.
	let mut cache = self.0.write();
	let genesis = cache.genesis_hash;
	let database = cache.db.clone();

	let mut db_transaction = DBTransaction::new();
	let ops = cache
		.transaction(&mut db_transaction)
		.on_block_insert(
			// the "parent" of genesis is a synthetic default id at number zero
			ComplexBlockId::new(Default::default(), Zero::zero()),
			ComplexBlockId::new(genesis, Zero::zero()),
			vec![(*key, data)].into_iter().collect(),
			EntryType::Genesis,
		)?
		.into_ops();

	// persist the DB transaction first, then apply the in-memory ops
	database.write(db_transaction).map_err(db_err)?;
	cache.commit(ops);
	Ok(())
}
fn get_at(&self, key: &CacheKeyId, at: &BlockId<Block>) -> Option<Vec<u8>> {
let cache = self.0.read();
let storage = cache.cache_at.get(key)?.storage();
+24 -2
View File
@@ -34,7 +34,7 @@ use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
Zero, One, As, NumberFor, Digest, DigestItem};
use consensus_common::well_known_cache_keys;
use crate::cache::{DbCacheSync, DbCache, ComplexBlockId};
use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType};
use crate::utils::{self, meta_keys, Meta, db_err, open_database,
read_db, block_id_to_lookup_key, read_meta};
use crate::DatabaseSettings;
@@ -91,6 +91,7 @@ impl<Block> LightStorage<Block>
columns::KEY_LOOKUP,
columns::HEADER,
columns::CACHE,
meta.genesis_hash,
ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
);
@@ -406,6 +407,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let is_genesis = number.is_zero();
if is_genesis {
self.cache.0.write().set_genesis_hash(hash);
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
}
@@ -434,7 +436,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }),
ComplexBlockId::new(hash, number),
cache_at,
finalized,
if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
)?
.into_ops();
@@ -1040,4 +1042,24 @@ pub(crate) mod tests {
// leaves at same height stay. Leaves at lower heights pruned.
assert_eq!(db.leaves.read().hashes(), vec![block2_a, block2_b, block2_c]);
}
#[test]
fn cache_can_be_initialized_after_genesis_inserted() {
	let storage = LightStorage::<Block>::new_test();
	let at_genesis = BlockId::Number(0);

	// the cache holds nothing until it has been explicitly initialized
	assert_eq!(storage.cache().get_at(b"test", &at_genesis), None);

	// inserting the genesis block alone (with no cache data) still yields nothing
	insert_block(&storage, HashMap::new(), || default_header(&Default::default(), 0));
	assert_eq!(storage.cache().get_at(b"test", &at_genesis), None);

	// after initialization the value is readable at the genesis block
	storage.cache().initialize(b"test", vec![42]).unwrap();
	assert_eq!(storage.cache().get_at(b"test", &at_genesis), Some(vec![42]));
}
}