mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-06 02:08:02 +00:00
Light GRANDPA import handler (#1669)
* GrandpaLightBlockImport * extract authorities in AuraVerifier * post-merge fix * restore authorities cache * license * new finality proof draft * generalized PendingJustifications * finality proof messages * fixed compilation * pass verifier to import_finality_proof * do not fetch remote proof from light import directly * FinalityProofProvider * fixed authorities cache test * restored finality proof tests * finality_proof docs * use DB backend in test client * justification_is_fetched_by_light_client_when_consensus_data_changes * restore justification_is_fetched_by_light_client_when_consensus_data_changes * some more tests * added authorities-related TODO * removed unneeded clear_finality_proof_requests field * truncated some long lines * more granular light import tests * only provide finality proof if it is generated by the requested set * post-merge fix * finality_proof_is_none_if_first_justification_is_generated_by_unknown_set * make light+grandpa test rely on finality proofs (instead of simple justifications) * empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different * missing trait method impl * fixed proof-of-finality docs * one more doc fix * fix docs * initialize authorities cache (post-merge fix) * fixed cache initialization (post-merge fix) * post-fix merge: fix light + GRANDPA tests (bad way) * proper fix of empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different * fixed easy grumbles * import finality proofs in BlockImportWorker thread * allow import of finality proofs for non-requested blocks * limit number of fragments in finality proof * GRANDPA post-merge fix * BABE: pos-merge fix
This commit is contained in:
committed by
Gavin Wood
parent
258f0835e4
commit
22586113ea
+32
-16
@@ -46,7 +46,7 @@ use log::warn;
|
||||
use client::error::{Error as ClientError, Result as ClientResult};
|
||||
use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
|
||||
|
||||
use crate::cache::{CacheItemT, ComplexBlockId};
|
||||
use crate::cache::{CacheItemT, ComplexBlockId, EntryType};
|
||||
use crate::cache::list_entry::{Entry, StorageEntry};
|
||||
use crate::cache::list_storage::{Storage, StorageTransaction, Metadata};
|
||||
|
||||
@@ -174,10 +174,10 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
|
||||
parent: ComplexBlockId<Block>,
|
||||
block: ComplexBlockId<Block>,
|
||||
value: Option<T>,
|
||||
is_final: bool,
|
||||
entry_type: EntryType,
|
||||
) -> ClientResult<Option<CommitOperation<Block, T>>> {
|
||||
// this guarantee is currently provided by LightStorage && we're relying on it here
|
||||
debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
|
||||
debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash);
|
||||
|
||||
// we do not store any values behind finalized
|
||||
if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
|
||||
@@ -185,6 +185,7 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
|
||||
}
|
||||
|
||||
// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
|
||||
let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
|
||||
if !is_final {
|
||||
let mut fork_and_action = None;
|
||||
|
||||
@@ -831,12 +832,27 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn list_on_block_insert_works() {
|
||||
let nfin = EntryType::NonFinal;
|
||||
let fin = EntryType::Final;
|
||||
|
||||
// when trying to insert block < finalized number
|
||||
assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
|
||||
.on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none());
|
||||
.on_block_insert(
|
||||
&mut DummyTransaction::new(),
|
||||
test_id(49),
|
||||
test_id(50),
|
||||
Some(50),
|
||||
nfin,
|
||||
).unwrap().is_none());
|
||||
// when trying to insert block @ finalized number
|
||||
assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
|
||||
.on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
|
||||
.on_block_insert(
|
||||
&mut DummyTransaction::new(),
|
||||
test_id(99),
|
||||
test_id(100),
|
||||
Some(100),
|
||||
nfin,
|
||||
).unwrap().is_none());
|
||||
|
||||
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
|
||||
// AND new value is the same as in the fork' best block
|
||||
@@ -848,7 +864,7 @@ pub mod tests {
|
||||
);
|
||||
cache.unfinalized[0].best_block = Some(test_id(4));
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin).unwrap(),
|
||||
Some(CommitOperation::AppendNewBlock(0, test_id(5))));
|
||||
assert!(tx.inserted_entries().is_empty());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -856,7 +872,7 @@ pub mod tests {
|
||||
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
|
||||
// AND new value is the same as in the fork' best block
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(),
|
||||
Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) })));
|
||||
assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -872,7 +888,7 @@ pub mod tests {
|
||||
1024, test_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(),
|
||||
Some(CommitOperation::AppendNewBlock(0, correct_id(5))));
|
||||
assert!(tx.inserted_entries().is_empty());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -880,7 +896,7 @@ pub mod tests {
|
||||
// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
|
||||
// AND new value is the same as in the fork' best block
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(),
|
||||
Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) })));
|
||||
assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -898,7 +914,7 @@ pub mod tests {
|
||||
1024, correct_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(),
|
||||
Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) })));
|
||||
assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -913,7 +929,7 @@ pub mod tests {
|
||||
1024, correct_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None);
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), None);
|
||||
assert!(tx.inserted_entries().is_empty());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
assert!(tx.updated_meta().is_none());
|
||||
@@ -926,7 +942,7 @@ pub mod tests {
|
||||
1024, correct_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(),
|
||||
Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) })));
|
||||
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -935,7 +951,7 @@ pub mod tests {
|
||||
// when inserting finalized entry AND there are no previous finalized entries
|
||||
let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2));
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
|
||||
Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
|
||||
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -948,14 +964,14 @@ pub mod tests {
|
||||
1024, correct_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
|
||||
Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
|
||||
assert!(tx.inserted_entries().is_empty());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
assert!(tx.updated_meta().is_none());
|
||||
// when inserting finalized entry AND value differs from previous finalized
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
|
||||
Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
|
||||
assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
|
||||
assert!(tx.removed_entries().is_empty());
|
||||
@@ -970,7 +986,7 @@ pub mod tests {
|
||||
1024, correct_id(2)
|
||||
);
|
||||
let mut tx = DummyTransaction::new();
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
|
||||
assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
|
||||
Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
|
||||
}
|
||||
|
||||
|
||||
+46
-6
@@ -25,9 +25,9 @@ use client::blockchain::Cache as BlockchainCache;
|
||||
use client::error::Result as ClientResult;
|
||||
use parity_codec::{Encode, Decode};
|
||||
use runtime_primitives::generic::BlockId;
|
||||
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As};
|
||||
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As, Zero};
|
||||
use consensus_common::well_known_cache_keys::Id as CacheKeyId;
|
||||
use crate::utils::{self, COLUMN_META};
|
||||
use crate::utils::{self, COLUMN_META, db_err};
|
||||
|
||||
use self::list_cache::ListCache;
|
||||
|
||||
@@ -38,6 +38,17 @@ mod list_storage;
|
||||
/// Minimal post-finalization age age of finalized blocks before they'll pruned.
|
||||
const PRUNE_DEPTH: u64 = 1024;
|
||||
|
||||
/// The type of entry that is inserted to the cache.
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub enum EntryType {
|
||||
/// Non-final entry.
|
||||
NonFinal,
|
||||
/// Final entry.
|
||||
Final,
|
||||
/// Genesis entry (inserted during cache initialization).
|
||||
Genesis,
|
||||
}
|
||||
|
||||
/// Block identifier that holds both hash and number.
|
||||
#[derive(Clone, Debug, Encode, Decode, PartialEq)]
|
||||
pub struct ComplexBlockId<Block: BlockT> {
|
||||
@@ -70,6 +81,7 @@ pub struct DbCache<Block: BlockT> {
|
||||
key_lookup_column: Option<u32>,
|
||||
header_column: Option<u32>,
|
||||
authorities_column: Option<u32>,
|
||||
genesis_hash: Block::Hash,
|
||||
best_finalized_block: ComplexBlockId<Block>,
|
||||
}
|
||||
|
||||
@@ -80,6 +92,7 @@ impl<Block: BlockT> DbCache<Block> {
|
||||
key_lookup_column: Option<u32>,
|
||||
header_column: Option<u32>,
|
||||
authorities_column: Option<u32>,
|
||||
genesis_hash: Block::Hash,
|
||||
best_finalized_block: ComplexBlockId<Block>,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -88,10 +101,16 @@ impl<Block: BlockT> DbCache<Block> {
|
||||
key_lookup_column,
|
||||
header_column,
|
||||
authorities_column,
|
||||
genesis_hash,
|
||||
best_finalized_block,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set genesis block hash.
|
||||
pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) {
|
||||
self.genesis_hash = genesis_hash;
|
||||
}
|
||||
|
||||
/// Begin cache transaction.
|
||||
pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> {
|
||||
DbCacheTransaction {
|
||||
@@ -182,7 +201,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
|
||||
parent: ComplexBlockId<Block>,
|
||||
block: ComplexBlockId<Block>,
|
||||
data_at: HashMap<CacheKeyId, Vec<u8>>,
|
||||
is_final: bool,
|
||||
entry_type: EntryType,
|
||||
) -> ClientResult<Self> {
|
||||
assert!(self.cache_at_op.is_empty());
|
||||
|
||||
@@ -203,7 +222,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
|
||||
parent.clone(),
|
||||
block.clone(),
|
||||
value.or(cache.value_at_block(&parent)?),
|
||||
is_final,
|
||||
entry_type,
|
||||
)?;
|
||||
if let Some(op) = op {
|
||||
self.cache_at_op.insert(name, op);
|
||||
@@ -214,8 +233,10 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
|
||||
data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?;
|
||||
missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?;
|
||||
|
||||
if is_final {
|
||||
self.best_finalized_block = Some(block);
|
||||
match entry_type {
|
||||
EntryType::Final | EntryType::Genesis =>
|
||||
self.best_finalized_block = Some(block),
|
||||
EntryType::NonFinal => (),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
@@ -254,6 +275,25 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
|
||||
pub struct DbCacheSync<Block: BlockT>(pub RwLock<DbCache<Block>>);
|
||||
|
||||
impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
|
||||
fn initialize(&self, key: &CacheKeyId, data: Vec<u8>) -> ClientResult<()> {
|
||||
let mut cache = self.0.write();
|
||||
let genesis_hash = cache.genesis_hash;
|
||||
let cache_contents = vec![(*key, data)].into_iter().collect();
|
||||
let db = cache.db.clone();
|
||||
let mut dbtx = DBTransaction::new();
|
||||
let tx = cache.transaction(&mut dbtx);
|
||||
let tx = tx.on_block_insert(
|
||||
ComplexBlockId::new(Default::default(), Zero::zero()),
|
||||
ComplexBlockId::new(genesis_hash, Zero::zero()),
|
||||
cache_contents,
|
||||
EntryType::Genesis,
|
||||
)?;
|
||||
let tx_ops = tx.into_ops();
|
||||
db.write(dbtx).map_err(db_err)?;
|
||||
cache.commit(tx_ops);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_at(&self, key: &CacheKeyId, at: &BlockId<Block>) -> Option<Vec<u8>> {
|
||||
let cache = self.0.read();
|
||||
let storage = cache.cache_at.get(key)?.storage();
|
||||
|
||||
@@ -34,7 +34,7 @@ use runtime_primitives::generic::BlockId;
|
||||
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
|
||||
Zero, One, As, NumberFor, Digest, DigestItem};
|
||||
use consensus_common::well_known_cache_keys;
|
||||
use crate::cache::{DbCacheSync, DbCache, ComplexBlockId};
|
||||
use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType};
|
||||
use crate::utils::{self, meta_keys, Meta, db_err, open_database,
|
||||
read_db, block_id_to_lookup_key, read_meta};
|
||||
use crate::DatabaseSettings;
|
||||
@@ -91,6 +91,7 @@ impl<Block> LightStorage<Block>
|
||||
columns::KEY_LOOKUP,
|
||||
columns::HEADER,
|
||||
columns::CACHE,
|
||||
meta.genesis_hash,
|
||||
ComplexBlockId::new(meta.finalized_hash, meta.finalized_number),
|
||||
);
|
||||
|
||||
@@ -406,6 +407,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
|
||||
|
||||
let is_genesis = number.is_zero();
|
||||
if is_genesis {
|
||||
self.cache.0.write().set_genesis_hash(hash);
|
||||
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
|
||||
}
|
||||
|
||||
@@ -434,7 +436,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
|
||||
ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }),
|
||||
ComplexBlockId::new(hash, number),
|
||||
cache_at,
|
||||
finalized,
|
||||
if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
|
||||
)?
|
||||
.into_ops();
|
||||
|
||||
@@ -1040,4 +1042,24 @@ pub(crate) mod tests {
|
||||
// leaves at same height stay. Leaves at lower heights pruned.
|
||||
assert_eq!(db.leaves.read().hashes(), vec![block2_a, block2_b, block2_c]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cache_can_be_initialized_after_genesis_inserted() {
|
||||
let db = LightStorage::<Block>::new_test();
|
||||
|
||||
// before cache is initialized => None
|
||||
assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None);
|
||||
|
||||
// insert genesis block (no value for cache is provided)
|
||||
insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0));
|
||||
|
||||
// after genesis is inserted => None
|
||||
assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None);
|
||||
|
||||
// initialize cache
|
||||
db.cache().initialize(b"test", vec![42]).unwrap();
|
||||
|
||||
// after genesis is inserted + cache is initialized => Some
|
||||
assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some(vec![42]));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,6 +100,11 @@ pub trait ProvideCache<Block: BlockT> {
|
||||
|
||||
/// Blockchain optional data cache.
|
||||
pub trait Cache<Block: BlockT>: Send + Sync {
|
||||
/// Initialize genesis value for the given cache.
|
||||
///
|
||||
/// The operation should be performed once before anything else is inserted in the cache.
|
||||
/// Otherwise cache may end up in inconsistent state.
|
||||
fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec<u8>) -> Result<()>;
|
||||
/// Returns cached value by the given key.
|
||||
fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId<Block>) -> Option<Vec<u8>>;
|
||||
}
|
||||
|
||||
@@ -55,7 +55,10 @@ pub enum Error {
|
||||
/// Genesis config is invalid.
|
||||
#[display(fmt = "Genesis config provided is invalid")]
|
||||
GenesisInvalid,
|
||||
/// Bad justification for header.
|
||||
/// Error decoding header justification.
|
||||
#[display(fmt = "error decoding justification for header")]
|
||||
JustificationDecode,
|
||||
/// Justification for header is correctly encoded, but invalid.
|
||||
#[display(fmt = "bad justification for header: {}", _0)]
|
||||
BadJustification(String),
|
||||
/// Not available on light client.
|
||||
|
||||
@@ -64,7 +64,7 @@ pub use crate::client::{
|
||||
#[cfg(feature = "std")]
|
||||
pub use crate::notifications::{StorageEventStream, StorageChangeSet};
|
||||
#[cfg(feature = "std")]
|
||||
pub use state_machine::ExecutionStrategy;
|
||||
pub use state_machine::{ExecutionStrategy, NeverOffchainExt};
|
||||
#[cfg(feature = "std")]
|
||||
pub use crate::leaves::LeafSet;
|
||||
|
||||
|
||||
@@ -33,7 +33,10 @@ use consensus_common::{self, Authorities, BlockImport, Environment, Proposer,
|
||||
ForkChoiceStrategy, ImportBlock, BlockOrigin, Error as ConsensusError,
|
||||
SelectChain, well_known_cache_keys
|
||||
};
|
||||
use consensus_common::import_queue::{Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport};
|
||||
use consensus_common::import_queue::{
|
||||
Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport,
|
||||
SharedFinalityProofRequestBuilder,
|
||||
};
|
||||
use client::{
|
||||
block_builder::api::BlockBuilder as BlockBuilderApi,
|
||||
blockchain::ProvideCache,
|
||||
@@ -44,7 +47,7 @@ use client::{
|
||||
use aura_primitives::AURA_ENGINE_ID;
|
||||
use runtime_primitives::{generic, generic::BlockId, Justification};
|
||||
use runtime_primitives::traits::{
|
||||
Block, Header, Digest, DigestItemFor, DigestItem, ProvideRuntimeApi, AuthorityIdFor,
|
||||
Block, Header, Digest, DigestItemFor, DigestItem, ProvideRuntimeApi, AuthorityIdFor, Zero,
|
||||
};
|
||||
use primitives::Pair;
|
||||
use inherents::{InherentDataProviders, InherentData, RuntimeString};
|
||||
@@ -653,6 +656,10 @@ impl<B: Block, C, E, P> Verifier<B> for AuraVerifier<C, E, P> where
|
||||
|
||||
extra_verification.into_future().wait()?;
|
||||
|
||||
let new_authorities = pre_header.digest()
|
||||
.log(DigestItem::as_authorities_change)
|
||||
.map(|digest| digest.to_vec());
|
||||
|
||||
let import_block = ImportBlock {
|
||||
origin,
|
||||
header: pre_header,
|
||||
@@ -664,8 +671,7 @@ impl<B: Block, C, E, P> Verifier<B> for AuraVerifier<C, E, P> where
|
||||
fork_choice: ForkChoiceStrategy::LongestChain,
|
||||
};
|
||||
|
||||
// FIXME #1019 extract authorities
|
||||
Ok((import_block, None))
|
||||
Ok((import_block, new_authorities))
|
||||
}
|
||||
CheckedHeader::Deferred(a, b) => {
|
||||
debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b);
|
||||
@@ -690,6 +696,38 @@ impl<B, C, E, P> Authorities<B> for AuraVerifier<C, E, P> where
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_authorities_cache<B, C>(client: &C) -> Result<(), ConsensusError> where
|
||||
B: Block,
|
||||
C: ProvideRuntimeApi + ProvideCache<B>,
|
||||
C::Api: AuthoritiesApi<B>,
|
||||
{
|
||||
// no cache => no initialization
|
||||
let cache = match client.cache() {
|
||||
Some(cache) => cache,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// check if we already have initialized the cache
|
||||
let genesis_id = BlockId::Number(Zero::zero());
|
||||
let genesis_authorities: Option<Vec<AuthorityIdFor<B>>> = cache
|
||||
.get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id)
|
||||
.and_then(|v| Decode::decode(&mut &v[..]));
|
||||
if genesis_authorities.is_some() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let map_err = |error| consensus_common::Error::from(consensus_common::ErrorKind::ClientImport(
|
||||
format!(
|
||||
"Error initializing authorities cache: {}",
|
||||
error,
|
||||
)));
|
||||
let genesis_authorities = authorities(client, &genesis_id)?;
|
||||
cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode())
|
||||
.map_err(map_err)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
fn authorities<B, C>(client: &C, at: &BlockId<B>) -> Result<Vec<AuthorityIdFor<B>>, ConsensusError> where
|
||||
B: Block,
|
||||
@@ -731,6 +769,8 @@ pub fn import_queue<B, C, E, P>(
|
||||
slot_duration: SlotDuration,
|
||||
block_import: SharedBlockImport<B>,
|
||||
justification_import: Option<SharedJustificationImport<B>>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
|
||||
client: Arc<C>,
|
||||
extra: E,
|
||||
inherent_data_providers: InherentDataProviders,
|
||||
@@ -745,6 +785,7 @@ pub fn import_queue<B, C, E, P>(
|
||||
P::Signature: Encode + Decode,
|
||||
{
|
||||
register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?;
|
||||
initialize_authorities_cache(&*client)?;
|
||||
|
||||
let verifier = Arc::new(
|
||||
AuraVerifier {
|
||||
@@ -755,7 +796,13 @@ pub fn import_queue<B, C, E, P>(
|
||||
allow_old_seals: false,
|
||||
}
|
||||
);
|
||||
Ok(BasicQueue::new(verifier, block_import, justification_import))
|
||||
Ok(BasicQueue::new(
|
||||
verifier,
|
||||
block_import,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
))
|
||||
}
|
||||
|
||||
/// Start an import queue for the Aura consensus algorithm with backwards compatibility.
|
||||
@@ -767,6 +814,8 @@ pub fn import_queue_accept_old_seals<B, C, E, P>(
|
||||
slot_duration: SlotDuration,
|
||||
block_import: SharedBlockImport<B>,
|
||||
justification_import: Option<SharedJustificationImport<B>>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
|
||||
client: Arc<C>,
|
||||
extra: E,
|
||||
inherent_data_providers: InherentDataProviders,
|
||||
@@ -781,6 +830,7 @@ pub fn import_queue_accept_old_seals<B, C, E, P>(
|
||||
P::Signature: Encode + Decode,
|
||||
{
|
||||
register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?;
|
||||
initialize_authorities_cache(&*client)?;
|
||||
|
||||
let verifier = Arc::new(
|
||||
AuraVerifier {
|
||||
@@ -791,7 +841,13 @@ pub fn import_queue_accept_old_seals<B, C, E, P>(
|
||||
allow_old_seals: true,
|
||||
}
|
||||
);
|
||||
Ok(BasicQueue::new(verifier, block_import, justification_import))
|
||||
Ok(BasicQueue::new(
|
||||
verifier,
|
||||
block_import,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -799,7 +855,7 @@ mod tests {
|
||||
use super::*;
|
||||
use consensus_common::NoNetwork as DummyOracle;
|
||||
use network::test::*;
|
||||
use network::test::{Block as TestBlock, PeersClient};
|
||||
use network::test::{Block as TestBlock, PeersClient, PeersFullClient};
|
||||
use runtime_primitives::traits::Block as BlockT;
|
||||
use network::config::ProtocolConfig;
|
||||
use parking_lot::Mutex;
|
||||
@@ -846,7 +902,7 @@ mod tests {
|
||||
|
||||
impl TestNetFactory for AuraTestNet {
|
||||
type Specialization = DummySpecialization;
|
||||
type Verifier = AuraVerifier<PeersClient, NothingExtra, sr25519::Pair>;
|
||||
type Verifier = AuraVerifier<PeersFullClient, NothingExtra, sr25519::Pair>;
|
||||
type PeerData = ();
|
||||
|
||||
/// Create new test network with peers and given config.
|
||||
@@ -857,25 +913,30 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_verifier(&self, client: Arc<PeersClient>, _cfg: &ProtocolConfig)
|
||||
fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig)
|
||||
-> Arc<Self::Verifier>
|
||||
{
|
||||
let slot_duration = SlotDuration::get_or_compute(&*client)
|
||||
.expect("slot duration available");
|
||||
let inherent_data_providers = InherentDataProviders::new();
|
||||
register_aura_inherent_data_provider(
|
||||
&inherent_data_providers,
|
||||
slot_duration.get()
|
||||
).expect("Registers aura inherent data provider");
|
||||
match client {
|
||||
PeersClient::Full(client) => {
|
||||
let slot_duration = SlotDuration::get_or_compute(&*client)
|
||||
.expect("slot duration available");
|
||||
let inherent_data_providers = InherentDataProviders::new();
|
||||
register_aura_inherent_data_provider(
|
||||
&inherent_data_providers,
|
||||
slot_duration.get()
|
||||
).expect("Registers aura inherent data provider");
|
||||
|
||||
assert_eq!(slot_duration.get(), SLOT_DURATION);
|
||||
Arc::new(AuraVerifier {
|
||||
client,
|
||||
extra: NothingExtra,
|
||||
inherent_data_providers,
|
||||
phantom: Default::default(),
|
||||
allow_old_seals: false,
|
||||
})
|
||||
assert_eq!(slot_duration.get(), SLOT_DURATION);
|
||||
Arc::new(AuraVerifier {
|
||||
client,
|
||||
extra: NothingExtra,
|
||||
inherent_data_providers,
|
||||
phantom: Default::default(),
|
||||
allow_old_seals: false,
|
||||
})
|
||||
},
|
||||
PeersClient::Light(_) => unreachable!("No (yet) tests for light client + Aura"),
|
||||
}
|
||||
}
|
||||
|
||||
fn peer(&self, i: usize) -> &Peer<Self::PeerData, DummySpecialization> {
|
||||
@@ -917,7 +978,7 @@ mod tests {
|
||||
|
||||
let mut runtime = current_thread::Runtime::new().unwrap();
|
||||
for (peer_id, key) in peers {
|
||||
let client = net.lock().peer(*peer_id).client().clone();
|
||||
let client = net.lock().peer(*peer_id).client().as_full().expect("full clients are created").clone();
|
||||
let select_chain = LongestChain::new(
|
||||
client.backend().clone(),
|
||||
client.import_lock().clone(),
|
||||
|
||||
@@ -914,7 +914,7 @@ mod tests {
|
||||
|
||||
impl TestNetFactory for BabeTestNet {
|
||||
type Specialization = DummySpecialization;
|
||||
type Verifier = BabeVerifier<PeersClient, NothingExtra>;
|
||||
type Verifier = BabeVerifier<PeersFullClient, NothingExtra>;
|
||||
type PeerData = ();
|
||||
|
||||
/// Create new test network with peers and given config.
|
||||
@@ -926,9 +926,10 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_verifier(&self, client: Arc<PeersClient>, _cfg: &ProtocolConfig)
|
||||
fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig)
|
||||
-> Arc<Self::Verifier>
|
||||
{
|
||||
let client = client.as_full().expect("only full clients are used in test");
|
||||
trace!(target: "babe", "Creating a verifier");
|
||||
let config = Config::get_or_compute(&*client)
|
||||
.expect("slot duration available");
|
||||
@@ -1001,7 +1002,7 @@ mod tests {
|
||||
debug!(target: "babe", "checkpoint 4");
|
||||
let mut runtime = current_thread::Runtime::new().unwrap();
|
||||
for (peer_id, key) in peers {
|
||||
let client = net.lock().peer(*peer_id).client().clone();
|
||||
let client = net.lock().peer(*peer_id).client().as_full().unwrap();
|
||||
let environ = Arc::new(DummyFactory(client.clone()));
|
||||
import_notifications.push(
|
||||
client.import_notification_stream()
|
||||
|
||||
@@ -22,6 +22,8 @@ use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use crate::well_known_cache_keys;
|
||||
|
||||
use crate::import_queue::Verifier;
|
||||
|
||||
/// Block import result.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ImportResult {
|
||||
@@ -44,6 +46,8 @@ pub struct ImportedAux {
|
||||
pub needs_justification: bool,
|
||||
/// Received a bad justification.
|
||||
pub bad_justification: bool,
|
||||
/// Request a finality proof for the given block.
|
||||
pub needs_finality_proof: bool,
|
||||
}
|
||||
|
||||
impl Default for ImportedAux {
|
||||
@@ -52,6 +56,7 @@ impl Default for ImportedAux {
|
||||
clear_justification_requests: false,
|
||||
needs_justification: false,
|
||||
bad_justification: false,
|
||||
needs_finality_proof: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -202,3 +207,26 @@ pub trait JustificationImport<B: BlockT> {
|
||||
justification: Justification,
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
/// Finality proof import trait.
|
||||
pub trait FinalityProofImport<B: BlockT> {
|
||||
type Error: ::std::error::Error + Send + 'static;
|
||||
|
||||
/// Called by the import queue when it is started.
|
||||
fn on_start(&self, _link: &crate::import_queue::Link<B>) { }
|
||||
|
||||
/// Import a Block justification and finalize the given block. Returns finalized block or error.
|
||||
fn import_finality_proof(
|
||||
&self,
|
||||
hash: B::Hash,
|
||||
number: NumberFor<B>,
|
||||
finality_proof: Vec<u8>,
|
||||
verifier: &Verifier<B>,
|
||||
) -> Result<(B::Hash, NumberFor<B>), Self::Error>;
|
||||
}
|
||||
|
||||
/// Finality proof request builder.
|
||||
pub trait FinalityProofRequestBuilder<B: BlockT>: Send {
|
||||
/// Build data blob, associated with the request.
|
||||
fn build_request_data(&self, hash: &B::Hash) -> Vec<u8>;
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
|
||||
use crate::block_import::{
|
||||
BlockImport, BlockOrigin, ImportBlock, ImportedAux, ImportResult, JustificationImport,
|
||||
FinalityProofImport, FinalityProofRequestBuilder,
|
||||
};
|
||||
use crossbeam_channel::{self as channel, Receiver, Sender};
|
||||
use parity_codec::Encode;
|
||||
@@ -57,6 +58,12 @@ pub type SharedBlockImport<B> = Arc<dyn BlockImport<B, Error = ConsensusError> +
|
||||
/// Shared justification import struct used by the queue.
|
||||
pub type SharedJustificationImport<B> = Arc<dyn JustificationImport<B, Error=ConsensusError> + Send + Sync>;
|
||||
|
||||
/// Shared finality proof import struct used by the queue.
|
||||
pub type SharedFinalityProofImport<B> = Arc<dyn FinalityProofImport<B, Error=ConsensusError> + Send + Sync>;
|
||||
|
||||
/// Shared finality proof request builder struct used by the queue.
|
||||
pub type SharedFinalityProofRequestBuilder<B> = Arc<dyn FinalityProofRequestBuilder<B> + Send + Sync>;
|
||||
|
||||
/// Maps to the Origin used by the network.
|
||||
pub type Origin = libp2p::PeerId;
|
||||
|
||||
@@ -76,7 +83,7 @@ pub struct IncomingBlock<B: BlockT> {
|
||||
}
|
||||
|
||||
/// Verify a justification of a block
|
||||
pub trait Verifier<B: BlockT>: Send + Sync + Sized {
|
||||
pub trait Verifier<B: BlockT>: Send + Sync {
|
||||
/// Verify the given data and return the ImportBlock and an optional
|
||||
/// new set of validators to import. If not, err with an Error-Message
|
||||
/// presented to the User in the logs.
|
||||
@@ -104,6 +111,8 @@ pub trait ImportQueue<B: BlockT>: Send + Sync + ImportQueueClone<B> {
|
||||
fn import_blocks(&self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>);
|
||||
/// Import a block justification.
|
||||
fn import_justification(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, justification: Justification);
|
||||
/// Import block finality proof.
|
||||
fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, finality_proof: Vec<u8>);
|
||||
}
|
||||
|
||||
pub trait ImportQueueClone<B: BlockT> {
|
||||
@@ -129,6 +138,7 @@ impl<B: BlockT> ImportQueueClone<B> for BasicQueue<B> {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// "BasicQueue" is a wrapper around a channel sender to the "BlockImporter".
|
||||
/// "BasicQueue" itself does not keep any state or do any importing work, and
|
||||
/// can therefore be send to other threads.
|
||||
@@ -153,11 +163,25 @@ impl<B: BlockT> BasicQueue<B> {
|
||||
pub fn new<V: 'static + Verifier<B>>(
|
||||
verifier: Arc<V>,
|
||||
block_import: SharedBlockImport<B>,
|
||||
justification_import: Option<SharedJustificationImport<B>>
|
||||
justification_import: Option<SharedJustificationImport<B>>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
|
||||
) -> Self {
|
||||
let (result_sender, result_port) = channel::unbounded();
|
||||
let worker_sender = BlockImportWorker::new(result_sender, verifier, block_import);
|
||||
let importer_sender = BlockImporter::new(result_port, worker_sender, justification_import);
|
||||
let worker_sender = BlockImportWorker::new(
|
||||
result_sender,
|
||||
verifier.clone(),
|
||||
block_import,
|
||||
finality_proof_import.clone(),
|
||||
);
|
||||
let importer_sender = BlockImporter::new(
|
||||
result_port,
|
||||
worker_sender,
|
||||
verifier,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
);
|
||||
|
||||
Self {
|
||||
sender: importer_sender,
|
||||
@@ -210,25 +234,36 @@ impl<B: BlockT> ImportQueue<B> for BasicQueue<B> {
|
||||
.send(BlockImportMsg::ImportJustification(who.clone(), hash, number, justification))
|
||||
.expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed");
|
||||
}
|
||||
|
||||
fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, finality_proof: Vec<u8>) {
|
||||
let _ = self
|
||||
.sender
|
||||
.send(BlockImportMsg::ImportFinalityProof(who, hash, number, finality_proof))
|
||||
.expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed");
|
||||
}
|
||||
}
|
||||
|
||||
pub enum BlockImportMsg<B: BlockT> {
|
||||
ImportBlocks(BlockOrigin, Vec<IncomingBlock<B>>),
|
||||
ImportJustification(Origin, B::Hash, NumberFor<B>, Justification),
|
||||
ImportFinalityProof(Origin, B::Hash, NumberFor<B>, Vec<u8>),
|
||||
Start(Box<Link<B>>, Sender<Result<(), std::io::Error>>),
|
||||
Stop,
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
Synchronize,
|
||||
}
|
||||
|
||||
#[cfg_attr(test, derive(Debug, PartialEq))]
|
||||
pub enum BlockImportWorkerMsg<B: BlockT> {
|
||||
ImportBlocks(BlockOrigin, Vec<IncomingBlock<B>>),
|
||||
Imported(
|
||||
ImportedBlocks(
|
||||
Vec<(
|
||||
Result<BlockImportResult<NumberFor<B>>, BlockImportError>,
|
||||
B::Hash,
|
||||
)>,
|
||||
),
|
||||
ImportFinalityProof(Origin, B::Hash, NumberFor<B>, Vec<u8>),
|
||||
ImportedFinalityProof(Origin, (B::Hash, NumberFor<B>), Result<(B::Hash, NumberFor<B>), ()>),
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
Synchronize,
|
||||
}
|
||||
@@ -243,14 +278,20 @@ struct BlockImporter<B: BlockT> {
|
||||
result_port: Receiver<BlockImportWorkerMsg<B>>,
|
||||
worker_sender: Sender<BlockImportWorkerMsg<B>>,
|
||||
link: Option<Box<dyn Link<B>>>,
|
||||
verifier: Arc<Verifier<B>>,
|
||||
justification_import: Option<SharedJustificationImport<B>>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
|
||||
}
|
||||
|
||||
impl<B: BlockT> BlockImporter<B> {
|
||||
fn new(
|
||||
result_port: Receiver<BlockImportWorkerMsg<B>>,
|
||||
worker_sender: Sender<BlockImportWorkerMsg<B>>,
|
||||
verifier: Arc<Verifier<B>>,
|
||||
justification_import: Option<SharedJustificationImport<B>>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
finality_proof_request_builder: Option<SharedFinalityProofRequestBuilder<B>>,
|
||||
) -> Sender<BlockImportMsg<B>> {
|
||||
trace!(target: "block_import", "Creating new Block Importer!");
|
||||
let (sender, port) = channel::bounded(4);
|
||||
@@ -262,7 +303,10 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
result_port,
|
||||
worker_sender,
|
||||
link: None,
|
||||
verifier,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
};
|
||||
while importer.run() {
|
||||
// Importing until all senders have been dropped...
|
||||
@@ -303,10 +347,19 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
BlockImportMsg::ImportJustification(who, hash, number, justification) => {
|
||||
self.handle_import_justification(who, hash, number, justification)
|
||||
},
|
||||
BlockImportMsg::ImportFinalityProof(who, hash, number, finality_proof) => {
|
||||
self.handle_import_finality_proof(who, hash, number, finality_proof)
|
||||
},
|
||||
BlockImportMsg::Start(link, sender) => {
|
||||
if let Some(finality_proof_request_builder) = self.finality_proof_request_builder.take() {
|
||||
link.set_finality_proof_request_builder(finality_proof_request_builder);
|
||||
}
|
||||
if let Some(justification_import) = self.justification_import.as_ref() {
|
||||
justification_import.on_start(&*link);
|
||||
}
|
||||
if let Some(finality_proof_import) = self.finality_proof_import.as_ref() {
|
||||
finality_proof_import.on_start(&*link);
|
||||
}
|
||||
self.link = Some(link);
|
||||
let _ = sender.send(Ok(()));
|
||||
},
|
||||
@@ -332,14 +385,20 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
};
|
||||
|
||||
let results = match msg {
|
||||
BlockImportWorkerMsg::Imported(results) => (results),
|
||||
BlockImportWorkerMsg::ImportedBlocks(results) => (results),
|
||||
BlockImportWorkerMsg::ImportedFinalityProof(who, request_block, finalization_result) => {
|
||||
link.finality_proof_imported(who, request_block, finalization_result);
|
||||
return true;
|
||||
},
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
BlockImportWorkerMsg::Synchronize => {
|
||||
trace!(target: "sync", "Synchronizing link");
|
||||
link.synchronized();
|
||||
return true;
|
||||
},
|
||||
_ => unreachable!("Import Worker does not send ImportBlocks message; qed"),
|
||||
BlockImportWorkerMsg::ImportBlocks(_, _)
|
||||
| BlockImportWorkerMsg::ImportFinalityProof(_, _, _, _)
|
||||
=> unreachable!("Import Worker does not send Import* message; qed"),
|
||||
};
|
||||
let mut has_error = false;
|
||||
let mut hashes = vec![];
|
||||
@@ -375,6 +434,11 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
link.report_peer(peer, BAD_JUSTIFICATION_REPUTATION_CHANGE);
|
||||
}
|
||||
}
|
||||
|
||||
if aux.needs_finality_proof {
|
||||
trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash);
|
||||
link.request_finality_proof(&hash, number);
|
||||
}
|
||||
},
|
||||
Err(BlockImportError::IncompleteHeader(who)) => {
|
||||
if let Some(peer) = who {
|
||||
@@ -422,6 +486,13 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, finality_proof: Vec<u8>) {
|
||||
trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash);
|
||||
self.worker_sender
|
||||
.send(BlockImportWorkerMsg::ImportFinalityProof(who, hash, number, finality_proof))
|
||||
.expect("1. This is holding a sender to the worker, 2. the worker should not quit while a sender is still held; qed");
|
||||
}
|
||||
|
||||
fn handle_import_blocks(&mut self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>) {
|
||||
trace!(target: "sync", "Scheduling {} blocks for import", blocks.len());
|
||||
self.worker_sender
|
||||
@@ -433,6 +504,7 @@ impl<B: BlockT> BlockImporter<B> {
|
||||
struct BlockImportWorker<B: BlockT, V: Verifier<B>> {
|
||||
result_sender: Sender<BlockImportWorkerMsg<B>>,
|
||||
block_import: SharedBlockImport<B>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
verifier: Arc<V>,
|
||||
}
|
||||
|
||||
@@ -441,6 +513,7 @@ impl<B: BlockT, V: 'static + Verifier<B>> BlockImportWorker<B, V> {
|
||||
result_sender: Sender<BlockImportWorkerMsg<B>>,
|
||||
verifier: Arc<V>,
|
||||
block_import: SharedBlockImport<B>,
|
||||
finality_proof_import: Option<SharedFinalityProofImport<B>>,
|
||||
) -> Sender<BlockImportWorkerMsg<B>> {
|
||||
let (sender, port) = channel::bounded(4);
|
||||
let _ = thread::Builder::new()
|
||||
@@ -450,6 +523,7 @@ impl<B: BlockT, V: 'static + Verifier<B>> BlockImportWorker<B, V> {
|
||||
result_sender,
|
||||
verifier,
|
||||
block_import,
|
||||
finality_proof_import,
|
||||
};
|
||||
for msg in port.iter() {
|
||||
// Working until all senders have been dropped...
|
||||
@@ -457,12 +531,17 @@ impl<B: BlockT, V: 'static + Verifier<B>> BlockImportWorker<B, V> {
|
||||
BlockImportWorkerMsg::ImportBlocks(origin, blocks) => {
|
||||
worker.import_a_batch_of_blocks(origin, blocks);
|
||||
},
|
||||
BlockImportWorkerMsg::ImportFinalityProof(who, hash, number, proof) => {
|
||||
worker.import_finality_proof(who, hash, number, proof);
|
||||
},
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
BlockImportWorkerMsg::Synchronize => {
|
||||
trace!(target: "sync", "Sending sync message");
|
||||
let _ = worker.result_sender.send(BlockImportWorkerMsg::Synchronize);
|
||||
},
|
||||
_ => unreachable!("Import Worker does not receive the Imported message; qed"),
|
||||
BlockImportWorkerMsg::ImportedBlocks(_)
|
||||
| BlockImportWorkerMsg::ImportedFinalityProof(_, _, _)
|
||||
=> unreachable!("Import Worker does not receive the Imported* messages; qed"),
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -512,10 +591,31 @@ impl<B: BlockT, V: 'static + Verifier<B>> BlockImportWorker<B, V> {
|
||||
|
||||
let _ = self
|
||||
.result_sender
|
||||
.send(BlockImportWorkerMsg::Imported(results));
|
||||
.send(BlockImportWorkerMsg::ImportedBlocks(results));
|
||||
|
||||
trace!(target: "sync", "Imported {} of {}", imported, count);
|
||||
}
|
||||
|
||||
fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, finality_proof: Vec<u8>) {
|
||||
let result = self.finality_proof_import.as_ref().map(|finality_proof_import| {
|
||||
finality_proof_import.import_finality_proof(hash, number, finality_proof, &*self.verifier)
|
||||
.map_err(|e| {
|
||||
debug!(
|
||||
"Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}",
|
||||
e,
|
||||
hash,
|
||||
number,
|
||||
who,
|
||||
);
|
||||
})
|
||||
}).unwrap_or(Err(()));
|
||||
|
||||
let _ = self
|
||||
.result_sender
|
||||
.send(BlockImportWorkerMsg::ImportedFinalityProof(who, (hash, number), result));
|
||||
|
||||
trace!(target: "sync", "Imported finality proof for {}/{}", number, hash);
|
||||
}
|
||||
}
|
||||
|
||||
/// Hooks that the verification queue can use to influence the synchronization
|
||||
@@ -531,6 +631,21 @@ pub trait Link<B: BlockT>: Send {
|
||||
fn clear_justification_requests(&self) {}
|
||||
/// Request a justification for the given block.
|
||||
fn request_justification(&self, _hash: &B::Hash, _number: NumberFor<B>) {}
|
||||
/// Finality proof import result.
|
||||
///
|
||||
/// Even though we have asked for finality proof of block A, provider could return proof of
|
||||
/// some earlier block B, if the proof for A was too large. The sync module should continue
|
||||
/// asking for proof of A in this case.
|
||||
fn finality_proof_imported(
|
||||
&self,
|
||||
_who: Origin,
|
||||
_request_block: (B::Hash, NumberFor<B>),
|
||||
_finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
|
||||
) {}
|
||||
/// Request a finality proof for the given block.
|
||||
fn request_finality_proof(&self, _hash: &B::Hash, _number: NumberFor<B>) {}
|
||||
/// Remember finality proof request builder on start.
|
||||
fn set_finality_proof_request_builder(&self, _request_builder: SharedFinalityProofRequestBuilder<B>) {}
|
||||
/// Adjusts the reputation of the given peer.
|
||||
fn report_peer(&self, _who: Origin, _reputation_change: i32) {}
|
||||
/// Restart sync.
|
||||
@@ -637,12 +752,14 @@ pub fn import_single_block<B: BlockT, V: Verifier<B>>(
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::block_import::ForkChoiceStrategy;
|
||||
use libp2p::PeerId;
|
||||
use test_client::runtime::{Block, Hash};
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum LinkMsg {
|
||||
BlockImported,
|
||||
FinalityProofImported,
|
||||
Disconnected,
|
||||
Restarted,
|
||||
}
|
||||
@@ -664,6 +781,14 @@ mod tests {
|
||||
fn block_imported(&self, _hash: &Hash, _number: NumberFor<Block>) {
|
||||
let _ = self.sender.send(LinkMsg::BlockImported);
|
||||
}
|
||||
fn finality_proof_imported(
|
||||
&self,
|
||||
_: Origin,
|
||||
_: (Hash, NumberFor<Block>),
|
||||
_: Result<(Hash, NumberFor<Block>), ()>,
|
||||
) {
|
||||
let _ = self.sender.send(LinkMsg::FinalityProofImported);
|
||||
}
|
||||
fn report_peer(&self, _: Origin, _: i32) {
|
||||
let _ = self.sender.send(LinkMsg::Disconnected);
|
||||
}
|
||||
@@ -672,12 +797,33 @@ mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: BlockT> Verifier<B> for () {
|
||||
fn verify(
|
||||
&self,
|
||||
origin: BlockOrigin,
|
||||
header: B::Header,
|
||||
justification: Option<Justification>,
|
||||
body: Option<Vec<B::Extrinsic>>,
|
||||
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityIdFor<B>>>), String> {
|
||||
Ok((ImportBlock {
|
||||
origin,
|
||||
header,
|
||||
body,
|
||||
finalized: false,
|
||||
justification,
|
||||
post_digests: vec![],
|
||||
auxiliary: Vec::new(),
|
||||
fork_choice: ForkChoiceStrategy::LongestChain,
|
||||
}, None))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_import_result_works() {
|
||||
let (result_sender, result_port) = channel::unbounded();
|
||||
let (worker_sender, _) = channel::unbounded();
|
||||
let (link_sender, link_port) = channel::unbounded();
|
||||
let importer_sender = BlockImporter::<Block>::new(result_port, worker_sender, None);
|
||||
let importer_sender = BlockImporter::<Block>::new(result_port, worker_sender, Arc::new(()), None, None, None);
|
||||
let link = TestLink::new(link_sender);
|
||||
let (ack_sender, start_ack_port) = channel::bounded(4);
|
||||
let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender));
|
||||
@@ -687,52 +833,101 @@ mod tests {
|
||||
|
||||
// Send a known
|
||||
let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported));
|
||||
|
||||
// Send a second known
|
||||
let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported));
|
||||
|
||||
// Send an unknown
|
||||
let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), Default::default(), None)), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported));
|
||||
|
||||
// Send an unknown with peer and bad justification
|
||||
let peer_id = PeerId::random();
|
||||
let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(),
|
||||
ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true },
|
||||
ImportedAux {
|
||||
needs_justification: true,
|
||||
clear_justification_requests: false,
|
||||
bad_justification: true,
|
||||
needs_finality_proof: false,
|
||||
},
|
||||
Some(peer_id.clone()))), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported));
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected));
|
||||
|
||||
// Send an incomplete header
|
||||
let results = vec![(Err(BlockImportError::IncompleteHeader(Some(peer_id.clone()))), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected));
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted));
|
||||
|
||||
// Send an unknown parent
|
||||
let results = vec![(Err(BlockImportError::UnknownParent), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted));
|
||||
|
||||
// Send a verification failed
|
||||
let results = vec![(Err(BlockImportError::VerificationFailed(Some(peer_id.clone()), String::new())), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected));
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted));
|
||||
|
||||
// Send an error
|
||||
let results = vec![(Err(BlockImportError::Error), Default::default())];
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap();
|
||||
let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap();
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted));
|
||||
|
||||
// Drop the importer sender first, ensuring graceful shutdown.
|
||||
drop(importer_sender);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn process_finality_proof_import_result_works() {
|
||||
let (result_sender, result_port) = channel::unbounded();
|
||||
let (worker_sender, worker_receiver) = channel::unbounded();
|
||||
let (link_sender, link_port) = channel::unbounded();
|
||||
let importer_sender = BlockImporter::<Block>::new(result_port, worker_sender, Arc::new(()), None, None, None);
|
||||
let link = TestLink::new(link_sender);
|
||||
let (ack_sender, start_ack_port) = channel::bounded(4);
|
||||
let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender));
|
||||
let who = Origin::random();
|
||||
|
||||
// Ensure the importer handles Start before any result messages.
|
||||
start_ack_port.recv().unwrap().unwrap();
|
||||
|
||||
// Send finality proof import request to BlockImporter
|
||||
importer_sender.send(BlockImportMsg::ImportFinalityProof(
|
||||
who.clone(),
|
||||
Default::default(),
|
||||
1,
|
||||
vec![42],
|
||||
)).unwrap();
|
||||
|
||||
// Wait until this request is redirected to the BlockImportWorker
|
||||
assert_eq!(worker_receiver.recv(), Ok(BlockImportWorkerMsg::ImportFinalityProof(
|
||||
who.clone(),
|
||||
Default::default(),
|
||||
1,
|
||||
vec![42],
|
||||
)));
|
||||
|
||||
// Send ack of proof import from BlockImportWorker to BlockImporter
|
||||
result_sender.send(BlockImportWorkerMsg::ImportedFinalityProof(
|
||||
who.clone(),
|
||||
(Default::default(), 0),
|
||||
Ok((Default::default(), 0)),
|
||||
)).unwrap();
|
||||
|
||||
// Wait for finality proof import result
|
||||
assert_eq!(link_port.recv(), Ok(LinkMsg::FinalityProofImported));
|
||||
|
||||
// Drop the importer sender first, ensuring graceful shutdown.
|
||||
drop(importer_sender);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,8 @@ const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512;
|
||||
|
||||
pub use self::error::{Error, ErrorKind};
|
||||
pub use block_import::{
|
||||
BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult, JustificationImport,
|
||||
BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult,
|
||||
JustificationImport, FinalityProofImport, FinalityProofRequestBuilder,
|
||||
};
|
||||
pub use select_chain::SelectChain;
|
||||
|
||||
|
||||
@@ -105,7 +105,7 @@ where H: Clone + Debug + PartialEq,
|
||||
}
|
||||
}
|
||||
|
||||
fn load_decode<B: AuxStore, T: Decode>(backend: &B, key: &[u8]) -> ClientResult<Option<T>> {
|
||||
pub(crate) fn load_decode<B: AuxStore, T: Decode>(backend: &B, key: &[u8]) -> ClientResult<Option<T>> {
|
||||
match backend.get_aux(key)? {
|
||||
None => Ok(None),
|
||||
Some(t) => T::decode(&mut &t[..])
|
||||
|
||||
@@ -32,6 +32,11 @@ impl<H, N> ConsensusChanges<H, N> {
|
||||
|
||||
impl<H: Copy + PartialEq, N: Copy + Ord> ConsensusChanges<H, N> {
|
||||
|
||||
/// Returns reference to all pending changes.
|
||||
pub fn pending_changes(&self) -> &[(N, H)] {
|
||||
&self.pending_changes
|
||||
}
|
||||
|
||||
/// Note unfinalized change of consensus-related data.
|
||||
pub(crate) fn note_change(&mut self, at: (N, H)) {
|
||||
let idx = self.pending_changes
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -552,7 +552,7 @@ where
|
||||
enacts_change: bool,
|
||||
) -> Result<(), ConsensusError> {
|
||||
let justification = GrandpaJustification::decode_and_verify_finalizes(
|
||||
justification,
|
||||
&justification,
|
||||
(hash, number),
|
||||
self.authority_set.set_id(),
|
||||
&self.authority_set.current_authorities(),
|
||||
|
||||
@@ -95,17 +95,16 @@ impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> {
|
||||
/// Decode a GRANDPA justification and validate the commit and the votes'
|
||||
/// ancestry proofs finalize the given block.
|
||||
pub(crate) fn decode_and_verify_finalizes(
|
||||
encoded: Vec<u8>,
|
||||
encoded: &[u8],
|
||||
finalized_target: (Block::Hash, NumberFor<Block>),
|
||||
set_id: u64,
|
||||
voters: &VoterSet<AuthorityId>,
|
||||
) -> Result<GrandpaJustification<Block>, ClientError> where
|
||||
NumberFor<Block>: grandpa::BlockNumberOps,
|
||||
{
|
||||
let justification = GrandpaJustification::<Block>::decode(&mut &*encoded).ok_or_else(|| {
|
||||
let msg = "failed to decode grandpa justification".to_string();
|
||||
ClientError::from(ClientError::BadJustification(msg))
|
||||
})?;
|
||||
|
||||
let justification = GrandpaJustification::<Block>::decode(&mut &*encoded)
|
||||
.ok_or(ClientError::JustificationDecode)?;
|
||||
|
||||
if (justification.commit.target_hash, justification.commit.target_number) != finalized_target {
|
||||
let msg = "invalid commit target in grandpa justification".to_string();
|
||||
|
||||
@@ -93,15 +93,17 @@ mod environment;
|
||||
mod finality_proof;
|
||||
mod import;
|
||||
mod justification;
|
||||
mod light_import;
|
||||
mod observer;
|
||||
mod until_imported;
|
||||
|
||||
#[cfg(feature="service-integration")]
|
||||
mod service_integration;
|
||||
#[cfg(feature="service-integration")]
|
||||
pub use service_integration::{LinkHalfForService, BlockImportForService};
|
||||
pub use service_integration::{LinkHalfForService, BlockImportForService, BlockImportForLightService};
|
||||
pub use communication::Network;
|
||||
pub use finality_proof::{prove_finality, check_finality_proof};
|
||||
pub use finality_proof::FinalityProofProvider;
|
||||
pub use light_import::light_block_import;
|
||||
pub use observer::run_grandpa_observer;
|
||||
|
||||
use aux_schema::PersistentData;
|
||||
@@ -300,7 +302,7 @@ pub struct LinkHalf<B, E, Block: BlockT<Hash=H256>, RA, SC> {
|
||||
pub fn block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA, SC>(
|
||||
client: Arc<Client<B, E, Block, RA>>,
|
||||
api: Arc<PRA>,
|
||||
select_chain: SC
|
||||
select_chain: SC,
|
||||
) -> Result<(
|
||||
GrandpaBlockImport<B, E, Block, RA, PRA, SC>,
|
||||
LinkHalf<B, E, Block, RA, SC>
|
||||
|
||||
@@ -0,0 +1,728 @@
|
||||
// Copyright 2019 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use log::{info, trace, warn};
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use client::{
|
||||
CallExecutor, Client,
|
||||
backend::{AuxStore, Backend},
|
||||
blockchain::HeaderBackend,
|
||||
error::Error as ClientError,
|
||||
};
|
||||
use parity_codec::{Encode, Decode};
|
||||
use consensus_common::{
|
||||
import_queue::{Verifier, SharedFinalityProofRequestBuilder}, well_known_cache_keys,
|
||||
BlockOrigin, BlockImport, FinalityProofImport, ImportBlock, ImportResult, ImportedAux,
|
||||
Error as ConsensusError, ErrorKind as ConsensusErrorKind, FinalityProofRequestBuilder,
|
||||
};
|
||||
use runtime_primitives::Justification;
|
||||
use runtime_primitives::traits::{
|
||||
NumberFor, Block as BlockT, Header as HeaderT, ProvideRuntimeApi, DigestFor,
|
||||
};
|
||||
use fg_primitives::GrandpaApi;
|
||||
use runtime_primitives::generic::BlockId;
|
||||
use substrate_primitives::{H256, Blake2Hasher, ed25519::Public as AuthorityId};
|
||||
|
||||
use crate::aux_schema::load_decode;
|
||||
use crate::consensus_changes::ConsensusChanges;
|
||||
use crate::environment::canonical_at_height;
|
||||
use crate::finality_proof::{AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request};
|
||||
use crate::justification::GrandpaJustification;
|
||||
|
||||
/// LightAuthoritySet is saved under this key in aux storage.
|
||||
const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters";
|
||||
/// ConsensusChanges is saver under this key in aux storage.
|
||||
const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes";
|
||||
|
||||
/// Create light block importer.
|
||||
pub fn light_block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
|
||||
client: Arc<Client<B, E, Block, RA>>,
|
||||
authority_set_provider: Arc<AuthoritySetForFinalityChecker<Block>>,
|
||||
api: Arc<PRA>,
|
||||
) -> Result<GrandpaLightBlockImport<B, E, Block, RA>, ClientError>
|
||||
where
|
||||
B: Backend<Block, Blake2Hasher> + 'static,
|
||||
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
|
||||
RA: Send + Sync,
|
||||
PRA: ProvideRuntimeApi,
|
||||
PRA::Api: GrandpaApi<Block>,
|
||||
{
|
||||
let info = client.info()?;
|
||||
let import_data = load_aux_import_data(info.chain.finalized_hash, &**client.backend(), api)?;
|
||||
Ok(GrandpaLightBlockImport {
|
||||
client,
|
||||
authority_set_provider,
|
||||
data: Arc::new(RwLock::new(import_data)),
|
||||
})
|
||||
}
|
||||
|
||||
/// A light block-import handler for GRANDPA.
|
||||
///
|
||||
/// It is responsible for:
|
||||
/// - checking GRANDPA justifications;
|
||||
/// - fetching finality proofs for blocks that are enacting consensus changes.
|
||||
pub struct GrandpaLightBlockImport<B, E, Block: BlockT<Hash=H256>, RA> {
|
||||
client: Arc<Client<B, E, Block, RA>>,
|
||||
authority_set_provider: Arc<AuthoritySetForFinalityChecker<Block>>,
|
||||
data: Arc<RwLock<LightImportData<Block>>>,
|
||||
}
|
||||
|
||||
/// Mutable data of light block importer.
|
||||
struct LightImportData<Block: BlockT<Hash=H256>> {
|
||||
last_finalized: Block::Hash,
|
||||
authority_set: LightAuthoritySet,
|
||||
consensus_changes: ConsensusChanges<Block::Hash, NumberFor<Block>>,
|
||||
}
|
||||
|
||||
/// Latest authority set tracker.
|
||||
#[derive(Debug, Encode, Decode)]
|
||||
struct LightAuthoritySet {
|
||||
set_id: u64,
|
||||
authorities: Vec<(AuthorityId, u64)>,
|
||||
}
|
||||
|
||||
impl<B, E, Block: BlockT<Hash=H256>, RA> GrandpaLightBlockImport<B, E, Block, RA> {
|
||||
/// Create finality proof request builder.
|
||||
pub fn create_finality_proof_request_builder(&self) -> SharedFinalityProofRequestBuilder<Block> {
|
||||
Arc::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, E, Block: BlockT<Hash=H256>, RA> BlockImport<Block>
|
||||
for GrandpaLightBlockImport<B, E, Block, RA> where
|
||||
NumberFor<Block>: grandpa::BlockNumberOps,
|
||||
B: Backend<Block, Blake2Hasher> + 'static,
|
||||
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
|
||||
DigestFor<Block>: Encode,
|
||||
RA: Send + Sync,
|
||||
{
|
||||
type Error = ConsensusError;
|
||||
|
||||
fn import_block(
|
||||
&self,
|
||||
block: ImportBlock<Block>,
|
||||
new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
|
||||
) -> Result<ImportResult, Self::Error> {
|
||||
do_import_block::<_, _, _, _, GrandpaJustification<Block>>(&*self.client, &mut *self.data.write(), block, new_cache)
|
||||
}
|
||||
|
||||
fn check_block(
|
||||
&self,
|
||||
hash: Block::Hash,
|
||||
parent_hash: Block::Hash,
|
||||
) -> Result<ImportResult, Self::Error> {
|
||||
self.client.check_block(hash, parent_hash)
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, E, Block: BlockT<Hash=H256>, RA> FinalityProofImport<Block>
	for GrandpaLightBlockImport<B, E, Block, RA> where
		NumberFor<Block>: grandpa::BlockNumberOps,
		B: Backend<Block, Blake2Hasher> + 'static,
		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
		DigestFor<Block>: Encode,
		RA: Send + Sync,
{
	type Error = ConsensusError;

	// On startup, re-request finality proofs for every pending consensus
	// change that is already imported (<= best) but not yet finalized.
	fn on_start(&self, link: &::consensus_common::import_queue::Link<Block>) {
		let chain_info = match self.client.info() {
			// silently bail if chain info is unavailable; nothing to request
			Ok(info) => info.chain,
			_ => return,
		};

		let data = self.data.read();
		for (pending_number, pending_hash) in data.consensus_changes.pending_changes() {
			if *pending_number > chain_info.finalized_number && *pending_number <= chain_info.best_number {
				link.request_finality_proof(pending_hash, *pending_number);
			}
		}
	}

	// Delegates to `do_import_finality_proof`, holding the import data write
	// lock. Note the requested hash/number are forwarded but the checker works
	// from the proof contents (they arrive as `_hash`/`_number` there).
	fn import_finality_proof(
		&self,
		hash: Block::Hash,
		number: NumberFor<Block>,
		finality_proof: Vec<u8>,
		verifier: &Verifier<Block>,
	) -> Result<(Block::Hash, NumberFor<Block>), Self::Error> {
		do_import_finality_proof::<_, _, _, _, GrandpaJustification<Block>>(
			&*self.client,
			&*self.authority_set_provider,
			&mut *self.data.write(),
			hash,
			number,
			finality_proof,
			verifier,
		)
	}
}
|
||||
|
||||
impl LightAuthoritySet {
|
||||
/// Get a genesis set with given authorities.
|
||||
pub fn genesis(initial: Vec<(AuthorityId, u64)>) -> Self {
|
||||
LightAuthoritySet {
|
||||
set_id: 0,
|
||||
authorities: initial,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get latest set id.
|
||||
pub fn set_id(&self) -> u64 {
|
||||
self.set_id
|
||||
}
|
||||
|
||||
/// Get latest authorities set.
|
||||
pub fn authorities(&self) -> Vec<(AuthorityId, u64)> {
|
||||
self.authorities.clone()
|
||||
}
|
||||
|
||||
/// Set new authorities set.
|
||||
pub fn update(&mut self, set_id: u64, authorities: Vec<(AuthorityId, u64)>) {
|
||||
self.set_id = set_id;
|
||||
std::mem::replace(&mut self.authorities, authorities);
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds finality proof requests from the shared light-import state.
struct GrandpaFinalityProofRequestBuilder<B: BlockT<Hash=H256>>(Arc<RwLock<LightImportData<B>>>);

impl<B: BlockT<Hash=H256>> FinalityProofRequestBuilder<B> for GrandpaFinalityProofRequestBuilder<B> {
	// The request is independent of the concrete block hash being requested:
	// it asks for a proof starting at our last finalized block, generated by
	// the authority set we currently know.
	fn build_request_data(&self, _hash: &B::Hash) -> Vec<u8> {
		let data = self.0.read();
		make_finality_proof_request(
			data.last_finalized,
			data.authority_set.set_id(),
		)
	}
}
|
||||
|
||||
/// Try to import new block.
|
||||
fn do_import_block<B, E, Block: BlockT<Hash=H256>, RA, J>(
|
||||
client: &Client<B, E, Block, RA>,
|
||||
data: &mut LightImportData<Block>,
|
||||
mut block: ImportBlock<Block>,
|
||||
new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
|
||||
) -> Result<ImportResult, ConsensusError>
|
||||
where
|
||||
B: Backend<Block, Blake2Hasher> + 'static,
|
||||
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
|
||||
RA: Send + Sync,
|
||||
NumberFor<Block>: grandpa::BlockNumberOps,
|
||||
DigestFor<Block>: Encode,
|
||||
J: ProvableJustification<Block::Header>,
|
||||
{
|
||||
let hash = block.post_header().hash();
|
||||
let number = block.header.number().clone();
|
||||
|
||||
// we don't want to finalize on `inner.import_block`
|
||||
let justification = block.justification.take();
|
||||
let enacts_consensus_change = !new_cache.is_empty();
|
||||
let import_result = client.import_block(block, new_cache);
|
||||
|
||||
let mut imported_aux = match import_result {
|
||||
Ok(ImportResult::Imported(aux)) => aux,
|
||||
Ok(r) => return Ok(r),
|
||||
Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()),
|
||||
};
|
||||
|
||||
match justification {
|
||||
Some(justification) => {
|
||||
trace!(
|
||||
target: "finality",
|
||||
"Imported block {}{}. Importing justification.",
|
||||
if enacts_consensus_change { " which enacts consensus changes" } else { "" },
|
||||
hash,
|
||||
);
|
||||
|
||||
do_import_justification::<_, _, _, _, J>(client, data, hash, number, justification)
|
||||
},
|
||||
None if enacts_consensus_change => {
|
||||
trace!(
|
||||
target: "finality",
|
||||
"Imported block {} which enacts consensus changes. Requesting finality proof.",
|
||||
hash,
|
||||
);
|
||||
|
||||
// remember that we need finality proof for this block
|
||||
imported_aux.needs_finality_proof = true;
|
||||
data.consensus_changes.note_change((number, hash));
|
||||
Ok(ImportResult::Imported(imported_aux))
|
||||
},
|
||||
None => Ok(ImportResult::Imported(imported_aux)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to import finality proof.
///
/// Checks the proof against the locally-known authority set, imports any new
/// headers the proof carries, finalizes the proven block and switches to the
/// new authority set. Returns the (hash, number) of the block that became
/// finalized. The order of the steps below matters — do not reorder.
fn do_import_finality_proof<B, E, Block: BlockT<Hash=H256>, RA, J>(
	client: &Client<B, E, Block, RA>,
	authority_set_provider: &AuthoritySetForFinalityChecker<Block>,
	data: &mut LightImportData<Block>,
	// requested hash/number are unused: the effects come from the proof itself
	_hash: Block::Hash,
	_number: NumberFor<Block>,
	finality_proof: Vec<u8>,
	verifier: &Verifier<Block>,
) -> Result<(Block::Hash, NumberFor<Block>), ConsensusError>
	where
		B: Backend<Block, Blake2Hasher> + 'static,
		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
		RA: Send + Sync,
		DigestFor<Block>: Encode,
		NumberFor<Block>: grandpa::BlockNumberOps,
		J: ProvableJustification<Block::Header>,
{
	// check the proof against the authority set we currently believe is active
	let authority_set_id = data.authority_set.set_id();
	let authorities = data.authority_set.authorities();
	let finality_effects = crate::finality_proof::check_finality_proof(
		&*client.backend().blockchain(),
		authority_set_id,
		authorities,
		authority_set_provider,
		finality_proof,
	).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?;

	// try to import all new headers
	let block_origin = BlockOrigin::NetworkBroadcast;
	for header_to_import in finality_effects.headers_to_import {
		let (block_to_import, new_authorities) = verifier.verify(block_origin, header_to_import, None, None)?;
		assert!(block_to_import.justification.is_none(), "We have passed None as justification to verifier.verify");

		let mut cache = HashMap::new();
		if let Some(authorities) = new_authorities {
			cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode());
		}
		do_import_block::<_, _, _, _, J>(client, data, block_to_import, cache)?;
	}

	// try to import latest justification
	let finalized_block_hash = finality_effects.block;
	let finalized_block_number = client.backend().blockchain()
		.expect_block_number_from_id(&BlockId::Hash(finality_effects.block))
		.map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?;
	do_finalize_block(
		client,
		data,
		finalized_block_hash,
		finalized_block_number,
		finality_effects.justification.encode(),
	)?;

	// apply new authorities set (only after the block is finalized, so a
	// failure above leaves the old set intact)
	data.authority_set.update(
		finality_effects.new_set_id,
		finality_effects.new_authorities,
	);

	Ok((finalized_block_hash, finalized_block_number))
}
|
||||
|
||||
/// Try to import justification.
///
/// Decodes and verifies the justification against the current authority set;
/// on success the block is finalized, on a set mismatch a finality proof is
/// requested instead, and on any other decode/verify error the import fails.
fn do_import_justification<B, E, Block: BlockT<Hash=H256>, RA, J>(
	client: &Client<B, E, Block, RA>,
	data: &mut LightImportData<Block>,
	hash: Block::Hash,
	number: NumberFor<Block>,
	justification: Justification,
) -> Result<ImportResult, ConsensusError>
	where
		B: Backend<Block, Blake2Hasher> + 'static,
		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
		RA: Send + Sync,
		NumberFor<Block>: grandpa::BlockNumberOps,
		J: ProvableJustification<Block::Header>,
{
	// with justification, we have two cases
	//
	// optimistic: the same GRANDPA authorities set has generated intermediate justification
	// => justification is verified using current authorities set + we could proceed further
	//
	// pessimistic scenario: the GRANDPA authorities set has changed
	// => we need to fetch new authorities set (i.e. finality proof) from remote node

	// first, try to behave optimistically
	let authority_set_id = data.authority_set.set_id();
	let justification = J::decode_and_verify(
		&justification,
		authority_set_id,
		&data.authority_set.authorities(),
	);

	// BadJustification error means that justification has been successfully decoded, but
	// it isn't valid within current authority set
	let justification = match justification {
		Err(ClientError::BadJustification(_)) => {
			trace!(
				target: "finality",
				"Justification for {} is not valid within current authorities set. Requesting finality proof.",
				hash,
			);

			// do not finalize; ask the network for a finality proof instead
			let mut imported_aux = ImportedAux::default();
			imported_aux.needs_finality_proof = true;
			return Ok(ImportResult::Imported(imported_aux));
		},
		Err(e) => {
			trace!(
				target: "finality",
				"Justification for {} is not valid. Bailing.",
				hash,
			);

			return Err(ConsensusErrorKind::ClientImport(e.to_string()).into());
		},
		Ok(justification) => {
			trace!(
				target: "finality",
				"Justification for {} is valid. Finalizing the block.",
				hash,
			);

			justification
		},
	};

	// finalize the block
	do_finalize_block(client, data, hash, number, justification.encode())
}
|
||||
|
||||
/// Finalize the block.
///
/// Applies finality through the client, prunes consensus changes that the
/// finalization made obsolete (persisting the pruned set), and records the new
/// last-finalized hash in the shared import data.
fn do_finalize_block<B, E, Block: BlockT<Hash=H256>, RA>(
	client: &Client<B, E, Block, RA>,
	data: &mut LightImportData<Block>,
	hash: Block::Hash,
	number: NumberFor<Block>,
	justification: Justification,
) -> Result<ImportResult, ConsensusError>
	where
		B: Backend<Block, Blake2Hasher> + 'static,
		E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
		RA: Send + Sync,
		NumberFor<Block>: grandpa::BlockNumberOps,
{
	// finalize the block
	client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| {
		warn!(target: "finality", "Error applying finality to block {:?}: {:?}", (hash, number), e);
		ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string()))
	})?;

	// forget obsoleted consensus changes
	let consensus_finalization_res = data.consensus_changes
		.finalize((number, hash), |at_height| canonical_at_height(&client, (hash, number), true, at_height));
	match consensus_finalization_res {
		// `true` means the pending-changes set was modified => persist it
		Ok((true, _)) => require_insert_aux(
			&client,
			LIGHT_CONSENSUS_CHANGES_KEY,
			&data.consensus_changes,
			"consensus changes",
		)?,
		Ok(_) => (),
		Err(error) => return Err(on_post_finalization_error(error, "consensus changes")),
	}

	// update last finalized block reference
	data.last_finalized = hash;

	Ok(ImportResult::imported())
}
|
||||
|
||||
/// Load light import aux data from the store.
|
||||
fn load_aux_import_data<B, Block: BlockT<Hash=H256>, PRA>(
|
||||
last_finalized: Block::Hash,
|
||||
aux_store: &B,
|
||||
api: Arc<PRA>,
|
||||
) -> Result<LightImportData<Block>, ClientError>
|
||||
where
|
||||
B: AuxStore,
|
||||
PRA: ProvideRuntimeApi,
|
||||
PRA::Api: GrandpaApi<Block>,
|
||||
{
|
||||
use runtime_primitives::traits::Zero;
|
||||
let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? {
|
||||
Some(authority_set) => authority_set,
|
||||
None => {
|
||||
info!(target: "afg", "Loading GRANDPA authorities \
|
||||
from genesis on what appears to be first startup.");
|
||||
|
||||
// no authority set on disk: fetch authorities from genesis state
|
||||
let genesis_authorities = api.runtime_api().grandpa_authorities(&BlockId::number(Zero::zero()))?;
|
||||
|
||||
let authority_set = LightAuthoritySet::genesis(genesis_authorities);
|
||||
let encoded = authority_set.encode();
|
||||
aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?;
|
||||
|
||||
authority_set
|
||||
},
|
||||
};
|
||||
|
||||
let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? {
|
||||
Some(consensus_changes) => consensus_changes,
|
||||
None => {
|
||||
let consensus_changes = ConsensusChanges::<Block::Hash, NumberFor<Block>>::empty();
|
||||
|
||||
let encoded = authority_set.encode();
|
||||
aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?;
|
||||
|
||||
consensus_changes
|
||||
},
|
||||
};
|
||||
|
||||
Ok(LightImportData {
|
||||
last_finalized,
|
||||
authority_set,
|
||||
consensus_changes,
|
||||
})
|
||||
}
|
||||
|
||||
/// Insert into aux store. If failed, return error && show inconsistency warning.
|
||||
fn require_insert_aux<T: Encode, B, E, Block: BlockT<Hash=H256>, RA>(
|
||||
client: &Client<B, E, Block, RA>,
|
||||
key: &[u8],
|
||||
value: &T,
|
||||
value_type: &str,
|
||||
) -> Result<(), ConsensusError>
|
||||
where
|
||||
B: Backend<Block, Blake2Hasher> + 'static,
|
||||
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
|
||||
{
|
||||
let backend = &**client.backend();
|
||||
let encoded = value.encode();
|
||||
let update_res = Backend::insert_aux(backend, &[(key, &encoded[..])], &[]);
|
||||
if let Err(error) = update_res {
|
||||
return Err(on_post_finalization_error(error, value_type));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Display inconsistency warning.
|
||||
fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError {
|
||||
warn!(target: "finality", "Failed to write updated {} to disk. Bailing.", value_type);
|
||||
warn!(target: "finality", "Node is in a potentially inconsistent state.");
|
||||
ConsensusError::from(ConsensusErrorKind::ClientImport(error.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
pub mod tests {
	use super::*;
	use consensus_common::ForkChoiceStrategy;
	use substrate_primitives::H256;
	use test_client::client::in_mem::Blockchain as InMemoryAuxStore;
	use test_client::runtime::{Block, Header};
	use crate::tests::TestApi;
	use crate::finality_proof::tests::TestJustification;

	/// Wrapper around `GrandpaLightBlockImport` that drops any justification
	/// attached to an imported block, forcing finalization to go through
	/// finality proofs only.
	pub struct NoJustificationsImport<B, E, Block: BlockT<Hash=H256>, RA>(
		pub GrandpaLightBlockImport<B, E, Block, RA>
	);

	impl<B, E, Block: BlockT<Hash=H256>, RA> BlockImport<Block>
		for NoJustificationsImport<B, E, Block, RA> where
			NumberFor<Block>: grandpa::BlockNumberOps,
			B: Backend<Block, Blake2Hasher> + 'static,
			E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
			DigestFor<Block>: Encode,
			RA: Send + Sync,
	{
		type Error = ConsensusError;

		fn import_block(
			&self,
			mut block: ImportBlock<Block>,
			new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
		) -> Result<ImportResult, Self::Error> {
			// strip the justification before delegating to the real importer
			block.justification.take();
			self.0.import_block(block, new_cache)
		}

		fn check_block(
			&self,
			hash: Block::Hash,
			parent_hash: Block::Hash,
		) -> Result<ImportResult, Self::Error> {
			self.0.check_block(hash, parent_hash)
		}
	}

	impl<B, E, Block: BlockT<Hash=H256>, RA> FinalityProofImport<Block>
		for NoJustificationsImport<B, E, Block, RA> where
			NumberFor<Block>: grandpa::BlockNumberOps,
			B: Backend<Block, Blake2Hasher> + 'static,
			E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
			DigestFor<Block>: Encode,
			RA: Send + Sync,
	{
		type Error = ConsensusError;

		// finality proof handling is passed through unchanged
		fn on_start(&self, link: &::consensus_common::import_queue::Link<Block>) {
			self.0.on_start(link)
		}

		fn import_finality_proof(
			&self,
			hash: Block::Hash,
			number: NumberFor<Block>,
			finality_proof: Vec<u8>,
			verifier: &Verifier<Block>,
		) -> Result<(Block::Hash, NumberFor<Block>), Self::Error> {
			self.0.import_finality_proof(hash, number, finality_proof, verifier)
		}
	}

	/// Creates light block import that ignores justifications that came outside of finality proofs.
	pub fn light_block_import_without_justifications<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
		client: Arc<Client<B, E, Block, RA>>,
		authority_set_provider: Arc<AuthoritySetForFinalityChecker<Block>>,
		api: Arc<PRA>,
	) -> Result<NoJustificationsImport<B, E, Block, RA>, ClientError>
		where
			B: Backend<Block, Blake2Hasher> + 'static,
			E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
			RA: Send + Sync,
			PRA: ProvideRuntimeApi,
			PRA::Api: GrandpaApi<Block>,
	{
		light_block_import(client, authority_set_provider, api).map(NoJustificationsImport)
	}

	// Test helper: import a single child-of-genesis block with the given cache
	// update and optional justification, returning the import result.
	fn import_block(
		new_cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
		justification: Option<Justification>,
	) -> ImportResult {
		let client = test_client::new_light();
		let mut import_data = LightImportData {
			last_finalized: Default::default(),
			authority_set: LightAuthoritySet::genesis(vec![(AuthorityId([1; 32]), 1)]),
			consensus_changes: ConsensusChanges::empty(),
		};
		let block = ImportBlock {
			origin: BlockOrigin::Own,
			header: Header {
				number: 1,
				parent_hash: client.info().unwrap().chain.best_hash,
				state_root: Default::default(),
				digest: Default::default(),
				extrinsics_root: Default::default(),
			},
			justification,
			post_digests: Vec::new(),
			body: None,
			finalized: false,
			auxiliary: Vec::new(),
			fork_choice: ForkChoiceStrategy::LongestChain,
		};
		do_import_block::<_, _, _, _, TestJustification>(
			&client,
			&mut import_data,
			block,
			new_cache,
		).unwrap()
	}

	#[test]
	fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided() {
		assert_eq!(import_block(HashMap::new(), None), ImportResult::Imported(ImportedAux {
			clear_justification_requests: false,
			needs_justification: false,
			bad_justification: false,
			needs_finality_proof: false,
		}));
	}

	#[test]
	fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided() {
		// TestJustification(true, ..) => verifies successfully
		let justification = TestJustification(true, Vec::new()).encode();
		assert_eq!(import_block(HashMap::new(), Some(justification)), ImportResult::Imported(ImportedAux {
			clear_justification_requests: false,
			needs_justification: false,
			bad_justification: false,
			needs_finality_proof: false,
		}));
	}

	#[test]
	fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() {
		// a non-empty AUTHORITIES cache entry marks a consensus change
		let mut cache = HashMap::new();
		cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId([2; 32])].encode());
		assert_eq!(import_block(cache, None), ImportResult::Imported(ImportedAux {
			clear_justification_requests: false,
			needs_justification: false,
			bad_justification: false,
			needs_finality_proof: true,
		}));
	}

	#[test]
	fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() {
		// TestJustification(false, ..) => fails verification within current set
		let justification = TestJustification(false, Vec::new()).encode();
		let mut cache = HashMap::new();
		cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId([2; 32])].encode());
		assert_eq!(
			import_block(cache, Some(justification)),
			ImportResult::Imported(ImportedAux {
				clear_justification_requests: false,
				needs_justification: false,
				bad_justification: false,
				needs_finality_proof: true,
			},
		));
	}


	#[test]
	fn aux_data_updated_on_start() {
		let aux_store = InMemoryAuxStore::<Block>::new();
		let api = Arc::new(TestApi::new(vec![(AuthorityId([1; 32]), 1)]));

		// when aux store is empty initially
		assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none());
		assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none());

		// it is updated on importer start
		load_aux_import_data(Default::default(), &aux_store, api).unwrap();
		assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some());
		assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some());
	}

	#[test]
	fn aux_data_loaded_on_restart() {
		let aux_store = InMemoryAuxStore::<Block>::new();
		let api = Arc::new(TestApi::new(vec![(AuthorityId([1; 32]), 1)]));

		// when aux store is non-empty initially
		let mut consensus_changes = ConsensusChanges::<H256, u64>::empty();
		consensus_changes.note_change((42, Default::default()));
		aux_store.insert_aux(
			&[
				(
					LIGHT_AUTHORITY_SET_KEY,
					LightAuthoritySet::genesis(vec![(AuthorityId([42; 32]), 2)]).encode().as_slice(),
				),
				(
					LIGHT_CONSENSUS_CHANGES_KEY,
					consensus_changes.encode().as_slice(),
				),
			],
			&[],
		).unwrap();

		// importer uses it on start
		let data = load_aux_import_data(Default::default(), &aux_store, api).unwrap();
		assert_eq!(data.authority_set.authorities(), vec![(AuthorityId([42; 32]), 2)]);
		assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]);
	}
}
|
||||
@@ -17,7 +17,7 @@
|
||||
/// Integrate grandpa finality with substrate service
|
||||
|
||||
use client;
|
||||
use service::{FullBackend, FullExecutor, ServiceFactory};
|
||||
use service::{FullBackend, FullExecutor, LightBackend, LightExecutor, ServiceFactory};
|
||||
|
||||
pub type BlockImportForService<F> = crate::GrandpaBlockImport<
|
||||
FullBackend<F>,
|
||||
@@ -25,12 +25,12 @@ pub type BlockImportForService<F> = crate::GrandpaBlockImport<
|
||||
<F as ServiceFactory>::Block,
|
||||
<F as ServiceFactory>::RuntimeApi,
|
||||
client::Client<
|
||||
FullBackend<F>,
|
||||
FullExecutor<F>,
|
||||
<F as ServiceFactory>::Block,
|
||||
<F as ServiceFactory>::RuntimeApi
|
||||
>,
|
||||
<F as ServiceFactory>::SelectChain
|
||||
FullBackend<F>,
|
||||
FullExecutor<F>,
|
||||
<F as ServiceFactory>::Block,
|
||||
<F as ServiceFactory>::RuntimeApi
|
||||
>,
|
||||
<F as ServiceFactory>::SelectChain,
|
||||
>;
|
||||
|
||||
pub type LinkHalfForService<F> = crate::LinkHalf<
|
||||
@@ -40,3 +40,10 @@ pub type LinkHalfForService<F> = crate::LinkHalf<
|
||||
<F as ServiceFactory>::RuntimeApi,
|
||||
<F as ServiceFactory>::SelectChain
|
||||
>;
|
||||
|
||||
pub type BlockImportForLightService<F> = crate::light_import::GrandpaLightBlockImport<
|
||||
LightBackend<F>,
|
||||
LightExecutor<F>,
|
||||
<F as ServiceFactory>::Block,
|
||||
<F as ServiceFactory>::RuntimeApi,
|
||||
>;
|
||||
|
||||
@@ -25,21 +25,24 @@ use parking_lot::Mutex;
|
||||
use tokio::runtime::current_thread;
|
||||
use keyring::ed25519::{Keyring as AuthorityKeyring};
|
||||
use client::{
|
||||
BlockchainEvents, error::Result,
|
||||
blockchain::Backend as BlockchainBackend,
|
||||
error::Result,
|
||||
runtime_api::{Core, RuntimeVersion, ApiExt},
|
||||
LongestChain,
|
||||
};
|
||||
use test_client::{self, runtime::BlockNumber};
|
||||
use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult};
|
||||
use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport};
|
||||
use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport,
|
||||
SharedFinalityProofRequestBuilder,
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::result;
|
||||
use parity_codec::Decode;
|
||||
use runtime_primitives::traits::{ApiRef, ProvideRuntimeApi, Header as HeaderT};
|
||||
use runtime_primitives::generic::BlockId;
|
||||
use substrate_primitives::{NativeOrEncoded, ExecutionContext, ed25519::Public as AuthorityId};
|
||||
|
||||
use authorities::AuthoritySet;
|
||||
use finality_proof::{FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker};
|
||||
use communication::GRANDPA_ENGINE_ID;
|
||||
use consensus_changes::ConsensusChanges;
|
||||
|
||||
@@ -72,7 +75,7 @@ impl GrandpaTestNet {
|
||||
};
|
||||
let config = Self::default_config();
|
||||
for _ in 0..n_peers {
|
||||
net.add_peer(&config);
|
||||
net.add_full_peer(&config);
|
||||
}
|
||||
net
|
||||
}
|
||||
@@ -99,27 +102,61 @@ impl TestNetFactory for GrandpaTestNet {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_verifier(&self, _client: Arc<PeersClient>, _cfg: &ProtocolConfig)
|
||||
fn make_verifier(&self, _client: PeersClient, _cfg: &ProtocolConfig)
|
||||
-> Arc<Self::Verifier>
|
||||
{
|
||||
Arc::new(PassThroughVerifier(false)) // use non-instant finality.
|
||||
}
|
||||
|
||||
fn make_block_import(&self, client: Arc<PeersClient>)
|
||||
-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, PeerData)
|
||||
fn make_block_import(&self, client: PeersClient)
|
||||
-> (
|
||||
SharedBlockImport<Block>,
|
||||
Option<SharedJustificationImport<Block>>,
|
||||
Option<SharedFinalityProofImport<Block>>,
|
||||
Option<SharedFinalityProofRequestBuilder<Block>>,
|
||||
PeerData,
|
||||
)
|
||||
{
|
||||
|
||||
let select_chain = LongestChain::new(
|
||||
client.backend().clone(),
|
||||
client.import_lock().clone()
|
||||
);
|
||||
let (import, link) = block_import(
|
||||
client,
|
||||
Arc::new(self.test_config.clone()),
|
||||
select_chain,
|
||||
).expect("Could not create block import for fresh peer.");
|
||||
let shared_import = Arc::new(import);
|
||||
(shared_import.clone(), Some(shared_import), Mutex::new(Some(link)))
|
||||
match client {
|
||||
PeersClient::Full(ref client) => {
|
||||
let select_chain = LongestChain::new(
|
||||
client.backend().clone(),
|
||||
client.import_lock().clone()
|
||||
);
|
||||
let (import, link) = block_import(
|
||||
client.clone(),
|
||||
Arc::new(self.test_config.clone()),
|
||||
select_chain,
|
||||
).expect("Could not create block import for fresh peer.");
|
||||
let shared_import = Arc::new(import);
|
||||
(shared_import.clone(), Some(shared_import), None, None, Mutex::new(Some(link)))
|
||||
},
|
||||
PeersClient::Light(ref client) => {
|
||||
use crate::light_import::tests::light_block_import_without_justifications;
|
||||
|
||||
let authorities_provider = Arc::new(self.test_config.clone());
|
||||
// forbid direct finalization using justification that cames with the block
|
||||
// => light clients will try to fetch finality proofs
|
||||
let import = light_block_import_without_justifications(
|
||||
client.clone(),
|
||||
authorities_provider,
|
||||
Arc::new(self.test_config.clone())
|
||||
).expect("Could not create block import for fresh peer.");
|
||||
let finality_proof_req_builder = import.0.create_finality_proof_request_builder();
|
||||
let shared_import = Arc::new(import);
|
||||
(shared_import.clone(), None, Some(shared_import), Some(finality_proof_req_builder), Mutex::new(None))
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn make_finality_proof_provider(&self, client: PeersClient) -> Option<Arc<network::FinalityProofProvider<Block>>> {
|
||||
match client {
|
||||
PeersClient::Full(ref client) => {
|
||||
let authorities_provider = Arc::new(self.test_config.clone());
|
||||
Some(Arc::new(FinalityProofProvider::new(client.clone(), authorities_provider)))
|
||||
},
|
||||
PeersClient::Light(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn peer(&self, i: usize) -> &GrandpaPeer {
|
||||
@@ -234,14 +271,14 @@ impl Future for Exit {
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
struct TestApi {
|
||||
pub(crate) struct TestApi {
|
||||
genesis_authorities: Vec<(AuthorityId, u64)>,
|
||||
scheduled_changes: Arc<Mutex<HashMap<Hash, ScheduledChange<BlockNumber>>>>,
|
||||
forced_changes: Arc<Mutex<HashMap<Hash, (BlockNumber, ScheduledChange<BlockNumber>)>>>,
|
||||
}
|
||||
|
||||
impl TestApi {
|
||||
fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self {
|
||||
pub fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self {
|
||||
TestApi {
|
||||
genesis_authorities,
|
||||
scheduled_changes: Arc::new(Mutex::new(HashMap::new())),
|
||||
@@ -250,7 +287,7 @@ impl TestApi {
|
||||
}
|
||||
}
|
||||
|
||||
struct RuntimeApi {
|
||||
pub(crate) struct RuntimeApi {
|
||||
inner: TestApi,
|
||||
}
|
||||
|
||||
@@ -327,16 +364,12 @@ impl ApiExt<Block> for RuntimeApi {
|
||||
impl GrandpaApi<Block> for RuntimeApi {
|
||||
fn GrandpaApi_grandpa_authorities_runtime_api_impl(
|
||||
&self,
|
||||
at: &BlockId<Block>,
|
||||
_: &BlockId<Block>,
|
||||
_: ExecutionContext,
|
||||
_: Option<()>,
|
||||
_: Vec<u8>,
|
||||
) -> Result<NativeOrEncoded<Vec<(substrate_primitives::ed25519::Public, u64)>>> {
|
||||
if at == &BlockId::Number(0) {
|
||||
Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native)
|
||||
} else {
|
||||
panic!("should generally only request genesis authorities")
|
||||
}
|
||||
Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native)
|
||||
}
|
||||
|
||||
fn GrandpaApi_grandpa_pending_change_runtime_api_impl(
|
||||
@@ -375,6 +408,33 @@ impl GrandpaApi<Block> for RuntimeApi {
|
||||
}
|
||||
}
|
||||
|
||||
impl AuthoritySetForFinalityProver<Block> for TestApi {
|
||||
fn authorities(&self, block: &BlockId<Block>) -> Result<Vec<(AuthorityId, u64)>> {
|
||||
let runtime_api = RuntimeApi { inner: self.clone() };
|
||||
runtime_api.GrandpaApi_grandpa_authorities_runtime_api_impl(block, ExecutionContext::Syncing, None, Vec::new())
|
||||
.map(|v| match v {
|
||||
NativeOrEncoded::Native(value) => value,
|
||||
_ => unreachable!("only providing native values"),
|
||||
})
|
||||
}
|
||||
|
||||
fn prove_authorities(&self, block: &BlockId<Block>) -> Result<Vec<Vec<u8>>> {
|
||||
self.authorities(block).map(|auth| vec![auth.encode()])
|
||||
}
|
||||
}
|
||||
|
||||
impl AuthoritySetForFinalityChecker<Block> for TestApi {
|
||||
fn check_authorities_proof(
|
||||
&self,
|
||||
_hash: <Block as BlockT>::Hash,
|
||||
_header: <Block as BlockT>::Header,
|
||||
proof: Vec<Vec<u8>>,
|
||||
) -> Result<Vec<(AuthorityId, u64)>> {
|
||||
Decode::decode(&mut &proof[0][..])
|
||||
.ok_or_else(|| unreachable!("incorrect value is passed as GRANDPA authorities proof"))
|
||||
}
|
||||
}
|
||||
|
||||
const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500);
|
||||
const TEST_ROUTING_INTERVAL: Duration = Duration::from_millis(50);
|
||||
|
||||
@@ -499,7 +559,7 @@ fn finalize_3_voters_no_observers() {
|
||||
run_to_completion(20, net.clone(), peers);
|
||||
|
||||
// normally there's no justification for finalized blocks
|
||||
assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(20)).unwrap().is_none(),
|
||||
assert!(net.lock().peer(0).client().justification(&BlockId::Number(20)).unwrap().is_none(),
|
||||
"Extra justification for block#1");
|
||||
}
|
||||
|
||||
@@ -602,11 +662,12 @@ fn transition_3_voters_twice_1_full_observer() {
|
||||
net.lock().sync();
|
||||
|
||||
for (i, peer) in net.lock().peers().iter().enumerate() {
|
||||
assert_eq!(peer.client().info().unwrap().chain.best_number, 1,
|
||||
let full_client = peer.client().as_full().expect("only full clients are used in test");
|
||||
assert_eq!(full_client.info().unwrap().chain.best_number, 1,
|
||||
"Peer #{} failed to sync", i);
|
||||
|
||||
let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
|
||||
&**peer.client().backend()
|
||||
&**full_client.backend()
|
||||
).unwrap();
|
||||
|
||||
assert_eq!(set.current(), (0, make_ids(peers_a).as_slice()));
|
||||
@@ -693,8 +754,9 @@ fn transition_3_voters_twice_1_full_observer() {
|
||||
.take_while(|n| Ok(n.header.number() < &30))
|
||||
.for_each(move |_| Ok(()))
|
||||
.map(move |()| {
|
||||
let full_client = client.as_full().expect("only full clients are used in test");
|
||||
let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
|
||||
&**client.backend()
|
||||
&**full_client.backend()
|
||||
).unwrap();
|
||||
|
||||
assert_eq!(set.current(), (2, make_ids(peers_c).as_slice()));
|
||||
@@ -749,8 +811,8 @@ fn justification_is_emitted_when_consensus_data_changes() {
|
||||
let net = Arc::new(Mutex::new(net));
|
||||
run_to_completion(1, net.clone(), peers);
|
||||
|
||||
// ... and check that there's no justification for block#1
|
||||
assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(1)).unwrap().is_some(),
|
||||
// ... and check that there's justification for block#1
|
||||
assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(),
|
||||
"Missing justification for block#1");
|
||||
}
|
||||
|
||||
@@ -769,8 +831,7 @@ fn justification_is_generated_periodically() {
|
||||
// when block#32 (justification_period) is finalized, justification
|
||||
// is required => generated
|
||||
for i in 0..3 {
|
||||
assert!(net.lock().peer(i).client().backend().blockchain()
|
||||
.justification(BlockId::Number(32)).unwrap().is_some());
|
||||
assert!(net.lock().peer(i).client().justification(&BlockId::Number(32)).unwrap().is_some());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -963,8 +1024,9 @@ fn force_change_to_new_set() {
|
||||
assert_eq!(peer.client().info().unwrap().chain.best_number, 26,
|
||||
"Peer #{} failed to sync", i);
|
||||
|
||||
let full_client = peer.client().as_full().expect("only full clients are used in test");
|
||||
let set: AuthoritySet<Hash, BlockNumber> = crate::aux_schema::load_authorities(
|
||||
&**peer.client().backend()
|
||||
&**full_client.backend()
|
||||
).unwrap();
|
||||
|
||||
assert_eq!(set.current(), (1, voters.as_slice()));
|
||||
@@ -991,7 +1053,8 @@ fn allows_reimporting_change_blocks() {
|
||||
let client = net.peer(0).client().clone();
|
||||
let (block_import, ..) = net.make_block_import(client.clone());
|
||||
|
||||
let builder = client.new_block_at(&BlockId::Number(0)).unwrap();
|
||||
let full_client = client.as_full().unwrap();
|
||||
let builder = full_client.new_block_at(&BlockId::Number(0)).unwrap();
|
||||
let block = builder.bake().unwrap();
|
||||
api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange {
|
||||
next_authorities: make_ids(peers_b),
|
||||
@@ -1014,7 +1077,12 @@ fn allows_reimporting_change_blocks() {
|
||||
|
||||
assert_eq!(
|
||||
block_import.import_block(block(), HashMap::new()).unwrap(),
|
||||
ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: false }),
|
||||
ImportResult::Imported(ImportedAux {
|
||||
needs_justification: true,
|
||||
clear_justification_requests: false,
|
||||
bad_justification: false,
|
||||
needs_finality_proof: false,
|
||||
}),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
@@ -1034,7 +1102,8 @@ fn test_bad_justification() {
|
||||
let client = net.peer(0).client().clone();
|
||||
let (block_import, ..) = net.make_block_import(client.clone());
|
||||
|
||||
let builder = client.new_block_at(&BlockId::Number(0)).unwrap();
|
||||
let full_client = client.as_full().expect("only full clients are used in test");
|
||||
let builder = full_client.new_block_at(&BlockId::Number(0)).unwrap();
|
||||
let block = builder.bake().unwrap();
|
||||
api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange {
|
||||
next_authorities: make_ids(peers_b),
|
||||
@@ -1057,7 +1126,12 @@ fn test_bad_justification() {
|
||||
|
||||
assert_eq!(
|
||||
block_import.import_block(block(), HashMap::new()).unwrap(),
|
||||
ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true }),
|
||||
ImportResult::Imported(ImportedAux {
|
||||
needs_justification: true,
|
||||
clear_justification_requests: false,
|
||||
bad_justification: true,
|
||||
..Default::default()
|
||||
}),
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
@@ -1102,7 +1176,7 @@ fn voter_persists_its_votes() {
|
||||
let net = net.clone();
|
||||
|
||||
let voter = future::loop_fn(voter_rx, move |rx| {
|
||||
let (_block_import, _, link) = net.lock().make_block_import(client.clone());
|
||||
let (_block_import, _, _, _, link) = net.lock().make_block_import(client.clone());
|
||||
let link = link.lock().take().unwrap();
|
||||
|
||||
let grandpa_params = GrandpaParams {
|
||||
@@ -1201,7 +1275,7 @@ fn voter_persists_its_votes() {
|
||||
"Peer #{} failed to sync", 0);
|
||||
|
||||
let block_30_hash =
|
||||
net.lock().peer(0).client().backend().blockchain().hash(30).unwrap().unwrap();
|
||||
net.lock().peer(0).client().as_full().unwrap().backend().blockchain().hash(30).unwrap().unwrap();
|
||||
|
||||
// we restart alice's voter
|
||||
voter_tx.unbounded_send(()).unwrap();
|
||||
@@ -1302,3 +1376,94 @@ fn finalize_3_voters_1_light_observer() {
|
||||
Some(Box::new(finality_notifications.map(|_| ())))
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() {
|
||||
let _ = ::env_logger::try_init();
|
||||
|
||||
let peers = &[AuthorityKeyring::Alice];
|
||||
let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1);
|
||||
net.add_light_peer(&GrandpaTestNet::default_config());
|
||||
|
||||
// import block#1 WITH consensus data change. Light client ignores justification
|
||||
// && instead fetches finality proof for block #1
|
||||
net.peer(0).push_authorities_change_block(vec![substrate_primitives::sr25519::Public::from_raw([42; 32])]);
|
||||
let net = Arc::new(Mutex::new(net));
|
||||
run_to_completion(1, net.clone(), peers);
|
||||
net.lock().sync_without_disconnects();
|
||||
|
||||
// check that the block#1 is finalized on light client
|
||||
while net.lock().peer(1).client().info().unwrap().chain.finalized_number != 1 {
|
||||
net.lock().tick_peer(1);
|
||||
net.lock().sync_without_disconnects();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() {
|
||||
// for debug: to ensure that without forced change light client will sync finality proof
|
||||
const FORCE_CHANGE: bool = true;
|
||||
|
||||
let _ = ::env_logger::try_init();
|
||||
|
||||
// two of these guys are offline.
|
||||
let genesis_authorities = if FORCE_CHANGE {
|
||||
vec![
|
||||
AuthorityKeyring::Alice,
|
||||
AuthorityKeyring::Bob,
|
||||
AuthorityKeyring::Charlie,
|
||||
AuthorityKeyring::One,
|
||||
AuthorityKeyring::Two,
|
||||
]
|
||||
} else {
|
||||
vec![
|
||||
AuthorityKeyring::Alice,
|
||||
AuthorityKeyring::Bob,
|
||||
AuthorityKeyring::Charlie,
|
||||
]
|
||||
};
|
||||
let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie];
|
||||
let api = TestApi::new(make_ids(&genesis_authorities));
|
||||
|
||||
let voters = make_ids(peers_a);
|
||||
let forced_transitions = api.forced_changes.clone();
|
||||
let net = GrandpaTestNet::new(api, 3);
|
||||
let net = Arc::new(Mutex::new(net));
|
||||
|
||||
let runner_net = net.clone();
|
||||
let add_blocks = move |_| {
|
||||
net.lock().peer(0).push_blocks(1, false); // best is #1
|
||||
|
||||
// add a forced transition at block 5.
|
||||
if FORCE_CHANGE {
|
||||
let parent_hash = net.lock().peer(0).client().info().unwrap().chain.best_hash;
|
||||
forced_transitions.lock().insert(parent_hash, (0, ScheduledChange {
|
||||
next_authorities: voters.clone(),
|
||||
delay: 3,
|
||||
}));
|
||||
}
|
||||
|
||||
// ensure block#10 enacts authorities set change => justification is generated
|
||||
// normally it will reach light client, but because of the forced change, it will not
|
||||
net.lock().peer(0).push_blocks(8, false); // best is #9
|
||||
net.lock().peer(0).push_authorities_change_block(
|
||||
vec![substrate_primitives::sr25519::Public::from_raw([42; 32])]
|
||||
); // #10
|
||||
net.lock().peer(0).push_blocks(1, false); // best is #11
|
||||
net.lock().sync_without_disconnects();
|
||||
|
||||
None
|
||||
};
|
||||
|
||||
// finalize block #11 on full clients
|
||||
run_to_completion_with(11, runner_net.clone(), peers_a, add_blocks);
|
||||
// request finalization by light client
|
||||
runner_net.lock().add_light_peer(&GrandpaTestNet::default_config());
|
||||
runner_net.lock().sync_without_disconnects();
|
||||
|
||||
// check block, finalized on light client
|
||||
assert_eq!(
|
||||
runner_net.lock().peer(3).client().info().unwrap().chain.finalized_number,
|
||||
if FORCE_CHANGE { 0 } else { 10 },
|
||||
);
|
||||
}
|
||||
@@ -68,6 +68,12 @@ pub trait Client<Block: BlockT>: Send + Sync {
|
||||
fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error>;
|
||||
}
|
||||
|
||||
/// Finality proof provider.
|
||||
pub trait FinalityProofProvider<Block: BlockT>: Send + Sync {
|
||||
/// Prove finality of the block.
|
||||
fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result<Option<Vec<u8>>, Error>;
|
||||
}
|
||||
|
||||
impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
|
||||
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
|
||||
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeKeyConfig, Secret};
|
||||
|
||||
use bitflags::bitflags;
|
||||
use crate::chain::Client;
|
||||
use crate::chain::{Client, FinalityProofProvider};
|
||||
use parity_codec;
|
||||
use crate::on_demand::OnDemandService;
|
||||
use runtime_primitives::traits::{Block as BlockT};
|
||||
@@ -34,6 +34,8 @@ pub struct Params<B: BlockT, S, H: ExHashT> {
|
||||
pub network_config: NetworkConfiguration,
|
||||
/// Substrate relay chain access point.
|
||||
pub chain: Arc<Client<B>>,
|
||||
/// Finality proof provider.
|
||||
pub finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
|
||||
/// On-demand service reference.
|
||||
pub on_demand: Option<Arc<OnDemandService<B>>>,
|
||||
/// Transaction pool.
|
||||
|
||||
@@ -0,0 +1,470 @@
|
||||
// Copyright 2017-2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::{HashMap, HashSet, VecDeque};
|
||||
use std::time::{Duration, Instant};
|
||||
use log::{trace, warn};
|
||||
use client::error::Error as ClientError;
|
||||
use consensus::import_queue::SharedFinalityProofRequestBuilder;
|
||||
use fork_tree::ForkTree;
|
||||
use network_libp2p::PeerId;
|
||||
use runtime_primitives::Justification;
|
||||
use runtime_primitives::traits::{Block as BlockT, NumberFor};
|
||||
use crate::message;
|
||||
use crate::protocol::Context;
|
||||
use crate::sync::{PeerSync, PeerSyncState};
|
||||
|
||||
// Time to wait before trying to get the same extra data from the same peer.
|
||||
const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10);
|
||||
|
||||
/// Pending extra data request for the given block (hash and number).
|
||||
type ExtraRequest<B> = (<B as BlockT>::Hash, NumberFor<B>);
|
||||
|
||||
/// Extra requests processor.
|
||||
pub(crate) trait ExtraRequestsEssence<B: BlockT> {
|
||||
type Response;
|
||||
|
||||
/// Name of request type to display in logs.
|
||||
fn type_name(&self) -> &'static str;
|
||||
/// Send network message corresponding to the request.
|
||||
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>);
|
||||
/// Create peer state for peer that is downloading extra data.
|
||||
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B>;
|
||||
}
|
||||
|
||||
/// Manages all extra data requests required for sync.
|
||||
pub(crate) struct ExtraRequestsAggregator<B: BlockT> {
|
||||
/// Manages justifications requests.
|
||||
justifications: ExtraRequests<B, JustificationsRequestsEssence>,
|
||||
/// Manages finality proof requests.
|
||||
finality_proofs: ExtraRequests<B, FinalityProofRequestsEssence<B>>,
|
||||
}
|
||||
|
||||
impl<B: BlockT> ExtraRequestsAggregator<B> {
|
||||
pub(crate) fn new() -> Self {
|
||||
ExtraRequestsAggregator {
|
||||
justifications: ExtraRequests::new(JustificationsRequestsEssence),
|
||||
finality_proofs: ExtraRequests::new(FinalityProofRequestsEssence(None)),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn justifications(&mut self) -> &mut ExtraRequests<B, JustificationsRequestsEssence> {
|
||||
&mut self.justifications
|
||||
}
|
||||
|
||||
pub(crate) fn finality_proofs(&mut self) -> &mut ExtraRequests<B, FinalityProofRequestsEssence<B>> {
|
||||
&mut self.finality_proofs
|
||||
}
|
||||
|
||||
/// Dispatches all possible pending requests to the given peers.
|
||||
pub(crate) fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
|
||||
self.justifications.dispatch(peers, protocol);
|
||||
self.finality_proofs.dispatch(peers, protocol);
|
||||
}
|
||||
|
||||
/// Removes any pending extra requests for blocks lower than the
|
||||
/// given best finalized.
|
||||
pub(crate) fn on_block_finalized<F>(
|
||||
&mut self,
|
||||
best_finalized_hash: &B::Hash,
|
||||
best_finalized_number: NumberFor<B>,
|
||||
is_descendent_of: &F,
|
||||
) -> Result<(), fork_tree::Error<ClientError>>
|
||||
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
|
||||
{
|
||||
self.justifications.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?;
|
||||
self.finality_proofs.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retry any pending request if a peer disconnected.
|
||||
pub(crate) fn peer_disconnected(&mut self, who: PeerId) {
|
||||
self.justifications.peer_disconnected(&who);
|
||||
self.finality_proofs.peer_disconnected(&who);
|
||||
}
|
||||
}
|
||||
|
||||
/// Manages pending block extra data (e.g. justification) requests.
|
||||
/// Multiple extras may be requested for competing forks, or for the same branch
|
||||
/// at different (increasing) heights. This structure will guarantee that extras
|
||||
/// are fetched in-order, and that obsolete changes are pruned (when finalizing a
|
||||
/// competing fork).
|
||||
pub(crate) struct ExtraRequests<B: BlockT, Essence> {
|
||||
tree: ForkTree<B::Hash, NumberFor<B>, ()>,
|
||||
pending_requests: VecDeque<ExtraRequest<B>>,
|
||||
peer_requests: HashMap<PeerId, ExtraRequest<B>>,
|
||||
previous_requests: HashMap<ExtraRequest<B>, Vec<(PeerId, Instant)>>,
|
||||
importing_requests: HashSet<ExtraRequest<B>>,
|
||||
essence: Essence,
|
||||
}
|
||||
|
||||
impl<B: BlockT, Essence: ExtraRequestsEssence<B>> ExtraRequests<B, Essence> {
|
||||
fn new(essence: Essence) -> Self {
|
||||
ExtraRequests {
|
||||
tree: ForkTree::new(),
|
||||
pending_requests: VecDeque::new(),
|
||||
peer_requests: HashMap::new(),
|
||||
previous_requests: HashMap::new(),
|
||||
importing_requests: HashSet::new(),
|
||||
essence,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get mutable reference to the requests essence.
|
||||
pub(crate) fn essence(&mut self) -> &mut Essence {
|
||||
&mut self.essence
|
||||
}
|
||||
|
||||
/// Dispatches all possible pending requests to the given peers. Peers are
|
||||
/// filtered according to the current known best block (i.e. we won't send a
|
||||
/// extra request for block #10 to a peer at block #2), and we also
|
||||
/// throttle requests to the same peer if a previous justification request
|
||||
/// yielded no results.
|
||||
pub(crate) fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
|
||||
if self.pending_requests.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let initial_pending_requests = self.pending_requests.len();
|
||||
|
||||
// clean up previous failed requests so we can retry again
|
||||
for (_, requests) in self.previous_requests.iter_mut() {
|
||||
requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT);
|
||||
}
|
||||
|
||||
let mut available_peers = peers.iter().filter_map(|(peer, sync)| {
|
||||
// don't request to any peers that already have pending requests or are unavailable
|
||||
if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) {
|
||||
None
|
||||
} else {
|
||||
Some((peer.clone(), sync.best_number))
|
||||
}
|
||||
}).collect::<VecDeque<_>>();
|
||||
|
||||
let mut last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
let mut unhandled_requests = VecDeque::new();
|
||||
|
||||
loop {
|
||||
let (peer, peer_best_number) = match available_peers.pop_front() {
|
||||
Some(p) => p,
|
||||
_ => break,
|
||||
};
|
||||
|
||||
// only ask peers that have synced past the block number that we're
|
||||
// asking the extra for and to whom we haven't already made
|
||||
// the same request recently
|
||||
let peer_eligible = {
|
||||
let request = match self.pending_requests.front() {
|
||||
Some(r) => r.clone(),
|
||||
_ => break,
|
||||
};
|
||||
|
||||
peer_best_number >= request.1 &&
|
||||
!self.previous_requests
|
||||
.get(&request)
|
||||
.map(|requests| requests.iter().any(|i| i.0 == peer))
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
if !peer_eligible {
|
||||
available_peers.push_back((peer.clone(), peer_best_number));
|
||||
|
||||
// we tried all peers and none can answer this request
|
||||
if Some(peer) == last_peer {
|
||||
last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
|
||||
let request = self.pending_requests.pop_front()
|
||||
.expect("verified to be Some in the beginning of the loop; qed");
|
||||
|
||||
unhandled_requests.push_back(request);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
|
||||
let request = self.pending_requests.pop_front()
|
||||
.expect("verified to be Some in the beginning of the loop; qed");
|
||||
|
||||
self.peer_requests.insert(peer.clone(), request);
|
||||
|
||||
peers.get_mut(&peer)
|
||||
.expect("peer was is taken from available_peers; available_peers is a subset of peers; qed")
|
||||
.state = self.essence.peer_downloading_state(request.0.clone());
|
||||
|
||||
trace!(target: "sync", "Requesting {} for block #{} from {}", self.essence.type_name(), request.0, peer);
|
||||
self.essence.send_network_request(protocol, peer, request);
|
||||
}
|
||||
|
||||
self.pending_requests.append(&mut unhandled_requests);
|
||||
|
||||
trace!(target: "sync", "Dispatched {} {} requests ({} pending)",
|
||||
initial_pending_requests - self.pending_requests.len(),
|
||||
self.essence.type_name(),
|
||||
self.pending_requests.len(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Queue a extra data request (without dispatching it).
|
||||
pub(crate) fn queue_request<F>(&mut self, request: ExtraRequest<B>, is_descendent_of: F)
|
||||
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
|
||||
{
|
||||
match self.tree.import(request.0.clone(), request.1.clone(), (), &is_descendent_of) {
|
||||
Ok(true) => {
|
||||
// this is a new root so we add it to the current `pending_requests`
|
||||
self.pending_requests.push_back((request.0, request.1));
|
||||
},
|
||||
Err(err) => {
|
||||
warn!(target: "sync", "Failed to insert requested {} {:?} {:?} into tree: {:?}",
|
||||
self.essence.type_name(),
|
||||
request.0,
|
||||
request.1,
|
||||
err,
|
||||
);
|
||||
return;
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// Retry any pending request if a peer disconnected.
|
||||
fn peer_disconnected(&mut self, who: &PeerId) {
|
||||
if let Some(request) = self.peer_requests.remove(who) {
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
}
|
||||
|
||||
/// Process the import result of an extra.
|
||||
/// Queues a retry in case the import failed.
|
||||
/// Returns true if import has been queued.
|
||||
pub(crate) fn on_import_result(
|
||||
&mut self,
|
||||
request: (B::Hash, NumberFor<B>),
|
||||
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
|
||||
) -> bool {
|
||||
self.try_finalize_root(request, finalization_result, true)
|
||||
}
|
||||
|
||||
/// Processes the response for the request previously sent to the given
|
||||
/// peer. Queues a retry in case the given justification
|
||||
/// was `None`.
|
||||
pub(crate) fn on_response(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
response: Option<Essence::Response>,
|
||||
) -> Option<(PeerId, B::Hash, NumberFor<B>, Essence::Response)> {
|
||||
// we assume that the request maps to the given response, this is
|
||||
// currently enforced by the outer network protocol before passing on
|
||||
// messages to chain sync.
|
||||
if let Some(request) = self.peer_requests.remove(&who) {
|
||||
if let Some(response) = response {
|
||||
self.importing_requests.insert(request);
|
||||
return Some((who, request.0, request.1, response));
|
||||
}
|
||||
|
||||
self.previous_requests
|
||||
.entry(request)
|
||||
.or_insert(Vec::new())
|
||||
.push((who, Instant::now()));
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Removes any pending extra requests for blocks lower than the
|
||||
/// given best finalized.
|
||||
fn on_block_finalized<F>(
|
||||
&mut self,
|
||||
best_finalized_hash: &B::Hash,
|
||||
best_finalized_number: NumberFor<B>,
|
||||
is_descendent_of: F,
|
||||
) -> Result<(), fork_tree::Error<ClientError>>
|
||||
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
|
||||
{
|
||||
let is_scheduled_root = self.try_finalize_root(
|
||||
(*best_finalized_hash, best_finalized_number),
|
||||
Ok((*best_finalized_hash, best_finalized_number)),
|
||||
false,
|
||||
);
|
||||
if is_scheduled_root {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
self.tree.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?;
|
||||
|
||||
let roots = self.tree.roots().collect::<HashSet<_>>();
|
||||
|
||||
self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
|
||||
self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
|
||||
self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear all data.
|
||||
pub(crate) fn clear(&mut self) {
|
||||
self.tree = ForkTree::new();
|
||||
self.pending_requests.clear();
|
||||
self.peer_requests.clear();
|
||||
self.previous_requests.clear();
|
||||
}
|
||||
|
||||
/// Try to finalize pending root.
|
||||
/// Returns true if import of this request has been scheduled.
|
||||
fn try_finalize_root(
|
||||
&mut self,
|
||||
request: (B::Hash, NumberFor<B>),
|
||||
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
|
||||
reschedule_on_failure: bool,
|
||||
) -> bool {
|
||||
if !self.importing_requests.remove(&request) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let (finalized_hash, finalized_number) = match finalization_result {
|
||||
Ok((finalized_hash, finalized_number)) => (finalized_hash, finalized_number),
|
||||
Err(_) => {
|
||||
if reschedule_on_failure {
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
return true;
|
||||
},
|
||||
};
|
||||
|
||||
if self.tree.finalize_root(&finalized_hash).is_none() {
|
||||
warn!(target: "sync", "Imported {} for {:?} {:?} which isn't a root in the tree: {:?}",
|
||||
self.essence.type_name(),
|
||||
finalized_hash,
|
||||
finalized_number,
|
||||
self.tree.roots().collect::<Vec<_>>(),
|
||||
);
|
||||
return true;
|
||||
};
|
||||
|
||||
self.previous_requests.clear();
|
||||
self.peer_requests.clear();
|
||||
self.pending_requests =
|
||||
self.tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect();
|
||||
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct JustificationsRequestsEssence;
|
||||
|
||||
impl<B: BlockT> ExtraRequestsEssence<B> for JustificationsRequestsEssence {
|
||||
type Response = Justification;
|
||||
|
||||
fn type_name(&self) -> &'static str {
|
||||
"justification"
|
||||
}
|
||||
|
||||
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>) {
|
||||
protocol.send_block_request(peer, message::generic::BlockRequest {
|
||||
id: 0,
|
||||
fields: message::BlockAttributes::JUSTIFICATION,
|
||||
from: message::FromBlock::Hash(request.0),
|
||||
to: None,
|
||||
direction: message::Direction::Ascending,
|
||||
max: Some(1),
|
||||
})
|
||||
}
|
||||
|
||||
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B> {
|
||||
PeerSyncState::DownloadingJustification(block)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct FinalityProofRequestsEssence<B: BlockT>(pub Option<SharedFinalityProofRequestBuilder<B>>);
|
||||
|
||||
impl<B: BlockT> ExtraRequestsEssence<B> for FinalityProofRequestsEssence<B> {
|
||||
type Response = Vec<u8>;
|
||||
|
||||
fn type_name(&self) -> &'static str {
|
||||
"finality proof"
|
||||
}
|
||||
|
||||
fn send_network_request(&self, protocol: &mut Context<B>, peer: PeerId, request: ExtraRequest<B>) {
|
||||
protocol.send_finality_proof_request(peer, message::generic::FinalityProofRequest {
|
||||
id: 0,
|
||||
block: request.0,
|
||||
request: self.0.as_ref()
|
||||
.map(|builder| builder.build_request_data(&request.0))
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
}
|
||||
|
||||
fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState<B> {
|
||||
PeerSyncState::DownloadingFinalityProof(block)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use client::error::Error as ClientError;
|
||||
use test_client::runtime::{Block, Hash};
|
||||
use super::ExtraRequestsAggregator;
|
||||
|
||||
#[test]
|
||||
fn request_is_rescheduled_when_earlier_block_is_finalized() {
|
||||
let _ = ::env_logger::try_init();
|
||||
|
||||
let mut extra_requests = ExtraRequestsAggregator::<Block>::new();
|
||||
|
||||
let hash4 = [4; 32].into();
|
||||
let hash5 = [5; 32].into();
|
||||
let hash6 = [6; 32].into();
|
||||
let hash7 = [7; 32].into();
|
||||
|
||||
fn is_descendent_of(base: &Hash, target: &Hash) -> Result<bool, ClientError> {
|
||||
Ok(target[0] >= base[0])
|
||||
}
|
||||
|
||||
// make #4 last finalized block
|
||||
extra_requests.finality_proofs().tree.import(hash4, 4, (), &is_descendent_of).unwrap();
|
||||
extra_requests.finality_proofs().tree.finalize_root(&hash4);
|
||||
|
||||
// schedule request for #6
|
||||
extra_requests.finality_proofs().queue_request((hash6, 6), is_descendent_of);
|
||||
|
||||
// receive finality proof for #5
|
||||
extra_requests.finality_proofs().importing_requests.insert((hash6, 6));
|
||||
extra_requests.finality_proofs().on_block_finalized(&hash5, 5, is_descendent_of).unwrap();
|
||||
extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash5, 5)));
|
||||
|
||||
// ensure that request for #6 is still pending
|
||||
assert_eq!(
|
||||
extra_requests.finality_proofs().pending_requests.iter().collect::<Vec<_>>(),
|
||||
vec![&(hash6, 6)],
|
||||
);
|
||||
|
||||
// receive finality proof for #7
|
||||
extra_requests.finality_proofs().importing_requests.insert((hash6, 6));
|
||||
extra_requests.finality_proofs().on_block_finalized(&hash6, 6, is_descendent_of).unwrap();
|
||||
extra_requests.finality_proofs().on_block_finalized(&hash7, 7, is_descendent_of).unwrap();
|
||||
extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash7, 7)));
|
||||
|
||||
// ensure that there's no request for #6
|
||||
assert_eq!(
|
||||
extra_requests.finality_proofs().pending_requests.iter().collect::<Vec<_>>(),
|
||||
Vec::<&(Hash, u64)>::new(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -30,6 +30,7 @@ mod protocol;
|
||||
mod chain;
|
||||
mod blocks;
|
||||
mod on_demand;
|
||||
mod extra_requests;
|
||||
mod util;
|
||||
pub mod config;
|
||||
pub mod consensus_gossip;
|
||||
@@ -40,7 +41,7 @@ pub mod specialization;
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
pub mod test;
|
||||
|
||||
pub use chain::Client as ClientHandle;
|
||||
pub use chain::{Client as ClientHandle, FinalityProofProvider};
|
||||
pub use service::{
|
||||
Service, FetchFuture, TransactionPool, ManageNetwork, NetworkMsg,
|
||||
SyncProvider, ExHashT, ReportHandle,
|
||||
|
||||
@@ -23,6 +23,7 @@ pub use self::generic::{
|
||||
BlockAnnounce, RemoteCallRequest, RemoteReadRequest,
|
||||
RemoteHeaderRequest, RemoteHeaderResponse,
|
||||
RemoteChangesRequest, RemoteChangesResponse,
|
||||
FinalityProofRequest, FinalityProofResponse,
|
||||
FromBlock, RemoteReadChildRequest,
|
||||
};
|
||||
|
||||
@@ -200,6 +201,10 @@ pub mod generic {
|
||||
RemoteChangesResponse(RemoteChangesResponse<Number, Hash>),
|
||||
/// Remote child storage read request.
|
||||
RemoteReadChildRequest(RemoteReadChildRequest<Hash>),
|
||||
/// Finality proof request.
|
||||
FinalityProofRequest(FinalityProofRequest<Hash>),
|
||||
/// Finality proof reponse.
|
||||
FinalityProofResponse(FinalityProofResponse<Hash>),
|
||||
/// Chain-specific message
|
||||
#[codec(index = "255")]
|
||||
ChainSpecific(Vec<u8>),
|
||||
@@ -359,4 +364,26 @@ pub mod generic {
|
||||
/// Missing changes tries roots proof.
|
||||
pub roots_proof: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
|
||||
/// Finality proof request.
|
||||
pub struct FinalityProofRequest<H> {
|
||||
/// Unique request id.
|
||||
pub id: RequestId,
|
||||
/// Hash of the block to request proof for.
|
||||
pub block: H,
|
||||
/// Additional data blob (that both requester and provider understood) required for proving finality.
|
||||
pub request: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
|
||||
/// Finality proof response.
|
||||
pub struct FinalityProofResponse<H> {
|
||||
/// Id of a request this response was made for.
|
||||
pub id: RequestId,
|
||||
/// Hash of the block (the same as in the FinalityProofRequest).
|
||||
pub block: H,
|
||||
/// Finality proof (if available).
|
||||
pub proof: Option<Vec<u8>>,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,6 +155,11 @@ impl<B: BlockT> OnDemand<B> where
|
||||
}
|
||||
}
|
||||
|
||||
/// Get checker reference.
|
||||
pub fn checker(&self) -> &Arc<FetchChecker<B>> {
|
||||
&self.checker
|
||||
}
|
||||
|
||||
/// Sets weak reference to network service.
|
||||
pub fn set_network_sender(&self, network_sender: NetworkChan<B>) {
|
||||
self.network_sender.lock().replace(network_sender);
|
||||
|
||||
@@ -20,7 +20,11 @@ use primitives::storage::StorageKey;
|
||||
use consensus::{import_queue::IncomingBlock, import_queue::Origin, BlockOrigin};
|
||||
use runtime_primitives::{generic::BlockId, ConsensusEngineId, Justification};
|
||||
use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, Zero};
|
||||
use crate::message::{self, BlockRequest as BlockRequestMessage, Message};
|
||||
use consensus::import_queue::SharedFinalityProofRequestBuilder;
|
||||
use crate::message::{
|
||||
self, BlockRequest as BlockRequestMessage,
|
||||
FinalityProofRequest as FinalityProofRequestMessage, Message,
|
||||
};
|
||||
use crate::message::generic::{Message as GenericMessage, ConsensusMessage};
|
||||
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient};
|
||||
use crate::on_demand::OnDemandService;
|
||||
@@ -34,7 +38,7 @@ use std::collections::{BTreeMap, HashMap};
|
||||
use std::sync::Arc;
|
||||
use std::{cmp, num::NonZeroUsize, time};
|
||||
use log::{trace, debug, warn, error};
|
||||
use crate::chain::Client;
|
||||
use crate::chain::{Client, FinalityProofProvider};
|
||||
use client::light::fetcher::ChangesProof;
|
||||
use crate::{error, util::LruHashSet};
|
||||
|
||||
@@ -163,6 +167,9 @@ pub trait Context<B: BlockT> {
|
||||
/// Request a block from a peer.
|
||||
fn send_block_request(&mut self, who: PeerId, request: BlockRequestMessage<B>);
|
||||
|
||||
/// Request a finality proof from a peer.
|
||||
fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage<B::Hash>);
|
||||
|
||||
/// Send a consensus message to a peer.
|
||||
fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage);
|
||||
|
||||
@@ -205,6 +212,12 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context<B> for ProtocolContext<'a, B,
|
||||
)
|
||||
}
|
||||
|
||||
fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage<B::Hash>) {
|
||||
send_message(&mut self.context_data.peers, &self.network_chan, who,
|
||||
GenericMessage::FinalityProofRequest(request)
|
||||
)
|
||||
}
|
||||
|
||||
fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage) {
|
||||
send_message(&mut self.context_data.peers, &self.network_chan, who,
|
||||
GenericMessage::Consensus(consensus)
|
||||
@@ -223,6 +236,7 @@ struct ContextData<B: BlockT, H: ExHashT> {
|
||||
// All connected peers
|
||||
peers: HashMap<PeerId, Peer<B, H>>,
|
||||
pub chain: Arc<Client<B>>,
|
||||
pub finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
|
||||
}
|
||||
|
||||
/// A task, consisting of a user-provided closure, to be executed on the Protocol thread.
|
||||
@@ -263,6 +277,12 @@ pub enum ProtocolMsg<B: BlockT, S: NetworkSpecialization<B>> {
|
||||
RequestJustification(B::Hash, NumberFor<B>),
|
||||
/// Inform protocol whether a justification was successfully imported.
|
||||
JustificationImportResult(B::Hash, NumberFor<B>, bool),
|
||||
/// Set finality proof request builder.
|
||||
SetFinalityProofRequestBuilder(SharedFinalityProofRequestBuilder<B>),
|
||||
/// Tell protocol to request finality proof for a block.
|
||||
RequestFinalityProof(B::Hash, NumberFor<B>),
|
||||
/// Inform protocol whether a finality proof was successfully imported.
|
||||
FinalityProofImportResult((B::Hash, NumberFor<B>), Result<(B::Hash, NumberFor<B>), ()>),
|
||||
/// Propagate a block to peers.
|
||||
AnnounceBlock(B::Hash),
|
||||
/// A block has been imported (sent by the client).
|
||||
@@ -290,6 +310,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
network_chan: NetworkChan<B>,
|
||||
config: ProtocolConfig,
|
||||
chain: Arc<Client<B>>,
|
||||
finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>,
|
||||
on_demand: Option<Arc<OnDemandService<B>>>,
|
||||
transaction_pool: Arc<TransactionPool<H, B>>,
|
||||
specialization: S,
|
||||
@@ -306,6 +327,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
context_data: ContextData {
|
||||
peers: HashMap::new(),
|
||||
chain,
|
||||
finality_proof_provider,
|
||||
},
|
||||
on_demand,
|
||||
genesis_hash: info.chain.genesis_hash,
|
||||
@@ -408,6 +430,16 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
self.sync.request_justification(&hash, number, &mut context);
|
||||
},
|
||||
ProtocolMsg::JustificationImportResult(hash, number, success) => self.sync.justification_import_result(hash, number, success),
|
||||
ProtocolMsg::SetFinalityProofRequestBuilder(builder) => self.sync.set_finality_proof_request_builder(builder),
|
||||
ProtocolMsg::RequestFinalityProof(hash, number) => {
|
||||
let mut context =
|
||||
ProtocolContext::new(&mut self.context_data, &self.network_chan);
|
||||
self.sync.request_finality_proof(&hash, number, &mut context);
|
||||
},
|
||||
ProtocolMsg::FinalityProofImportResult(
|
||||
requested_block,
|
||||
finalziation_result,
|
||||
) => self.sync.finality_proof_import_result(requested_block, finalziation_result),
|
||||
ProtocolMsg::PropagateExtrinsics => self.propagate_extrinsics(),
|
||||
#[cfg(any(test, feature = "test-helpers"))]
|
||||
ProtocolMsg::Tick => self.tick(),
|
||||
@@ -476,6 +508,8 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
GenericMessage::RemoteHeaderResponse(response) => self.on_remote_header_response(who, response),
|
||||
GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(who, request),
|
||||
GenericMessage::RemoteChangesResponse(response) => self.on_remote_changes_response(who, response),
|
||||
GenericMessage::FinalityProofRequest(request) => self.on_finality_proof_request(who, request),
|
||||
GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, response),
|
||||
GenericMessage::Consensus(msg) => {
|
||||
if self.context_data.peers.get(&who).map_or(false, |peer| peer.info.protocol_version > 2) {
|
||||
self.consensus_gossip.on_incoming(
|
||||
@@ -1099,6 +1133,53 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
.as_ref()
|
||||
.map(|s| s.on_remote_changes_response(who, response));
|
||||
}
|
||||
|
||||
fn on_finality_proof_request(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
request: message::FinalityProofRequest<B::Hash>,
|
||||
) {
|
||||
trace!(target: "sync", "Finality proof request from {} for {}", who, request.block);
|
||||
let finality_proof = self.context_data.finality_proof_provider.as_ref()
|
||||
.ok_or_else(|| String::from("Finality provider is not configured"))
|
||||
.and_then(|provider| provider.prove_finality(request.block, &request.request)
|
||||
.map_err(|e| e.to_string()));
|
||||
let finality_proof = match finality_proof {
|
||||
Ok(finality_proof) => finality_proof,
|
||||
Err(error) => {
|
||||
trace!(target: "sync", "Finality proof request from {} for {} failed with: {}",
|
||||
who, request.block, error);
|
||||
None
|
||||
},
|
||||
};
|
||||
self.send_message(
|
||||
who,
|
||||
GenericMessage::FinalityProofResponse(message::FinalityProofResponse {
|
||||
id: 0,
|
||||
block: request.block,
|
||||
proof: finality_proof,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
fn on_finality_proof_response(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
response: message::FinalityProofResponse<B::Hash>,
|
||||
) -> CustomMessageOutcome<B> {
|
||||
trace!(target: "sync", "Finality proof response from {} for {}", who, response.block);
|
||||
let outcome = self.sync.on_block_finality_proof_data(
|
||||
&mut ProtocolContext::new(&mut self.context_data, &self.network_chan),
|
||||
who,
|
||||
response,
|
||||
);
|
||||
|
||||
if let Some((origin, hash, nb, proof)) = outcome {
|
||||
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof)
|
||||
} else {
|
||||
CustomMessageOutcome::None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Outcome of an incoming custom message.
|
||||
@@ -1106,6 +1187,7 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
|
||||
pub enum CustomMessageOutcome<B: BlockT> {
|
||||
BlockImport(BlockOrigin, Vec<IncomingBlock<B>>),
|
||||
JustificationImport(Origin, B::Hash, NumberFor<B>, Justification),
|
||||
FinalityProofImport(Origin, B::Hash, NumberFor<B>, Vec<u8>),
|
||||
None,
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ use network_libp2p::{ProtocolId, NetworkConfiguration};
|
||||
use network_libp2p::{start_service, parse_str_addr, Service as NetworkService, ServiceEvent as NetworkServiceEvent};
|
||||
use network_libp2p::{RegisteredProtocol, NetworkState};
|
||||
use peerset::PeersetHandle;
|
||||
use consensus::import_queue::{ImportQueue, Link};
|
||||
use consensus::import_queue::{ImportQueue, Link, SharedFinalityProofRequestBuilder};
|
||||
use runtime_primitives::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId};
|
||||
|
||||
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient};
|
||||
@@ -115,6 +115,31 @@ impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
|
||||
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestJustification(hash.clone(), number));
|
||||
}
|
||||
|
||||
fn request_finality_proof(&self, hash: &B::Hash, number: NumberFor<B>) {
|
||||
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestFinalityProof(
|
||||
hash.clone(),
|
||||
number,
|
||||
));
|
||||
}
|
||||
|
||||
fn finality_proof_imported(
|
||||
&self,
|
||||
who: PeerId,
|
||||
request_block: (B::Hash, NumberFor<B>),
|
||||
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
|
||||
) {
|
||||
let success = finalization_result.is_ok();
|
||||
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::FinalityProofImportResult(
|
||||
request_block,
|
||||
finalization_result,
|
||||
));
|
||||
if !success {
|
||||
info!("Invalid finality proof provided by {} for #{}", who, request_block.0);
|
||||
let _ = self.network_sender.send(NetworkMsg::ReportPeer(who.clone(), i32::min_value()));
|
||||
let _ = self.network_sender.send(NetworkMsg::DisconnectPeer(who.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
fn report_peer(&self, who: PeerId, reputation_change: i32) {
|
||||
self.network_sender.send(NetworkMsg::ReportPeer(who, reputation_change));
|
||||
}
|
||||
@@ -122,6 +147,10 @@ impl<B: BlockT, S: NetworkSpecialization<B>> Link<B> for NetworkLink<B, S> {
|
||||
fn restart(&self) {
|
||||
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RestartSync);
|
||||
}
|
||||
|
||||
fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder<B>) {
|
||||
let _ = self.protocol_sender.unbounded_send(ProtocolMsg::SetFinalityProofRequestBuilder(request_builder));
|
||||
}
|
||||
}
|
||||
|
||||
/// A cloneable handle for reporting cost/benefits of peers.
|
||||
@@ -179,6 +208,7 @@ impl<B: BlockT + 'static, S: NetworkSpecialization<B>> Service<B, S> {
|
||||
network_chan.clone(),
|
||||
params.config,
|
||||
params.chain,
|
||||
params.finality_proof_provider,
|
||||
params.on_demand,
|
||||
params.transaction_pool,
|
||||
params.specialization,
|
||||
@@ -593,6 +623,8 @@ fn run_thread<B: BlockT + 'static, S: NetworkSpecialization<B>, H: ExHashT>(
|
||||
import_queue.import_blocks(origin, blocks),
|
||||
CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) =>
|
||||
import_queue.import_justification(origin, hash, nb, justification),
|
||||
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) =>
|
||||
import_queue.import_finality_proof(origin, hash, nb, proof),
|
||||
CustomMessageOutcome::None => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,18 +32,16 @@
|
||||
|
||||
use std::cmp::max;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::time::{Duration, Instant};
|
||||
use log::{debug, trace, info, warn};
|
||||
use log::{debug, trace, warn, info};
|
||||
use crate::protocol::Context;
|
||||
use fork_tree::ForkTree;
|
||||
use network_libp2p::PeerId;
|
||||
use client::{BlockStatus, ClientInfo};
|
||||
use consensus::{BlockOrigin, import_queue::IncomingBlock};
|
||||
use consensus::{BlockOrigin, import_queue::{IncomingBlock, SharedFinalityProofRequestBuilder}};
|
||||
use client::error::Error as ClientError;
|
||||
use crate::blocks::BlockCollection;
|
||||
use runtime_primitives::Justification;
|
||||
use crate::extra_requests::ExtraRequestsAggregator;
|
||||
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, CheckedSub};
|
||||
use runtime_primitives::generic::BlockId;
|
||||
use runtime_primitives::{Justification, generic::BlockId};
|
||||
use crate::message;
|
||||
use crate::config::Roles;
|
||||
use std::collections::HashSet;
|
||||
@@ -54,8 +52,6 @@ const MAX_BLOCKS_TO_REQUEST: usize = 128;
|
||||
const MAX_IMPORTING_BLOCKS: usize = 2048;
|
||||
// Number of blocks in the queue that prevents ancestry search.
|
||||
const MAJOR_SYNC_BLOCKS: usize = 5;
|
||||
// Time to wait before trying to get a justification from the same peer.
|
||||
const JUSTIFICATION_RETRY_WAIT: Duration = Duration::from_secs(10);
|
||||
// Number of recently announced blocks to track for each peer.
|
||||
const ANNOUNCE_HISTORY_SIZE: usize = 64;
|
||||
// Max number of blocks to download for unknown forks.
|
||||
@@ -68,7 +64,7 @@ const ANCESTRY_BLOCK_ERROR_REPUTATION_CHANGE: i32 = -(1 << 9);
|
||||
const GENESIS_MISMATCH_REPUTATION_CHANGE: i32 = i32::min_value() + 1;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PeerSync<B: BlockT> {
|
||||
pub(crate) struct PeerSync<B: BlockT> {
|
||||
pub common_number: NumberFor<B>,
|
||||
pub best_hash: B::Hash,
|
||||
pub best_number: NumberFor<B>,
|
||||
@@ -86,7 +82,7 @@ pub(crate) struct PeerInfo<B: BlockT> {
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
enum AncestorSearchState<B: BlockT> {
|
||||
pub(crate) enum AncestorSearchState<B: BlockT> {
|
||||
/// Use exponential backoff to find an ancestor, then switch to binary search.
|
||||
/// We keep track of the exponent.
|
||||
ExponentialBackoff(NumberFor<B>),
|
||||
@@ -96,270 +92,13 @@ enum AncestorSearchState<B: BlockT> {
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
enum PeerSyncState<B: BlockT> {
|
||||
pub(crate) enum PeerSyncState<B: BlockT> {
|
||||
AncestorSearch(NumberFor<B>, AncestorSearchState<B>),
|
||||
Available,
|
||||
DownloadingNew(NumberFor<B>),
|
||||
DownloadingStale(B::Hash),
|
||||
DownloadingJustification(B::Hash),
|
||||
}
|
||||
|
||||
/// Pending justification request for the given block (hash and number).
|
||||
type PendingJustification<B> = (<B as BlockT>::Hash, NumberFor<B>);
|
||||
|
||||
/// Manages pending block justification requests. Multiple justifications may be
|
||||
/// requested for competing forks, or for the same branch at different
|
||||
/// (increasing) heights. This structure will guarantee that justifications are
|
||||
/// fetched in-order, and that obsolete changes are pruned (when finalizing a
|
||||
/// competing fork).
|
||||
struct PendingJustifications<B: BlockT> {
|
||||
justifications: ForkTree<B::Hash, NumberFor<B>, ()>,
|
||||
pending_requests: VecDeque<PendingJustification<B>>,
|
||||
peer_requests: HashMap<PeerId, PendingJustification<B>>,
|
||||
previous_requests: HashMap<PendingJustification<B>, Vec<(PeerId, Instant)>>,
|
||||
importing_requests: HashSet<PendingJustification<B>>,
|
||||
}
|
||||
|
||||
impl<B: BlockT> PendingJustifications<B> {
|
||||
fn new() -> PendingJustifications<B> {
|
||||
PendingJustifications {
|
||||
justifications: ForkTree::new(),
|
||||
pending_requests: VecDeque::new(),
|
||||
peer_requests: HashMap::new(),
|
||||
previous_requests: HashMap::new(),
|
||||
importing_requests: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Dispatches all possible pending requests to the given peers. Peers are
|
||||
/// filtered according to the current known best block (i.e. we won't send a
|
||||
/// justification request for block #10 to a peer at block #2), and we also
|
||||
/// throttle requests to the same peer if a previous justification request
|
||||
/// yielded no results.
|
||||
fn dispatch(&mut self, peers: &mut HashMap<PeerId, PeerSync<B>>, protocol: &mut Context<B>) {
|
||||
if self.pending_requests.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let initial_pending_requests = self.pending_requests.len();
|
||||
|
||||
// clean up previous failed requests so we can retry again
|
||||
for (_, requests) in self.previous_requests.iter_mut() {
|
||||
requests.retain(|(_, instant)| instant.elapsed() < JUSTIFICATION_RETRY_WAIT);
|
||||
}
|
||||
|
||||
let mut available_peers = peers.iter().filter_map(|(peer, sync)| {
|
||||
// don't request to any peers that already have pending requests or are unavailable
|
||||
if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) {
|
||||
None
|
||||
} else {
|
||||
Some((peer.clone(), sync.best_number))
|
||||
}
|
||||
}).collect::<VecDeque<_>>();
|
||||
|
||||
let mut last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
let mut unhandled_requests = VecDeque::new();
|
||||
|
||||
loop {
|
||||
let (peer, peer_best_number) = match available_peers.pop_front() {
|
||||
Some(p) => p,
|
||||
_ => break,
|
||||
};
|
||||
|
||||
// only ask peers that have synced past the block number that we're
|
||||
// asking the justification for and to whom we haven't already made
|
||||
// the same request recently
|
||||
let peer_eligible = {
|
||||
let request = match self.pending_requests.front() {
|
||||
Some(r) => r.clone(),
|
||||
_ => break,
|
||||
};
|
||||
|
||||
peer_best_number >= request.1 &&
|
||||
!self.previous_requests
|
||||
.get(&request)
|
||||
.map(|requests| requests.iter().any(|i| i.0 == peer))
|
||||
.unwrap_or(false)
|
||||
};
|
||||
|
||||
if !peer_eligible {
|
||||
available_peers.push_back((peer.clone(), peer_best_number));
|
||||
|
||||
// we tried all peers and none can answer this request
|
||||
if Some(peer) == last_peer {
|
||||
last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
|
||||
let request = self.pending_requests.pop_front()
|
||||
.expect("verified to be Some in the beginning of the loop; qed");
|
||||
|
||||
unhandled_requests.push_back(request);
|
||||
}
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
last_peer = available_peers.back().map(|p| p.0.clone());
|
||||
|
||||
let request = self.pending_requests.pop_front()
|
||||
.expect("verified to be Some in the beginning of the loop; qed");
|
||||
|
||||
self.peer_requests.insert(peer.clone(), request);
|
||||
|
||||
peers.get_mut(&peer)
|
||||
.expect("peer was is taken from available_peers; available_peers is a subset of peers; qed")
|
||||
.state = PeerSyncState::DownloadingJustification(request.0);
|
||||
|
||||
trace!(target: "sync", "Requesting justification for block #{} from {}", request.0, peer);
|
||||
let request = message::generic::BlockRequest {
|
||||
id: 0,
|
||||
fields: message::BlockAttributes::JUSTIFICATION,
|
||||
from: message::FromBlock::Hash(request.0),
|
||||
to: None,
|
||||
direction: message::Direction::Ascending,
|
||||
max: Some(1),
|
||||
};
|
||||
|
||||
protocol.send_block_request(peer, request);
|
||||
}
|
||||
|
||||
self.pending_requests.append(&mut unhandled_requests);
|
||||
|
||||
trace!(target: "sync", "Dispatched {} justification requests ({} pending)",
|
||||
initial_pending_requests - self.pending_requests.len(),
|
||||
self.pending_requests.len(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Queue a justification request (without dispatching it).
|
||||
fn queue_request<F>(
|
||||
&mut self,
|
||||
justification: &PendingJustification<B>,
|
||||
is_descendent_of: F,
|
||||
) where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError> {
|
||||
match self.justifications.import(justification.0.clone(), justification.1.clone(), (), &is_descendent_of) {
|
||||
Ok(true) => {
|
||||
// this is a new root so we add it to the current `pending_requests`
|
||||
self.pending_requests.push_back((justification.0, justification.1));
|
||||
},
|
||||
Err(err) => {
|
||||
warn!(target: "sync", "Failed to insert requested justification {:?} {:?} into tree: {:?}",
|
||||
justification.0,
|
||||
justification.1,
|
||||
err,
|
||||
);
|
||||
return;
|
||||
},
|
||||
_ => {},
|
||||
};
|
||||
}
|
||||
|
||||
/// Retry any pending request if a peer disconnected.
|
||||
fn peer_disconnected(&mut self, who: PeerId) {
|
||||
if let Some(request) = self.peer_requests.remove(&who) {
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
}
|
||||
|
||||
/// Process the import of a justification.
|
||||
/// Queues a retry in case the import failed.
|
||||
fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
|
||||
let request = (hash, number);
|
||||
|
||||
if !self.importing_requests.remove(&request) {
|
||||
debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.",
|
||||
request.0,
|
||||
request.1,
|
||||
);
|
||||
|
||||
return;
|
||||
};
|
||||
|
||||
if success {
|
||||
if self.justifications.finalize_root(&request.0).is_none() {
|
||||
warn!(target: "sync", "Imported justification for {:?} {:?} which isn't a root in the tree: {:?}",
|
||||
request.0,
|
||||
request.1,
|
||||
self.justifications.roots().collect::<Vec<_>>(),
|
||||
);
|
||||
|
||||
return;
|
||||
};
|
||||
|
||||
self.previous_requests.clear();
|
||||
self.peer_requests.clear();
|
||||
self.pending_requests =
|
||||
self.justifications.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect();
|
||||
|
||||
return;
|
||||
}
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
|
||||
/// Processes the response for the request previously sent to the given
|
||||
/// peer. Queues a retry in case the given justification
|
||||
/// was `None`.
|
||||
///
|
||||
/// Returns `Some` if this produces a justification that must be imported in the import queue.
|
||||
#[must_use]
|
||||
fn on_response(
|
||||
&mut self,
|
||||
who: PeerId,
|
||||
justification: Option<Justification>,
|
||||
) -> Option<(PeerId, B::Hash, NumberFor<B>, Justification)> {
|
||||
// we assume that the request maps to the given response, this is
|
||||
// currently enforced by the outer network protocol before passing on
|
||||
// messages to chain sync.
|
||||
if let Some(request) = self.peer_requests.remove(&who) {
|
||||
if let Some(justification) = justification {
|
||||
self.importing_requests.insert(request);
|
||||
return Some((who, request.0, request.1, justification))
|
||||
}
|
||||
|
||||
self.previous_requests
|
||||
.entry(request)
|
||||
.or_insert(Vec::new())
|
||||
.push((who, Instant::now()));
|
||||
|
||||
self.pending_requests.push_front(request);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Removes any pending justification requests for blocks lower than the
|
||||
/// given best finalized.
|
||||
fn on_block_finalized<F>(
|
||||
&mut self,
|
||||
best_finalized_hash: &B::Hash,
|
||||
best_finalized_number: NumberFor<B>,
|
||||
is_descendent_of: F,
|
||||
) -> Result<(), fork_tree::Error<ClientError>>
|
||||
where F: Fn(&B::Hash, &B::Hash) -> Result<bool, ClientError>
|
||||
{
|
||||
if self.importing_requests.contains(&(*best_finalized_hash, best_finalized_number)) {
|
||||
// we imported this justification ourselves, so we should get back a response
|
||||
// from the import queue through `justification_import_result`
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.justifications.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?;
|
||||
|
||||
let roots = self.justifications.roots().collect::<HashSet<_>>();
|
||||
|
||||
self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &())));
|
||||
self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &())));
|
||||
self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &())));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear all data.
|
||||
fn clear(&mut self) {
|
||||
self.justifications = ForkTree::new();
|
||||
self.pending_requests.clear();
|
||||
self.peer_requests.clear();
|
||||
self.previous_requests.clear();
|
||||
}
|
||||
DownloadingFinalityProof(B::Hash),
|
||||
}
|
||||
|
||||
/// Relay chain sync strategy.
|
||||
@@ -370,7 +109,7 @@ pub struct ChainSync<B: BlockT> {
|
||||
best_queued_number: NumberFor<B>,
|
||||
best_queued_hash: B::Hash,
|
||||
required_block_attributes: message::BlockAttributes,
|
||||
justifications: PendingJustifications<B>,
|
||||
extra_requests: ExtraRequestsAggregator<B>,
|
||||
queue_blocks: HashSet<B::Hash>,
|
||||
best_importing_number: NumberFor<B>,
|
||||
}
|
||||
@@ -428,7 +167,7 @@ impl<B: BlockT> ChainSync<B> {
|
||||
blocks: BlockCollection::new(),
|
||||
best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash),
|
||||
best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number),
|
||||
justifications: PendingJustifications::new(),
|
||||
extra_requests: ExtraRequestsAggregator::new(),
|
||||
required_block_attributes,
|
||||
queue_blocks: Default::default(),
|
||||
best_importing_number: Zero::zero(),
|
||||
@@ -664,7 +403,7 @@ impl<B: BlockT> ChainSync<B> {
|
||||
vec![]
|
||||
}
|
||||
},
|
||||
PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) => Vec::new(),
|
||||
PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingFinalityProof(..) => Vec::new(),
|
||||
}
|
||||
} else {
|
||||
Vec::new()
|
||||
@@ -722,7 +461,7 @@ impl<B: BlockT> ChainSync<B> {
|
||||
return None;
|
||||
}
|
||||
|
||||
return self.justifications.on_response(
|
||||
return self.extra_requests.justifications().on_response(
|
||||
who,
|
||||
response.justification,
|
||||
);
|
||||
@@ -744,6 +483,42 @@ impl<B: BlockT> ChainSync<B> {
|
||||
None
|
||||
}
|
||||
|
||||
/// Handle new finality proof data.
|
||||
pub(crate) fn on_block_finality_proof_data(
|
||||
&mut self,
|
||||
protocol: &mut Context<B>,
|
||||
who: PeerId,
|
||||
response: message::FinalityProofResponse<B::Hash>,
|
||||
) -> Option<(PeerId, B::Hash, NumberFor<B>, Vec<u8>)> {
|
||||
if let Some(ref mut peer) = self.peers.get_mut(&who) {
|
||||
if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state {
|
||||
peer.state = PeerSyncState::Available;
|
||||
|
||||
// we only request one finality proof at a time
|
||||
if hash != response.block {
|
||||
info!(
|
||||
"Invalid block finality proof provided: requested: {:?} got: {:?}",
|
||||
hash,
|
||||
response.block,
|
||||
);
|
||||
|
||||
protocol.report_peer(who.clone(), i32::min_value());
|
||||
protocol.disconnect_peer(who);
|
||||
return None;
|
||||
}
|
||||
|
||||
return self.extra_requests.finality_proofs().on_response(
|
||||
who,
|
||||
response.proof,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
self.maintain_sync(protocol);
|
||||
None
|
||||
}
|
||||
|
||||
/// A batch of blocks have been processed, with or without errors.
|
||||
/// Call this when a batch of blocks have been processed by the import queue, with or without
|
||||
/// errors.
|
||||
pub fn blocks_processed(&mut self, processed_blocks: Vec<B::Hash>, has_error: bool) {
|
||||
@@ -761,13 +536,13 @@ impl<B: BlockT> ChainSync<B> {
|
||||
for peer in peers {
|
||||
self.download_new(protocol, peer);
|
||||
}
|
||||
self.justifications.dispatch(&mut self.peers, protocol);
|
||||
self.extra_requests.dispatch(&mut self.peers, protocol);
|
||||
}
|
||||
|
||||
/// Called periodically to perform any time-based actions. Must be called at a regular
|
||||
/// interval.
|
||||
pub fn tick(&mut self, protocol: &mut Context<B>) {
|
||||
self.justifications.dispatch(&mut self.peers, protocol);
|
||||
self.extra_requests.dispatch(&mut self.peers, protocol);
|
||||
}
|
||||
|
||||
/// Request a justification for the given block.
|
||||
@@ -775,23 +550,53 @@ impl<B: BlockT> ChainSync<B> {
|
||||
/// Uses `protocol` to queue a new justification request and tries to dispatch all pending
|
||||
/// requests.
|
||||
pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
|
||||
self.justifications.queue_request(
|
||||
&(*hash, number),
|
||||
self.extra_requests.justifications().queue_request(
|
||||
(*hash, number),
|
||||
|base, block| protocol.client().is_descendent_of(base, block),
|
||||
);
|
||||
|
||||
self.justifications.dispatch(&mut self.peers, protocol);
|
||||
self.extra_requests.justifications().dispatch(&mut self.peers, protocol);
|
||||
}
|
||||
|
||||
/// Clears all pending justification requests.
|
||||
pub fn clear_justification_requests(&mut self) {
|
||||
self.justifications.clear();
|
||||
self.extra_requests.justifications().clear();
|
||||
}
|
||||
|
||||
/// Call this when a justification has been processed by the import queue, with or without
|
||||
/// errors.
|
||||
pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor<B>, success: bool) {
|
||||
self.justifications.justification_import_result(hash, number, success);
|
||||
let finalization_result = if success { Ok((hash, number)) } else { Err(()) };
|
||||
if !self.extra_requests.justifications().on_import_result((hash, number), finalization_result) {
|
||||
debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.",
|
||||
hash,
|
||||
number,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Request a finality proof for the given block.
|
||||
///
|
||||
/// Queues a new finality proof request and tries to dispatch all pending requests.
|
||||
pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
|
||||
self.extra_requests.finality_proofs().queue_request(
|
||||
(*hash, number),
|
||||
|base, block| protocol.client().is_descendent_of(base, block),
|
||||
);
|
||||
|
||||
self.extra_requests.finality_proofs().dispatch(&mut self.peers, protocol);
|
||||
}
|
||||
|
||||
pub fn finality_proof_import_result(
|
||||
&mut self,
|
||||
request_block: (B::Hash, NumberFor<B>),
|
||||
finalization_result: Result<(B::Hash, NumberFor<B>), ()>,
|
||||
) {
|
||||
self.extra_requests.finality_proofs().on_import_result(request_block, finalization_result);
|
||||
}
|
||||
|
||||
pub fn set_finality_proof_request_builder(&mut self, request_builder: SharedFinalityProofRequestBuilder<B>) {
|
||||
self.extra_requests.finality_proofs().essence().0 = Some(request_builder);
|
||||
}
|
||||
|
||||
/// Notify about successful import of the given block.
|
||||
@@ -801,12 +606,12 @@ impl<B: BlockT> ChainSync<B> {
|
||||
|
||||
/// Notify about finalization of the given block.
|
||||
pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
|
||||
if let Err(err) = self.justifications.on_block_finalized(
|
||||
if let Err(err) = self.extra_requests.on_block_finalized(
|
||||
hash,
|
||||
number,
|
||||
|base, block| protocol.client().is_descendent_of(base, block),
|
||||
&|base, block| protocol.client().is_descendent_of(base, block),
|
||||
) {
|
||||
warn!(target: "sync", "Error cleaning up pending justification requests: {:?}", err);
|
||||
warn!(target: "sync", "Error cleaning up pending extra data requests: {:?}", err);
|
||||
};
|
||||
}
|
||||
|
||||
@@ -916,7 +721,7 @@ impl<B: BlockT> ChainSync<B> {
|
||||
pub(crate) fn peer_disconnected(&mut self, protocol: &mut Context<B>, who: PeerId) {
|
||||
self.blocks.clear_peer_download(&who);
|
||||
self.peers.remove(&who);
|
||||
self.justifications.peer_disconnected(who);
|
||||
self.extra_requests.peer_disconnected(who);
|
||||
self.maintain_sync(protocol);
|
||||
}
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ fn async_import_queue_drops() {
|
||||
// Perform this test multiple times since it exhibits non-deterministic behavior.
|
||||
for _ in 0..100 {
|
||||
let verifier = Arc::new(PassThroughVerifier(true));
|
||||
let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None);
|
||||
let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None, None, None);
|
||||
queue.start(Box::new(TestLink{})).unwrap();
|
||||
drop(queue);
|
||||
}
|
||||
|
||||
@@ -26,11 +26,16 @@ use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use log::trace;
|
||||
use client;
|
||||
use crate::chain::FinalityProofProvider;
|
||||
use client::{self, ClientInfo, BlockchainEvents, FinalityNotifications, in_mem::Backend as InMemoryBackend, error::Result as ClientResult};
|
||||
use client::block_builder::BlockBuilder;
|
||||
use crate::config::ProtocolConfig;
|
||||
use client::backend::AuxStore;
|
||||
use crate::config::{ProtocolConfig, Roles};
|
||||
use consensus::import_queue::{BasicQueue, ImportQueue, IncomingBlock};
|
||||
use consensus::import_queue::{Link, SharedBlockImport, SharedJustificationImport, Verifier};
|
||||
use consensus::import_queue::{
|
||||
Link, SharedBlockImport, SharedJustificationImport, Verifier, SharedFinalityProofImport,
|
||||
SharedFinalityProofRequestBuilder,
|
||||
};
|
||||
use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind};
|
||||
use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport};
|
||||
use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient, TopicNotification};
|
||||
@@ -39,7 +44,7 @@ use futures::{prelude::*, sync::{mpsc, oneshot}};
|
||||
use crate::message::Message;
|
||||
use network_libp2p::PeerId;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use primitives::{H256, sr25519::Public as AuthorityId};
|
||||
use primitives::{H256, sr25519::Public as AuthorityId, Blake2Hasher};
|
||||
use crate::protocol::{ConnectedPeer, Context, Protocol, ProtocolMsg, CustomMessageOutcome};
|
||||
use runtime_primitives::generic::BlockId;
|
||||
use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor};
|
||||
@@ -111,7 +116,79 @@ impl NetworkSpecialization<Block> for DummySpecialization {
|
||||
}
|
||||
}
|
||||
|
||||
pub type PeersClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
|
||||
pub type PeersFullClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
|
||||
pub type PeersLightClient = client::Client<test_client::LightBackend, test_client::LightExecutor, Block, test_client::runtime::RuntimeApi>;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum PeersClient {
|
||||
Full(Arc<PeersFullClient>),
|
||||
Light(Arc<PeersLightClient>),
|
||||
}
|
||||
|
||||
impl PeersClient {
|
||||
pub fn as_full(&self) -> Option<Arc<PeersFullClient>> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => Some(client.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_block_import(&self) -> SharedBlockImport<Block> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.clone() as _,
|
||||
PeersClient::Light(ref client) => client.clone() as _,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_in_memory_backend(&self) -> InMemoryBackend<Block, Blake2Hasher> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.backend().as_in_memory(),
|
||||
PeersClient::Light(_) => unimplemented!("TODO"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.backend().get_aux(key),
|
||||
PeersClient::Light(ref client) => client.backend().get_aux(key),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn info(&self) -> ClientResult<ClientInfo<Block>> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.info(),
|
||||
PeersClient::Light(ref client) => client.info(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn header(&self, block: &BlockId<Block>) -> ClientResult<Option<<Block as BlockT>::Header>> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.header(block),
|
||||
PeersClient::Light(ref client) => client.header(block),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn justification(&self, block: &BlockId<Block>) -> ClientResult<Option<Justification>> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.justification(block),
|
||||
PeersClient::Light(ref client) => client.justification(block),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.finality_notification_stream(),
|
||||
PeersClient::Light(ref client) => client.finality_notification_stream(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> ClientResult<()> {
|
||||
match *self {
|
||||
PeersClient::Full(ref client) => client.finalize_block(id, justification, notify),
|
||||
PeersClient::Light(ref client) => client.finalize_block(id, justification, notify),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A Link that can wait for a block to have been imported.
|
||||
pub struct TestLink<S: NetworkSpecialization<Block>> {
|
||||
@@ -155,6 +232,23 @@ impl<S: NetworkSpecialization<Block>> Link<Block> for TestLink<S> {
|
||||
self.link.request_justification(hash, number);
|
||||
}
|
||||
|
||||
fn finality_proof_imported(
|
||||
&self,
|
||||
who: PeerId,
|
||||
request_block: (Hash, NumberFor<Block>),
|
||||
finalization_result: Result<(Hash, NumberFor<Block>), ()>,
|
||||
) {
|
||||
self.link.finality_proof_imported(who, request_block, finalization_result);
|
||||
}
|
||||
|
||||
fn request_finality_proof(&self, hash: &Hash, number: NumberFor<Block>) {
|
||||
self.link.request_finality_proof(hash, number);
|
||||
}
|
||||
|
||||
fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder<Block>) {
|
||||
self.link.set_finality_proof_request_builder(request_builder);
|
||||
}
|
||||
|
||||
fn report_peer(&self, who: PeerId, reputation_change: i32) {
|
||||
self.link.report_peer(who, reputation_change);
|
||||
}
|
||||
@@ -178,7 +272,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
|
||||
pub is_major_syncing: Arc<AtomicBool>,
|
||||
pub peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
|
||||
pub peer_id: PeerId,
|
||||
client: Arc<PeersClient>,
|
||||
client: PeersClient,
|
||||
net_proto_channel: ProtocolChannel<S>,
|
||||
pub import_queue: Box<BasicQueue<Block>>,
|
||||
pub data: D,
|
||||
@@ -188,7 +282,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
|
||||
|
||||
type MessageFilter = Fn(&NetworkMsg<Block>) -> bool;
|
||||
|
||||
enum FromNetworkMsg<B: BlockT> {
|
||||
pub enum FromNetworkMsg<B: BlockT> {
|
||||
/// A peer connected, with debug info.
|
||||
PeerConnected(PeerId, String),
|
||||
/// A peer disconnected, with debug info.
|
||||
@@ -294,7 +388,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
|
||||
is_offline: Arc<AtomicBool>,
|
||||
is_major_syncing: Arc<AtomicBool>,
|
||||
peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
|
||||
client: Arc<PeersClient>,
|
||||
client: PeersClient,
|
||||
import_queue: Box<BasicQueue<Block>>,
|
||||
network_to_protocol_sender: mpsc::UnboundedSender<FromNetworkMsg<Block>>,
|
||||
protocol_sender: mpsc::UnboundedSender<ProtocolMsg<Block, S>>,
|
||||
@@ -327,7 +421,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
|
||||
}
|
||||
}
|
||||
/// Called after blockchain has been populated to updated current state.
|
||||
fn start(&self) {
|
||||
pub fn start(&self) {
|
||||
// Update the sync state to the latest chain state.
|
||||
let info = self.client.info().expect("In-mem client does not fail");
|
||||
let header = self
|
||||
@@ -484,7 +578,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
|
||||
|
||||
/// Add blocks to the peer -- edit the block before adding
|
||||
pub fn generate_blocks<F>(&self, count: usize, origin: BlockOrigin, edit_block: F) -> H256
|
||||
where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
|
||||
where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
|
||||
{
|
||||
let best_hash = self.client.info().unwrap().chain.best_hash;
|
||||
self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block)
|
||||
@@ -493,11 +587,12 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
|
||||
/// Add blocks to the peer -- edit the block before adding. The chain will
|
||||
/// start at the given block iD.
|
||||
pub fn generate_blocks_at<F>(&self, at: BlockId<Block>, count: usize, origin: BlockOrigin, mut edit_block: F) -> H256
|
||||
where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
|
||||
where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
|
||||
{
|
||||
let mut at = self.client.header(&at).unwrap().unwrap().hash();
|
||||
let full_client = self.client.as_full().expect("blocks could only be generated by full clients");
|
||||
let mut at = full_client.header(&at).unwrap().unwrap().hash();
|
||||
for _ in 0..count {
|
||||
let builder = self.client.new_block_at(&BlockId::Hash(at)).unwrap();
|
||||
let builder = full_client.new_block_at(&BlockId::Hash(at)).unwrap();
|
||||
let block = edit_block(builder);
|
||||
let hash = block.header.hash();
|
||||
trace!(
|
||||
@@ -562,7 +657,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
|
||||
}
|
||||
|
||||
/// Get a reference to the client.
|
||||
pub fn client(&self) -> &Arc<PeersClient> {
|
||||
pub fn client(&self) -> &PeersClient {
|
||||
&self.client
|
||||
}
|
||||
}
|
||||
@@ -598,7 +693,7 @@ pub trait TestNetFactory: Sized {
|
||||
|
||||
/// These two need to be implemented!
|
||||
fn from_config(config: &ProtocolConfig) -> Self;
|
||||
fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig) -> Arc<Self::Verifier>;
|
||||
fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig) -> Arc<Self::Verifier>;
|
||||
|
||||
/// Get reference to peer.
|
||||
fn peer(&self, i: usize) -> &Peer<Self::PeerData, Self::Specialization>;
|
||||
@@ -609,10 +704,21 @@ pub trait TestNetFactory: Sized {
|
||||
fn set_started(&mut self, now: bool);
|
||||
|
||||
/// Get custom block import handle for fresh client, along with peer data.
|
||||
fn make_block_import(&self, client: Arc<PeersClient>)
|
||||
-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
|
||||
fn make_block_import(&self, client: PeersClient)
|
||||
-> (
|
||||
SharedBlockImport<Block>,
|
||||
Option<SharedJustificationImport<Block>>,
|
||||
Option<SharedFinalityProofImport<Block>>,
|
||||
Option<SharedFinalityProofRequestBuilder<Block>>,
|
||||
Self::PeerData,
|
||||
)
|
||||
{
|
||||
(client, None, Default::default())
|
||||
(client.as_block_import(), None, None, None, Default::default())
|
||||
}
|
||||
|
||||
/// Get finality proof provider (if supported).
|
||||
fn make_finality_proof_provider(&self, _client: PeersClient) -> Option<Arc<FinalityProofProvider<Block>>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn default_config() -> ProtocolConfig {
|
||||
@@ -627,41 +733,21 @@ pub trait TestNetFactory: Sized {
|
||||
|
||||
for i in 0..n {
|
||||
trace!(target: "test_network", "Adding peer {}", i);
|
||||
net.add_peer(&config);
|
||||
net.add_full_peer(&config);
|
||||
}
|
||||
net
|
||||
}
|
||||
|
||||
/// Add a peer.
|
||||
fn add_peer(&mut self, config: &ProtocolConfig) {
|
||||
let client = Arc::new(test_client::new());
|
||||
let tx_pool = Arc::new(EmptyTransactionPool);
|
||||
let verifier = self.make_verifier(client.clone(), config);
|
||||
let (block_import, justification_import, data) = self.make_block_import(client.clone());
|
||||
let (network_sender, network_port) = network_channel();
|
||||
|
||||
let import_queue = Box::new(BasicQueue::new(verifier, block_import, justification_import));
|
||||
let is_offline = Arc::new(AtomicBool::new(true));
|
||||
let is_major_syncing = Arc::new(AtomicBool::new(false));
|
||||
let specialization = self::SpecializationFactory::create();
|
||||
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
|
||||
|
||||
let (network_to_protocol_sender, mut network_to_protocol_rx) = mpsc::unbounded();
|
||||
|
||||
let (mut protocol, protocol_sender) = Protocol::new(
|
||||
peers.clone(),
|
||||
network_sender.clone(),
|
||||
config.clone(),
|
||||
client.clone(),
|
||||
None,
|
||||
tx_pool,
|
||||
specialization,
|
||||
).unwrap();
|
||||
|
||||
let is_offline2 = is_offline.clone();
|
||||
let is_major_syncing2 = is_major_syncing.clone();
|
||||
let import_queue2 = import_queue.clone();
|
||||
|
||||
/// Add created peer.
|
||||
fn add_peer(
|
||||
&mut self,
|
||||
is_offline: Arc<AtomicBool>,
|
||||
is_major_syncing: Arc<AtomicBool>,
|
||||
import_queue: Box<BasicQueue<Block>>,
|
||||
mut protocol: Protocol<Block, Self::Specialization, Hash>,
|
||||
mut network_to_protocol_rx: mpsc::UnboundedReceiver<FromNetworkMsg<Block>>,
|
||||
peer: Arc<Peer<Self::PeerData, Self::Specialization>>,
|
||||
) {
|
||||
std::thread::spawn(move || {
|
||||
tokio::runtime::current_thread::run(futures::future::poll_fn(move || {
|
||||
while let Async::Ready(msg) = network_to_protocol_rx.poll().unwrap() {
|
||||
@@ -680,14 +766,16 @@ pub trait TestNetFactory: Sized {
|
||||
protocol.synchronize();
|
||||
CustomMessageOutcome::None
|
||||
},
|
||||
None => return Ok(Async::Ready(()))
|
||||
None => return Ok(Async::Ready(())),
|
||||
};
|
||||
|
||||
match outcome {
|
||||
CustomMessageOutcome::BlockImport(origin, blocks) =>
|
||||
import_queue2.import_blocks(origin, blocks),
|
||||
import_queue.import_blocks(origin, blocks),
|
||||
CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) =>
|
||||
import_queue2.import_justification(origin, hash, nb, justification),
|
||||
import_queue.import_justification(origin, hash, nb, justification),
|
||||
CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) =>
|
||||
import_queue.import_finality_proof(origin, hash, nb, proof),
|
||||
CustomMessageOutcome::None => {}
|
||||
}
|
||||
}
|
||||
@@ -696,31 +784,140 @@ pub trait TestNetFactory: Sized {
|
||||
return Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
is_offline2.store(protocol.is_offline(), Ordering::Relaxed);
|
||||
is_major_syncing2.store(protocol.is_major_syncing(), Ordering::Relaxed);
|
||||
|
||||
is_offline.store(protocol.is_offline(), Ordering::Relaxed);
|
||||
is_major_syncing.store(protocol.is_major_syncing(), Ordering::Relaxed);
|
||||
|
||||
Ok(Async::NotReady)
|
||||
}));
|
||||
});
|
||||
|
||||
let peer = Arc::new(Peer::new(
|
||||
is_offline,
|
||||
is_major_syncing,
|
||||
peers,
|
||||
client,
|
||||
import_queue,
|
||||
network_to_protocol_sender,
|
||||
protocol_sender,
|
||||
network_sender,
|
||||
network_port,
|
||||
data,
|
||||
));
|
||||
if self.started() {
|
||||
peer.start();
|
||||
self.peers().iter().for_each(|other| {
|
||||
other.on_connect(&*peer);
|
||||
peer.on_connect(other);
|
||||
});
|
||||
}
|
||||
|
||||
self.mut_peers(|peers| {
|
||||
peers.push(peer)
|
||||
});
|
||||
}
|
||||
|
||||
/// Add a full peer.
|
||||
fn add_full_peer(&mut self, config: &ProtocolConfig) {
|
||||
let client = Arc::new(test_client::new());
|
||||
let tx_pool = Arc::new(EmptyTransactionPool);
|
||||
let verifier = self.make_verifier(PeersClient::Full(client.clone()), config);
|
||||
let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
|
||||
= self.make_block_import(PeersClient::Full(client.clone()));
|
||||
let (network_sender, network_port) = network_channel();
|
||||
|
||||
let import_queue = Box::new(BasicQueue::new(
|
||||
verifier,
|
||||
block_import,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
));
|
||||
let is_offline = Arc::new(AtomicBool::new(true));
|
||||
let is_major_syncing = Arc::new(AtomicBool::new(false));
|
||||
let specialization = self::SpecializationFactory::create();
|
||||
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
|
||||
|
||||
let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
|
||||
|
||||
let (protocol, protocol_sender) = Protocol::new(
|
||||
peers.clone(),
|
||||
network_sender.clone(),
|
||||
config.clone(),
|
||||
client.clone(),
|
||||
self.make_finality_proof_provider(PeersClient::Full(client.clone())),
|
||||
None,
|
||||
tx_pool,
|
||||
specialization,
|
||||
).unwrap();
|
||||
|
||||
self.add_peer(
|
||||
is_offline.clone(),
|
||||
is_major_syncing.clone(),
|
||||
import_queue.clone(),
|
||||
protocol,
|
||||
network_to_protocol_rx,
|
||||
Arc::new(Peer::new(
|
||||
is_offline,
|
||||
is_major_syncing,
|
||||
peers,
|
||||
PeersClient::Full(client),
|
||||
import_queue,
|
||||
network_to_protocol_sender,
|
||||
protocol_sender,
|
||||
network_sender,
|
||||
network_port,
|
||||
data,
|
||||
)),
|
||||
);
|
||||
}
|
||||
|
||||
/// Add a light peer.
|
||||
fn add_light_peer(&mut self, config: &ProtocolConfig) {
|
||||
let mut config = config.clone();
|
||||
config.roles = Roles::LIGHT;
|
||||
|
||||
let client = Arc::new(test_client::new_light());
|
||||
let tx_pool = Arc::new(EmptyTransactionPool);
|
||||
let verifier = self.make_verifier(PeersClient::Light(client.clone()), &config);
|
||||
let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
|
||||
= self.make_block_import(PeersClient::Light(client.clone()));
|
||||
let (network_sender, network_port) = network_channel();
|
||||
|
||||
let import_queue = Box::new(BasicQueue::new(
|
||||
verifier,
|
||||
block_import,
|
||||
justification_import,
|
||||
finality_proof_import,
|
||||
finality_proof_request_builder,
|
||||
));
|
||||
let is_offline = Arc::new(AtomicBool::new(true));
|
||||
let is_major_syncing = Arc::new(AtomicBool::new(false));
|
||||
let specialization = self::SpecializationFactory::create();
|
||||
let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
|
||||
|
||||
let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
|
||||
|
||||
let (protocol, protocol_sender) = Protocol::new(
|
||||
peers.clone(),
|
||||
network_sender.clone(),
|
||||
config,
|
||||
client.clone(),
|
||||
self.make_finality_proof_provider(PeersClient::Light(client.clone())),
|
||||
None,
|
||||
tx_pool,
|
||||
specialization,
|
||||
).unwrap();
|
||||
|
||||
self.add_peer(
|
||||
is_offline.clone(),
|
||||
is_major_syncing.clone(),
|
||||
import_queue.clone(),
|
||||
protocol,
|
||||
network_to_protocol_rx,
|
||||
Arc::new(Peer::new(
|
||||
is_offline,
|
||||
is_major_syncing,
|
||||
peers,
|
||||
PeersClient::Light(client),
|
||||
import_queue,
|
||||
network_to_protocol_sender,
|
||||
protocol_sender,
|
||||
network_sender,
|
||||
network_port,
|
||||
data,
|
||||
)),
|
||||
);
|
||||
}
|
||||
|
||||
/// Start network.
|
||||
fn start(&mut self) {
|
||||
if self.started() {
|
||||
@@ -832,6 +1029,11 @@ pub trait TestNetFactory: Sized {
|
||||
self.route_single(true, None, &|_| true);
|
||||
}
|
||||
|
||||
/// Maintain sync for a peer.
|
||||
fn tick_peer(&mut self, i: usize) {
|
||||
self.peers()[i].sync_step();
|
||||
}
|
||||
|
||||
/// Deliver pending messages until there are no more.
|
||||
fn sync(&mut self) {
|
||||
self.sync_with(true, None)
|
||||
@@ -866,7 +1068,7 @@ impl TestNetFactory for TestNet {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_verifier(&self, _client: Arc<PeersClient>, _config: &ProtocolConfig)
|
||||
fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig)
|
||||
-> Arc<Self::Verifier>
|
||||
{
|
||||
Arc::new(PassThroughVerifier(false))
|
||||
@@ -893,7 +1095,7 @@ impl TestNetFactory for TestNet {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ForceFinalized(Arc<PeersClient>);
|
||||
pub struct ForceFinalized(PeersClient);
|
||||
|
||||
impl JustificationImport<Block> for ForceFinalized {
|
||||
type Error = ConsensusError;
|
||||
@@ -920,7 +1122,7 @@ impl TestNetFactory for JustificationTestNet {
|
||||
JustificationTestNet(TestNet::from_config(config))
|
||||
}
|
||||
|
||||
fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig)
|
||||
fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig)
|
||||
-> Arc<Self::Verifier>
|
||||
{
|
||||
self.0.make_verifier(client, config)
|
||||
@@ -946,9 +1148,15 @@ impl TestNetFactory for JustificationTestNet {
|
||||
self.0.set_started(new)
|
||||
}
|
||||
|
||||
fn make_block_import(&self, client: Arc<PeersClient>)
|
||||
-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
|
||||
fn make_block_import(&self, client: PeersClient)
|
||||
-> (
|
||||
SharedBlockImport<Block>,
|
||||
Option<SharedJustificationImport<Block>>,
|
||||
Option<SharedFinalityProofImport<Block>>,
|
||||
Option<SharedFinalityProofRequestBuilder<Block>>,
|
||||
Self::PeerData,
|
||||
)
|
||||
{
|
||||
(client.clone(), Some(Arc::new(ForceFinalized(client))), Default::default())
|
||||
(client.as_block_import(), Some(Arc::new(ForceFinalized(client))), None, None, Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,8 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use client::backend::Backend;
|
||||
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
|
||||
use client::{backend::Backend, blockchain::HeaderBackend};
|
||||
use crate::config::Roles;
|
||||
use consensus::BlockOrigin;
|
||||
use std::collections::HashSet;
|
||||
@@ -34,8 +33,8 @@ fn test_ancestor_search_when_common_is(n: usize) {
|
||||
net.peer(2).push_blocks(100, false);
|
||||
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -130,8 +129,8 @@ fn sync_from_two_peers_works() {
|
||||
net.peer(1).push_blocks(100, false);
|
||||
net.peer(2).push_blocks(100, false);
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
assert!(!net.peer(0).is_major_syncing());
|
||||
}
|
||||
|
||||
@@ -143,8 +142,8 @@ fn sync_from_two_peers_with_ancestry_search_works() {
|
||||
net.peer(1).push_blocks(100, false);
|
||||
net.peer(2).push_blocks(100, false);
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -157,8 +156,8 @@ fn ancestry_search_works_when_backoff_is_one() {
|
||||
net.peer(2).push_blocks(2, false);
|
||||
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -171,8 +170,8 @@ fn ancestry_search_works_when_ancestor_is_genesis() {
|
||||
net.peer(2).push_blocks(100, false);
|
||||
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -195,8 +194,8 @@ fn sync_long_chain_works() {
|
||||
let mut net = TestNet::new(2);
|
||||
net.peer(1).push_blocks(500, false);
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -206,8 +205,8 @@ fn sync_no_common_longer_chain_fails() {
|
||||
net.peer(0).push_blocks(20, true);
|
||||
net.peer(1).push_blocks(20, false);
|
||||
net.sync();
|
||||
assert!(!net.peer(0).client.backend().as_in_memory().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
|
||||
assert!(!net.peer(0).client.as_in_memory_backend().blockchain()
|
||||
.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -285,11 +284,11 @@ fn sync_after_fork_works() {
|
||||
net.peer(2).push_blocks(1, false);
|
||||
|
||||
// peer 1 has the best chain
|
||||
let peer1_chain = net.peer(1).client.backend().as_in_memory().blockchain().clone();
|
||||
let peer1_chain = net.peer(1).client.as_in_memory_backend().blockchain().clone();
|
||||
net.sync();
|
||||
assert!(net.peer(0).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
|
||||
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
|
||||
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
|
||||
assert!(net.peer(0).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
|
||||
assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
|
||||
assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -305,8 +304,8 @@ fn syncs_all_forks() {
|
||||
|
||||
net.sync();
|
||||
// Check that all peers have all of the blocks.
|
||||
assert_eq!(9, net.peer(0).client.backend().as_in_memory().blockchain().blocks_count());
|
||||
assert_eq!(9, net.peer(1).client.backend().as_in_memory().blockchain().blocks_count());
|
||||
assert_eq!(9, net.peer(0).client.as_in_memory_backend().blockchain().blocks_count());
|
||||
assert_eq!(9, net.peer(1).client.as_in_memory_backend().blockchain().blocks_count());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -320,11 +319,11 @@ fn own_blocks_are_announced() {
|
||||
net.peer(0).on_block_imported(header.hash(), &header);
|
||||
net.sync();
|
||||
|
||||
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
|
||||
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
|
||||
let peer0_chain = net.peer(0).client.backend().as_in_memory().blockchain().clone();
|
||||
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
|
||||
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
|
||||
assert_eq!(net.peer(0).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
|
||||
assert_eq!(net.peer(1).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
|
||||
let peer0_chain = net.peer(0).client.as_in_memory_backend().blockchain().clone();
|
||||
assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
|
||||
assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -336,9 +335,9 @@ fn blocks_are_not_announced_by_light_nodes() {
|
||||
// light peer1 is connected to full peer2
|
||||
let mut light_config = ProtocolConfig::default();
|
||||
light_config.roles = Roles::LIGHT;
|
||||
net.add_peer(&ProtocolConfig::default());
|
||||
net.add_peer(&light_config);
|
||||
net.add_peer(&ProtocolConfig::default());
|
||||
net.add_full_peer(&ProtocolConfig::default());
|
||||
net.add_full_peer(&light_config);
|
||||
net.add_full_peer(&ProtocolConfig::default());
|
||||
|
||||
net.peer(0).push_blocks(1, false);
|
||||
net.peer(0).start();
|
||||
@@ -356,9 +355,9 @@ fn blocks_are_not_announced_by_light_nodes() {
|
||||
// peer 0 has the best chain
|
||||
// peer 1 has the best chain
|
||||
// peer 2 has genesis-chain only
|
||||
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
|
||||
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
|
||||
assert_eq!(net.peer(2).client.backend().blockchain().info().unwrap().best_number, 0);
|
||||
assert_eq!(net.peer(0).client.info().unwrap().chain.best_number, 1);
|
||||
assert_eq!(net.peer(1).client.info().unwrap().chain.best_number, 1);
|
||||
assert_eq!(net.peer(2).client.info().unwrap().chain.best_number, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -24,7 +24,7 @@ use client_db;
|
||||
use client::{self, Client, runtime_api};
|
||||
use crate::{error, Service, maybe_start_server};
|
||||
use consensus_common::{import_queue::ImportQueue, SelectChain};
|
||||
use network::{self, OnDemand};
|
||||
use network::{self, OnDemand, FinalityProofProvider};
|
||||
use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
|
||||
use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool};
|
||||
use runtime_primitives::{
|
||||
@@ -72,7 +72,7 @@ pub type LightExecutor<F> = client::light::call_executor::RemoteOrLocalCallExecu
|
||||
client_db::light::LightStorage<<F as ServiceFactory>::Block>,
|
||||
network::OnDemand<<F as ServiceFactory>::Block>
|
||||
>,
|
||||
network::OnDemand<<F as ServiceFactory>::Block>
|
||||
network::OnDemand<<F as ServiceFactory>::Block>,
|
||||
>,
|
||||
client::LocalCallExecutor<
|
||||
client::light::backend::Backend<
|
||||
@@ -322,6 +322,11 @@ pub trait ServiceFactory: 'static + Sized {
|
||||
fn build_network_protocol(config: &FactoryFullConfiguration<Self>)
|
||||
-> Result<Self::NetworkProtocol, error::Error>;
|
||||
|
||||
/// Build finality proof provider for serving network requests on full node.
|
||||
fn build_finality_proof_provider(
|
||||
client: Arc<FullClient<Self>>
|
||||
) -> Result<Option<Arc<FinalityProofProvider<Self::Block>>>, error::Error>;
|
||||
|
||||
/// Build the Fork Choice algorithm for full client
|
||||
fn build_select_chain(
|
||||
config: &mut FactoryFullConfiguration<Self>,
|
||||
@@ -413,12 +418,16 @@ pub trait Components: Sized + 'static {
|
||||
select_chain: Self::SelectChain,
|
||||
) -> Result<Self::ImportQueue, error::Error>;
|
||||
|
||||
/// Finality proof provider for serving network requests.
|
||||
fn build_finality_proof_provider(
|
||||
client: Arc<ComponentClient<Self>>
|
||||
) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error>;
|
||||
|
||||
/// Build fork choice selector
|
||||
fn build_select_chain(
|
||||
config: &mut FactoryFullConfiguration<Self::Factory>,
|
||||
client: Arc<ComponentClient<Self>>
|
||||
) -> Result<Self::SelectChain, error::Error>;
|
||||
|
||||
}
|
||||
|
||||
/// A struct that implement `Components` for the full client.
|
||||
@@ -508,7 +517,12 @@ impl<Factory: ServiceFactory> Components for FullComponents<Factory> {
|
||||
) -> Result<Self::SelectChain, error::Error> {
|
||||
Self::Factory::build_select_chain(config, client)
|
||||
}
|
||||
|
||||
|
||||
fn build_finality_proof_provider(
|
||||
client: Arc<ComponentClient<Self>>
|
||||
) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> {
|
||||
Factory::build_finality_proof_provider(client)
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct that implement `Components` for the light client.
|
||||
@@ -587,14 +601,17 @@ impl<Factory: ServiceFactory> Components for LightComponents<Factory> {
|
||||
Factory::build_light_import_queue(config, client)
|
||||
}
|
||||
|
||||
/// Build fork choice selector
|
||||
fn build_finality_proof_provider(
|
||||
_client: Arc<ComponentClient<Self>>
|
||||
) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> {
|
||||
Ok(None)
|
||||
}
|
||||
fn build_select_chain(
|
||||
_config: &mut FactoryFullConfiguration<Self::Factory>,
|
||||
_client: Arc<ComponentClient<Self>>
|
||||
) -> Result<Self::SelectChain, error::Error> {
|
||||
Err("Fork choice doesn't happen on light clients.".into())
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -65,7 +65,7 @@ use components::{StartRPC, MaintainTransactionPool, OffchainWorker};
|
||||
#[doc(hidden)]
|
||||
pub use std::{ops::Deref, result::Result, sync::Arc};
|
||||
#[doc(hidden)]
|
||||
pub use network::OnDemand;
|
||||
pub use network::{FinalityProofProvider, OnDemand};
|
||||
#[doc(hidden)]
|
||||
pub use tokio::runtime::TaskExecutor;
|
||||
|
||||
@@ -156,8 +156,9 @@ impl<Components: components::Components> Service<Components> {
|
||||
let import_queue = Box::new(Components::build_import_queue(
|
||||
&mut config,
|
||||
client.clone(),
|
||||
select_chain.clone()
|
||||
select_chain.clone(),
|
||||
)?);
|
||||
let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?;
|
||||
let best_header = select_chain.best_chain()?;
|
||||
|
||||
let version = config.full_version();
|
||||
@@ -178,6 +179,7 @@ impl<Components: components::Components> Service<Components> {
|
||||
config: network::config::ProtocolConfig { roles: config.roles },
|
||||
network_config: config.network.clone(),
|
||||
chain: client.clone(),
|
||||
finality_proof_provider,
|
||||
on_demand: on_demand.as_ref().map(|d| d.clone() as _),
|
||||
transaction_pool: transaction_pool_adapter.clone() as _,
|
||||
specialization: network_protocol,
|
||||
@@ -593,6 +595,7 @@ macro_rules! construct_service_factory {
|
||||
{ $( $light_import_queue_init:tt )* },
|
||||
SelectChain = $select_chain:ty
|
||||
{ $( $select_chain_init:tt )* },
|
||||
FinalityProofProvider = { $( $finality_proof_provider_init:tt )* },
|
||||
}
|
||||
) => {
|
||||
$( #[$attr] )*
|
||||
@@ -658,6 +661,12 @@ macro_rules! construct_service_factory {
|
||||
( $( $light_import_queue_init )* ) (config, client)
|
||||
}
|
||||
|
||||
fn build_finality_proof_provider(
|
||||
client: Arc<$crate::FullClient<Self>>
|
||||
) -> Result<Option<Arc<$crate::FinalityProofProvider<Self::Block>>>, $crate::Error> {
|
||||
( $( $finality_proof_provider_init )* ) (client)
|
||||
}
|
||||
|
||||
fn new_light(
|
||||
config: $crate::FactoryFullConfiguration<Self>,
|
||||
executor: $crate::TaskExecutor
|
||||
|
||||
@@ -92,6 +92,8 @@ construct_service_factory! {
|
||||
SlotDuration::get_or_compute(&*client)?,
|
||||
client.clone(),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
client,
|
||||
NothingExtra,
|
||||
config.custom.inherent_data_providers.clone(),
|
||||
@@ -106,6 +108,8 @@ construct_service_factory! {
|
||||
SlotDuration::get_or_compute(&*client)?,
|
||||
client.clone(),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
client,
|
||||
NothingExtra,
|
||||
config.custom.inherent_data_providers.clone(),
|
||||
@@ -120,5 +124,8 @@ construct_service_factory! {
|
||||
))
|
||||
}
|
||||
},
|
||||
FinalityProofProvider = { |_client: Arc<FullClient<Self>>| {
|
||||
Ok(None)
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,10 +22,8 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use client::{self, LongestChain};
|
||||
use consensus::{import_queue, start_aura, AuraImportQueue,
|
||||
SlotDuration, NothingExtra
|
||||
};
|
||||
use grandpa;
|
||||
use consensus::{import_queue, start_aura, AuraImportQueue, SlotDuration, NothingExtra};
|
||||
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
|
||||
use node_executor;
|
||||
use primitives::{Pair as PairT, ed25519};
|
||||
use node_primitives::Block;
|
||||
@@ -170,6 +168,8 @@ construct_service_factory! {
|
||||
slot_duration,
|
||||
block_import,
|
||||
Some(justification_import),
|
||||
None,
|
||||
None,
|
||||
client,
|
||||
NothingExtra,
|
||||
config.custom.inherent_data_providers.clone(),
|
||||
@@ -177,16 +177,28 @@ construct_service_factory! {
|
||||
}},
|
||||
LightImportQueue = AuraImportQueue<Self::Block>
|
||||
{ |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| {
|
||||
let fetch_checker = client.backend().blockchain().fetcher()
|
||||
.upgrade()
|
||||
.map(|fetcher| fetcher.checker().clone())
|
||||
.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
|
||||
let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient<Self>>(
|
||||
client.clone(), Arc::new(fetch_checker), client.clone()
|
||||
)?;
|
||||
let block_import = Arc::new(block_import);
|
||||
let finality_proof_import = block_import.clone();
|
||||
let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder();
|
||||
|
||||
import_queue::<_, _, _, ed25519::Pair>(
|
||||
SlotDuration::get_or_compute(&*client)?,
|
||||
client.clone(),
|
||||
block_import,
|
||||
None,
|
||||
Some(finality_proof_import),
|
||||
Some(finality_proof_request_builder),
|
||||
client,
|
||||
NothingExtra,
|
||||
config.custom.inherent_data_providers.clone(),
|
||||
).map_err(Into::into)
|
||||
}
|
||||
},
|
||||
}},
|
||||
SelectChain = LongestChain<FullBackend<Self>, Self::Block>
|
||||
{ |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| {
|
||||
Ok(LongestChain::new(
|
||||
@@ -195,6 +207,9 @@ construct_service_factory! {
|
||||
))
|
||||
}
|
||||
},
|
||||
FinalityProofProvider = { |client: Arc<FullClient<Self>>| {
|
||||
Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _))
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user