Grandpa validator set handoff justification (#1190)

* core: make block justification optional

* runtime: update wasm binaries

* core: optionally pass justification on finalize_block

* finality-grandpa: add channel to trigger authority set changes

this will allow the `BlockImport` to trigger an authority set change when
importing a change block that provides a justification (when syncing)

* finality-grandpa: move finalize_block to free function

* finality-grandpa: add GrandpaOracle for auth set liveness checking

this will be used by `BlockImport` to check whether the authority set for a
given block is still live, if the authority set isn't live then importing a
change block requires a justification.

* finality-grandpa: store justification on finalized transition blocks

* finality-grandpa: check justification on authority set change blocks

* finality-grandpa: poll grandpa liveness oracle every 10 seconds

* finality-grandpa: spawn grandpa oracle in service setup

* core: support multiple subscriptions per consensus gossip topic

* finality-grandpa: create and verify justifications

* finality-grandpa: update to local branch of grandpa

* finality-grandpa: update to finality-grandpa v0.5.0

* finality-grandpa: move grandpa oracle code

* finality-grandpa: fix canonality check

* finality-grandpa: clean up error handling

* finality-grandpa: fix canonical_at_height

* finality-grandpa: fix tests

* runtime: update wasm binaries

* core: add tests for finalizing block with justification

* finality-grandpa: improve validation of justifications

* core: remove unused IncompleteJustification block import error

* core: test multiple subscribers for same consensus gossip topic

* Revert "finality-grandpa: improve validation of justifications"

This reverts commit 51eb2c58c2219801e876af6d6c9371bdd9ff2477.

* finality-grandpa: fix commit validation

* finality-grandpa: fix commit ancestry validation

* finality-grandpa: use grandpa v0.5.1

* finality-grandpa: add docs

* finality-grandpa: fix failing test

* finality-grandpa: only allow a pending authority set change per fork

* finality-grandpa: fix validator set transition test
This commit is contained in:
André Silva
2018-12-08 05:34:59 +00:00
committed by Gav Wood
parent da822276dd
commit e779eeb2ec
29 changed files with 1115 additions and 389 deletions
+3 -3
View File
@@ -724,7 +724,7 @@ dependencies = [
[[package]]
name = "finality-grandpa"
version = "0.4.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3388,7 +3388,7 @@ name = "substrate-finality-grandpa"
version = "0.1.0"
dependencies = [
"env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)",
"finality-grandpa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"finality-grandpa 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4643,7 +4643,7 @@ dependencies = [
"checksum failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "64c2d913fe8ed3b6c6518eedf4538255b989945c14c2a7d5cbff62a5e2120596"
"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
"checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa"
"checksum finality-grandpa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc88b8ddddcf3f998b8196d93c3ce31427c5b241cfe6c5a342e2a3f5d13ecbb"
"checksum finality-grandpa 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a1dffe3c9d4c59d964f25cea31880e56c20414cdae7efe2269411238f850ad39"
"checksum fixed-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a557e80084b05c32b455963ff565a9de6f2866da023d6671705c6aff6f65e01c"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+48 -3
View File
@@ -797,7 +797,9 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
Ok(())
}
fn finalize_block(&self, block: BlockId<Block>) -> Result<(), client::error::Error> {
fn finalize_block(&self, block: BlockId<Block>, justification: Option<Justification>)
-> Result<(), client::error::Error>
{
use runtime_primitives::traits::Header;
if let Some(header) = ::client::blockchain::HeaderBackend::header(&self.blockchain, block)? {
@@ -805,6 +807,14 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
// TODO: ensure best chain contains this block.
let hash = header.hash();
self.note_finalized(&mut transaction, &header, hash.clone())?;
if let Some(justification) = justification {
let number = header.number().clone();
transaction.put(
columns::JUSTIFICATION,
&::utils::number_and_hash_to_lookup_key(number, hash.clone()),
&justification.encode(),
);
}
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, header.number().clone(), false, true);
Ok(())
@@ -1196,8 +1206,8 @@ mod tests {
assert!(backend.storage.db.get(::columns::STATE, key.as_bytes()).unwrap().is_none());
}
backend.finalize_block(BlockId::Number(1)).unwrap();
backend.finalize_block(BlockId::Number(2)).unwrap();
backend.finalize_block(BlockId::Number(1), None).unwrap();
backend.finalize_block(BlockId::Number(2), None).unwrap();
assert!(backend.storage.db.get(::columns::STATE, key.as_bytes()).unwrap().is_none());
}
@@ -1499,4 +1509,39 @@ mod tests {
backend.insert_aux(&[], &[&b"test"[..]]).unwrap();
assert!(backend.get_aux(b"test").unwrap().is_none());
}
#[test]
fn test_finalize_block_with_justification() {
use client::blockchain::{Backend as BlockChainBackend};
let backend = Backend::<Block>::new_test(0, 0);
{
let mut op = backend.begin_operation(BlockId::Hash(Default::default())).unwrap();
let header = Header {
number: 0,
parent_hash: Default::default(),
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};
op.set_block_data(
header,
Some(vec![]),
None,
NewBlockState::Best,
).unwrap();
backend.commit_operation(op).unwrap();
}
let justification = Some(vec![1, 2, 3]);
backend.finalize_block(BlockId::Number(0), justification.clone()).unwrap();
assert_eq!(
backend.blockchain().justification(BlockId::Number(0)).unwrap(),
justification,
);
}
}
+1 -1
View File
@@ -107,7 +107,7 @@ pub trait Backend<Block, H>: Send + Sync where
fn commit_operation(&self, transaction: Self::BlockImportOperation) -> error::Result<()>;
/// Finalize block with given Id. This should only be called if the parent of the given
/// block has been finalized.
fn finalize_block(&self, block: BlockId<Block>) -> error::Result<()>;
fn finalize_block(&self, block: BlockId<Block>, justification: Option<Justification>) -> error::Result<()>;
/// Returns reference to blockchain backend.
fn blockchain(&self) -> &Self::Blockchain;
/// Returns reference to changes trie storage.
+84 -27
View File
@@ -593,7 +593,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
origin: BlockOrigin,
hash: Block::Hash,
import_headers: PrePostHeader<Block::Header>,
justification: Justification,
justification: Option<Justification>,
body: Option<Vec<Block::Extrinsic>>,
authorities: Option<Vec<AuthorityId>>,
finalized: bool,
@@ -624,7 +624,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
// ensure parent block is finalized to maintain invariant that
// finality is called sequentially.
if finalized {
self.apply_finality(parent_hash, last_best, make_notifications)?;
self.apply_finality(parent_hash, None, last_best, make_notifications)?;
}
let tags = self.transaction_tags(parent_hash, &body)?;
@@ -678,7 +678,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
transaction.set_block_data(
import_headers.post().clone(),
body,
Some(justification),
justification,
leaf_state,
)?;
@@ -727,8 +727,16 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
Ok(ImportResult::Queued)
}
/// Finalizes all blocks up to given.
fn apply_finality(&self, block: Block::Hash, best_block: Block::Hash, notify: bool) -> error::Result<()> {
/// Finalizes all blocks up to given. If a justification is provided it is
/// stored with the given finalized block (any other finalized blocks are
/// left unjustified).
fn apply_finality(
&self,
block: Block::Hash,
justification: Option<Justification>,
best_block: Block::Hash,
notify: bool,
) -> error::Result<()> {
// find tree route from last finalized to given block.
let last_finalized = self.backend.blockchain().last_finalized()?;
@@ -759,10 +767,15 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
// `block`.
}
for finalize_new in route_from_finalized.enacted() {
self.backend.finalize_block(BlockId::Hash(finalize_new.hash))?;
let enacted = route_from_finalized.enacted();
assert!(enacted.len() > 0);
for finalize_new in &enacted[..enacted.len() - 1] {
self.backend.finalize_block(BlockId::Hash(finalize_new.hash), None)?;
}
assert_eq!(enacted.last().map(|e| e.hash), Some(block));
self.backend.finalize_block(BlockId::Hash(block), justification)?;
if notify {
// sometimes when syncing, tons of blocks can be finalized at once.
// we'll send notifications spuriously in that case.
@@ -791,7 +804,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
/// Pass a flag to indicate whether finality notifications should be propagated.
/// This is usually tied to some synchronization state, where we don't send notifications
/// while performing major synchronization work.
pub fn finalize_block(&self, id: BlockId<Block>, notify: bool) -> error::Result<()> {
pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> error::Result<()> {
let last_best = self.backend.blockchain().info()?.best_hash;
let to_finalize_hash = match id {
BlockId::Hash(h) => h,
@@ -799,7 +812,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
.ok_or_else(|| error::ErrorKind::UnknownBlock(format!("No block with number {:?}", n)))?,
};
self.apply_finality(to_finalize_hash, last_best, notify)
self.apply_finality(to_finalize_hash, justification, last_best, notify)
}
/// Attempts to revert the chain by `n` blocks. Returns the number of blocks that were
@@ -858,7 +871,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
-> error::Result<Option<SignedBlock<Block>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), Some(justification)) =>
(Some(header), Some(extrinsics), justification) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
@@ -1064,7 +1077,8 @@ impl<B, E, Block, RA> consensus::BlockImport<Block> for Client<B, E, Block, RA>
{
type Error = Error;
/// Import a checked and validated block
/// Import a checked and validated block. If a justification is provided in
/// `ImportBlock` then `finalized` *must* be true.
fn import_block(
&self,
import_block: ImportBlock<Block>,
@@ -1081,6 +1095,9 @@ impl<B, E, Block, RA> consensus::BlockImport<Block> for Client<B, E, Block, RA>
finalized,
auxiliary,
} = import_block;
assert!(justification.is_some() && finalized || justification.is_none());
let parent_hash = header.parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(parent_hash))? {
@@ -1254,7 +1271,7 @@ pub(crate) mod tests {
nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(),
}).unwrap();
}
remote_client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap();
let trie_root = header.digest().log(DigestItem::as_changes_trie_root)
@@ -1334,7 +1351,7 @@ pub(crate) mod tests {
let builder = client.new_block().unwrap();
client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.info().unwrap().chain.best_number, 1);
}
@@ -1352,7 +1369,7 @@ pub(crate) mod tests {
nonce: 0,
}).unwrap();
client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.info().unwrap().chain.best_number, 1);
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
@@ -1404,7 +1421,7 @@ pub(crate) mod tests {
nonce: 0,
}).is_err());
client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
assert_eq!(client.info().unwrap().chain.best_number, 1);
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
@@ -1444,11 +1461,11 @@ pub(crate) mod tests {
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
client.import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
client.import(BlockOrigin::Own, a2.clone()).unwrap();
let genesis_hash = client.info().unwrap().chain.genesis_hash;
@@ -1473,23 +1490,23 @@ pub(crate) mod tests {
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
client.import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
client.import(BlockOrigin::Own, a2.clone()).unwrap();
// A2 -> A3
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();
client.import(BlockOrigin::Own, a3.clone()).unwrap();
// A3 -> A4
let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();
client.import(BlockOrigin::Own, a4.clone()).unwrap();
// A4 -> A5
let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();
client.import(BlockOrigin::Own, a5.clone()).unwrap();
// A1 -> B2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
@@ -1501,15 +1518,15 @@ pub(crate) mod tests {
nonce: 0,
}).unwrap();
let b2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();
client.import(BlockOrigin::Own, b2.clone()).unwrap();
// B2 -> B3
let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();
client.import(BlockOrigin::Own, b3.clone()).unwrap();
// B3 -> B4
let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();
client.import(BlockOrigin::Own, b4.clone()).unwrap();
// // B2 -> C3
let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
@@ -1521,7 +1538,7 @@ pub(crate) mod tests {
nonce: 1,
}).unwrap();
let c3 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();
client.import(BlockOrigin::Own, c3.clone()).unwrap();
// A1 -> D2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
@@ -1533,7 +1550,7 @@ pub(crate) mod tests {
nonce: 0,
}).unwrap();
let d2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();
client.import(BlockOrigin::Own, d2.clone()).unwrap();
assert_eq!(client.info().unwrap().chain.best_hash, a5.hash());
@@ -1686,4 +1703,44 @@ pub(crate) mod tests {
}
}
}
#[test]
fn import_with_justification() {
use test_client::blockchain::Backend;
let client = test_client::new();
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.import(BlockOrigin::Own, a2.clone()).unwrap();
// A2 -> A3
let justification = vec![1, 2, 3];
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap();
assert_eq!(
client.backend().blockchain().last_finalized().unwrap(),
a3.hash(),
);
assert_eq!(
client.backend().blockchain().justification(BlockId::Hash(a3.hash())).unwrap(),
Some(justification),
);
assert_eq!(
client.backend().blockchain().justification(BlockId::Hash(a1.hash())).unwrap(),
None,
);
assert_eq!(
client.backend().blockchain().justification(BlockId::Hash(a2.hash())).unwrap(),
None,
);
}
}
+18 -5
View File
@@ -242,13 +242,26 @@ impl<Block: BlockT> Blockchain<Block> {
self.storage.write().header_cht_roots.insert(block, cht_root);
}
fn finalize_header(&self, id: BlockId<Block>) -> error::Result<()> {
fn finalize_header(&self, id: BlockId<Block>, justification: Option<Justification>) -> error::Result<()> {
let hash = match self.header(id)? {
Some(h) => h.hash(),
None => return Err(error::ErrorKind::UnknownBlock(format!("{}", id)).into()),
};
self.storage.write().finalized_hash = hash;
let mut storage = self.storage.write();
storage.finalized_hash = hash;
if justification.is_some() {
let block = storage.blocks.get_mut(&hash)
.expect("hash was fetched from a block in the db; qed");
let block_justification = match block {
StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j
};
*block_justification = justification;
}
Ok(())
}
@@ -352,7 +365,7 @@ impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
}
fn finalize_header(&self, id: BlockId<Block>) -> error::Result<()> {
Blockchain::finalize_header(self, id)
Blockchain::finalize_header(self, id, None)
}
fn header_cht_root(&self, _cht_size: u64, block: NumberFor<Block>) -> error::Result<Block::Hash> {
@@ -543,8 +556,8 @@ where
Ok(())
}
fn finalize_block(&self, block: BlockId<Block>) -> error::Result<()> {
self.blockchain.finalize_header(block)
fn finalize_block(&self, block: BlockId<Block>, justification: Option<Justification>) -> error::Result<()> {
self.blockchain.finalize_header(block, justification)
}
fn blockchain(&self) -> &Self::Blockchain {
+1 -1
View File
@@ -102,7 +102,7 @@ impl<S, F, Block, H> ClientBackend<Block, H> for Backend<S, F> where
)
}
fn finalize_block(&self, block: BlockId<Block>) -> ClientResult<()> {
fn finalize_block(&self, block: BlockId<Block>, _justification: Option<Justification>) -> ClientResult<()> {
self.blockchain.storage().finalize_header(block)
}
+1 -1
View File
@@ -469,7 +469,7 @@ pub mod tests {
let mut local_headers_hashes = Vec::new();
for i in 0..4 {
let builder = remote_client.new_block().unwrap();
remote_client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
remote_client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
local_headers_hashes.push(remote_client.block_hash(i + 1)
.map_err(|_| ClientErrorKind::Backend("TestError".into()).into()));
}
+5 -4
View File
@@ -60,7 +60,7 @@ use codec::Encode;
use consensus_common::{Authorities, BlockImport, Environment, Proposer};
use client::ChainHead;
use consensus_common::{ImportBlock, BlockOrigin};
use runtime_primitives::{generic, generic::BlockId};
use runtime_primitives::{generic, generic::BlockId, Justification};
use runtime_primitives::traits::{Block, Header, Digest, DigestItemFor};
use network::import_queue::{Verifier, BasicQueue};
use primitives::{AuthorityId, ed25519};
@@ -244,7 +244,7 @@ pub fn start_aura<B, C, E, I, SO, Error>(
let import_block = ImportBlock {
origin: BlockOrigin::Own,
header,
justification: Vec::new(),
justification: None,
post_digests: vec![item],
body: Some(body),
finalized: false,
@@ -367,7 +367,7 @@ impl<B: Block, C, E> Verifier<B> for AuraVerifier<C, E> where
&self,
origin: BlockOrigin,
header: B::Header,
_justification: Vec<u8>,
justification: Option<Justification>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
let slot_now = slot_now(self.config.slot_duration)
@@ -390,13 +390,14 @@ impl<B: Block, C, E> Verifier<B> for AuraVerifier<C, E> where
debug!(target: "aura", "Checked {:?}; importing.", pre_header);
extra_verification.into_future().wait()?;
let import_block = ImportBlock {
origin,
header: pre_header,
justification: Vec::new(),
post_digests: vec![item],
body,
finalized: false,
justification,
auxiliary: Vec::new(),
};
@@ -22,7 +22,7 @@ use runtime_primitives::Justification;
use std::borrow::Cow;
/// Block import result.
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq)]
pub enum ImportResult {
/// Added to the import queue.
Queued,
@@ -69,8 +69,8 @@ pub struct ImportBlock<Block: BlockT> {
/// re-executed in a runtime that checks digest equivalence -- the
/// post-runtime digests are pushed back on after.
pub header: Block::Header,
/// Justification provided for this block from the outside:.
pub justification: Justification,
/// Justification provided for this block from the outside.
pub justification: Option<Justification>,
/// Digest items that have been added after the runtime for external
/// work, like a consensus signature.
pub post_digests: Vec<DigestItemFor<Block>>,
@@ -91,7 +91,7 @@ impl<Block: BlockT> ImportBlock<Block> {
-> (
BlockOrigin,
<Block as BlockT>::Header,
Justification,
Option<Justification>,
Vec<DigestItemFor<Block>>,
Option<Vec<<Block as BlockT>::Extrinsic>>,
bool,
+1 -1
View File
@@ -417,7 +417,7 @@ impl<B, P, I, InStream, OutSink> Future for BftFuture<B, P, I, InStream, OutSink
let import_block = ImportBlock {
origin: BlockOrigin::ConsensusBroadcast,
header: header,
justification: just.into(),
justification: Some(just),
body: Some(body),
finalized: true,
post_digests: Default::default(),
+1 -1
View File
@@ -20,7 +20,7 @@ substrate-finality-grandpa-primitives = { path = "primitives" }
rand = "0.6"
[dependencies.finality-grandpa]
version = "0.4.0"
version = "0.5.1"
features = ["derive-codec"]
[dev-dependencies]
@@ -20,6 +20,7 @@ use parking_lot::RwLock;
use substrate_primitives::AuthorityId;
use std::cmp::Ord;
use std::collections::HashMap;
use std::fmt::Debug;
use std::ops::Add;
use std::sync::Arc;
@@ -63,6 +64,11 @@ where
pub(crate) fn set_id(&self) -> u64 {
self.inner.read().set_id
}
/// Get the current authorities and their weights (for the current set ID).
pub(crate) fn current_authorities(&self) -> HashMap<AuthorityId, u64> {
self.inner.read().current_authorities.iter().cloned().collect()
}
}
impl<H, N> From<AuthoritySet<H, N>> for SharedAuthoritySet<H, N> {
@@ -109,8 +115,22 @@ where
N: Add<Output=N> + Ord + Clone + Debug,
H: Debug
{
/// Note an upcoming pending transition.
pub(crate) fn add_pending_change(&mut self, pending: PendingChange<H, N>) {
/// Note an upcoming pending transition. This makes sure that there isn't
/// already any pending change for the same chain. Multiple pending changes
/// are allowed but they must be signalled in different forks. The closure
/// should return an error if the pending change block is equal to or a
/// descendent of the given block.
pub(crate) fn add_pending_change<F, E: Debug>(
&mut self,
pending: PendingChange<H, N>,
is_equal_or_descendent_of: F,
) -> Result<(), E> where
F: Fn(&H) -> Result<(), E>,
{
for change in self.pending_changes.iter() {
is_equal_or_descendent_of(&change.canon_hash)?;
}
// ordered first by effective number and then by signal-block number.
let key = (pending.effective_number(), pending.canon_height.clone());
let idx = self.pending_changes
@@ -121,6 +141,8 @@ where
.unwrap_or_else(|i| i);
self.pending_changes.insert(idx, pending);
Ok(())
}
/// Inspect pending changes.
@@ -141,7 +163,7 @@ where
/// block where the set last changed.
pub(crate) fn apply_changes<F, E>(&mut self, just_finalized: N, mut canonical: F)
-> Result<Status<H, N>, E>
where F: FnMut(N) -> Result<H, E>
where F: FnMut(N) -> Result<Option<H>, E>
{
let mut status = Status {
changed: false,
@@ -156,30 +178,35 @@ where
// check if the block that signalled the change is canonical in
// our chain.
let canonical_at_height = canonical(change.canon_height.clone())?;
let canonical_hash = canonical(change.canon_height.clone())?;
let effective_hash = canonical(effective_number.clone())?;
debug!(target: "afg", "Evaluating potential set change at block {:?}. Our canonical hash is {:?}",
(&change.canon_height, &change.canon_hash), canonical_at_height);
(&change.canon_height, &change.canon_hash), canonical_hash);
if canonical_at_height == change.canon_hash {
// apply this change: make the set canonical
info!(target: "finality", "Applying authority set change scheduled at block #{:?}",
change.canon_height);
match (canonical_hash, effective_hash) {
(Some(canonical_hash), Some(effective_hash)) => {
if canonical_hash == change.canon_hash {
// apply this change: make the set canonical
info!(target: "finality", "Applying authority set change scheduled at block #{:?}",
change.canon_height);
self.current_authorities = change.next_authorities.clone();
self.set_id += 1;
self.current_authorities = change.next_authorities.clone();
self.set_id += 1;
status.new_set_block = Some((
canonical(effective_number.clone())?,
effective_number.clone(),
));
status.new_set_block = Some((
effective_hash,
effective_number.clone(),
));
// discard any signalled changes
// that happened before or equal to the effective number of the change.
self.pending_changes.iter()
.take_while(|c| c.canon_height <= effective_number)
.count()
} else {
1 // prune out this entry; it's no longer relevant.
// discard all signalled changes since they're
// necessarily from other forks
self.pending_changes.len()
} else {
1 // prune out this entry; it's no longer relevant.
}
},
_ => 1, // prune out this entry; it's no longer relevant.
}
}
};
@@ -191,6 +218,28 @@ where
Ok(status)
}
/// Check whether the given finalized block number enacts any authority set
/// change (without triggering it). Provide a closure that can be used to
/// check for the canonical block with a given number.
pub fn enacts_change<F, E>(&self, just_finalized: N, mut canonical: F)
-> Result<bool, E>
where F: FnMut(N) -> Result<Option<H>, E>
{
for change in self.pending_changes.iter() {
if change.effective_number() > just_finalized { break };
// check if the block that signalled the change is canonical in
// our chain.
match canonical(change.canon_height.clone())? {
Some(ref canonical_hash) if *canonical_hash == change.canon_hash =>
return Ok(true),
_ => (),
}
}
Ok(false)
}
}
/// A pending change to the authority set.
@@ -221,6 +270,10 @@ impl<H, N: Add<Output=N> + Clone> PendingChange<H, N> {
mod tests {
use super::*;
fn ignore_existing_changes<A>(_a: &A) -> Result<(), ::Error> {
Ok(())
}
#[test]
fn changes_sorted_in_correct_order() {
let mut authorities = AuthoritySet {
@@ -250,9 +303,9 @@ mod tests {
canon_hash: "hash_c",
};
authorities.add_pending_change(change_a.clone());
authorities.add_pending_change(change_b.clone());
authorities.add_pending_change(change_c.clone());
authorities.add_pending_change(change_a.clone(), ignore_existing_changes).unwrap();
authorities.add_pending_change(change_b.clone(), ignore_existing_changes).unwrap();
authorities.add_pending_change(change_c.clone(), ignore_existing_changes).unwrap();
assert_eq!(authorities.pending_changes, vec![change_a, change_c, change_b]);
}
@@ -282,15 +335,15 @@ mod tests {
canon_hash: "hash_b",
};
authorities.add_pending_change(change_a.clone());
authorities.add_pending_change(change_b.clone());
authorities.add_pending_change(change_a.clone(), ignore_existing_changes).unwrap();
authorities.add_pending_change(change_b.clone(), ignore_existing_changes).unwrap();
authorities.apply_changes(10, |_| Err(())).unwrap();
assert!(authorities.current_authorities.is_empty());
authorities.apply_changes(15, |n| match n {
5 => Ok("hash_a"),
15 => Ok("hash_15_canon"),
5 => Ok(Some("hash_a")),
15 => Ok(Some("hash_15_canon")),
_ => Err(()),
}).unwrap();
@@ -300,7 +353,7 @@ mod tests {
}
#[test]
fn apply_many_changes_at_once() {
fn disallow_multiple_changes_on_same_fork() {
let mut authorities = AuthoritySet {
current_authorities: Vec::new(),
set_id: 0,
@@ -318,11 +371,10 @@ mod tests {
canon_hash: "hash_a",
};
// will be ignored because it was signalled when change_a still pending.
let change_b = PendingChange {
next_authorities: set_b.clone(),
finalization_depth: 10,
canon_height: 15,
canon_height: 16,
canon_hash: "hash_b",
};
@@ -333,20 +385,50 @@ mod tests {
canon_hash: "hash_c",
};
authorities.add_pending_change(change_a.clone());
authorities.add_pending_change(change_b.clone());
authorities.add_pending_change(change_c.clone());
let is_equal_or_descendent_of = |base, block| -> Result<(), ()> {
match (base, block) {
("hash_a", "hash_b") => return Err(()),
("hash_a", "hash_c") => return Ok(()),
("hash_c", "hash_b") => return Ok(()),
_ => unreachable!(),
}
};
authorities.apply_changes(26, |n| match n {
5 => Ok("hash_a"),
15 => Ok("hash_b"),
16 => Ok("hash_c"),
26 => Ok("hash_26"),
authorities.add_pending_change(
change_a.clone(),
|base| is_equal_or_descendent_of(base, change_a.canon_hash),
).unwrap();
// change b is on the same chain as the unfinalized change a so it should error
assert!(
authorities.add_pending_change(
change_b.clone(),
|base| is_equal_or_descendent_of(base, change_b.canon_hash),
).is_err()
);
// change c is accepted because it's on a different fork
authorities.add_pending_change(
change_c.clone(),
|base| is_equal_or_descendent_of(base, change_c.canon_hash)
).unwrap();
authorities.apply_changes(15, |n| match n {
5 => Ok(Some("hash_a")),
15 => Ok(Some("hash_a15")),
_ => Err(()),
}).unwrap();
assert_eq!(authorities.current_authorities, set_c);
assert_eq!(authorities.set_id, 2); // has been bumped only twice
assert_eq!(authorities.current_authorities, set_a);
// pending change c has been removed since it was on a different fork
// and can no longer be enacted
assert!(authorities.pending_changes.is_empty());
// pending change b can now be added
authorities.add_pending_change(
change_b.clone(),
|base| is_equal_or_descendent_of(base, change_b.canon_hash),
).unwrap();
}
}
@@ -32,7 +32,7 @@ fn localized_payload<E: Encode>(round: u64, set_id: u64, message: &E) -> Vec<u8>
}
// check a message.
fn check_message_sig<Block: BlockT>(
pub(crate) fn check_message_sig<Block: BlockT>(
message: &Message<Block>,
id: &AuthorityId,
signature: &ed25519::Signature,
+614 -145
View File
@@ -84,7 +84,8 @@ extern crate parity_codec_derive;
use futures::prelude::*;
use futures::sync::mpsc;
use client::{
Client, error::Error as ClientError, backend::Backend, CallExecutor, BlockchainEvents
BlockchainEvents, CallExecutor, Client, backend::Backend,
error::Error as ClientError, error::ErrorKind as ClientErrorKind,
};
use client::blockchain::HeaderBackend;
use client::runtime_api::TaggedTransactionQueue;
@@ -96,14 +97,15 @@ use runtime_primitives::traits::{
use fg_primitives::GrandpaApi;
use runtime_primitives::generic::BlockId;
use substrate_primitives::{ed25519, H256, AuthorityId, Blake2Hasher};
use tokio::timer::Delay;
use tokio::timer::{Delay, Interval};
use grandpa::Error as GrandpaError;
use grandpa::{voter, round::State as RoundState, Equivocation, BlockNumberOps};
use network::{Service as NetworkService, ExHashT};
use network::consensus_gossip::{ConsensusMessage};
use std::collections::HashMap;
use parking_lot::Mutex;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::Arc;
use std::time::{Instant, Duration};
@@ -197,6 +199,12 @@ impl From<GrandpaError> for Error {
}
}
impl From<ClientError> for Error {
fn from(e: ClientError) -> Self {
Error::Client(e)
}
}
/// A handle to the network. This is generally implemented by providing some
/// handle to a gossip service or similar.
///
@@ -508,78 +516,8 @@ impl<B, E, Block: BlockT<Hash=H256>, N, RA> voter::Environment<Block::Hash, Numb
}
}
fn finalize_block(&self, hash: Block::Hash, number: NumberFor<Block>, _commit: Commit<Block>) -> Result<(), Self::Error> {
// ideally some handle to a synchronization oracle would be used
// to avoid unconditionally notifying.
if let Err(e) = self.inner.finalize_block(BlockId::Hash(hash), true) {
warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e);
// we return without error because not being able to finalize (temporarily) is
// non-fatal.
return Ok(());
}
debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash);
// lock must be held through writing to DB to avoid race
let mut authority_set = self.authority_set.inner().write();
let client = &self.inner;
let status = authority_set.apply_changes(number, |canon_number| {
client.block_hash_from_id(&BlockId::number(canon_number))
.map(|h| h.expect("given number always less than newly-finalized number; \
thus there is a block with that number finalized already; qed"))
})?;
if status.changed {
// write new authority set state to disk.
let encoded_set = authority_set.encode();
let write_result = if let Some((ref canon_hash, ref canon_number)) = status.new_set_block {
// we also overwrite the "last completed round" entry with a blank slate
// because from the perspective of the finality gadget, the chain has
// reset.
let round_state = RoundState::genesis((*canon_hash, *canon_number));
let last_completed: LastCompleted<_, _> = (0, round_state);
let encoded = last_completed.encode();
client.backend().insert_aux(
&[
(AUTHORITY_SET_KEY, &encoded_set[..]),
(LAST_COMPLETED_KEY, &encoded[..]),
],
&[]
)
} else {
client.backend().insert_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])], &[])
};
if let Err(e) = write_result {
warn!(target: "finality", "Failed to write updated authority set to disk. Bailing.");
warn!(target: "finality", "Node is in a potentially inconsistent state.");
return Err(e.into());
}
}
if let Some((canon_hash, canon_number)) = status.new_set_block {
// the authority set has changed.
let (new_id, set_ref) = authority_set.current();
if set_ref.len() > 16 {
info!("Applying GRANDPA set change to new set with {} authorities", set_ref.len());
} else {
info!("Applying GRANDPA set change to new set {:?}", set_ref);
}
Err(ExitOrError::AuthoritiesChanged(NewAuthoritySet {
canon_hash,
canon_number,
set_id: new_id,
authorities: set_ref.to_vec(),
}))
} else {
Ok(())
}
fn finalize_block(&self, hash: Block::Hash, number: NumberFor<Block>, round: u64, commit: Commit<Block>) -> Result<(), Self::Error> {
finalize_block(&*self.inner, &self.authority_set, hash, number, (round, commit).into())
}
fn round_commit_timer(&self) -> Self::Timer {
@@ -611,19 +549,344 @@ impl<B, E, Block: BlockT<Hash=H256>, N, RA> voter::Environment<Block::Hash, Numb
}
}
/// A GRANDPA justification for block finality, it includes a commit message and
/// an ancestry proof including all headers routing all precommit target blocks
/// to the commit target block. Due to the current voting strategy the precommit
/// targets should be the same as the commit target, since honest voters don't
/// vote past authority set change blocks.
///
/// This is meant to be stored in the db and passed around the network to other
/// nodes, and is used by syncing nodes to prove authority set handoffs.
#[derive(Encode, Decode)]
struct GrandpaJustification<Block: BlockT> {
// the round number in which the commit was generated
round: u64,
// the commit message carrying the precommits that finalize the target block
commit: Commit<Block>,
// headers routing each precommit target back to the commit target; used as
// the ancestry proof when verifying the commit
votes_ancestries: Vec<Block::Header>,
}
impl<Block: BlockT<Hash=H256>> GrandpaJustification<Block> {
/// Create a GRANDPA justification from the given commit. This method
/// assumes the commit is valid and well-formed.
///
/// The `votes_ancestries` proof is built by walking each precommit target
/// back to the commit target through the client's block database. Returns
/// a `BadJustification` error if any precommit target cannot be routed to
/// the commit target (i.e. it is on a different fork or unknown).
fn from_commit<B, E, RA>(
client: &Client<B, E, Block, RA>,
round: u64,
commit: Commit<Block>,
) -> Result<GrandpaJustification<Block>, Error> where
B: Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
RA: Send + Sync,
{
// headers are deduplicated across precommits: the hash set tracks which
// headers were already collected into `votes_ancestries`.
let mut votes_ancestries_hashes = HashSet::new();
let mut votes_ancestries = Vec::new();
let error = || {
let msg = "invalid precommits for target commit".to_string();
Err(Error::Client(ClientErrorKind::BadJustification(msg).into()))
};
for signed in commit.precommits.iter() {
// walk backwards from the precommit target towards the commit target,
// collecting every header on the way.
let mut current_hash = signed.precommit.target_hash.clone();
loop {
if current_hash == commit.target_hash { break; }
match client.backend().blockchain().header(BlockId::Hash(current_hash))? {
Some(current_header) => {
// if we reach the commit target's height (or below) without having
// hit its hash, the precommit target is not a descendent of it.
if *current_header.number() <= commit.target_number {
return error();
}
let parent_hash = current_header.parent_hash().clone();
if votes_ancestries_hashes.insert(current_hash) {
votes_ancestries.push(current_header);
}
current_hash = parent_hash;
},
// unknown header: ancestry cannot be proven.
_ => return error(),
}
}
}
Ok(GrandpaJustification { round, commit, votes_ancestries })
}
/// Decode a GRANDPA justification and validate the commit and the votes'
/// ancestry proofs.
///
/// Checks, in order: that the bytes decode; that the commit is valid for
/// the given voter set (via `grandpa::validate_commit`); that every
/// precommit signature verifies for this `round` and `set_id`; that every
/// precommit target is routable to the commit target through the supplied
/// ancestry headers; and that no extraneous (unused) headers were included.
fn decode_and_verify(
encoded: Vec<u8>,
set_id: u64,
voters: &HashMap<AuthorityId, u64>,
) -> Result<GrandpaJustification<Block>, ClientError> where
NumberFor<Block>: grandpa::BlockNumberOps,
{
use grandpa::Chain;
let justification = match GrandpaJustification::decode(&mut &*encoded) {
Some(justification) => justification,
_ => {
let msg = "failed to decode grandpa justification".to_string();
return Err(ClientErrorKind::BadJustification(msg).into());
}
};
// validate the commit against the ancestry headers bundled with it.
let ancestry_chain = AncestryChain::<Block>::new(&justification.votes_ancestries);
match grandpa::validate_commit(
&justification.commit,
voters,
None,
&ancestry_chain,
) {
Ok(Some(_)) => {},
// anything other than `Ok(Some(_))` (including `Ok(None)`) is
// treated as an invalid commit.
_ => {
let msg = "invalid commit in grandpa justification".to_string();
return Err(ClientErrorKind::BadJustification(msg).into());
}
}
// collect every ancestry header actually used by some precommit so we
// can later reject justifications carrying unused headers.
let mut visited_hashes = HashSet::new();
for signed in justification.commit.precommits.iter() {
if let Err(_) = communication::check_message_sig::<Block>(
&grandpa::Message::Precommit(signed.precommit.clone()),
&signed.id,
&signed.signature,
justification.round,
set_id,
) {
return Err(ClientErrorKind::BadJustification(
"invalid signature for precommit in grandpa justification".to_string()).into());
}
// a precommit directly on the commit target needs no ancestry proof.
if justification.commit.target_hash == signed.precommit.target_hash {
continue;
}
match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) {
Ok(route) => {
// ancestry starts from parent hash but the precommit target hash has been visited
visited_hashes.insert(signed.precommit.target_hash);
for hash in route {
visited_hashes.insert(hash);
}
},
_ => {
return Err(ClientErrorKind::BadJustification(
"invalid precommit ancestry proof in grandpa justification".to_string()).into());
},
}
}
let ancestry_hashes = justification.votes_ancestries
.iter()
.map(|h: &Block::Header| h.hash())
.collect();
// every supplied header must have been used by some precommit's proof.
if visited_hashes != ancestry_hashes {
return Err(ClientErrorKind::BadJustification(
"invalid precommit ancestries in grandpa justification with unused headers".to_string()).into());
}
Ok(justification)
}
}
// Input to `finalize_block`: either an already decoded-and-verified
// justification (as received when importing a change block), or a raw
// `(round number, commit)` pair produced by the local voter, from which a
// justification can be created on demand.
enum JustificationOrCommit<Block: BlockT> {
Justification(GrandpaJustification<Block>),
Commit((u64, Commit<Block>)),
}
impl<Block: BlockT> From<(u64, Commit<Block>)> for JustificationOrCommit<Block> {
	/// Wrap a `(round number, commit)` pair produced by the voter.
	fn from(round_and_commit: (u64, Commit<Block>)) -> Self {
		JustificationOrCommit::Commit(round_and_commit)
	}
}
impl<Block: BlockT> From<GrandpaJustification<Block>> for JustificationOrCommit<Block> {
	/// Wrap an already decoded-and-verified justification.
	fn from(j: GrandpaJustification<Block>) -> Self {
		JustificationOrCommit::Justification(j)
	}
}
/// Finalize the given block and apply any authority set changes. If an
/// authority set change is enacted then a justification is created (if not
/// given) and stored with the block when finalizing it.
///
/// NOTE: when finalizing this block enacts a new authority set, the function
/// returns `Err(ExitOrError::AuthoritiesChanged(_))` rather than `Ok` — the
/// error variant is used as a signal so the calling voter restarts with the
/// new set.
fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
client: &Client<B, E, Block, RA>,
authority_set: &SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
hash: Block::Hash,
number: NumberFor<Block>,
justification_or_commit: JustificationOrCommit<Block>,
) -> Result<(), ExitOrError<Block::Hash, NumberFor<Block>>> where
B: Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
RA: Send + Sync,
{
// lock must be held through writing to DB to avoid race
let mut authority_set = authority_set.inner().write();
// apply any pending authority set change enacted at `number`, resolving
// canonical hashes relative to the block being finalized.
let status = authority_set.apply_changes(number, |canon_number| {
canonical_at_height(client, (hash, number), canon_number)
})?;
if status.changed {
// write new authority set state to disk.
let encoded_set = authority_set.encode();
let write_result = if let Some((ref canon_hash, ref canon_number)) = status.new_set_block {
// we also overwrite the "last completed round" entry with a blank slate
// because from the perspective of the finality gadget, the chain has
// reset.
let round_state = RoundState::genesis((*canon_hash, *canon_number));
let last_completed: LastCompleted<_, _> = (0, round_state);
let encoded = last_completed.encode();
client.backend().insert_aux(
&[
(AUTHORITY_SET_KEY, &encoded_set[..]),
(LAST_COMPLETED_KEY, &encoded[..]),
],
&[]
)
} else {
client.backend().insert_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])], &[])
};
if let Err(e) = write_result {
warn!(target: "finality", "Failed to write updated authority set to disk. Bailing.");
warn!(target: "finality", "Node is in a potentially inconsistent state.");
return Err(e.into());
}
}
// NOTE: this code assumes that honest voters will never vote past a
// transition block, thus we don't have to worry about the case where
// we have a transition with `effective_block = N`, but we finalize
// `N+1`. this assumption is required to make sure we store
// justifications for transition blocks which will be requested by
// syncing clients.
let justification = match justification_or_commit {
// an already verified justification was provided: store it as-is.
JustificationOrCommit::Justification(justification) => Some(justification.encode()),
// for a raw commit, only create (and store) a justification when this
// block enacts an authority set change; otherwise nothing is stored.
JustificationOrCommit::Commit((round_number, commit)) =>
if status.new_set_block.is_some() {
let justification = GrandpaJustification::from_commit(
client,
round_number,
commit,
)?;
Some(justification.encode())
} else {
None
},
};
debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash);
// ideally some handle to a synchronization oracle would be used
// to avoid unconditionally notifying.
client.finalize_block(BlockId::Hash(hash), justification, true).map_err(|e| {
warn!(target: "finality", "Error applying finality to block {:?}: {:?}", (hash, number), e);
warn!(target: "finality", "Node is in a potentially inconsistent state.");
e
})?;
if let Some((canon_hash, canon_number)) = status.new_set_block {
// the authority set has changed.
let (new_id, set_ref) = authority_set.current();
if set_ref.len() > 16 {
info!("Applying GRANDPA set change to new set with {} authorities", set_ref.len());
} else {
info!("Applying GRANDPA set change to new set {:?}", set_ref);
}
// signal the set change to the caller via the error variant.
Err(ExitOrError::AuthoritiesChanged(NewAuthoritySet {
canon_hash,
canon_number,
set_id: new_id,
authorities: set_ref.to_vec(),
}))
} else {
Ok(())
}
}
/// An oracle for liveness checking of a GRANDPA authority set. This is used
/// when importing blocks: if the block enacts an authority set change then
/// either it must provide a justification or, if the GRANDPA authority set is
/// still live, the block can be imported unjustified since the block will
/// still be finalized by GRANDPA in a future round. The current heuristic for
/// deciding whether an authority set is live is to check if there were any
/// recent commit messages on an unfiltered stream.
struct GrandpaOracle<Block: BlockT> {
// stream of signature-checked `(round, commit)` messages, not yet filtered
// by the voter
unfiltered_commits_stream: Box<dyn Stream<Item=(u64, CompactCommit<Block>), Error=Error> + Send>,
// when the most recent commit was observed, and which block it targeted;
// `None` until the first commit is seen
last_commit_target: Option<(Instant, Block::Hash, NumberFor<Block>)>,
}
impl<Block: BlockT> GrandpaOracle<Block> {
	/// Wrap the given unfiltered commit stream in an oracle that has not yet
	/// observed any commit.
	fn new(stream: Box<dyn Stream<Item=(u64, CompactCommit<Block>), Error=Error> + Send>) -> GrandpaOracle<Block> {
		GrandpaOracle {
			last_commit_target: None,
			unfiltered_commits_stream: stream,
		}
	}

	/// Drain every commit currently ready on the stream, remembering the
	/// arrival time and target of the most recent one.
	fn poll(&mut self) {
		loop {
			match self.unfiltered_commits_stream.poll() {
				Ok(Async::Ready(Some((_, commit)))) => {
					let observed = (Instant::now(), commit.target_hash, commit.target_number);
					self.last_commit_target = Some(observed);
				},
				// not ready, stream ended, or stream error: stop draining.
				_ => break,
			}
		}
	}

	/// The authority set is considered live when a commit was observed within
	/// the last 30 seconds. Before any commit is seen this is `false`.
	fn is_live(&self) -> bool {
		self.last_commit_target.map_or(false, |(instant, _, _)| {
			instant.elapsed() < Duration::from_secs(30)
		})
	}
}
// A cloneable handle to a lazily-installed `GrandpaOracle`, shared between the
// block import and the voter future.
#[derive(Clone)]
struct SharedGrandpaOracle<Block: BlockT> {
// `None` until an oracle is installed for the current authority set
inner: Arc<Mutex<Option<GrandpaOracle<Block>>>>,
}
impl<Block: BlockT> SharedGrandpaOracle<Block> {
	/// Create a shared handle with no underlying oracle installed yet.
	fn empty() -> SharedGrandpaOracle<Block> {
		SharedGrandpaOracle { inner: Arc::new(Mutex::new(None)) }
	}

	/// Poll the underlying oracle, if one has been installed.
	fn poll(&self) {
		let mut guard = self.inner.lock();
		if let Some(oracle) = guard.as_mut() {
			oracle.poll();
		}
	}

	/// Whether the tracked authority set is considered live. Always `false`
	/// while no oracle has been installed.
	fn is_live(&self) -> bool {
		match self.inner.lock().as_ref() {
			Some(oracle) => oracle.is_live(),
			None => false,
		}
	}
}
/// A block-import handler for GRANDPA.
///
/// This scans each imported block for signals of changing authority set.
/// If the block being imported enacts an authority set change then:
/// - If the current authority set is still live: we import the block
/// - Otherwise, the block must include a valid justification.
///
/// When using GRANDPA, the block import worker should be using this block import
/// object.
pub struct GrandpaBlockImport<B, E, Block: BlockT<Hash=H256>, RA, PRA> {
// the underlying client used to import and finalize blocks
inner: Arc<Client<B, E, Block, RA>>,
// current and pending GRANDPA authority sets, shared with the voter
authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
// channel for signalling enacted authority set changes to the running voter
authority_set_change: mpsc::UnboundedSender<NewAuthoritySet<Block::Hash, NumberFor<Block>>>,
// liveness oracle for the current authority set
authority_set_oracle: SharedGrandpaOracle<Block>,
// runtime API, used to query for scheduled authority set changes
api: Arc<PRA>,
}
impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block>
for GrandpaBlockImport<B, E, Block, RA, PRA> where
NumberFor<Block>: grandpa::BlockNumberOps,
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
DigestFor<Block>: Encode,
@@ -638,43 +901,160 @@ impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> BlockImport<Block>
{
use authorities::PendingChange;
let maybe_change = self.api.runtime_api().grandpa_pending_change(
&BlockId::hash(*block.header.parent_hash()),
&block.header.digest().clone(),
)?;
// we don't want to finalize on `inner.import_block`
let justification = block.justification.take();
let number = block.header.number().clone();
let hash = block.post_header().hash();
let parent_hash = *block.header.parent_hash();
let digest = block.header.digest().clone();
let is_live = self.authority_set_oracle.is_live();
// when we update the authorities, we need to hold the lock
// until the block is written to prevent a race if we need to restore
// the old authority set on error.
let just_in_case = maybe_change.map(|change| {
let hash = block.post_header().hash();
let number = block.header.number().clone();
let mut authorities = self.authority_set.inner().write();
let old_set = authorities.clone();
authorities.add_pending_change(PendingChange {
next_authorities: change.next_authorities,
finalization_depth: change.delay,
canon_height: number,
canon_hash: hash,
});
block.auxiliary.push((AUTHORITY_SET_KEY.to_vec(), Some(authorities.encode())));
(old_set, authorities)
});
let result = self.inner.import_block(block, new_authorities);
if let Err(ref e) = result {
if let Some((old_set, mut authorities)) = just_in_case {
debug!(target: "afg", "Restoring old set after block import error: {:?}", e);
*authorities = old_set;
}
let import_result = self.inner.import_block(block, new_authorities)?;
if import_result != ImportResult::Queued {
return Ok(import_result);
}
result
let maybe_change = self.api.runtime_api().grandpa_pending_change(
&BlockId::hash(parent_hash),
&digest,
)?;
let is_equal_or_descendent_of = |base: &Block::Hash| -> Result<(), ClientError> {
let error = || {
Err(ClientErrorKind::Backend(
"invalid authority set change: multiple pending changes on the same chain".to_string()
).into())
};
if *base == hash { return error(); }
if *base == parent_hash { return error(); }
let tree_route = ::client::blockchain::tree_route(
self.inner.backend().blockchain(),
BlockId::Hash(parent_hash),
BlockId::Hash(*base),
)?;
if tree_route.common_block().hash == *base {
return error();
}
Ok(())
};
if let Some(change) = maybe_change {
let mut authorities = self.authority_set.inner().write();
authorities.add_pending_change(
PendingChange {
next_authorities: change.next_authorities,
finalization_depth: change.delay,
canon_height: number,
canon_hash: hash,
},
is_equal_or_descendent_of,
)?;
let encoded = authorities.encode();
self.inner.backend().insert_aux(&[(AUTHORITY_SET_KEY, &encoded[..])], &[])?;
};
let enacts_change = self.authority_set.inner().read().enacts_change(number, |canon_number| {
canonical_at_height(&self.inner, (hash, number), canon_number)
})?;
// a pending change is enacted by the given block, if the current
// grandpa authority set isn't live anymore the provided `ImportBlock`
// should include a justification for finalizing the block.
match justification {
Some(justification) => {
if enacts_change && !is_live {
let justification = GrandpaJustification::decode_and_verify(
justification,
self.authority_set.set_id(),
&self.authority_set.current_authorities(),
)?;
let result = finalize_block(
&*self.inner,
&self.authority_set,
hash,
number,
justification.into(),
);
match result {
Ok(_) => {
unreachable!("returns Ok when no authority set change should be enacted; \
verified previously that finalizing the current block enacts a change; \
qed;");
},
Err(ExitOrError::AuthoritiesChanged(new)) => {
debug!(target: "finality", "Imported justified block #{} that enacts authority set change, signalling voter.", number);
if let Err(_) = self.authority_set_change.unbounded_send(new) {
return Err(ClientErrorKind::Backend(
"imported and finalized change block but grandpa voter is no longer running".to_string()
).into());
}
},
Err(ExitOrError::Error(_)) => {
return Err(ClientErrorKind::Backend(
"imported change block but failed to finalize it, node may be in an inconsistent state".to_string()
).into());
},
}
}
},
None if enacts_change && !is_live => {
return Err(ClientErrorKind::BadJustification(
"missing justification for block that enacts authority set change".to_string()
).into());
},
_ => {}
}
Ok(import_result)
}
}
/// Using the given base get the block at the given height on this chain. The
/// target block must be an ancestor of base, therefore `height <= base.height`.
///
/// Returns `Ok(None)` when `height` is above the base or when some header on
/// the walk back is missing from the client's database.
fn canonical_at_height<B, E, Block: BlockT<Hash=H256>, RA>(
	client: &Client<B, E, Block, RA>,
	base: (Block::Hash, NumberFor<Block>),
	height: NumberFor<Block>,
) -> Result<Option<Block::Hash>, ClientError> where
	B: Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher> + Send + Sync,
{
	use runtime_primitives::traits::{One, Zero};

	let (base_hash, base_number) = base;

	if height > base_number {
		return Ok(None);
	}

	if height == base_number {
		return Ok(Some(base_hash));
	}

	// walk backwards from the base header, one parent at a time, until the
	// requested height is reached.
	let mut header = match client.header(&BlockId::Hash(base_hash))? {
		Some(header) => header,
		None => return Ok(None),
	};

	let mut remaining = base_number - height;
	while remaining > NumberFor::<Block>::zero() {
		header = match client.header(&BlockId::Hash(*header.parent_hash()))? {
			Some(parent) => parent,
			None => return Ok(None),
		};
		remaining -= NumberFor::<Block>::one();
	}

	Ok(Some(header.hash()))
}
impl<B, E, Block: BlockT<Hash=H256>, RA, PRA> Authorities<Block> for GrandpaBlockImport<B, E, Block, RA, PRA>
where
B: Backend<Block, Blake2Hasher> + 'static,
@@ -693,18 +1073,49 @@ where
pub struct LinkHalf<B, E, Block: BlockT<Hash=H256>, RA> {
client: Arc<Client<B, E, Block, RA>>,
authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
authority_set_change: mpsc::UnboundedReceiver<NewAuthoritySet<Block::Hash, NumberFor<Block>>>,
authority_set_oracle: SharedGrandpaOracle<Block>,
}
impl<B, E, Block: BlockT<Hash=H256>, RA> Clone for LinkHalf<B, E, Block, RA>
where
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone + Send + Sync,
RA: TaggedTransactionQueue<Block>, // necessary for client to import `BlockImport`.
struct AncestryChain<Block: BlockT> {
ancestry: HashMap<Block::Hash, Block::Header>,
}
impl<Block: BlockT> AncestryChain<Block> {
fn new(ancestry: &[Block::Header]) -> AncestryChain<Block> {
let ancestry: HashMap<_, _> = ancestry
.iter()
.cloned()
.map(|h: Block::Header| (h.hash(), h))
.collect();
AncestryChain { ancestry }
}
}
impl<Block: BlockT> grandpa::Chain<Block::Hash, NumberFor<Block>> for AncestryChain<Block> where
NumberFor<Block>: grandpa::BlockNumberOps
{
fn clone(&self) -> Self {
LinkHalf {
client: self.client.clone(),
authority_set: self.authority_set.clone()
// Walk backwards from `block` to `base` using only the headers held in
// `self.ancestry`, returning the hashes strictly between them (excluding
// both endpoints). Fails with `NotDescendent` if the walk hits a hash for
// which no header was provided before reaching `base`.
fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result<Vec<Block::Hash>, GrandpaError> {
let mut route = Vec::new();
let mut current_hash = block;
loop {
if current_hash == base { break; }
match self.ancestry.get(&current_hash) {
Some(current_header) => {
// push the parent hash (never `block` itself); when the parent is
// `base` it gets pushed here and removed by the `pop` below.
current_hash = *current_header.parent_hash();
route.push(current_hash);
},
_ => return Err(GrandpaError::NotDescendent),
}
}
route.pop(); // remove the base
Ok(route)
}
fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor<Block>)> {
None
}
}
@@ -746,13 +1157,24 @@ pub fn block_import<B, E, Block: BlockT<Hash=H256>, RA, PRA>(
.into(),
};
let (authority_set_change_tx, authority_set_change_rx) = mpsc::unbounded();
let authority_set_oracle = SharedGrandpaOracle::empty();
Ok((
GrandpaBlockImport {
inner: client.clone(),
authority_set: authority_set.clone(),
authority_set_change: authority_set_change_tx,
authority_set_oracle: authority_set_oracle.clone(),
api
},
LinkHalf { client, authority_set },
LinkHalf {
client,
authority_set,
authority_set_change: authority_set_change_rx,
authority_set_oracle,
},
))
}
@@ -808,7 +1230,10 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
config: Config,
link: LinkHalf<B, E, Block, RA>,
network: N,
) -> ::client::error::Result<impl Future<Item=(),Error=()> + Send + 'static> where
) -> ::client::error::Result<(
impl Future<Item=(),Error=()> + Send + 'static,
impl Future<Item=(),Error=()> + Send + 'static,
)> where
Block::Hash: Ord,
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
@@ -821,7 +1246,20 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
use futures::future::{self, Loop as FutureLoop};
use runtime_primitives::traits::Zero;
let LinkHalf { client, authority_set } = link;
let LinkHalf {
client,
authority_set,
authority_set_change,
authority_set_oracle
} = link;
let oracle_work = {
let authority_set_oracle = authority_set_oracle.clone();
Interval::new(Instant::now(), Duration::from_secs(1))
.for_each(move |_| Ok(authority_set_oracle.poll()))
.map_err(|_| ())
};
let chain_info = client.info()?;
let genesis_hash = chain_info.chain.genesis_hash;
@@ -833,9 +1271,7 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
))?
};
let voters = authority_set.inner().read().current().1.iter()
.cloned()
.collect();
let voters = authority_set.current_authorities();
let initial_environment = Arc::new(Environment {
inner: client.clone(),
@@ -846,8 +1282,9 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
authority_set: authority_set.clone(),
});
let work = future::loop_fn((initial_environment, last_round_number, last_state), move |params| {
let (env, last_round_number, last_state) = params;
let initial_state = (initial_environment, last_round_number, last_state, authority_set_change.into_future());
let voter_work = future::loop_fn(initial_state, move |params| {
let (env, last_round_number, last_state, authority_set_change) = params;
debug!(target: "afg", "{}: Starting new voter with set ID {}", config.name(), env.set_id);
let chain_info = match client.info() {
@@ -867,6 +1304,14 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
&network,
);
let unfiltered_commits_stream = Box::new(::communication::checked_commit_stream::<Block, _>(
env.set_id,
network.commit_messages(env.set_id),
env.voters.clone(),
));
*authority_set_oracle.inner.lock() = Some(GrandpaOracle::new(unfiltered_commits_stream));
let voters = (*env.voters).clone();
let voter = voter::Voter::new(
@@ -881,30 +1326,54 @@ pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N, RA>(
let config = config.clone();
let network = network.clone();
let authority_set = authority_set.clone();
future::Either::A(voter.then(move |res| match res {
// voters don't conclude naturally; this could reasonably be an error.
Ok(()) => Ok(FutureLoop::Break(())),
Err(ExitOrError::Error(e)) => Err(e),
Err(ExitOrError::AuthoritiesChanged(new)) => {
let env = Arc::new(Environment {
inner: client,
config,
voters: Arc::new(new.authorities.into_iter().collect()),
set_id: new.set_id,
network,
authority_set,
});
// start the new authority set using the block where the
// set changed (not where the signal happened!) as the base.
Ok(FutureLoop::Continue((
env,
0, // always start at round 0 when changing sets.
RoundState::genesis((new.canon_hash, new.canon_number)),
)))
let trigger_authority_set_change = |new: NewAuthoritySet<_, _>, authority_set_change| {
let env = Arc::new(Environment {
inner: client,
config,
voters: Arc::new(new.authorities.into_iter().collect()),
set_id: new.set_id,
network,
authority_set,
});
// start the new authority set using the block where the
// set changed (not where the signal happened!) as the base.
Ok(FutureLoop::Continue((
env,
0, // always start at round 0 when changing sets.
RoundState::genesis((new.canon_hash, new.canon_number)),
authority_set_change,
)))
};
future::Either::A(voter.select2(authority_set_change).then(move |res| match res {
Ok(future::Either::A(((), _))) => {
// voters don't conclude naturally; this could reasonably be an error.
Ok(FutureLoop::Break(()))
},
Err(future::Either::B(_)) => {
// the `authority_set_change` stream should not fail.
Ok(FutureLoop::Break(()))
},
Ok(future::Either::B(((None, _), _))) => {
// the `authority_set_change` stream should never conclude since it's never closed.
Ok(FutureLoop::Break(()))
},
Err(future::Either::A((ExitOrError::Error(e), _))) => {
// return inner voter error
Err(e)
}
Ok(future::Either::B(((Some(new), authority_set_change), _))) => {
// authority set change triggered externally through the channel
trigger_authority_set_change(new, authority_set_change.into_future())
}
Err(future::Either::A((ExitOrError::AuthoritiesChanged(new), authority_set_change))) => {
// authority set change triggered internally by finalizing a change block
trigger_authority_set_change(new, authority_set_change)
},
}))
});
}).map_err(|e| warn!("GRANDPA Voter failed: {:?}", e));
Ok(work.map_err(|e| warn!("GRANDPA Voter failed: {:?}", e)))
Ok((voter_work, oracle_work))
}
+90 -46
View File
@@ -364,7 +364,7 @@ fn finalize_3_voters_no_observers() {
);
fn assert_send<T: Send>(_: &T) { }
let voter = run_grandpa(
let (voter, oracle) = run_grandpa(
Config {
gossip_duration: TEST_GOSSIP_DURATION,
local_key: Some(Arc::new(key.clone().into())),
@@ -376,6 +376,7 @@ fn finalize_3_voters_no_observers() {
assert_send(&voter);
runtime.spawn(oracle);
runtime.spawn(voter);
}
@@ -424,7 +425,7 @@ fn finalize_3_voters_1_observer() {
.take_while(|n| Ok(n.header.number() < &20))
.for_each(move |_| Ok(()))
);
let voter = run_grandpa(
let (voter, oracle) = run_grandpa(
Config {
gossip_duration: TEST_GOSSIP_DURATION,
local_key,
@@ -434,6 +435,7 @@ fn finalize_3_voters_1_observer() {
MessageRouting::new(net.clone(), peer_id),
).expect("all in order with client and network");
runtime.spawn(oracle);
runtime.spawn(voter);
}
@@ -476,59 +478,96 @@ fn transition_3_voters_twice_1_observer() {
let api = TestApi::new(genesis_voters);
let transitions = api.scheduled_changes.clone();
let add_transition = move |parent_hash, change| {
transitions.lock().insert(parent_hash, change);
};
let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8)));
let mut net = GrandpaTestNet::new(api, 9);
let mut runtime = tokio::runtime::Runtime::new().unwrap();
// first 20 blocks: transition at 15, applied at 20.
{
net.peer(0).push_blocks(14, false);
net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| {
let block = builder.bake().unwrap();
add_transition(*block.header.parent_hash(), ScheduledChange {
next_authorities: make_ids(peers_b),
delay: 4,
});
net.lock().peer(0).push_blocks(1, false);
net.lock().sync();
block
});
net.peer(0).push_blocks(5, false);
}
// at block 21 we do another transition, but this time instant.
// add more until we have 30.
{
net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| {
let block = builder.bake().unwrap();
add_transition(*block.header.parent_hash(), ScheduledChange {
next_authorities: make_ids(peers_c),
delay: 0,
});
block
});
net.peer(0).push_blocks(9, false);
}
net.sync();
for (i, peer) in net.peers().iter().enumerate() {
assert_eq!(peer.client().info().unwrap().chain.best_number, 30,
"Peer #{} failed to sync", i);
for (i, peer) in net.lock().peers().iter().enumerate() {
assert_eq!(peer.client().info().unwrap().chain.best_number, 1,
"Peer #{} failed to sync", i);
let set_raw = peer.client().backend().get_aux(::AUTHORITY_SET_KEY).unwrap().unwrap();
let set = AuthoritySet::<Hash, BlockNumber>::decode(&mut &set_raw[..]).unwrap();
assert_eq!(set.current(), (0, make_ids(peers_a).as_slice()));
assert_eq!(set.pending_changes().len(), 2);
assert_eq!(set.pending_changes().len(), 0);
}
let net = Arc::new(Mutex::new(net));
let mut finality_notifications = Vec::new();
{
let net = net.clone();
let client = net.lock().peers[0].client().clone();
let transitions = transitions.clone();
let add_transition = move |parent_hash, change| {
transitions.lock().insert(parent_hash, change);
};
let peers_c = peers_c.clone();
let executor = runtime.executor().clone();
let mut runtime = current_thread::Runtime::new().unwrap();
// wait for blocks to be finalized before generating new ones
let block_production = client.finality_notification_stream()
.take_while(|n| Ok(n.header.number() < &30))
.for_each(move |n| {
match n.header.number() {
1 => {
// first 14 blocks.
net.lock().peer(0).push_blocks(13, false);
},
14 => {
// generate transition at block 15, applied at 20.
net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| {
let block = builder.bake().unwrap();
add_transition(*block.header.parent_hash(), ScheduledChange {
next_authorities: make_ids(peers_b),
delay: 4,
});
block
});
net.lock().peer(0).push_blocks(5, false);
},
20 => {
let net = net.clone();
let add_transition = add_transition.clone();
// at block 21 we do another transition, but this time instant.
// add more until we have 30.
let generate_blocks = move || {
net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| {
let block = builder.bake().unwrap();
add_transition(*block.header.parent_hash(), ScheduledChange {
next_authorities: make_ids(&peers_c),
delay: 0,
});
block
});
net.lock().peer(0).push_blocks(9, false);
};
// delay block generation for a bit for the liveness tracker to be
// able to update due to the authority set change
let delay_generate = Delay::new(Instant::now() + Duration::from_millis(5000))
.and_then(move |_| {
generate_blocks();
Ok(())
})
.map_err(|_| ());
executor.spawn(delay_generate);
},
_ => {},
}
Ok(())
});
runtime.spawn(block_production);
}
let mut finality_notifications = Vec::new();
let all_peers = peers_a.iter()
.chain(peers_b)
.chain(peers_c)
@@ -560,7 +599,7 @@ fn transition_3_voters_twice_1_observer() {
assert!(set.pending_changes().is_empty());
})
);
let voter = run_grandpa(
let (voter, oracle) = run_grandpa(
Config {
gossip_duration: TEST_GOSSIP_DURATION,
local_key,
@@ -570,6 +609,7 @@ fn transition_3_voters_twice_1_observer() {
MessageRouting::new(net.clone(), peer_id),
).expect("all in order with client and network");
runtime.spawn(oracle);
runtime.spawn(voter);
}
@@ -579,7 +619,11 @@ fn transition_3_voters_twice_1_observer() {
.map_err(|_| ());
let drive_to_completion = ::tokio::timer::Interval::new_interval(TEST_ROUTING_INTERVAL)
.for_each(move |_| { net.lock().route_until_complete(); Ok(()) })
.for_each(move |_| {
net.lock().send_import_notifications();
net.lock().sync();
Ok(())
})
.map(|_| ())
.map_err(|_| ());
+34 -9
View File
@@ -46,7 +46,7 @@ struct MessageEntry<B: BlockT> {
/// Consensus network protocol handler. Manages statements and candidate requests.
pub struct ConsensusGossip<B: BlockT> {
peers: HashMap<NodeIndex, PeerConsensus<(B::Hash, B::Hash)>>,
live_message_sinks: HashMap<B::Hash, mpsc::UnboundedSender<ConsensusMessage>>,
live_message_sinks: HashMap<B::Hash, Vec<mpsc::UnboundedSender<ConsensusMessage>>>,
messages: Vec<MessageEntry<B>>,
known_messages: HashSet<(B::Hash, B::Hash)>,
session_start: Option<B::Hash>,
@@ -150,7 +150,10 @@ impl<B: BlockT> ConsensusGossip<B> {
/// Prune old or no longer relevant consensus messages. Provide a predicate
/// for pruning, which returns `false` when the items with a given topic should be pruned.
pub fn collect_garbage<P: Fn(&B::Hash) -> bool>(&mut self, predicate: P) {
self.live_message_sinks.retain(|_, sink| !sink.is_closed());
self.live_message_sinks.retain(|_, sinks| {
sinks.retain(|sink| !sink.is_closed());
!sinks.is_empty()
});
let hashes = &mut self.known_messages;
let before = self.messages.len();
@@ -175,7 +178,7 @@ impl<B: BlockT> ConsensusGossip<B> {
for entry in self.messages.iter().filter(|e| e.topic == topic) {
tx.unbounded_send(entry.message.clone()).expect("receiver known to be live; qed");
}
self.live_message_sinks.insert(topic, tx);
self.live_message_sinks.entry(topic).or_default().push(tx);
rx
}
@@ -217,12 +220,14 @@ impl<B: BlockT> ConsensusGossip<B> {
use std::collections::hash_map::Entry;
peer.known_messages.insert((topic, message_hash));
if let Entry::Occupied(mut entry) = self.live_message_sinks.entry(topic) {
debug!(target: "gossip", "Pushing consensus message to sink for {}.", topic);
if let Err(e) = entry.get().unbounded_send(message.clone()) {
trace!(target:"gossip", "Error broadcasting message notification: {:?}", e);
}
if entry.get().is_closed() {
debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic);
entry.get_mut().retain(|sink| {
if let Err(e) = sink.unbounded_send(message.clone()) {
trace!(target:"gossip", "Error broadcasting message notification: {:?}", e);
}
!sink.is_closed()
});
if entry.get().is_empty() {
entry.remove_entry();
}
}
@@ -345,4 +350,24 @@ mod tests {
assert_eq!(consensus.messages.len(), 2);
}
#[test]
fn can_keep_multiple_subscribers_per_topic() {
use futures::Stream;
let mut consensus = ConsensusGossip::<Block>::new();
let message = vec![1, 2, 3];
let message_hash = HashFor::<Block>::hash(&message);
let topic = HashFor::<Block>::hash(&[1,2,3]);
consensus.register_message(message_hash, topic, || message.clone());
let stream1 = consensus.messages_for(topic);
let stream2 = consensus.messages_for(topic);
assert_eq!(stream1.wait().next(), Some(Ok(message.clone())));
assert_eq!(stream2.wait().next(), Some(Ok(message)));
}
}
+6 -36
View File
@@ -31,6 +31,7 @@ use parking_lot::{Condvar, Mutex, RwLock};
use network_libp2p::{NodeIndex, Severity};
use primitives::AuthorityId;
use runtime_primitives::Justification;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
pub use blocks::BlockData;
@@ -57,7 +58,7 @@ pub trait Verifier<B: BlockT>: Send + Sync + Sized {
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
justification: Option<Justification>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String>;
}
@@ -339,8 +340,6 @@ enum BlockImportResult<H: ::std::fmt::Debug + PartialEq, N: ::std::fmt::Debug +
enum BlockImportError {
/// Block missed header, can't be imported
IncompleteHeader(Option<NodeIndex>),
/// Block missed justification, can't be imported
IncompleteJustification(Option<NodeIndex>),
/// Block verification failed, can't be imported
VerificationFailed(Option<NodeIndex>, String),
/// Block is known to be Bad
@@ -411,7 +410,7 @@ fn import_single_block<B: BlockT, V: Verifier<B>>(
let block = block.block;
let (header, justification) = match (block.header, block.justification) {
(Some(header), Some(justification)) => (header, justification),
(Some(header), justification) => (header, justification),
(None, _) => {
if let Some(peer) = peer {
debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer);
@@ -420,14 +419,6 @@ fn import_single_block<B: BlockT, V: Verifier<B>>(
}
return Err(BlockImportError::IncompleteHeader(peer)) //TODO: use persistent ID
},
(_, None) => {
if let Some(peer) = peer {
debug!(target: "sync", "Justification set for block {} was not provided by {} ", block.hash, peer);
} else {
debug!(target: "sync", "Justification set for block {} was not provided", block.hash);
}
return Err(BlockImportError::IncompleteJustification(peer)) //TODO: use persistent ID
}
};
let number = header.number().clone();
@@ -486,12 +477,6 @@ fn process_import_result<B: BlockT>(
link.block_imported(&hash, number);
1
},
Err(BlockImportError::IncompleteJustification(who)) => {
if let Some(peer) = who {
link.useless_peer(peer, "Sent block with incomplete justification to import");
}
0
},
Err(BlockImportError::IncompleteHeader(who)) => {
if let Some(peer) = who {
link.useless_peer(peer, "Sent block with incomplete header to import");
@@ -555,7 +540,7 @@ impl<B: BlockT> Verifier<B> for PassThroughVerifier {
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
justification: Option<Justification>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
Ok((ImportBlock {
@@ -563,7 +548,7 @@ impl<B: BlockT> Verifier<B> for PassThroughVerifier {
header,
body,
finalized: self.0,
justification: justification,
justification,
post_digests: vec![],
auxiliary: Vec::new(),
}, None))
@@ -702,7 +687,7 @@ pub mod tests {
fn prepare_good_block() -> (client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>, Hash, u64, BlockData<Block>) {
let client = test_client::new();
let block = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::File, block).unwrap();
client.import(BlockOrigin::File, block).unwrap();
let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1);
let block = message::BlockData::<Block> {
@@ -745,16 +730,6 @@ pub mod tests {
);
}
#[test]
fn import_single_good_block_without_justification_fails() {
let (_, _, _, mut block) = prepare_good_block();
block.block.justification = None;
assert_eq!(
import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))),
Err(BlockImportError::IncompleteJustification(Some(0)))
);
}
#[test]
fn process_import_result_works() {
let link = TestLink::new();
@@ -776,11 +751,6 @@ pub mod tests {
assert_eq!(link.total(), 1);
assert_eq!(link.disconnects.get(), 1);
let link = TestLink::new();
assert_eq!(process_import_result::<Block>(&link, Err(BlockImportError::IncompleteJustification(Some(0)))), 0);
assert_eq!(link.total(), 1);
assert_eq!(link.disconnects.get(), 1);
let link = TestLink::new();
assert_eq!(process_import_result::<Block>(&link, Err(BlockImportError::UnknownParent)), 0);
assert_eq!(link.total(), 1);
+1 -1
View File
@@ -255,7 +255,7 @@ impl<V: 'static + Verifier<Block>, D> Peer<V, D> {
body: Some(block.extrinsics),
receipt: None,
message_queue: None,
justification: Some(Vec::new()),
justification: None,
},
}]);
}
+9 -10
View File
@@ -70,13 +70,12 @@ fn should_return_a_block() {
let block = api.client.new_block().unwrap().bake().unwrap();
let block_hash = block.hash();
api.client.justify_and_import(BlockOrigin::Own, block).unwrap();
api.client.import(BlockOrigin::Own, block).unwrap();
// Genesis block is not justified, so we can't query it?
// Genesis block is not justified
assert_matches!(
api.block(Some(api.client.genesis_hash()).into()),
Ok(None)
Ok(Some(SignedBlock { justification: None, .. }))
);
assert_matches!(
@@ -140,7 +139,7 @@ fn should_return_block_hash() {
);
let block = client.client.new_block().unwrap().bake().unwrap();
client.client.justify_and_import(BlockOrigin::Own, block.clone()).unwrap();
client.client.import(BlockOrigin::Own, block.clone()).unwrap();
assert_matches!(
client.block_hash(Some(0u64).into()),
@@ -170,7 +169,7 @@ fn should_return_finalised_hash() {
// import new block
let builder = client.client.new_block().unwrap();
client.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
// no finalisation yet
assert_matches!(
client.finalised_head(),
@@ -178,7 +177,7 @@ fn should_return_finalised_hash() {
);
// finalise
client.client.finalize_block(BlockId::number(1), true).unwrap();
client.client.finalize_block(BlockId::number(1), None, true).unwrap();
assert_matches!(
client.finalised_head(),
Ok(ref x) if x == &client.client.block_hash(1).unwrap().unwrap()
@@ -203,7 +202,7 @@ fn should_notify_about_latest_block() {
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let builder = api.client.new_block().unwrap();
api.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert initial head sent.
@@ -234,8 +233,8 @@ fn should_notify_about_finalised_block() {
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let builder = api.client.new_block().unwrap();
api.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.finalize_block(BlockId::number(1), true).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.finalize_block(BlockId::number(1), None, true).unwrap();
}
// assert initial head sent.
+3 -3
View File
@@ -69,7 +69,7 @@ fn should_notify_about_storage_changes() {
amount: 42,
nonce: 0,
}).unwrap();
api.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert notification sent to transport
@@ -102,7 +102,7 @@ fn should_send_initial_storage_changes_and_notifications() {
amount: 42,
nonce: 0,
}).unwrap();
api.client.justify_and_import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert initial values sent to transport
@@ -131,7 +131,7 @@ fn should_query_storage() {
}).unwrap();
let block = builder.bake().unwrap();
let hash = block.header.hash();
client.justify_and_import(BlockOrigin::Own, block).unwrap();
client.import(BlockOrigin::Own, block).unwrap();
hash
};
let block1_hash = add_block(0);
+1 -1
View File
@@ -116,7 +116,7 @@ pub fn import_blocks<F, E, R>(mut config: FactoryFullConfiguration<F>, exit: E,
let hash = header.hash();
let block = message::BlockData::<F::Block> {
hash: hash,
justification: Some(signed.justification),
justification: signed.justification,
header: Some(header),
body: Some(extrinsics),
receipt: None,
@@ -116,7 +116,7 @@ pub struct SignedBlock<Block> {
/// Full block.
pub block: Block,
/// Block justification.
pub justification: Justification,
pub justification: Option<Justification>,
}
// TODO: Remove Deserialize for SignedBlock once RPC no longer needs it #1098
+28 -7
View File
@@ -18,18 +18,23 @@
use client::{self, Client};
use consensus::{ImportBlock, BlockImport, BlockOrigin};
use runtime_primitives::Justification;
use runtime_primitives::generic::BlockId;
use primitives::Blake2Hasher;
use runtime;
/// Extension trait for a test client.
pub trait TestClient: Sized {
/// Justify and import block to the chain. No finality.
fn justify_and_import(&self, origin: BlockOrigin, block: runtime::Block)
/// Import block to the chain. No finality.
fn import(&self, origin: BlockOrigin, block: runtime::Block)
-> client::error::Result<()>;
/// Import block with justification, finalizes block.
fn import_justified(&self, origin: BlockOrigin, block: runtime::Block, justification: Justification)
-> client::error::Result<()>;
/// Finalize a block.
fn finalize_block(&self, id: BlockId<runtime::Block>) -> client::error::Result<()>;
fn finalize_block(&self, id: BlockId<runtime::Block>, justification: Option<Justification>) -> client::error::Result<()>;
/// Returns hash of the genesis block.
fn genesis_hash(&self) -> runtime::Hash;
@@ -41,13 +46,13 @@ impl<B, E, RA> TestClient for Client<B, E, runtime::Block, RA>
E: client::CallExecutor<runtime::Block, Blake2Hasher>,
Self: BlockImport<runtime::Block, Error=client::error::Error>,
{
fn justify_and_import(&self, origin: BlockOrigin, block: runtime::Block)
fn import(&self, origin: BlockOrigin, block: runtime::Block)
-> client::error::Result<()>
{
let import = ImportBlock {
origin,
header: block.header,
justification: vec![],
justification: None,
post_digests: vec![],
body: Some(block.extrinsics),
finalized: false,
@@ -57,8 +62,24 @@ impl<B, E, RA> TestClient for Client<B, E, runtime::Block, RA>
self.import_block(import, None).map(|_| ())
}
fn finalize_block(&self, id: BlockId<runtime::Block>) -> client::error::Result<()> {
self.finalize_block(id, true)
fn import_justified(&self, origin: BlockOrigin, block: runtime::Block, justification: Justification)
-> client::error::Result<()>
{
let import = ImportBlock {
origin,
header: block.header,
justification: Some(justification),
post_digests: vec![],
body: Some(block.extrinsics),
finalized: true,
auxiliary: Vec::new(),
};
self.import_block(import, None).map(|_| ())
}
fn finalize_block(&self, id: BlockId<runtime::Block>, justification: Option<Justification>) -> client::error::Result<()> {
self.finalize_block(id, justification, true)
}
fn genesis_hash(&self) -> runtime::Hash {
+20 -20
View File
@@ -51,35 +51,35 @@ pub fn test_leaves_for_backend<B>(backend: Arc<B>) where
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
client.import(BlockOrigin::Own, a1.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a1.hash()]);
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
client.import(BlockOrigin::Own, a2.clone()).unwrap();
assert_eq!(
client.backend().blockchain().leaves().unwrap(),
vec![a2.hash()]);
// A2 -> A3
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();
client.import(BlockOrigin::Own, a3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a3.hash()]);
// A3 -> A4
let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();
client.import(BlockOrigin::Own, a4.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a4.hash()]);
// A4 -> A5
let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();
client.import(BlockOrigin::Own, a5.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash()]);
@@ -94,21 +94,21 @@ pub fn test_leaves_for_backend<B>(backend: Arc<B>) where
nonce: 0,
}).unwrap();
let b2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();
client.import(BlockOrigin::Own, b2.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b2.hash()]);
// B2 -> B3
let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();
client.import(BlockOrigin::Own, b3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b3.hash()]);
// B3 -> B4
let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();
client.import(BlockOrigin::Own, b4.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash()]);
@@ -123,7 +123,7 @@ pub fn test_leaves_for_backend<B>(backend: Arc<B>) where
nonce: 1,
}).unwrap();
let c3 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();
client.import(BlockOrigin::Own, c3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash(), c3.hash()]);
@@ -138,7 +138,7 @@ pub fn test_leaves_for_backend<B>(backend: Arc<B>) where
nonce: 0,
}).unwrap();
let d2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();
client.import(BlockOrigin::Own, d2.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()]);
@@ -157,23 +157,23 @@ pub fn test_blockchain_query_by_number_gets_canonical<B>(backend: Arc<B>) where
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
client.import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
client.import(BlockOrigin::Own, a2.clone()).unwrap();
// A2 -> A3
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();
client.import(BlockOrigin::Own, a3.clone()).unwrap();
// A3 -> A4
let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();
client.import(BlockOrigin::Own, a4.clone()).unwrap();
// A4 -> A5
let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();
client.import(BlockOrigin::Own, a5.clone()).unwrap();
// A1 -> B2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
@@ -185,15 +185,15 @@ pub fn test_blockchain_query_by_number_gets_canonical<B>(backend: Arc<B>) where
nonce: 0,
}).unwrap();
let b2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();
client.import(BlockOrigin::Own, b2.clone()).unwrap();
// B2 -> B3
let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();
client.import(BlockOrigin::Own, b3.clone()).unwrap();
// B3 -> B4
let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();
client.import(BlockOrigin::Own, b4.clone()).unwrap();
// // B2 -> C3
let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
@@ -205,7 +205,7 @@ pub fn test_blockchain_query_by_number_gets_canonical<B>(backend: Arc<B>) where
nonce: 1,
}).unwrap();
let c3 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();
client.import(BlockOrigin::Own, c3.clone()).unwrap();
// A1 -> D2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
@@ -217,7 +217,7 @@ pub fn test_blockchain_query_by_number_gets_canonical<B>(backend: Arc<B>) where
nonce: 0,
}).unwrap();
let d2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();
client.import(BlockOrigin::Own, d2.clone()).unwrap();
let genesis_hash = client.info().unwrap().chain.genesis_hash;
+6 -6
View File
@@ -326,10 +326,10 @@ dependencies = [
[[package]]
name = "kvdb"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
source = "git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6#616b40150ded71f57f650067fcbc5c99d7c343e6"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)",
]
[[package]]
@@ -543,7 +543,7 @@ dependencies = [
[[package]]
name = "parity-bytes"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
source = "git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6#616b40150ded71f57f650067fcbc5c99d7c343e6"
[[package]]
name = "parity-codec"
@@ -940,7 +940,7 @@ dependencies = [
"hash-db 0.9.0 (git+https://github.com/paritytech/trie)",
"heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1493,7 +1493,7 @@ dependencies = [
"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "72ae89206cea31c32014b39d5a454b96135894221610dbfd19cf4d2d044fa546"
"checksum kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)" = "<none>"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
"checksum lazycell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddba4c30a78328befecec92fc94970e53b3ae385827d28620f0f5bb2493081e0"
@@ -1519,7 +1519,7 @@ dependencies = [
"checksum openssl 0.10.15 (registry+https://github.com/rust-lang/crates.io-index)" = "5e1309181cdcbdb51bc3b6bedb33dfac2a83b3d585033d3f6d9e22e8c1928613"
"checksum openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)" = "278c1ad40a89aa1e741a1eed089a2f60b18fab8089c3139b542140fc7d674106"
"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
"checksum parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fa5168b4cf41f3835e4bc6ffb32f51bc9365dc50cb351904595b3931d917fd0c"
"checksum parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)" = "<none>"
"checksum parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dca389ea5e1632c89b2ce54f7e2b4a8a8c9d278042222a91e0bf95451218cb4c"
"checksum parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffa42c2cb493b60b12c75b26e8c94cb734af4df4d7f2cc229dc04c1953dac189"
"checksum parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)" = "511379a8194230c2395d2f5fa627a5a7e108a9f976656ce723ae68fca4097bfc"
+4 -3
View File
@@ -87,17 +87,18 @@ construct_service_factory! {
if service.config.custom.grandpa_authority {
info!("Running Grandpa session as Authority {}", key.public());
let grandpa_fut = grandpa::run_grandpa(
let (voter, oracle) = grandpa::run_grandpa(
grandpa::Config {
gossip_duration: Duration::new(4, 0), // FIXME: make this available through chainspec?
local_key: Some(key.clone()),
name: Some(service.config.name.clone())
},
link_half,
grandpa::NetworkBridge::new(service.network())
grandpa::NetworkBridge::new(service.network()),
)?;
executor.spawn(grandpa_fut);
executor.spawn(oracle);
executor.spawn(voter);
}
if !service.config.custom.grandpa_authority_only {
info!("Using authority key {}", key.public());
+6 -7
View File
@@ -326,10 +326,10 @@ dependencies = [
[[package]]
name = "kvdb"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
source = "git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6#616b40150ded71f57f650067fcbc5c99d7c343e6"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)",
]
[[package]]
@@ -586,7 +586,7 @@ dependencies = [
[[package]]
name = "parity-bytes"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
source = "git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6#616b40150ded71f57f650067fcbc5c99d7c343e6"
[[package]]
name = "parity-codec"
@@ -1179,7 +1179,6 @@ version = "0.1.0"
dependencies = [
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-primitives 0.1.0",
@@ -1244,7 +1243,7 @@ dependencies = [
"hash-db 0.9.0 (git+https://github.com/paritytech/trie)",
"heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1792,7 +1791,7 @@ dependencies = [
"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
"checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "72ae89206cea31c32014b39d5a454b96135894221610dbfd19cf4d2d044fa546"
"checksum kvdb 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)" = "<none>"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
"checksum lazycell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddba4c30a78328befecec92fc94970e53b3ae385827d28620f0f5bb2493081e0"
@@ -1818,7 +1817,7 @@ dependencies = [
"checksum openssl 0.10.15 (registry+https://github.com/rust-lang/crates.io-index)" = "5e1309181cdcbdb51bc3b6bedb33dfac2a83b3d585033d3f6d9e22e8c1928613"
"checksum openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)" = "278c1ad40a89aa1e741a1eed089a2f60b18fab8089c3139b542140fc7d674106"
"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37"
"checksum parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fa5168b4cf41f3835e4bc6ffb32f51bc9365dc50cb351904595b3931d917fd0c"
"checksum parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common?rev=616b40150ded71f57f650067fcbc5c99d7c343e6)" = "<none>"
"checksum parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dca389ea5e1632c89b2ce54f7e2b4a8a8c9d278042222a91e0bf95451218cb4c"
"checksum parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffa42c2cb493b60b12c75b26e8c94cb734af4df4d7f2cc229dc04c1953dac189"
"checksum parity-wasm 0.31.3 (registry+https://github.com/rust-lang/crates.io-index)" = "511379a8194230c2395d2f5fa627a5a7e108a9f976656ce723ae68fca4097bfc"