Generalize the Consensus Infrastructure (#883)

* Split out Consensus
* Supply ImportQueue through network-service
  - simplify ImportQueue.import_blocks
  - remove Deadlock on import_block
  - Adding Verifier-Trait
  - Implement import_queue provisioning in service; allow cli to import
* Allow to actually customize import queue
* Consensus Gossip: Cache Message hash per Topic
This commit is contained in:
Benjamin Kampmann
2018-10-16 13:40:33 +02:00
committed by GitHub
parent a24e61cb29
commit ac4bcf879f
61 changed files with 1937 additions and 3306 deletions
+830 -798
View File
File diff suppressed because it is too large Load Diff
+2 -2
View File
@@ -19,14 +19,14 @@ vergen = "2"
[workspace]
members = [
"core/bft",
"core/cli",
"core/client",
"core/client/db",
"core/consensus/common",
"core/consensus/rhd",
"core/executor",
"core/finality-grandpa",
"core/keyring",
"core/misbehavior-check",
"core/network",
"core/primitives",
"core/rpc",
-20
View File
@@ -1,20 +0,0 @@
[package]
name = "substrate-bft"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
futures = "0.1.17"
parity-codec = "2.0"
substrate-primitives = { path = "../primitives" }
sr-primitives = { path = "../sr-primitives" }
sr-version = { path = "../sr-version" }
tokio = "0.1.7"
parking_lot = "0.4"
error-chain = "0.12"
log = "0.4"
rhododendron = "0.3"
[dev-dependencies]
substrate-keyring = { path = "../keyring" }
substrate-executor = { path = "../executor" }
-13
View File
@@ -1,13 +0,0 @@
= Substrate BFT
.Summary
[source, toml]
----
include::Cargo.toml[lines=2..5]
----
.Description
----
include::src/lib.rs[tag=description]
----
+1 -1
View File
@@ -12,7 +12,7 @@ hex-literal = "0.1"
futures = "0.1.17"
slog = "^2"
heapsize = "0.4"
substrate-bft = { path = "../bft" }
substrate-consensus-rhd = { path = "../consensus/rhd" }
parity-codec = "2.0"
substrate-executor = { path = "../executor" }
substrate-primitives = { path = "../primitives" }
+4 -5
View File
@@ -68,8 +68,7 @@ use kvdb::{KeyValueDB, DBTransaction};
use trie::MemoryDB;
use parking_lot::RwLock;
use primitives::{H256, AuthorityId, Blake2Hasher};
use runtime_primitives::generic::BlockId;
use runtime_primitives::bft::Justification;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::BuildStorage;
use state_machine::backend::Backend as StateBackend;
@@ -127,7 +126,7 @@ mod columns {
struct PendingBlock<Block: BlockT> {
header: Block::Header,
justification: Option<Justification<Block::Hash>>,
justification: Option<Justification>,
body: Option<Vec<Block::Extrinsic>>,
leaf_state: NewBlockState,
}
@@ -241,7 +240,7 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
}
}
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification<Block::Hash>>, client::error::Error> {
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification>, client::error::Error> {
match read_db(&*self.db, columns::HASH_LOOKUP, columns::JUSTIFICATION, id)? {
Some(justification) => match Decode::decode(&mut &justification[..]) {
Some(justification) => Ok(Some(justification)),
@@ -286,7 +285,7 @@ where Block: BlockT,
&mut self,
header: Block::Header,
body: Option<Vec<Block::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
justification: Option<Justification>,
leaf_state: NewBlockState,
) -> Result<(), client::error::Error> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
+2 -3
View File
@@ -18,8 +18,7 @@
use error;
use primitives::AuthorityId;
use runtime_primitives::bft::Justification;
use runtime_primitives::generic::BlockId;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use state_machine::backend::Backend as StateBackend;
use state_machine::ChangesTrieStorage as StateChangesTrieStorage;
@@ -64,7 +63,7 @@ where
&mut self,
header: Block::Header,
body: Option<Vec<Block::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
justification: Option<Justification>,
state: NewBlockState,
) -> error::Result<()>;
+2 -3
View File
@@ -19,7 +19,7 @@
use primitives::AuthorityId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use runtime_primitives::generic::BlockId;
use runtime_primitives::bft::Justification;
use runtime_primitives::Justification;
use error::{ErrorKind, Result};
@@ -47,10 +47,9 @@ pub trait Backend<Block: BlockT>: HeaderBackend<Block> {
/// Get block body. Returns `None` if block is not found.
fn body(&self, id: BlockId<Block>) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justification. Returns `None` if justification does not exist.
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification<Block::Hash>>>;
fn justification(&self, id: BlockId<Block>) -> Result<Option<Justification>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
/// Returns data cache reference, if it is enabled on this backend.
fn cache(&self) -> Option<&Cache<Block>>;
+76 -105
View File
@@ -22,7 +22,7 @@ use futures::sync::mpsc;
use parking_lot::{Mutex, RwLock};
use primitives::AuthorityId;
use runtime_primitives::{
bft::Justification,
Justification,
generic::{BlockId, SignedBlock, Block as RuntimeBlock},
transaction_validity::{TransactionValidity, TransactionTag},
};
@@ -44,7 +44,7 @@ use blockchain::{self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend
use call_executor::{CallExecutor, LocalCallExecutor};
use executor::{RuntimeVersion, RuntimeInfo};
use notifications::{StorageNotifications, StorageEventStream};
use {cht, error, in_mem, block_builder, bft, genesis};
use {cht, error, in_mem, block_builder, genesis};
/// Type that implements `futures::Stream` of block import events.
pub type ImportNotifications<Block> = mpsc::UnboundedReceiver<BlockImportNotification<Block>>;
@@ -151,6 +151,53 @@ pub enum BlockOrigin {
File,
}
/// Data required to import a Block
pub struct ImportBlock<Block: BlockT> {
/// Origin of the Block
pub origin: BlockOrigin,
/// Header
pub header: Block::Header,
/// Justification provided for this block from the outside
pub external_justification: Justification,
/// Internal Justification for the block
pub internal_justification: Vec<u8>, // Block::Digest::DigestItem?
/// Block's body
pub body: Option<Vec<Block::Extrinsic>>,
/// Is this block finalized already?
/// `true` implies instant finality.
pub finalized: bool,
/// Auxiliary consensus data produced by the block.
/// Contains a list of key-value pairs. If values are `None`, the keys
/// will be deleted.
pub auxiliary: Vec<(Vec<u8>, Option<Vec<u8>>)>,
}
impl<Block: BlockT> ImportBlock<Block> {
/// Deconstruct the justified header into parts.
pub fn into_inner(self)
-> (
BlockOrigin,
<Block as BlockT>::Header,
Justification,
Justification,
Option<Vec<<Block as BlockT>::Extrinsic>>,
bool,
Vec<(Vec<u8>, Option<Vec<u8>>)>,
) {
(
self.origin,
self.header,
self.external_justification,
self.internal_justification,
self.body,
self.finalized,
self.auxiliary,
)
}
}
/// Summary of an imported block
#[derive(Clone, Debug)]
pub struct BlockImportNotification<Block: BlockT> {
@@ -175,21 +222,6 @@ pub struct FinalityNotification<Block: BlockT> {
pub header: Block::Header,
}
/// A header paired with a justification which has already been checked.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct JustifiedHeader<Block: BlockT> {
header: <Block as BlockT>::Header,
justification: ::bft::Justification<Block::Hash>,
authorities: Vec<AuthorityId>,
}
impl<Block: BlockT> JustifiedHeader<Block> {
/// Deconstruct the justified header into parts.
pub fn into_inner(self) -> (<Block as BlockT>::Header, ::bft::Justification<Block::Hash>, Vec<AuthorityId>) {
(self.header, self.justification, self.authorities)
}
}
/// Create an instance of in-memory client.
pub fn new_in_mem<E, Block, S>(
executor: E,
@@ -486,37 +518,24 @@ impl<B, E, Block> Client<B, E, Block> where
)
}
/// Check a header's justification.
pub fn check_justification(
&self,
header: <Block as BlockT>::Header,
justification: ::bft::UncheckedJustification<Block::Hash>,
) -> error::Result<JustifiedHeader<Block>> {
let parent_hash = header.parent_hash().clone();
let authorities = self.authorities_at(&BlockId::Hash(parent_hash))?;
let just = ::bft::check_justification::<Block>(&authorities[..], parent_hash, justification)
.map_err(|_|
error::ErrorKind::BadJustification(
format!("{}", header.hash())
)
)?;
Ok(JustifiedHeader {
header,
justification: just,
authorities,
})
}
/// Queue a block for import.
/// Import a checked and validated block
pub fn import_block(
&self,
origin: BlockOrigin,
header: JustifiedHeader<Block>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
finalized: bool,
import_block: ImportBlock<Block>,
new_authorities: Option<Vec<AuthorityId>>,
) -> error::Result<ImportResult> {
let (header, justification, authorities) = header.into_inner();
let (
origin,
header,
_,
justification,
body,
finalized,
_aux, // TODO: write this to DB also
) = import_block.into_inner();
let parent_hash = header.parent_hash().clone();
match self.backend.blockchain().status(BlockId::Hash(parent_hash))? {
blockchain::BlockStatus::InChain => {},
blockchain::BlockStatus::Unknown => return Ok(ImportResult::UnknownParent),
@@ -532,8 +551,8 @@ impl<B, E, Block> Client<B, E, Block> where
header,
justification,
body,
authorities,
finalized
new_authorities,
finalized,
);
*self.importing_block.write() = None;
@@ -574,9 +593,9 @@ impl<B, E, Block> Client<B, E, Block> where
origin: BlockOrigin,
hash: Block::Hash,
header: Block::Header,
justification: bft::Justification<Block::Hash>,
justification: Justification,
body: Option<Vec<Block::Extrinsic>>,
authorities: Vec<AuthorityId>,
authorities: Option<Vec<AuthorityId>>,
finalized: bool,
) -> error::Result<ImportResult> {
let parent_hash = header.parent_hash().clone();
@@ -650,16 +669,17 @@ impl<B, E, Block> Client<B, E, Block> where
};
trace!("Imported {}, (#{}), best={}, origin={:?}", hash, header.number(), is_new_best, origin);
let unchecked: bft::UncheckedJustification<_> = justification.uncheck().into();
transaction.set_block_data(
header.clone(),
body,
Some(unchecked.into()),
Some(justification),
leaf_state,
)?;
transaction.update_authorities(authorities);
if let Some(authorities) = authorities {
transaction.update_authorities(authorities);
}
if let Some(storage_update) = storage_update {
transaction.update_storage(storage_update)?;
}
@@ -843,12 +863,14 @@ impl<B, E, Block> Client<B, E, Block> where
}
/// Get block justification set by id.
pub fn justification(&self, id: &BlockId<Block>) -> error::Result<Option<Justification<Block::Hash>>> {
pub fn justification(&self, id: &BlockId<Block>) -> error::Result<Option<Justification>> {
self.backend.blockchain().justification(*id)
}
/// Get full block by id.
pub fn block(&self, id: &BlockId<Block>) -> error::Result<Option<SignedBlock<Block::Header, Block::Extrinsic, Block::Hash>>> {
pub fn block(&self, id: &BlockId<Block>)
-> error::Result<Option<SignedBlock<Block::Header, Block::Extrinsic>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), Some(justification)) =>
Some(SignedBlock { block: RuntimeBlock { header, extrinsics }, justification }),
@@ -985,57 +1007,6 @@ impl<B, E, Block> BlockNumberToHash for Client<B, E, Block> where
}
}
impl<B, E, Block> bft::BlockImport<Block> for Client<B, E, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
{
fn import_block(
&self,
block: Block,
justification: ::bft::Justification<Block::Hash>,
authorities: &[AuthorityId],
) -> bool {
let (header, extrinsics) = block.deconstruct();
let justified_header = JustifiedHeader {
header: header,
justification,
authorities: authorities.to_vec(),
};
// TODO [rob]: non-instant finality.
self.import_block(
BlockOrigin::ConsensusBroadcast,
justified_header,
Some(extrinsics),
true
).is_ok()
}
}
impl<B, E, Block> bft::Authorities<Block> for Client<B, E, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
{
fn authorities(&self, at: &BlockId<Block>) -> Result<Vec<AuthorityId>, bft::Error> {
let on_chain_version: Result<_, bft::Error> = self.runtime_version_at(at)
.map_err(|e| { trace!("Error getting runtime version {:?}", e); bft::ErrorKind::RuntimeVersionMissing.into() });
let on_chain_version = on_chain_version?;
let native_version: Result<_, bft::Error> = self.executor.native_runtime_version()
.ok_or_else(|| bft::ErrorKind::NativeRuntimeMissing.into());
let native_version = native_version?;
if !native_version.can_author_with(&on_chain_version) {
return Err(bft::ErrorKind::IncompatibleAuthoringRuntime(on_chain_version, native_version.runtime_version.clone()).into())
}
self.authorities_at(at).map_err(|_| {
let descriptor = format!("{:?}", at);
bft::ErrorKind::StateUnavailable(descriptor).into()
})
}
}
impl<B, E, Block> BlockchainEvents<Block> for Client<B, E, Block>
where
@@ -1095,7 +1066,7 @@ impl<B, E, Block> api::Core<Block, AuthorityId> for Client<B, E, Block> where
}
fn authorities(&self, at: &BlockId<Block>) -> Result<Vec<AuthorityId>, Self::Error> {
bft::Authorities::authorities(self, at).map_err(Into::into)
self.authorities_at(at)
}
fn execute_block(&self, at: &BlockId<Block>, block: &Block) -> Result<(), Self::Error> {
-5
View File
@@ -19,13 +19,8 @@
use std;
use state_machine;
use runtime_primitives::ApplyError;
use bft;
error_chain! {
links {
BFT(bft::error::Error, bft::error::ErrorKind) #[doc="BFT error"];
}
errors {
/// Backend error.
Backend(s: String) {
+9 -9
View File
@@ -26,7 +26,7 @@ use primitives::AuthorityId;
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero,
NumberFor, As, Digest, DigestItem};
use runtime_primitives::bft::Justification;
use runtime_primitives::Justification;
use blockchain::{self, BlockStatus, HeaderBackend};
use state_machine::backend::{Backend as StateBackend, InMemory};
use state_machine::InMemoryChangesTrieStorage;
@@ -42,12 +42,12 @@ struct PendingBlock<B: BlockT> {
#[derive(PartialEq, Eq, Clone)]
enum StoredBlock<B: BlockT> {
Header(B::Header, Option<Justification<B::Hash>>),
Full(B, Option<Justification<B::Hash>>),
Header(B::Header, Option<Justification>),
Full(B, Option<Justification>),
}
impl<B: BlockT> StoredBlock<B> {
fn new(header: B::Header, body: Option<Vec<B::Extrinsic>>, just: Option<Justification<B::Hash>>) -> Self {
fn new(header: B::Header, body: Option<Vec<B::Extrinsic>>, just: Option<Justification>) -> Self {
match body {
Some(body) => StoredBlock::Full(B::new(header, body), just),
None => StoredBlock::Header(header, just),
@@ -61,7 +61,7 @@ impl<B: BlockT> StoredBlock<B> {
}
}
fn justification(&self) -> Option<&Justification<B::Hash>> {
fn justification(&self) -> Option<&Justification> {
match *self {
StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref()
}
@@ -74,7 +74,7 @@ impl<B: BlockT> StoredBlock<B> {
}
}
fn into_inner(self) -> (B::Header, Option<Vec<B::Extrinsic>>, Option<Justification<B::Hash>>) {
fn into_inner(self) -> (B::Header, Option<Vec<B::Extrinsic>>, Option<Justification>) {
match self {
StoredBlock::Header(header, just) => (header, None, just),
StoredBlock::Full(block, just) => {
@@ -159,7 +159,7 @@ impl<Block: BlockT> Blockchain<Block> {
&self,
hash: Block::Hash,
header: <Block as BlockT>::Header,
justification: Option<Justification<Block::Hash>>,
justification: Option<Justification>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
new_state: NewBlockState,
) -> ::error::Result<()> {
@@ -292,7 +292,7 @@ impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
}))
}
fn justification(&self, id: BlockId<Block>) -> error::Result<Option<Justification<Block::Hash>>> {
fn justification(&self, id: BlockId<Block>) -> error::Result<Option<Justification>> {
Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b|
b.justification().map(|x| x.clone()))
))
@@ -375,7 +375,7 @@ where
&mut self,
header: <Block as BlockT>::Header,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
justification: Option<Justification<Block::Hash>>,
justification: Option<Justification>,
state: NewBlockState,
) -> error::Result<()> {
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
+1 -2
View File
@@ -19,7 +19,6 @@
#![warn(missing_docs)]
#![recursion_limit="128"]
extern crate substrate_bft as bft;
extern crate substrate_trie as trie;
extern crate parity_codec as codec;
extern crate substrate_primitives as primitives;
@@ -63,7 +62,7 @@ pub use client::{
new_with_backend,
new_in_mem,
BlockBody, BlockStatus, BlockOrigin, ImportNotifications, FinalityNotifications, BlockchainEvents,
Client, ClientInfo, ChainHead, ImportResult, JustifiedHeader,
Client, ClientInfo, ChainHead, ImportResult, ImportBlock,
};
pub use notifications::{StorageEventStream, StorageChangeSet};
pub use state_machine::ExecutionStrategy;
+3 -3
View File
@@ -22,9 +22,9 @@ use futures::{Future, IntoFuture};
use parking_lot::RwLock;
use primitives::AuthorityId;
use runtime_primitives::{bft::Justification, generic::BlockId};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use runtime_primitives::{generic::BlockId, Justification};
use state_machine::{Backend as StateBackend, InMemoryChangesTrieStorage, TrieBackend};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use backend::{Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState};
use blockchain::HeaderBackend as BlockchainHeaderBackend;
@@ -167,7 +167,7 @@ where
&mut self,
header: Block::Header,
_body: Option<Vec<Block::Extrinsic>>,
_justification: Option<Justification<Block::Hash>>,
_justification: Option<Justification>,
state: NewBlockState,
) -> ClientResult<()> {
self.leaf_state = state;
@@ -22,8 +22,8 @@ use futures::{Future, IntoFuture};
use parking_lot::Mutex;
use primitives::AuthorityId;
use runtime_primitives::{bft::Justification, generic::BlockId};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
use runtime_primitives::{Justification, generic::BlockId};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,NumberFor, Zero};
use backend::NewBlockState;
use blockchain::{Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache,
@@ -139,7 +139,7 @@ impl<S, F, Block> BlockchainBackend<Block> for Blockchain<S, F> where Block: Blo
Ok(None)
}
fn justification(&self, _id: BlockId<Block>) -> ClientResult<Option<Justification<Block::Hash>>> {
fn justification(&self, _id: BlockId<Block>) -> ClientResult<Option<Justification>> {
Ok(None)
}
@@ -0,0 +1,8 @@
[package]
name = "substrate-consensus-common"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Common utilities for substrate consensus"
[dev-dependencies]
substrate-primitives = { path= "../../primitives"}
@@ -0,0 +1,33 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate Consensus Common.
// Substrate Consensus Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate Consensus Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate Consensus Common. If not, see <http://www.gnu.org/licenses/>.
//! Tracks offline validators.
#![allow(dead_code)]
#![cfg(feature="rhd")]
extern crate substrate_primitives as primitives;
use primitives::{generic::BlockId, Justification};
use primitives::traits::{Block, Header};
/// Block import trait.
pub trait BlockImport<B: Block> {
/// Import a block alongside its corresponding justification.
fn import_block(&self, block: B, justification: Justification, authorities: &[AuthorityId]) -> bool;
}
pub mod offline_tracker;
+37
View File
@@ -0,0 +1,37 @@
[package]
name = "substrate-consensus-rhd"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
description = "Rhododendron Round-Based consensus-algorithm for substrate"
[dependencies]
futures = "0.1.17"
parity-codec = { version = "1.1" }
parity-codec-derive = { version = "2.0" }
substrate-primitives = { path = "../../primitives" }
srml-support = { path = "../../../srml/support" }
sr-primitives = { path = "../../sr-primitives" }
sr-version = { path = "../../sr-version" }
sr-io = { path = "../../sr-io" }
srml-consensus = { path = "../../../srml/consensus" }
tokio = "0.1.7"
parking_lot = "0.4"
error-chain = "0.12"
log = "0.3"
rhododendron = { git = "https://github.com/paritytech/rhododendron.git", features = ["codec"] }
serde = { version = "1.0", features = ["derive"] }
[dev-dependencies]
substrate-keyring = { path = "../../keyring" }
substrate-executor = { path = "../../executor" }
[features]
default = ["std"]
std = [
"serde/std",
"substrate-primitives/std",
"srml-support/std",
"sr-primitives/std",
"sr-version/std",
]
@@ -16,7 +16,6 @@
//! Error types in the BFT service.
use runtime_version::RuntimeVersion;
use primitives::ed25519;
error_chain! {
errors {
@@ -45,7 +44,7 @@ error_chain! {
}
/// Error checking signature
InvalidSignature(s: ed25519::Signature, a: ::primitives::AuthorityId) {
InvalidSignature(s: ::primitives::ed25519::Signature, a: ::primitives::AuthorityId) {
description("Message signature is invalid"),
display("Message signature {:?} by {:?} is invalid.", s, a),
}
@@ -14,7 +14,6 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
// tag::description[]
//! BFT Agreement based on a rotating proposer in different rounds.
//!
//! Where this crate refers to input stream, should never logically conclude.
@@ -30,36 +29,49 @@
//! conclude without having witnessed the conclusion.
//! In general, this future should be pre-empted by the import of a justification
//! set for this block height.
// end::description[]
#![cfg(feature = "rhd")]
#![recursion_limit="128"]
pub mod error;
extern crate parity_codec as codec;
extern crate substrate_primitives as primitives;
extern crate srml_support as runtime_support;
extern crate sr_primitives as runtime_primitives;
extern crate sr_version as runtime_version;
extern crate sr_io as runtime_io;
extern crate tokio;
#[cfg(test)]
extern crate substrate_keyring as keyring;
extern crate parking_lot;
extern crate rhododendron;
#[macro_use]
extern crate log;
extern crate futures;
#[macro_use]
extern crate error_chain;
#[macro_use]
extern crate serde;
#[macro_use]
extern crate parity_codec_derive;
pub mod error;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Instant, Duration};
use codec::Encode;
use runtime_primitives::generic::BlockId;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::traits::{Block, Header};
use runtime_primitives::bft::{Message as PrimitiveMessage, Action as PrimitiveAction, Justification as PrimitiveJustification};
use primitives::{AuthorityId, ed25519, ed25519::LocalizedSignature};
use futures::{Async, Stream, Sink, Future, IntoFuture};
@@ -67,9 +79,12 @@ use futures::sync::oneshot;
use tokio::timer::Delay;
use parking_lot::Mutex;
pub use rhododendron::{InputStreamConcluded, AdvanceRoundReason};
pub use rhododendron::{InputStreamConcluded, AdvanceRoundReason,
Message as RhdMessage, Vote as RhdMessageVote};
pub use error::{Error, ErrorKind};
// pub mod misbehaviour_check;
// statuses for an agreement
mod status {
pub const LIVE: usize = 0;
@@ -77,10 +92,6 @@ mod status {
pub const GOOD: usize = 2;
}
/// Messages over the proposal.
/// Each message carries an associated round number.
pub type Message<B> = rhododendron::Message<B, <B as Block>::Hash>;
/// Localized message type.
pub type LocalizedMessage<B> = rhododendron::LocalizedMessage<
B,
@@ -89,11 +100,13 @@ pub type LocalizedMessage<B> = rhododendron::LocalizedMessage<
LocalizedSignature
>;
/// Justification of some hash.
pub type Justification<H> = rhododendron::Justification<H, LocalizedSignature>;
pub struct RhdJustification<H>(rhododendron::Justification<H, LocalizedSignature>);
/// Justification of a prepare message.
pub type PrepareJustification<H> = rhododendron::PrepareJustification<H, LocalizedSignature>;
pub struct PrepareJustification<H>(rhododendron::PrepareJustification<H, LocalizedSignature>);
/// Unchecked justification.
pub struct UncheckedJustification<H>(rhododendron::UncheckedJustification<H, LocalizedSignature>);
@@ -109,14 +122,22 @@ impl<H> UncheckedJustification<H> {
}
}
impl<H> Into<Justification> for RhdJustification<H> {
fn into(self) -> Justification {
let p : Justification = UncheckedJustification(self.0.uncheck()).into();
p
}
}
impl<H> From<rhododendron::UncheckedJustification<H, LocalizedSignature>> for UncheckedJustification<H> {
fn from(inner: rhododendron::UncheckedJustification<H, LocalizedSignature>) -> Self {
UncheckedJustification(inner)
}
}
impl<H> From<PrimitiveJustification<H>> for UncheckedJustification<H> {
fn from(just: PrimitiveJustification<H>) -> Self {
impl<H> From<Justification> for UncheckedJustification<H> {
fn from(just: Justification) -> Self {
UncheckedJustification(rhododendron::UncheckedJustification {
round_number: just.round_number as usize,
digest: just.hash,
@@ -128,9 +149,9 @@ impl<H> From<PrimitiveJustification<H>> for UncheckedJustification<H> {
}
}
impl<H> Into<PrimitiveJustification<H>> for UncheckedJustification<H> {
fn into(self) -> PrimitiveJustification<H> {
PrimitiveJustification {
impl<H> Into<Justification> for UncheckedJustification<H> {
fn into(self) -> Justification {
Justification {
round_number: self.0.round_number as u32,
hash: self.0.digest,
signatures: self.0.signatures.into_iter().map(|s| (s.signer.into(), s.signature)).collect(),
@@ -194,12 +215,6 @@ pub trait Proposer<B: Block> {
fn on_round_end(&self, _round_number: usize, _proposed: bool) { }
}
/// Block import trait.
pub trait BlockImport<B: Block> {
/// Import a block alongside its corresponding justification.
fn import_block(&self, block: B, justification: Justification<B::Hash>, authorities: &[AuthorityId]) -> bool;
}
/// Trait for getting the authorities at a given block.
pub trait Authorities<B: Block> {
/// Get the authorities at the given block.
@@ -282,7 +297,7 @@ impl<B: Block, P: Proposer<B>> rhododendron::Context for BftInstance<B, P>
proposal.hash()
}
fn sign_local(&self, message: Message<B>) -> LocalizedMessage<B> {
fn sign_local(&self, message: RhdMessage<B, B::Hash>) -> LocalizedMessage<B> {
sign_message(message, &*self.key, self.parent_hash.clone())
}
@@ -313,7 +328,7 @@ impl<B: Block, P: Proposer<B>> rhododendron::Context for BftInstance<B, P>
use std::collections::HashSet;
let collect_pubkeys = |participants: HashSet<&Self::AuthorityId>| participants.into_iter()
.map(|p| ed25519::Public::from_raw(p.0))
.map(|p| ::ed25519::Public::from_raw(p.0))
.collect::<Vec<_>>();
let round_timeout = self.round_timeout_duration(next_round);
@@ -383,10 +398,11 @@ impl<B, P, I, InStream, OutSink> Future for BftFuture<B, P, I, InStream, OutSink
let hash = justified_block.hash();
info!(target: "bft", "Importing block #{} ({}) directly from BFT consensus",
justified_block.header().number(), hash);
let just : Justification = RhdJustification(committed.justification).into();
let import_ok = self.import.import_block(
justified_block,
committed.justification,
just,
&self.inner.context().authorities
);
@@ -616,110 +632,16 @@ pub fn bft_threshold(n: usize) -> usize {
n - max_faulty_of(n)
}
fn check_justification_signed_message<H>(authorities: &[AuthorityId], message: &[u8], just: UncheckedJustification<H>)
-> Result<Justification<H>, UncheckedJustification<H>>
{
// TODO: return additional error information.
just.0.check(authorities.len() - max_faulty_of(authorities.len()), |_, _, sig| {
let auth_id = sig.signer.clone().into();
if !authorities.contains(&auth_id) { return None }
if ed25519::verify_strong(&sig.signature, message, &sig.signer) {
Some(sig.signer.0)
} else {
None
}
}).map_err(UncheckedJustification)
}
/// Check a full justification for a header hash.
/// Provide all valid authorities.
///
/// On failure, returns the justification back.
pub fn check_justification<B: Block>(authorities: &[AuthorityId], parent: B::Hash, just: UncheckedJustification<B::Hash>)
-> Result<Justification<B::Hash>, UncheckedJustification<B::Hash>>
{
let message = Encode::encode(&PrimitiveMessage::<B, _> {
parent,
action: PrimitiveAction::Commit(just.0.round_number as u32, just.0.digest.clone()),
});
check_justification_signed_message(authorities, &message[..], just)
}
/// Check a prepare justification for a header hash.
/// Provide all valid authorities.
///
/// On failure, returns the justification back.
pub fn check_prepare_justification<B: Block>(authorities: &[AuthorityId], parent: B::Hash, just: UncheckedJustification<B::Hash>)
-> Result<PrepareJustification<B::Hash>, UncheckedJustification<B::Hash>>
{
let message = Encode::encode(&PrimitiveMessage::<B, _> {
parent,
action: PrimitiveAction::Prepare(just.0.round_number as u32, just.0.digest.clone()),
});
check_justification_signed_message(authorities, &message[..], just)
}
/// Check proposal message signatures and authority.
/// Provide all valid authorities.
pub fn check_proposal<B: Block + Clone>(
authorities: &[AuthorityId],
parent_hash: &B::Hash,
propose: &::rhododendron::LocalizedProposal<B, B::Hash, AuthorityId, LocalizedSignature>)
-> Result<(), Error>
{
if !authorities.contains(&propose.sender) {
return Err(ErrorKind::InvalidAuthority(propose.sender.into()).into());
}
let action_header = PrimitiveAction::ProposeHeader(propose.round_number as u32, propose.digest.clone());
let action_propose = PrimitiveAction::Propose(propose.round_number as u32, propose.proposal.clone());
check_action::<B>(action_header, parent_hash, &propose.digest_signature)?;
check_action::<B>(action_propose, parent_hash, &propose.full_signature)
}
/// Check vote message signatures and authority.
/// Provide all valid authorities.
pub fn check_vote<B: Block>(
authorities: &[AuthorityId],
parent_hash: &B::Hash,
vote: &::rhododendron::LocalizedVote<B::Hash, AuthorityId, LocalizedSignature>)
-> Result<(), Error>
{
if !authorities.contains(&vote.sender) {
return Err(ErrorKind::InvalidAuthority(vote.sender.into()).into());
}
let action = match vote.vote {
::rhododendron::Vote::Prepare(r, ref h) => PrimitiveAction::Prepare(r as u32, h.clone()),
::rhododendron::Vote::Commit(r, ref h) => PrimitiveAction::Commit(r as u32, h.clone()),
::rhododendron::Vote::AdvanceRound(r) => PrimitiveAction::AdvanceRound(r as u32),
};
check_action::<B>(action, parent_hash, &vote.signature)
}
fn check_action<B: Block>(action: PrimitiveAction<B, B::Hash>, parent_hash: &B::Hash, sig: &LocalizedSignature) -> Result<(), Error> {
let primitive = PrimitiveMessage {
parent: parent_hash.clone(),
action,
};
let message = Encode::encode(&primitive);
if ed25519::verify_strong(&sig.signature, &message, &sig.signer) {
Ok(())
} else {
Err(ErrorKind::InvalidSignature(sig.signature.into(), sig.signer.clone().into()).into())
}
}
/// Sign a BFT message with the given key.
pub fn sign_message<B: Block + Clone>(message: Message<B>, key: &ed25519::Pair, parent_hash: B::Hash) -> LocalizedMessage<B> {
// /// Sign a BFT message with the given key.
pub fn sign_message<B: Block + Clone>(
message: RhdMessage<B, B::Hash>,
key: &ed25519::Pair,
parent_hash: B::Hash
) -> LocalizedMessage<B> {
let signer = key.public();
let sign_action = |action: PrimitiveAction<B, B::Hash>| {
let primitive = PrimitiveMessage {
let sign_action = |action: ::rhododendron::Vote<B>| {
let primitive = ::rhododendron::LocalizedVote {
parent: parent_hash.clone(),
action,
};
@@ -732,10 +654,10 @@ pub fn sign_message<B: Block + Clone>(message: Message<B>, key: &ed25519::Pair,
};
match message {
::rhododendron::Message::Propose(r, proposal) => {
RhdMessage::Propose(r, proposal) => {
let header_hash = proposal.hash();
let action_header = PrimitiveAction::ProposeHeader(r as u32, header_hash.clone());
let action_propose = PrimitiveAction::Propose(r as u32, proposal.clone());
let action_header = ::rhododendron::ProposeHeader(r as u32, header_hash.clone());
let action_propose = ::rhododendron::Propose(r as u32, proposal.clone());
::rhododendron::LocalizedMessage::Propose(::rhododendron::LocalizedProposal {
round_number: r,
@@ -746,19 +668,13 @@ pub fn sign_message<B: Block + Clone>(message: Message<B>, key: &ed25519::Pair,
full_signature: sign_action(action_propose),
})
}
::rhododendron::Message::Vote(vote) => {
let action = match vote {
::rhododendron::Vote::Prepare(r, ref h) => PrimitiveAction::Prepare(r as u32, h.clone()),
::rhododendron::Vote::Commit(r, ref h) => PrimitiveAction::Commit(r as u32, h.clone()),
::rhododendron::Vote::AdvanceRound(r) => PrimitiveAction::AdvanceRound(r as u32),
};
::rhododendron::LocalizedMessage::Vote(::rhododendron::LocalizedVote {
RhdMessage::Vote(vote) => ::rhododendron::LocalizedMessage::Vote(
::rhododendron::LocalizedVote {
vote: vote,
sender: signer.clone().into(),
signature: sign_action(action),
})
}
}
)
}
}
@@ -780,7 +696,7 @@ mod tests {
}
impl BlockImport<TestBlock> for FakeClient {
fn import_block(&self, block: TestBlock, _justification: Justification<H256>, _authorities: &[AuthorityId]) -> bool {
fn import_block(&self, block: TestBlock, _justification: Justification, _authorities: &[AuthorityId]) -> bool {
assert!(self.imported_heights.lock().insert(block.header.number));
true
}
@@ -14,28 +14,13 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
// tag::description[]
//! Utility for substrate-based runtimes that want to check misbehavior reports.
// end::description[]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate parity_codec as codec;
extern crate substrate_primitives as primitives;
extern crate sr_io as runtime_io;
extern crate sr_primitives as runtime_primitives;
#[cfg(test)]
extern crate substrate_bft;
#[cfg(test)]
extern crate substrate_keyring as keyring;
#[cfg(test)]
extern crate rhododendron;
use codec::{Codec, Encode};
use primitives::{AuthorityId, Signature};
use runtime_primitives::bft::{Action, Message, MisbehaviorKind};
use rhododendron::messages::{Action, Message, MisbehaviorKind};
use runtime_io;
// check a message signature. returns true if signed by that authority.
fn check_message_sig<B: Codec, H: Codec>(
@@ -91,13 +76,14 @@ mod tests {
use keyring::ed25519;
use keyring::Keyring;
use rhododendron;
use runtime_primitives::testing::{H256, Block as RawBlock};
type Block = RawBlock<u64>;
fn sign_prepare(key: &ed25519::Pair, round: u32, hash: H256, parent_hash: H256) -> (H256, Signature) {
let msg = substrate_bft::sign_message::<Block>(
let msg = ::sign_message::<Block>(
rhododendron::Message::Vote(rhododendron::Vote::Prepare(round as _, hash)),
key,
parent_hash
@@ -110,7 +96,7 @@ mod tests {
}
fn sign_commit(key: &ed25519::Pair, round: u32, hash: H256, parent_hash: H256) -> (H256, Signature) {
let msg = substrate_bft::sign_message::<Block>(
let msg = ::sign_message::<Block>(
rhododendron::Message::Vote(rhododendron::Vote::Commit(round as _, hash)),
key,
parent_hash
+3 -13
View File
@@ -625,22 +625,12 @@ mod tests {
type In = Box<Stream<Item=Vec<u8>,Error=()>>;
fn messages_for(&self, round: u64) -> Self::In {
use network::consensus_gossip::ConsensusMessage;
let messages = self.inner.lock().peer(self.peer_id)
.with_spec(|spec, _| spec.gossip.messages_for(round_to_topic(round)));
let messages = messages
.map_err(
move |_| panic!("Messages for round {} dropped too early", round)
)
.map(|msg| match msg {
ConsensusMessage::ChainSpecific(raw, _) => {
let message = GossipMessage::decode(&mut &raw[..]).unwrap();
message.data
}
_ => panic!("Only chain-specific messages come under this stream"),
});
let messages = messages.map_err(
move |_| panic!("Messages for round {} dropped too early", round)
);
Box::new(messages)
}
@@ -1,19 +0,0 @@
[package]
name = "substrate-misbehavior-check"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
parity-codec = { version = "2.0", default-features = false }
substrate-primitives = { path = "../primitives", default-features = false }
sr-primitives = { path = "../sr-primitives", default-features = false }
sr-io = { path = "../sr-io", default-features = false }
[dev-dependencies]
substrate-bft = { path = "../bft" }
rhododendron = "0.3"
substrate-keyring = { path = "../keyring" }
[features]
default = ["std"]
std = ["parity-codec/std", "substrate-primitives/std", "sr-primitives/std", "sr-io/std"]
@@ -1,13 +0,0 @@
= Misbehavior-check
.Summary
[source, toml]
----
include::Cargo.toml[lines=2..5]
----
.Description
----
include::src/lib.rs[tag=description]
----
+11 -8
View File
@@ -28,8 +28,10 @@ const MAX_PARALLEL_DOWNLOADS: u32 = 1;
/// Block data with origin.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BlockData<B: BlockT> {
/// The Block Message from the wire
pub block: message::BlockData<B>,
pub origin: NodeIndex,
/// The peer, we received this from
pub origin: Option<NodeIndex>,
}
#[derive(Debug)]
@@ -92,7 +94,8 @@ impl<B: BlockT> BlockCollection<B> {
_ => (),
}
self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter().map(|b| BlockData { origin: who, block: b }).collect()));
self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter()
.map(|b| BlockData { origin: Some(who), block: b }).collect()));
}
/// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded.
@@ -244,14 +247,14 @@ mod test {
bc.insert(1, blocks[1..11].to_vec(), peer0);
assert_eq!(bc.needed_blocks(peer0, 40, 150, 0), Some(11 .. 41));
assert_eq!(bc.drain(1), blocks[1..11].iter().map(|b| BlockData { block: b.clone(), origin: 0 }).collect::<Vec<_>>());
assert_eq!(bc.drain(1), blocks[1..11].iter().map(|b| BlockData { block: b.clone(), origin: Some(0) }).collect::<Vec<_>>());
bc.clear_peer_download(peer0);
bc.insert(11, blocks[11..41].to_vec(), peer0);
let drained = bc.drain(12);
assert_eq!(drained[..30], blocks[11..41].iter().map(|b| BlockData { block: b.clone(), origin: 0 }).collect::<Vec<_>>()[..]);
assert_eq!(drained[30..], blocks[41..81].iter().map(|b| BlockData { block: b.clone(), origin: 1 }).collect::<Vec<_>>()[..]);
assert_eq!(drained[..30], blocks[11..41].iter().map(|b| BlockData { block: b.clone(), origin: Some(0) }).collect::<Vec<_>>()[..]);
assert_eq!(drained[30..], blocks[41..81].iter().map(|b| BlockData { block: b.clone(), origin: Some(1) }).collect::<Vec<_>>()[..]);
bc.clear_peer_download(peer2);
assert_eq!(bc.needed_blocks(peer2, 40, 150, 80), Some(81 .. 121));
@@ -262,8 +265,8 @@ mod test {
assert_eq!(bc.drain(80), vec![]);
let drained = bc.drain(81);
assert_eq!(drained[..40], blocks[81..121].iter().map(|b| BlockData { block: b.clone(), origin: 2 }).collect::<Vec<_>>()[..]);
assert_eq!(drained[40..], blocks[121..150].iter().map(|b| BlockData { block: b.clone(), origin: 1 }).collect::<Vec<_>>()[..]);
assert_eq!(drained[..40], blocks[81..121].iter().map(|b| BlockData { block: b.clone(), origin: Some(2) }).collect::<Vec<_>>()[..]);
assert_eq!(drained[40..], blocks[121..150].iter().map(|b| BlockData { block: b.clone(), origin: Some(1) }).collect::<Vec<_>>()[..]);
}
#[test]
@@ -273,7 +276,7 @@ mod test {
len: 128,
downloading: 1,
});
let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: 0 }).collect();
let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect();
bc.blocks.insert(114305, BlockRangeState::Complete(blocks));
assert_eq!(bc.needed_blocks(0, 128, 10000, 000), Some(1 .. 100));
+9 -24
View File
@@ -16,24 +16,18 @@
//! Blockchain access trait
use client::{self, Client as SubstrateClient, ImportResult, ClientInfo, BlockStatus, BlockOrigin, CallExecutor};
use client::{self, Client as SubstrateClient, ImportBlock, ImportResult, ClientInfo, BlockStatus, CallExecutor};
use client::error::Error;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use runtime_primitives::generic::BlockId;
use runtime_primitives::bft::Justification;
use primitives::{Blake2Hasher};
use runtime_primitives::Justification;
use primitives::{Blake2Hasher, AuthorityId};
/// Local client abstraction for the network.
pub trait Client<Block: BlockT>: Send + Sync {
/// Import a new block. Parent is supposed to be existing in the blockchain.
fn import(
&self,
origin: BlockOrigin,
header: Block::Header,
justification: Justification<Block::Hash>,
body: Option<Vec<Block::Extrinsic>>,
finalized: bool,
) -> Result<ImportResult, Error>;
fn import(&self, block: ImportBlock<Block>, new_authorities: Option<Vec<AuthorityId>>)
-> Result<ImportResult, Error>;
/// Get blockchain info.
fn info(&self) -> Result<ClientInfo<Block>, Error>;
@@ -51,7 +45,7 @@ pub trait Client<Block: BlockT>: Send + Sync {
fn body(&self, id: &BlockId<Block>) -> Result<Option<Vec<Block::Extrinsic>>, Error>;
/// Get block justification.
fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification<Block::Hash>>, Error>;
fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error>;
/// Get block header proof.
fn header_proof(&self, block_number: <Block::Header as HeaderT>::Number) -> Result<(Block::Header, Vec<Vec<u8>>), Error>;
@@ -77,17 +71,8 @@ impl<B, E, Block> Client<Block> for SubstrateClient<B, E, Block> where
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
Block: BlockT,
{
fn import(
&self,
origin: BlockOrigin,
header: Block::Header,
justification: Justification<Block::Hash>,
body: Option<Vec<Block::Extrinsic>>,
finalized: bool,
) -> Result<ImportResult, Error> {
// TODO: defer justification check and add finality.
let justified_header = self.check_justification(header, justification.into())?;
(self as &SubstrateClient<B, E, Block>).import_block(origin, justified_header, body, finalized)
fn import(&self, block: ImportBlock<Block>, new_authorities: Option<Vec<AuthorityId>>) -> Result<ImportResult, Error> {
(self as &SubstrateClient<B, E, Block>).import_block(block, new_authorities)
}
fn info(&self) -> Result<ClientInfo<Block>, Error> {
@@ -110,7 +95,7 @@ impl<B, E, Block> Client<Block> for SubstrateClient<B, E, Block> where
(self as &SubstrateClient<B, E, Block>).body(id)
}
fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification<Block::Hash>>, Error> {
fn justification(&self, id: &BlockId<Block>) -> Result<Option<Justification>, Error> {
(self as &SubstrateClient<B, E, Block>).justification(id)
}
+130 -193
View File
@@ -22,9 +22,9 @@ use futures::sync::mpsc;
use std::time::{Instant, Duration};
use rand::{self, Rng};
use network_libp2p::NodeIndex;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash, HashFor};
use runtime_primitives::generic::BlockId;
use message::{self, generic::Message as GenericMessage};
use message::generic::{Message, ConsensusMessage};
use protocol::Context;
use service::Roles;
use specialization::Specialization;
@@ -39,38 +39,33 @@ struct PeerConsensus<H> {
is_authority: bool,
}
/// Consensus messages.
#[derive(Debug, Clone, PartialEq)]
pub enum ConsensusMessage<B: BlockT> {
/// A message concerning BFT agreement
Bft(message::LocalizedBftMessage<B>),
/// A message concerning some chain-specific aspect of consensus
ChainSpecific(Vec<u8>, B::Hash),
}
struct MessageEntry<B: BlockT> {
hash: B::Hash,
message: ConsensusMessage<B>,
topic: B::Hash,
message_hash: B::Hash,
message: ConsensusMessage,
instant: Instant,
}
/// Consensus network protocol handler. Manages statements and candidate requests.
pub struct ConsensusGossip<B: BlockT> {
peers: HashMap<NodeIndex, PeerConsensus<B::Hash>>,
live_message_sinks: HashMap<B::Hash, mpsc::UnboundedSender<ConsensusMessage<B>>>,
peers: HashMap<NodeIndex, PeerConsensus<(B::Hash, B::Hash)>>,
live_message_sinks: HashMap<B::Hash, mpsc::UnboundedSender<ConsensusMessage>>,
messages: Vec<MessageEntry<B>>,
message_hashes: HashSet<B::Hash>,
known_messages: HashSet<(B::Hash, B::Hash)>,
session_start: Option<B::Hash>,
}
impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
impl<B: BlockT> ConsensusGossip<B>
where
B::Header: HeaderT<Number=u64>
{
/// Create a new instance.
pub fn new() -> Self {
ConsensusGossip {
peers: HashMap::new(),
live_message_sinks: HashMap::new(),
messages: Default::default(),
message_hashes: Default::default(),
known_messages: Default::default(),
session_start: None
}
}
@@ -88,13 +83,8 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
// TODO: limit by size
let mut known_messages = HashSet::new();
for entry in self.messages.iter() {
known_messages.insert(entry.hash);
let message = match entry.message {
ConsensusMessage::Bft(ref bft) => GenericMessage::BftMessage(bft.clone()),
ConsensusMessage::ChainSpecific(ref msg, _) => GenericMessage::ChainSpecific(msg.clone()),
};
protocol.send_message(who, message);
known_messages.insert((entry.topic, entry.message_hash));
protocol.send_message(who, Message::Consensus(entry.topic.clone(), entry.message.clone()));
}
self.peers.insert(who, PeerConsensus {
known_messages,
@@ -109,9 +99,17 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
}
}
fn propagate(&mut self, protocol: &mut Context<B>, message: message::Message<B>, hash: B::Hash) {
fn propagate<F>(
&mut self,
protocol: &mut Context<B>,
message_hash: B::Hash,
topic: B::Hash,
get_message: F,
)
where F: Fn() -> ConsensusMessage,
{
let mut non_authorities: Vec<_> = self.peers.iter()
.filter_map(|(id, ref peer)| if !peer.is_authority && !peer.known_messages.contains(&hash) { Some(*id) } else { None })
.filter_map(|(id, ref peer)| if !peer.is_authority && !peer.known_messages.contains(&(topic, message_hash)) { Some(*id) } else { None })
.collect();
rand::thread_rng().shuffle(&mut non_authorities);
@@ -123,79 +121,33 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
for (id, ref mut peer) in self.peers.iter_mut() {
if peer.is_authority {
if peer.known_messages.insert(hash.clone()) {
if peer.known_messages.insert((topic.clone(), message_hash.clone())) {
let message = get_message();
trace!(target:"gossip", "Propagating to authority {}: {:?}", id, message);
protocol.send_message(*id, message.clone());
protocol.send_message(*id, Message::Consensus(topic, message));
}
}
else if non_authorities.contains(&id) {
} else if non_authorities.contains(&id) {
let message = get_message();
trace!(target:"gossip", "Propagating to {}: {:?}", id, message);
peer.known_messages.insert(hash.clone());
protocol.send_message(*id, message.clone());
peer.known_messages.insert((topic.clone(), message_hash.clone()));
protocol.send_message(*id, Message::Consensus(topic, message));
}
}
}
fn register_message(&mut self, hash: B::Hash, message: ConsensusMessage<B>) {
if self.message_hashes.insert(hash) {
fn register_message<F>(&mut self, message_hash: B::Hash, topic: B::Hash, get_message: F)
where F: Fn() -> ConsensusMessage
{
if self.known_messages.insert((topic, message_hash)) {
self.messages.push(MessageEntry {
hash,
topic,
message_hash,
instant: Instant::now(),
message,
message: get_message(),
});
}
}
/// Handles incoming BFT message, passing to stream and repropagating.
pub fn on_bft_message(&mut self, protocol: &mut Context<B>, who: NodeIndex, message: message::LocalizedBftMessage<B>) {
if let Some((hash, message)) = self.handle_incoming(protocol, who, ConsensusMessage::Bft(message)) {
// propagate to other peers.
self.multicast(protocol, message, Some(hash));
}
}
/// Handles incoming chain-specific message and repropagates
pub fn on_chain_specific(&mut self, protocol: &mut Context<B>, who: NodeIndex, message: Vec<u8>, topic: B::Hash) {
debug!(target: "gossip", "received chain-specific gossip message");
if let Some((hash, message)) = self.handle_incoming(protocol, who, ConsensusMessage::ChainSpecific(message, topic)) {
debug!(target: "gossip", "handled incoming chain-specific message");
// propagate to other peers.
self.multicast(protocol, message, Some(hash));
}
}
/// Get a stream of messages relevant to consensus for the given topic.
pub fn messages_for(&mut self, topic: B::Hash) -> mpsc::UnboundedReceiver<ConsensusMessage<B>> {
let (sink, stream) = mpsc::unbounded();
for entry in self.messages.iter() {
let message_matches = match entry.message {
ConsensusMessage::Bft(ref msg) => msg.parent_hash == topic,
ConsensusMessage::ChainSpecific(_, ref h) => h == &topic,
};
if message_matches {
sink.unbounded_send(entry.message.clone()).expect("receiving end known to be open; qed");
}
}
self.live_message_sinks.insert(topic, sink);
stream
}
/// Multicast a chain-specific message to other authorities.
pub fn multicast_chain_specific(&mut self, protocol: &mut Context<B>, message: Vec<u8>, topic: B::Hash) {
trace!(target:"gossip", "sending chain-specific message");
self.multicast(protocol, ConsensusMessage::ChainSpecific(message, topic), None);
}
/// Multicast a BFT message to other authorities
pub fn multicast_bft_message(&mut self, protocol: &mut Context<B>, message: message::LocalizedBftMessage<B>) {
// Broadcast message to all authorities.
trace!(target:"gossip", "Broadcasting BFT message {:?}", message);
self.multicast(protocol, ConsensusMessage::Bft(message), None);
}
/// Call when a peer has been disconnected to stop tracking gossip status.
pub fn peer_disconnected(&mut self, _protocol: &mut Context<B>, who: NodeIndex) {
self.peers.remove(&who);
@@ -206,19 +158,14 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
pub fn collect_garbage<P: Fn(&B::Hash) -> bool>(&mut self, predicate: P) {
self.live_message_sinks.retain(|_, sink| !sink.is_closed());
let hashes = &mut self.message_hashes;
let hashes = &mut self.known_messages;
let before = self.messages.len();
let now = Instant::now();
self.messages.retain(|entry| {
let topic = match entry.message {
ConsensusMessage::Bft(ref msg) => &msg.parent_hash,
ConsensusMessage::ChainSpecific(_, ref h) => h,
};
if entry.instant + MESSAGE_LIFETIME >= now && predicate(topic) {
if entry.instant + MESSAGE_LIFETIME >= now && predicate(&entry.topic) {
true
} else {
hashes.remove(&entry.hash);
hashes.remove(&(entry.topic, entry.message_hash));
false
}
});
@@ -228,35 +175,32 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
}
}
fn handle_incoming(&mut self, protocol: &mut Context<B>, who: NodeIndex, message: ConsensusMessage<B>) -> Option<(B::Hash, ConsensusMessage<B>)> {
let (hash, topic, message) = match message {
ConsensusMessage::Bft(msg) => {
let parent = msg.parent_hash;
let generic = GenericMessage::BftMessage(msg);
(
::protocol::hash_message(&generic),
parent,
match generic {
GenericMessage::BftMessage(msg) => ConsensusMessage::Bft(msg),
_ => panic!("`generic` is known to be the `BftMessage` variant; qed"),
}
)
}
ConsensusMessage::ChainSpecific(msg, topic) => {
let generic = GenericMessage::ChainSpecific(msg);
(
::protocol::hash_message::<B>(&generic),
topic,
match generic {
GenericMessage::ChainSpecific(msg) => ConsensusMessage::ChainSpecific(msg, topic),
_ => panic!("`generic` is known to be the `ChainSpecific` variant; qed"),
}
)
}
};
/// Get all incoming messages for a topic.
pub fn messages_for(&mut self, topic: B::Hash) -> mpsc::UnboundedReceiver<ConsensusMessage> {
let (tx, rx) = mpsc::unbounded();
for entry in self.messages.iter().filter(|e| e.topic == topic) {
tx.unbounded_send(entry.message.clone()).expect("receiver known to be live; qed");
}
self.live_message_sinks.insert(topic, tx);
if self.message_hashes.contains(&hash) {
trace!(target:"gossip", "Ignored already known message from {}", who);
rx
}
/// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic
/// already known, the message is old, its source peers isn't a registered peer or the connection
/// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None`
/// in all other cases.
pub fn on_incoming(
&mut self,
protocol: &mut Context<B>,
who: NodeIndex,
topic: B::Hash,
message: ConsensusMessage,
) -> Option<(B::Hash, ConsensusMessage)> {
let message_hash = HashFor::<B>::hash(&message[..]);
if self.known_messages.contains(&(topic, message_hash)) {
trace!(target:"gossip", "Ignored already known message from {} in {}", who, topic);
return None;
}
@@ -274,11 +218,12 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
(Ok(_), Ok(None)) => {},
}
if let Some(ref mut peer) = self.peers.get_mut(&who) {
use std::collections::hash_map::Entry;
peer.known_messages.insert(hash);
peer.known_messages.insert((topic, message_hash));
if let Entry::Occupied(mut entry) = self.live_message_sinks.entry(topic) {
debug!(target: "gossip", "Pushing relevant consensus message to sink.");
debug!(target: "gossip", "Pushing consensus message to sink for {}.", topic);
if let Err(e) = entry.get().unbounded_send(message.clone()) {
trace!(target:"gossip", "Error broadcasting message notification: {:?}", e);
}
@@ -292,18 +237,21 @@ impl<B: BlockT> ConsensusGossip<B> where B::Header: HeaderT<Number=u64> {
return None;
}
Some((hash, message))
self.multicast_inner(protocol, message_hash, topic, || message.clone());
Some((topic, message))
}
fn multicast(&mut self, protocol: &mut Context<B>, message: ConsensusMessage<B>, hash: Option<B::Hash>) {
let generic = match message {
ConsensusMessage::Bft(ref message) => GenericMessage::BftMessage(message.clone()),
ConsensusMessage::ChainSpecific(ref message, _) => GenericMessage::ChainSpecific(message.clone()),
};
/// Multicast a message to all peers.
pub fn multicast(&mut self, protocol: &mut Context<B>, topic: B::Hash, message: ConsensusMessage) {
let message_hash = HashFor::<B>::hash(&message);
self.multicast_inner(protocol, message_hash, topic, || message.clone());
}
let hash = hash.unwrap_or_else(|| ::protocol::hash_message(&generic));
self.register_message(hash, message);
self.propagate(protocol, generic, hash);
fn multicast_inner<F>(&mut self, protocol: &mut Context<B>, message_hash: B::Hash, topic: B::Hash, get_message: F)
where F: Fn() -> ConsensusMessage
{
self.register_message(message_hash, topic, &get_message);
self.propagate(protocol, message_hash, topic, get_message);
}
/// Note new consensus session.
@@ -329,12 +277,16 @@ impl<Block: BlockT> Specialization<Block> for ConsensusGossip<Block> where
self.peer_disconnected(ctx, who);
}
fn on_message(&mut self, ctx: &mut Context<Block>, who: NodeIndex, message: &mut Option<message::Message<Block>>) {
fn on_message(
&mut self,
ctx: &mut Context<Block>,
who: NodeIndex,
message: &mut Option<::message::Message<Block>>
) {
match message.take() {
Some(generic_message::Message::BftMessage(msg)) => {
trace!(target: "gossip", "BFT message from {}: {:?}", who, msg);
// TODO: check signature here? what if relevant block is unknown?
self.on_bft_message(ctx, who, msg)
Some(generic_message::Message::Consensus(topic, msg)) => {
trace!(target: "gossip", "Consensus message from {}: {:?}", who, msg);
self.on_incoming(ctx, who, topic, msg);
}
r => *message = r,
}
@@ -358,10 +310,8 @@ impl<Block: BlockT> Specialization<Block> for ConsensusGossip<Block> where
#[cfg(test)]
mod tests {
use runtime_primitives::bft::Justification;
use runtime_primitives::testing::{H256, Header, Block as RawBlock};
use runtime_primitives::testing::{H256, Block as RawBlock};
use std::time::Instant;
use message::{self, generic};
use super::*;
type Block = RawBlock<u64>;
@@ -374,90 +324,77 @@ mod tests {
let now = Instant::now();
let m1_hash = H256::random();
let m2_hash = H256::random();
let m1 = ConsensusMessage::Bft(message::LocalizedBftMessage {
parent_hash: prev_hash,
message: message::generic::BftMessage::Auxiliary(Justification {
round_number: 0,
hash: Default::default(),
signatures: Default::default(),
}),
});
let m2 = ConsensusMessage::ChainSpecific(vec![1, 2, 3], best_hash);
let m1 = vec![1, 2, 3];
let m2 = vec![4, 5, 6];
macro_rules! push_msg {
($hash:expr, $now: expr, $m:expr) => {
($topic:expr, $hash: expr, $now: expr, $m:expr) => {
consensus.messages.push(MessageEntry {
hash: $hash,
topic: $topic,
message_hash: $hash,
instant: $now,
message: $m,
})
}
}
push_msg!(m1_hash, now, m1);
push_msg!(m2_hash, now, m2.clone());
consensus.message_hashes.insert(m1_hash);
consensus.message_hashes.insert(m2_hash);
push_msg!(prev_hash, m1_hash, now, m1);
push_msg!(best_hash, m2_hash, now, m2.clone());
consensus.known_messages.insert((prev_hash, m1_hash));
consensus.known_messages.insert((best_hash, m2_hash));
// nothing to collect
consensus.collect_garbage(|_topic| true);
consensus.collect_garbage(|_t| true);
assert_eq!(consensus.messages.len(), 2);
assert_eq!(consensus.message_hashes.len(), 2);
// random header, nothing should be cleared
let mut header = Header {
parent_hash: H256::default(),
number: 0,
state_root: H256::default(),
extrinsics_root: H256::default(),
digest: Default::default(),
};
assert_eq!(consensus.known_messages.len(), 2);
// nothing to collect with default.
consensus.collect_garbage(|&topic| topic != Default::default());
assert_eq!(consensus.messages.len(), 2);
assert_eq!(consensus.message_hashes.len(), 2);
assert_eq!(consensus.known_messages.len(), 2);
// header that matches one of the messages
header.parent_hash = prev_hash;
// topic that was used in one message.
consensus.collect_garbage(|topic| topic != &prev_hash);
assert_eq!(consensus.messages.len(), 1);
assert_eq!(consensus.message_hashes.len(), 1);
assert!(consensus.message_hashes.contains(&m2_hash));
assert_eq!(consensus.known_messages.len(), 1);
assert!(consensus.known_messages.contains(&(best_hash, m2_hash)));
// make timestamp expired
consensus.messages.clear();
push_msg!(m2_hash, now - MESSAGE_LIFETIME, m2);
push_msg!(best_hash, m2_hash, now - MESSAGE_LIFETIME, m2);
consensus.collect_garbage(|_topic| true);
assert!(consensus.messages.is_empty());
assert!(consensus.message_hashes.is_empty());
assert!(consensus.known_messages.is_empty());
}
#[test]
fn message_stream_include_those_sent_before_asking_for_stream() {
use futures::Stream;
let mut consensus = ConsensusGossip::new();
let mut consensus = ConsensusGossip::<Block>::new();
let bft_message = generic::BftMessage::Consensus(generic::SignedConsensusMessage::Vote(generic::SignedConsensusVote {
vote: generic::ConsensusVote::AdvanceRound(0),
sender: [0; 32].into(),
signature: Default::default(),
}));
let message = vec![1, 2, 3];
let parent_hash = [1; 32].into();
let message_hash = HashFor::<Block>::hash(&message);
let topic = HashFor::<Block>::hash(&[1,2,3]);
let localized = ::message::LocalizedBftMessage::<Block> {
message: bft_message,
parent_hash: parent_hash,
};
let message = generic::Message::BftMessage(localized.clone());
let message_hash = ::protocol::hash_message::<Block>(&message);
let message = ConsensusMessage::Bft(localized);
consensus.register_message(message_hash, message.clone());
let stream = consensus.messages_for(parent_hash);
consensus.register_message(message_hash, topic, || message.clone());
let stream = consensus.messages_for(topic);
assert_eq!(stream.wait().next(), Some(Ok(message)));
}
#[test]
fn can_keep_multiple_messages_per_topic() {
let mut consensus = ConsensusGossip::<Block>::new();
let topic = [1; 32].into();
let msg_a = vec![1, 2, 3];
let msg_b = vec![4, 5, 6];
consensus.register_message(HashFor::<Block>::hash(&msg_a), topic, || msg_a.clone());
consensus.register_message(HashFor::<Block>::hash(&msg_b), topic, || msg_b.clone());
assert_eq!(consensus.messages.len(), 2);
}
}
+395 -243
View File
@@ -14,27 +14,69 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Blocks import queue.
//! Import Queue primitive: something which can verify and import blocks.
//!
//! This serves as an intermediate and abstracted step between synchronization
//! and import. Each mode of consensus will have its own requirements for block verification.
//! Some algorithms can verify in parallel, while others only sequentially.
//!
//! The `ImportQueue` trait allows such verification strategies to be instantiated.
//! The `BasicQueue` and `BasicVerifier` traits allow serial queues to be
//! instantiated simply.
use std::collections::{HashSet, VecDeque};
use std::sync::{Arc, Weak};
use std::sync::atomic::{AtomicBool, Ordering};
use parking_lot::{Condvar, Mutex, RwLock};
use client::{BlockOrigin, ImportResult};
pub use client::{BlockOrigin, ImportBlock, ImportResult};
use network_libp2p::{NodeIndex, Severity};
use primitives::AuthorityId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
use blocks::BlockData;
pub use blocks::BlockData;
use chain::Client;
use error::{ErrorKind, Error};
use protocol::Context;
use service::ExecuteInContext;
use sync::ChainSync;
#[cfg(any(test, feature = "test-helpers"))]
use std::cell::RefCell;
/// Verify a justification of a block
pub trait Verifier<B: BlockT>: Send + Sync + Sized {
/// Verify the given data and return the ImportBlock and an optional
/// new set of validators to import. If not, err with an Error-Message
/// presented to the User in the logs.
fn verify(
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String>;
}
/// Blocks import queue API.
pub trait ImportQueue<B: BlockT>: Send + Sync {
/// Start background work for the queue as necessary.
///
/// This is called automatically by the network service when synchronization
/// begins.
fn start<E>(
&self,
_sync: Weak<RwLock<ChainSync<B>>>,
_service: Weak<E>,
_chain: Weak<Client<B>>
) -> Result<(), Error> where
Self: Sized,
E: 'static + ExecuteInContext<B>,
{
Ok(())
}
/// Clear the queue when sync is restarting.
fn clear(&self);
/// Clears the import queue and stops importing.
@@ -44,7 +86,7 @@ pub trait ImportQueue<B: BlockT>: Send + Sync {
/// Is block with given hash currently in the queue.
fn is_importing(&self, hash: &B::Hash) -> bool;
/// Import bunch of blocks.
fn import_blocks(&self, sync: &mut ChainSync<B>, protocol: &mut Context<B>, blocks: (BlockOrigin, Vec<BlockData<B>>));
fn import_blocks(&self, origin: BlockOrigin, blocks: Vec<BlockData<B>>);
}
/// Import queue status. It isn't completely accurate.
@@ -55,11 +97,12 @@ pub struct ImportQueueStatus<B: BlockT> {
pub best_importing_number: <<B as BlockT>::Header as HeaderT>::Number,
}
/// Blocks import queue that is importing blocks in the separate thread.
pub struct AsyncImportQueue<B: BlockT> {
/// Basic block import queue that is importing blocks sequentially in a separate thread,
/// with pluggable verification.
pub struct BasicQueue<B: BlockT, V: 'static + Verifier<B>> {
handle: Mutex<Option<::std::thread::JoinHandle<()>>>,
data: Arc<AsyncImportQueueData<B>>,
instant_finality: bool,
verifier: Arc<V>,
}
/// Locks order: queue, queue_blocks, best_importing_number
@@ -71,29 +114,19 @@ struct AsyncImportQueueData<B: BlockT> {
is_stopping: AtomicBool,
}
impl<B: BlockT> AsyncImportQueue<B> {
pub fn new(instant_finality: bool) -> Self {
impl<B: BlockT, V: Verifier<B>> BasicQueue<B, V> {
/// Instantiate a new basic queue, with given verifier.
pub fn new(verifier: Arc<V>) -> Self {
Self {
handle: Mutex::new(None),
data: Arc::new(AsyncImportQueueData::new()),
instant_finality,
verifier,
}
}
pub fn start<E: 'static + ExecuteInContext<B>>(&self, sync: Weak<RwLock<ChainSync<B>>>, service: Weak<E>, chain: Weak<Client<B>>) -> Result<(), Error> {
debug_assert!(self.handle.lock().is_none());
let qdata = self.data.clone();
let instant_finality = self.instant_finality;
*self.handle.lock() = Some(::std::thread::Builder::new().name("ImportQueue".into()).spawn(move || {
import_thread(sync, service, chain, qdata, instant_finality)
}).map_err(|err| Error::from(ErrorKind::Io(err)))?);
Ok(())
}
}
impl<B: BlockT> AsyncImportQueueData<B> {
pub fn new() -> Self {
fn new() -> Self {
Self {
signal: Default::default(),
queue: Mutex::new(VecDeque::new()),
@@ -104,7 +137,23 @@ impl<B: BlockT> AsyncImportQueueData<B> {
}
}
impl<B: BlockT> ImportQueue<B> for AsyncImportQueue<B> {
impl<B: BlockT, V: 'static + Verifier<B>> ImportQueue<B> for BasicQueue<B, V> {
/// Spawn the background import thread, wiring it up to the sync/service/chain
/// handles. Must only be called once per queue (debug-asserted below).
fn start<E: 'static + ExecuteInContext<B>>(
	&self,
	sync: Weak<RwLock<ChainSync<B>>>,
	service: Weak<E>,
	chain: Weak<Client<B>>
) -> Result<(), Error> {
	// A second call would leak the first thread's handle; catch it in debug builds.
	debug_assert!(self.handle.lock().is_none());

	let qdata = self.data.clone();
	let verifier = self.verifier.clone();
	// The spawned thread drains `qdata`'s queue via `import_thread`, using the
	// queue's verifier for each block.
	*self.handle.lock() = Some(::std::thread::Builder::new().name("ImportQueue".into()).spawn(move || {
		import_thread(sync, service, chain, qdata, verifier)
	}).map_err(|err| Error::from(ErrorKind::Io(err)))?);
	Ok(())
}
fn clear(&self) {
let mut queue = self.data.queue.lock();
let mut queue_blocks = self.data.queue_blocks.write();
@@ -135,39 +184,39 @@ impl<B: BlockT> ImportQueue<B> for AsyncImportQueue<B> {
self.data.queue_blocks.read().contains(hash)
}
fn import_blocks(&self, _sync: &mut ChainSync<B>, _protocol: &mut Context<B>, blocks: (BlockOrigin, Vec<BlockData<B>>)) {
if blocks.1.is_empty() {
fn import_blocks(&self, origin: BlockOrigin, blocks: Vec<BlockData<B>>) {
if blocks.is_empty() {
return;
}
trace!(target:"sync", "Scheduling {} blocks for import", blocks.1.len());
trace!(target:"sync", "Scheduling {} blocks for import", blocks.len());
let mut queue = self.data.queue.lock();
let mut queue_blocks = self.data.queue_blocks.write();
let mut best_importing_number = self.data.best_importing_number.write();
let new_best_importing_number = blocks.1.last().and_then(|b| b.block.header.as_ref().map(|h| h.number().clone())).unwrap_or_else(|| Zero::zero());
queue_blocks.extend(blocks.1.iter().map(|b| b.block.hash.clone()));
let new_best_importing_number = blocks.last().and_then(|b| b.block.header.as_ref().map(|h| h.number().clone())).unwrap_or_else(|| Zero::zero());
queue_blocks.extend(blocks.iter().map(|b| b.block.hash.clone()));
if new_best_importing_number > *best_importing_number {
*best_importing_number = new_best_importing_number;
}
queue.push_back(blocks);
queue.push_back((origin, blocks));
self.data.signal.notify_one();
}
}
impl<B: BlockT> Drop for AsyncImportQueue<B> {
impl<B: BlockT, V: 'static + Verifier<B>> Drop for BasicQueue<B, V> {
fn drop(&mut self) {
self.stop();
}
}
/// Blocks import thread.
fn import_thread<B: BlockT, E: ExecuteInContext<B>>(
fn import_thread<B: BlockT, E: ExecuteInContext<B>, V: Verifier<B>>(
sync: Weak<RwLock<ChainSync<B>>>,
service: Weak<E>,
chain: Weak<Client<B>>,
qdata: Arc<AsyncImportQueueData<B>>,
instant_finality: bool,
verifier: Arc<V>
) {
trace!(target: "sync", "Starting import thread");
loop {
@@ -191,10 +240,10 @@ fn import_thread<B: BlockT, E: ExecuteInContext<B>>(
(Some(sync), Some(service), Some(chain)) => {
let blocks_hashes: Vec<B::Hash> = new_blocks.1.iter().map(|b| b.block.hash.clone()).collect();
if !import_many_blocks(
&mut SyncLink::Indirect(&sync, &*chain, &*service),
&mut SyncLink{chain: &sync, client: &*chain, context: &*service},
Some(&*qdata),
new_blocks,
instant_finality,
verifier.clone(),
) {
break;
}
@@ -210,7 +259,6 @@ fn import_thread<B: BlockT, E: ExecuteInContext<B>>(
trace!(target: "sync", "Stopping import thread");
}
/// ChainSync link trait.
trait SyncLinkApi<B: BlockT> {
/// Get chain reference.
@@ -227,200 +275,30 @@ trait SyncLinkApi<B: BlockT> {
fn restart(&mut self);
}
/// Link with the ChainSync service.
enum SyncLink<'a, B: 'a + BlockT, E: 'a + ExecuteInContext<B>> {
/// Indirect link (through service).
Indirect(&'a RwLock<ChainSync<B>>, &'a Client<B>, &'a E),
/// Direct references are given.
#[cfg(any(test, feature = "test-helpers"))]
Direct(&'a mut ChainSync<B>, &'a mut Context<B>),
}
/// Block import successful result.
#[derive(Debug, PartialEq)]
enum BlockImportResult<H: ::std::fmt::Debug + PartialEq, N: ::std::fmt::Debug + PartialEq> {
/// Imported known block.
ImportedKnown(H, N),
/// Imported unknown block.
ImportedUnknown(H, N),
}
/// Block import error.
#[derive(Debug, PartialEq)]
enum BlockImportError {
/// Disconnect from peer and continue import of next bunch of blocks.
Disconnect(NodeIndex),
/// Disconnect from peer and restart sync.
DisconnectAndRestart(NodeIndex),
/// Restart sync.
Restart,
}
/// Import a bunch of blocks.
fn import_many_blocks<'a, B: BlockT>(
link: &mut SyncLinkApi<B>,
qdata: Option<&AsyncImportQueueData<B>>,
blocks: (BlockOrigin, Vec<BlockData<B>>),
instant_finality: bool,
) -> bool
{
let (blocks_origin, blocks) = blocks;
let count = blocks.len();
let mut imported = 0;
let blocks_range = match (
blocks.first().and_then(|b| b.block.header.as_ref().map(|h| h.number())),
blocks.last().and_then(|b| b.block.header.as_ref().map(|h| h.number())),
) {
(Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last),
(Some(first), Some(_)) => format!(" ({})", first),
_ => Default::default(),
};
trace!(target:"sync", "Starting import of {} blocks{}", count, blocks_range);
// Blocks in the response/drain should be in ascending order.
for block in blocks {
let import_result = import_single_block(
link.chain(),
blocks_origin.clone(),
block,
instant_finality,
);
let is_import_failed = import_result.is_err();
imported += process_import_result(link, import_result);
if is_import_failed {
qdata.map(|qdata| *qdata.best_importing_number.write() = Zero::zero());
return true;
}
if qdata.map(|qdata| qdata.is_stopping.load(Ordering::SeqCst)).unwrap_or_default() {
return false;
}
}
trace!(target: "sync", "Imported {} of {}", imported, count);
link.maintain_sync();
true
}
/// Single block import function.
fn import_single_block<B: BlockT>(
chain: &Client<B>,
block_origin: BlockOrigin,
block: BlockData<B>,
instant_finality: bool,
) -> Result<BlockImportResult<B::Hash, <<B as BlockT>::Header as HeaderT>::Number>, BlockImportError>
{
let origin = block.origin;
let block = block.block;
match (block.header, block.justification) {
(Some(header), Some(justification)) => {
let number = header.number().clone();
let hash = header.hash();
let parent = header.parent_hash().clone();
let result = chain.import(
block_origin,
header,
justification,
block.body,
instant_finality,
);
match result {
Ok(ImportResult::AlreadyInChain) => {
trace!(target: "sync", "Block already in chain {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedKnown(hash, number))
},
Ok(ImportResult::AlreadyQueued) => {
trace!(target: "sync", "Block already queued {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedKnown(hash, number))
},
Ok(ImportResult::Queued) => {
trace!(target: "sync", "Block queued {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedUnknown(hash, number))
},
Ok(ImportResult::UnknownParent) => {
debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent);
Err(BlockImportError::Restart)
},
Ok(ImportResult::KnownBad) => {
debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash);
Err(BlockImportError::DisconnectAndRestart(origin)) //TODO: use persistent ID
}
Err(e) => {
debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e);
Err(BlockImportError::Restart)
}
}
},
(None, _) => {
debug!(target: "sync", "Header {} was not provided by {} ", block.hash, origin);
Err(BlockImportError::Disconnect(origin)) //TODO: use persistent ID
},
(_, None) => {
debug!(target: "sync", "Justification set for block {} was not provided by {} ", block.hash, origin);
Err(BlockImportError::Disconnect(origin)) //TODO: use persistent ID
}
}
}
/// Process single block import result.
fn process_import_result<'a, B: BlockT>(
link: &mut SyncLinkApi<B>,
result: Result<BlockImportResult<B::Hash, <<B as BlockT>::Header as HeaderT>::Number>, BlockImportError>
) -> usize
{
match result {
Ok(BlockImportResult::ImportedKnown(hash, number)) => {
link.block_imported(&hash, number);
1
},
Ok(BlockImportResult::ImportedUnknown(hash, number)) => {
link.block_imported(&hash, number);
1
},
Err(BlockImportError::Disconnect(who)) => {
// TODO: FIXME: @arkpar BlockImport shouldn't be trying to manage the peer set.
// This should contain an actual reason.
link.useless_peer(who, "Import result was stated Disconnect");
0
},
Err(BlockImportError::DisconnectAndRestart(who)) => {
// TODO: FIXME: @arkpar BlockImport shouldn't be trying to manage the peer set.
// This should contain an actual reason.
link.note_useless_and_restart_sync(who, "Import result was stated DisconnectAndRestart");
0
},
Err(BlockImportError::Restart) => {
link.restart();
0
},
}
struct SyncLink<'a, B: 'a + BlockT, E: 'a + ExecuteInContext<B>> {
pub chain: &'a RwLock<ChainSync<B>>,
pub client: &'a Client<B>,
pub context: &'a E,
}
impl<'a, B: 'static + BlockT, E: 'a + ExecuteInContext<B>> SyncLink<'a, B, E> {
	/// Execute closure with locked ChainSync.
fn with_sync<F: Fn(&mut ChainSync<B>, &mut Context<B>)>(&mut self, closure: F) {
match *self {
#[cfg(any(test, feature = "test-helpers"))]
SyncLink::Direct(ref mut sync, ref mut protocol) =>
closure(*sync, *protocol),
SyncLink::Indirect(ref sync, _, ref service) =>
service.execute_in_context(move |protocol| {
let mut sync = sync.write();
closure(&mut *sync, protocol)
}),
}
let service = self.context;
let sync = self.chain;
service.execute_in_context(move |protocol| {
let mut sync = sync.write();
closure(&mut *sync, protocol)
});
}
}
impl<'a, B: 'static + BlockT, E: ExecuteInContext<B>> SyncLinkApi<B> for SyncLink<'a, B, E> {
impl<'a, B: 'static + BlockT, E: 'a + ExecuteInContext<B>> SyncLinkApi<B> for SyncLink<'a, B, E> {
fn chain(&self) -> &Client<B> {
match *self {
#[cfg(any(test, feature = "test-helpers"))]
SyncLink::Direct(_, ref protocol) => protocol.client(),
SyncLink::Indirect(_, ref chain, _) => *chain,
}
self.client
}
fn block_imported(&mut self, hash: &B::Hash, number: NumberFor<B>) {
@@ -447,14 +325,288 @@ impl<'a, B: 'static + BlockT, E: ExecuteInContext<B>> SyncLinkApi<B> for SyncLin
}
}
/// Block import successful result.
///
/// Both variants carry the hash and number of the imported block.
#[derive(Debug, PartialEq)]
enum BlockImportResult<H: ::std::fmt::Debug + PartialEq, N: ::std::fmt::Debug + PartialEq> {
	/// Imported a block that was already known.
	ImportedKnown(H, N),
	/// Imported a previously unknown block.
	ImportedUnknown(H, N),
}
/// Block import error.
///
/// The `Option<NodeIndex>` carried by most variants is the peer that sent the
/// offending block, when known.
#[derive(Debug, PartialEq)]
enum BlockImportError {
	/// Block is missing its header and can't be imported.
	IncompleteHeader(Option<NodeIndex>),
	/// Block is missing its justification and can't be imported.
	IncompleteJustification(Option<NodeIndex>),
	/// Block verification failed; the `String` is the verifier's message.
	VerificationFailed(Option<NodeIndex>, String),
	/// Block is known to be bad.
	BadBlock(Option<NodeIndex>),
	/// Block has an unknown parent.
	UnknownParent,
	/// Other error.
	Error,
}
/// Import a bunch of blocks, in ascending order.
///
/// Returns `false` only when the surrounding import thread should shut down
/// (`qdata.is_stopping` was observed); `true` otherwise — including after an
/// import failure, which is reported through `link` while the thread keeps
/// running.
fn import_many_blocks<'a, B: BlockT, V: Verifier<B>>(
	link: &mut SyncLinkApi<B>,
	qdata: Option<&AsyncImportQueueData<B>>,
	blocks: (BlockOrigin, Vec<BlockData<B>>),
	verifier: Arc<V>
) -> bool
{
	let (blocks_origin, blocks) = blocks;
	let count = blocks.len();
	let mut imported = 0;

	// Human-readable block-number range for the log line, e.g. " (5..10)".
	// Note it carries its own leading space (empty when headers are absent).
	let blocks_range = match (
		blocks.first().and_then(|b| b.block.header.as_ref().map(|h| h.number())),
		blocks.last().and_then(|b| b.block.header.as_ref().map(|h| h.number())),
	) {
		(Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last),
		(Some(first), Some(_)) => format!(" ({})", first),
		_ => Default::default(),
	};
	// No separator before `{}`: `blocks_range` already starts with a space.
	trace!(target:"sync", "Starting import of {} blocks{}", count, blocks_range);

	// Blocks in the response/drain should be in ascending order.
	for block in blocks {
		let import_result = import_single_block(
			link.chain(),
			blocks_origin.clone(),
			block,
			verifier.clone(),
		);
		let is_import_failed = import_result.is_err();
		imported += process_import_result(link, import_result);
		if is_import_failed {
			// Reset the shared high-water mark so scheduling starts over
			// after the failure.
			if let Some(qdata) = qdata {
				*qdata.best_importing_number.write() = Zero::zero();
			}
			return true;
		}

		if let Some(qdata) = qdata {
			if qdata.is_stopping.load(Ordering::SeqCst) {
				return false;
			}
		}
	}

	trace!(target: "sync", "Imported {} of {}", imported, count);
	link.maintain_sync();
	true
}
/// Single block import function.
fn import_single_block<B: BlockT, V: Verifier<B>>(
chain: &Client<B>,
block_origin: BlockOrigin,
block: BlockData<B>,
verifier: Arc<V>
) -> Result<BlockImportResult<B::Hash, <<B as BlockT>::Header as HeaderT>::Number>, BlockImportError>
{
let peer = block.origin;
let block = block.block;
let (header, justification) = match (block.header, block.justification) {
(Some(header), Some(justification)) => (header, justification),
(None, _) => {
if let Some(peer) = peer {
debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer);
} else {
debug!(target: "sync", "Header {} was not provided ", block.hash);
}
return Err(BlockImportError::IncompleteHeader(peer)) //TODO: use persistent ID
},
(_, None) => {
if let Some(peer) = peer {
debug!(target: "sync", "Justification set for block {} was not provided by {} ", block.hash, peer);
} else {
debug!(target: "sync", "Justification set for block {} was not provided", block.hash);
}
return Err(BlockImportError::IncompleteJustification(peer)) //TODO: use persistent ID
}
};
let number = header.number().clone();
let hash = header.hash();
let parent = header.parent_hash().clone();
let (import_block, new_authorities) = verifier.verify(block_origin, header, justification, block.body)
.map_err(|msg| {
if let Some(peer) = peer {
trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg);
} else {
trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg);
}
BlockImportError::VerificationFailed(peer, msg)
})?;
match chain.import(import_block, new_authorities) {
Ok(ImportResult::AlreadyInChain) => {
trace!(target: "sync", "Block already in chain {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedKnown(hash, number))
},
Ok(ImportResult::AlreadyQueued) => {
trace!(target: "sync", "Block already queued {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedKnown(hash, number))
},
Ok(ImportResult::Queued) => {
trace!(target: "sync", "Block queued {}: {:?}", number, hash);
Ok(BlockImportResult::ImportedUnknown(hash, number))
},
Ok(ImportResult::UnknownParent) => {
debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent);
Err(BlockImportError::UnknownParent)
},
Ok(ImportResult::KnownBad) => {
debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash);
Err(BlockImportError::BadBlock(peer)) //TODO: use persistent ID
}
Err(e) => {
debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e);
Err(BlockImportError::Error)
}
}
}
/// Process a single block-import result, reporting failures through `link`.
///
/// Returns how many blocks were successfully imported (0 or 1).
fn process_import_result<'a, B: BlockT>(
	link: &mut SyncLinkApi<B>,
	result: Result<BlockImportResult<B::Hash, <<B as BlockT>::Header as HeaderT>::Number>, BlockImportError>
) -> usize
{
	match result {
		// Known and unknown imports are treated identically here: announce
		// to the link and count one imported block.
		Ok(BlockImportResult::ImportedKnown(hash, number))
		| Ok(BlockImportResult::ImportedUnknown(hash, number)) => {
			link.block_imported(&hash, number);
			1
		},
		Err(BlockImportError::IncompleteJustification(peer)) => {
			if let Some(peer) = peer {
				link.useless_peer(peer, "Sent block with incomplete justification to import");
			}
			0
		},
		Err(BlockImportError::IncompleteHeader(peer)) => {
			if let Some(peer) = peer {
				link.useless_peer(peer, "Sent block with incomplete header to import");
			}
			0
		},
		Err(BlockImportError::VerificationFailed(peer, e)) => {
			if let Some(peer) = peer {
				link.useless_peer(peer, &format!("Verification failed: {}", e));
			}
			0
		},
		Err(BlockImportError::BadBlock(peer)) => {
			if let Some(peer) = peer {
				link.note_useless_and_restart_sync(peer, "Sent us a bad block");
			}
			0
		},
		// Errors that implicate no particular peer trigger a sync restart.
		Err(BlockImportError::UnknownParent) | Err(BlockImportError::Error) => {
			link.restart();
			0
		},
	}
}
#[cfg(any(test, feature = "test-helpers"))]
/// Test-only holder for the synchronous import callback installed by
/// `SyncImportQueue::start`.
struct ImportCB<B: BlockT>(RefCell<Option<Box<dyn Fn(BlockOrigin, Vec<BlockData<B>>) -> bool>>>);

#[cfg(any(test, feature = "test-helpers"))]
impl<B: BlockT> ImportCB<B> {
	fn new() -> Self {
		// Starts empty; `set` must run before `call`.
		ImportCB(RefCell::new(None))
	}
	/// Install the import callback.
	fn set<F>(&self, cb: Box<F>)
		where F: 'static + Fn(BlockOrigin, Vec<BlockData<B>>) -> bool
	{
		*self.0.borrow_mut() = Some(cb);
	}
	/// Invoke the previously installed callback.
	///
	/// Panics if `set` has not been called yet.
	fn call(&self, origin: BlockOrigin, data: Vec<BlockData<B>>) -> bool {
		let b = self.0.borrow();
		b.as_ref().expect("The Callback has been set before. qed.")(origin, data)
	}
}

// SAFETY(review): `RefCell<Option<Box<dyn Fn ...>>>` is not thread-safe by
// itself; these impls appear to rely on the test-only `SyncImportQueue`
// being driven from one thread at a time — TODO confirm.
#[cfg(any(test, feature = "test-helpers"))]
unsafe impl<B: BlockT> Send for ImportCB<B> {}
#[cfg(any(test, feature = "test-helpers"))]
unsafe impl<B: BlockT> Sync for ImportCB<B> {}
#[cfg(any(test, feature = "test-helpers"))]
/// A Verifier that accepts all blocks and passes them on with the configured
/// finality to be imported.
pub struct PassThroughVerifier(pub bool);

#[cfg(any(test, feature = "test-helpers"))]
/// This verifier accepts all data as valid.
impl<B: BlockT> Verifier<B> for PassThroughVerifier {
	/// Build an `ImportBlock` straight from the inputs without any checks.
	fn verify(
		&self,
		origin: BlockOrigin,
		header: B::Header,
		justification: Vec<u8>,
		body: Option<Vec<B::Extrinsic>>
	) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
		Ok((ImportBlock {
			origin,
			header,
			body,
			// The wrapped bool decides whether the block is imported as final.
			finalized: self.0,
			external_justification: justification,
			internal_justification: vec![],
			auxiliary: Vec::new(),
		}, None))
	}
}
#[cfg(any(test, feature = "test-helpers"))]
/// Blocks import queue that is importing blocks in the same thread.
/// Runs each block through the wrapped `Verifier` before import.
pub struct SyncImportQueue<B: BlockT, V: Verifier<B>>(Arc<V>, ImportCB<B>);
#[cfg(any(test, feature = "test-helpers"))]
pub struct SyncImportQueue(pub bool);
impl<B: BlockT, V: Verifier<B>> SyncImportQueue<B, V> {
	/// Create a new SyncImportQueue wrapping the given Verifier
	pub fn new(verifier: Arc<V>) -> Self {
		// The import callback stays empty until `ImportQueue::start` fills it.
		SyncImportQueue(verifier, ImportCB::new())
	}
}
#[cfg(any(test, feature = "test-helpers"))]
impl<B: 'static + BlockT> ImportQueue<B> for SyncImportQueue {
impl<B: 'static + BlockT, V: 'static + Verifier<B>> ImportQueue<B> for SyncImportQueue<B, V>
{
/// "Start" the synchronous queue: instead of spawning a thread, install a
/// closure that imports blocks inline when `import_blocks` is called.
fn start<E: 'static + ExecuteInContext<B>>(
	&self,
	sync: Weak<RwLock<ChainSync<B>>>,
	service: Weak<E>,
	chain: Weak<Client<B>>
) -> Result<(), Error> {
	let v = self.0.clone();
	self.1.set(Box::new(move | origin, new_blocks | {
		let verifier = v.clone();
		// Upgrade the weak handles; if any component has gone away the
		// import is dropped and the closure reports `false`.
		match (sync.upgrade(), service.upgrade(), chain.upgrade()) {
			(Some(sync), Some(service), Some(chain)) =>
				import_many_blocks(
					&mut SyncLink{chain: &sync, client: &*chain, context: &*service},
					None,
					(origin, new_blocks),
					verifier,
				),
			_ => false
		}
	}));
	Ok(())
}
fn clear(&self) { }
fn stop(&self) { }
@@ -470,14 +622,8 @@ impl<B: 'static + BlockT> ImportQueue<B> for SyncImportQueue {
false
}
fn import_blocks(&self, sync: &mut ChainSync<B>, protocol: &mut Context<B>, blocks: (BlockOrigin, Vec<BlockData<B>>)) {
struct DummyExecuteInContext;
impl<B: 'static + BlockT> ExecuteInContext<B> for DummyExecuteInContext {
fn execute_in_context<F: Fn(&mut Context<B>)>(&self, _closure: F) { }
}
import_many_blocks(&mut SyncLink::Direct::<_, DummyExecuteInContext>(sync, protocol), None, blocks, self.0);
fn import_blocks(&self, origin: BlockOrigin, blocks: Vec<BlockData<B>>) {
self.1.call(origin, blocks);
}
}
@@ -540,14 +686,14 @@ pub mod tests {
justification: client.justification(&BlockId::Number(1)).unwrap(),
};
(client, hash, number, BlockData { block, origin: 0 })
(client, hash, number, BlockData { block, origin: Some(0) })
}
#[test]
fn import_single_good_block_works() {
let (_, hash, number, block) = prepare_good_block();
assert_eq!(
import_single_block(&test_client::new(), BlockOrigin::File, block, true),
import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))),
Ok(BlockImportResult::ImportedUnknown(hash, number))
);
}
@@ -556,7 +702,7 @@ pub mod tests {
fn import_single_good_known_block_is_ignored() {
let (client, hash, number, block) = prepare_good_block();
assert_eq!(
import_single_block(&client, BlockOrigin::File, block, true),
import_single_block(&client, BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))),
Ok(BlockImportResult::ImportedKnown(hash, number))
);
}
@@ -566,8 +712,8 @@ pub mod tests {
let (_, _, _, mut block) = prepare_good_block();
block.block.header = None;
assert_eq!(
import_single_block(&test_client::new(), BlockOrigin::File, block, true),
Err(BlockImportError::Disconnect(0))
import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))),
Err(BlockImportError::IncompleteHeader(Some(0)))
);
}
@@ -576,8 +722,8 @@ pub mod tests {
let (_, _, _, mut block) = prepare_good_block();
block.block.justification = None;
assert_eq!(
import_single_block(&test_client::new(), BlockOrigin::File, block, true),
Err(BlockImportError::Disconnect(0))
import_single_block(&test_client::new(), BlockOrigin::File, block, Arc::new(PassThroughVerifier(true))),
Err(BlockImportError::IncompleteJustification(Some(0)))
);
}
@@ -598,18 +744,22 @@ pub mod tests {
assert_eq!(link.imported, 1);
let mut link = TestLink::new();
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::Disconnect(0))), 0);
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::IncompleteHeader(Some(0)))), 0);
assert_eq!(link.total(), 1);
assert_eq!(link.disconnects, 1);
let mut link = TestLink::new();
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::DisconnectAndRestart(0))), 0);
assert_eq!(link.total(), 2);
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::IncompleteJustification(Some(0)))), 0);
assert_eq!(link.total(), 1);
assert_eq!(link.disconnects, 1);
let mut link = TestLink::new();
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::UnknownParent)), 0);
assert_eq!(link.total(), 1);
assert_eq!(link.restarts, 1);
let mut link = TestLink::new();
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::Restart)), 0);
assert_eq!(process_import_result::<Block>(&mut link, Err(BlockImportError::Error)), 0);
assert_eq!(link.total(), 1);
assert_eq!(link.restarts, 1);
}
@@ -618,18 +768,20 @@ pub mod tests {
fn import_many_blocks_stops_when_stopping() {
let (_, _, _, block) = prepare_good_block();
let qdata = AsyncImportQueueData::new();
let verifier = Arc::new(PassThroughVerifier(true));
qdata.is_stopping.store(true, Ordering::SeqCst);
assert!(!import_many_blocks(
&mut TestLink::new(),
Some(&qdata),
(BlockOrigin::File, vec![block.clone(), block]),
true
verifier
));
}
#[test]
fn async_import_queue_drops() {
let queue = AsyncImportQueue::new(true);
let verifier = Arc::new(PassThroughVerifier(true));
let queue = BasicQueue::new(verifier);
let service = Arc::new(DummyExecutor);
let chain = Arc::new(test_client::new());
queue.start(Weak::new(), Arc::downgrade(&service), Arc::downgrade(&chain) as Weak<Client<Block>>).unwrap();
+3 -4
View File
@@ -56,7 +56,7 @@ mod config;
mod chain;
mod blocks;
mod on_demand;
mod import_queue;
pub mod import_queue;
pub mod consensus_gossip;
pub mod error;
pub mod message;
@@ -66,12 +66,11 @@ pub mod specialization;
pub mod test;
pub use chain::Client as ClientHandle;
pub use service::{Service, FetchFuture, ConsensusService, BftMessageStream,
TransactionPool, Params, ManageNetwork, SyncProvider};
pub use service::{Service, FetchFuture, TransactionPool, Params, ManageNetwork, SyncProvider};
pub use protocol::{ProtocolStatus, PeerInfo, Context};
pub use sync::{Status as SyncStatus, SyncState};
pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeIndex, ProtocolId, Severity, Protocol};
pub use message::{generic as generic_message, RequestId, BftMessage, LocalizedBftMessage, ConsensusVote, SignedConsensusVote, SignedConsensusMessage, SignedConsensusProposal, Status as StatusMessage};
pub use message::{generic as generic_message, RequestId, Status as StatusMessage};
pub use error::Error;
pub use config::{Roles, ProtocolConfig};
pub use on_demand::{OnDemand, OnDemandService, RemoteResponse};
+8 -97
View File
@@ -22,7 +22,7 @@ pub use self::generic::{
BlockAnnounce, RemoteCallRequest, RemoteReadRequest,
RemoteHeaderRequest, RemoteHeaderResponse,
RemoteChangesRequest, RemoteChangesResponse,
ConsensusVote, SignedConsensusVote, FromBlock
FromBlock
};
/// A unique ID of a request.
@@ -30,7 +30,6 @@ pub type RequestId = u64;
/// Type alias for using the message type using block type parameters.
pub type Message<B> = generic::Message<
B,
<B as BlockT>::Header,
<B as BlockT>::Hash,
<<B as BlockT>::Header as HeaderT>::Number,
@@ -49,11 +48,6 @@ pub type BlockRequest<B> = generic::BlockRequest<
<<B as BlockT>::Header as HeaderT>::Number,
>;
/// Type alias for using the localized bft message type using block type parameters.
pub type LocalizedBftMessage<B> = generic::LocalizedBftMessage<
B,
<B as BlockT>::Hash,
>;
/// Type alias for using the BlockData type using block type parameters.
pub type BlockData<B> = generic::BlockData<
@@ -69,24 +63,6 @@ pub type BlockResponse<B> = generic::BlockResponse<
<B as BlockT>::Extrinsic,
>;
/// Type alias for using the BftMessage type using block type parameters.
pub type BftMessage<B> = generic::BftMessage<
B,
<B as BlockT>::Hash,
>;
/// Type alias for using the SignedConsensusProposal type using block type parameters.
pub type SignedConsensusProposal<B> = generic::SignedConsensusProposal<
B,
<B as BlockT>::Hash,
>;
/// Type alias for using the SignedConsensusProposal type using block type parameters.
pub type SignedConsensusMessage<B> = generic::SignedConsensusProposal<
B,
<B as BlockT>::Hash,
>;
/// A set of transactions.
pub type Transactions<E> = Vec<E>;
@@ -148,13 +124,14 @@ pub struct RemoteReadResponse {
/// Generic types.
pub mod generic {
use primitives::{AuthorityId, ed25519};
use runtime_primitives::bft::Justification;
use runtime_primitives::Justification;
use service::Roles;
use super::{
BlockAttributes, RemoteCallResponse, RemoteReadResponse,
RequestId, Transactions, Direction
};
/// Consensus is opaque to us
pub type ConsensusMessage = Vec<u8>;
/// Block data sent in the response.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
@@ -170,7 +147,7 @@ pub mod generic {
/// Block message queue if requested.
pub message_queue: Option<Vec<u8>>,
/// Justification if requested.
pub justification: Option<Justification<Hash>>,
pub justification: Option<Justification>,
}
/// Identifies starting point of a block sequence.
@@ -182,75 +159,9 @@ pub mod generic {
Number(Number),
}
/// Communication that can occur between participants in consensus.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub enum BftMessage<Block, Hash> {
/// A consensus message (proposal or vote)
Consensus(SignedConsensusMessage<Block, Hash>),
/// Auxiliary communication (just proof-of-lock for now).
Auxiliary(Justification<Hash>),
}
/// BFT Consensus message with parent header hash attached to it.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub struct LocalizedBftMessage<Block, Hash> {
/// Consensus message.
pub message: BftMessage<Block, Hash>,
/// Parent header hash.
pub parent_hash: Hash,
}
/// A localized proposal message. Contains two signed pieces of data.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub struct SignedConsensusProposal<Block, Hash> {
/// The round number.
pub round_number: u32,
/// The proposal sent.
pub proposal: Block,
/// The digest of the proposal.
pub digest: Hash,
/// The sender of the proposal
pub sender: AuthorityId,
/// The signature on the message (propose, round number, digest)
pub digest_signature: ed25519::Signature,
/// The signature on the message (propose, round number, proposal)
pub full_signature: ed25519::Signature,
}
/// A localized vote message, including the sender.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub struct SignedConsensusVote<H> {
/// The message sent.
pub vote: ConsensusVote<H>,
/// The sender of the message
pub sender: AuthorityId,
/// The signature of the message.
pub signature: ed25519::Signature,
}
/// Votes during a consensus round.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub enum ConsensusVote<H> {
/// Prepare to vote for proposal with digest D.
Prepare(u32, H),
/// Commit to proposal with digest D..
Commit(u32, H),
/// Propose advancement to a new round.
AdvanceRound(u32),
}
/// A localized message.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub enum SignedConsensusMessage<Block, Hash> {
/// A proposal.
Propose(SignedConsensusProposal<Block, Hash>),
/// A vote.
Vote(SignedConsensusVote<Hash>),
}
/// A network message.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)]
pub enum Message<Block, Header, Hash, Number, Extrinsic> {
pub enum Message<Header, Hash, Number, Extrinsic> {
/// Status packet.
Status(Status<Hash, Number>),
/// Block request.
@@ -261,8 +172,8 @@ pub mod generic {
BlockAnnounce(BlockAnnounce<Header>),
/// Transactions.
Transactions(Transactions<Extrinsic>),
/// BFT Consensus statement.
BftMessage(LocalizedBftMessage<Block, Hash>),
/// Consensus protocol message.
Consensus(Hash, ConsensusMessage), // topic, opaque Vec<u8>
/// Remote method call request.
RemoteCallRequest(RemoteCallRequest<Hash>),
/// Remote method call response.
+17 -11
View File
@@ -20,7 +20,7 @@ use std::sync::Arc;
use std::time;
use parking_lot::RwLock;
use rustc_hex::ToHex;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, As, Zero};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As, Zero};
use runtime_primitives::generic::BlockId;
use network_libp2p::{NodeIndex, Severity};
use codec::{Encode, Decode};
@@ -181,15 +181,15 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context<B> for ProtocolContext<'a, B,
pub(crate) struct ContextData<B: BlockT, H: ExHashT> {
// All connected peers
peers: RwLock<HashMap<NodeIndex, Peer<B, H>>>,
chain: Arc<Client<B>>,
pub chain: Arc<Client<B>>,
}
impl<B: BlockT, S: Specialization<B>, H: ExHashT> Protocol<B, S, H> {
/// Create a new instance.
pub fn new(
pub fn new<I: 'static + ImportQueue<B>>(
config: ProtocolConfig,
chain: Arc<Client<B>>,
import_queue: Arc<ImportQueue<B>>,
import_queue: Arc<I>,
on_demand: Option<Arc<OnDemandService<B>>>,
transaction_pool: Arc<TransactionPool<H, B>>,
specialization: S,
@@ -373,7 +373,19 @@ impl<B: BlockT, S: Specialization<B>, H: ExHashT> Protocol<B, S, H> {
trace!(target: "sync", "BlockResponse {} from {} with {} blocks{}",
response.id, peer, response.blocks.len(), blocks_range);
self.sync.write().on_block_data(&mut ProtocolContext::new(&self.context_data, io), peer, request, response);
// import_queue.import_blocks also acquires sync.write();
// Break the cycle by doing these separately from the outside;
let new_blocks = {
let mut sync = self.sync.write();
sync.on_block_data(&mut ProtocolContext::new(&self.context_data, io), peer, request, response)
};
if let Some((origin, new_blocks)) = new_blocks {
let import_queue = self.sync.read().import_queue();
import_queue.import_blocks(origin, new_blocks);
}
}
/// Perform time based maintenance.
@@ -704,12 +716,6 @@ fn send_message<B: BlockT, H: ExHashT>(peers: &RwLock<HashMap<NodeIndex, Peer<B,
io.send(who, message.encode());
}
/// Hash a message.
pub(crate) fn hash_message<B: BlockT>(message: &Message<B>) -> B::Hash {
let data = message.encode();
HashFor::<B>::hash(&data)
}
/// Construct a simple protocol that is composed of several sub protocols.
/// Each "sub protocol" needs to implement `Specialization` and needs to provide a `new()` function.
/// For more fine grained implementations, this macro is not usable.
+9 -20
View File
@@ -18,7 +18,7 @@ use std::collections::HashMap;
use std::sync::Arc;
use std::{io, thread};
use std::time::Duration;
use futures::{self, Future, Stream, stream, sync::{oneshot, mpsc}};
use futures::{self, Future, Stream, stream, sync::oneshot};
use parking_lot::Mutex;
use network_libp2p::{ProtocolId, PeerId, NetworkConfiguration, ErrorKind};
use network_libp2p::{start_service, Service as NetworkService, ServiceEvent as NetworkServiceEvent};
@@ -28,17 +28,14 @@ use protocol::{self, Protocol, ProtocolContext, Context, ProtocolStatus};
use config::{ProtocolConfig};
use error::Error;
use chain::Client;
use message::LocalizedBftMessage;
use specialization::Specialization;
use on_demand::OnDemandService;
use import_queue::AsyncImportQueue;
use import_queue::ImportQueue;
use runtime_primitives::traits::{Block as BlockT};
use tokio::{runtime::Runtime, timer::Interval};
/// Type that represents fetch completion future.
pub type FetchFuture = oneshot::Receiver<Vec<u8>>;
/// Type that represents bft messages stream.
pub type BftMessageStream<B> = mpsc::UnboundedReceiver<LocalizedBftMessage<B>>;
const TICK_TIMEOUT: Duration = Duration::from_millis(1000);
const PROPAGATE_TIMEOUT: Duration = Duration::from_millis(5000);
@@ -90,18 +87,6 @@ pub trait TransactionPool<H: ExHashT, B: BlockT>: Send + Sync {
fn on_broadcasted(&self, propagations: HashMap<H, Vec<String>>);
}
/// ConsensusService
pub trait ConsensusService<B: BlockT>: Send + Sync {
/// Maintain connectivity to given addresses.
fn connect_to_authorities(&self, addresses: &[String]);
/// Get BFT message stream for messages corresponding to consensus on given
/// parent hash.
fn bft_messages(&self, parent_hash: B::Hash) -> BftMessageStream<B>;
/// Send out a BFT message.
fn send_bft_message(&self, message: LocalizedBftMessage<B>);
}
/// Service able to execute closure in the network context.
pub trait ExecuteInContext<B: BlockT>: Send + Sync {
/// Execute closure in network context.
@@ -140,10 +125,13 @@ pub struct Service<B: BlockT + 'static, S: Specialization<B>, H: ExHashT> {
impl<B: BlockT + 'static, S: Specialization<B>, H: ExHashT> Service<B, S, H> {
/// Creates and register protocol with the network service
pub fn new(params: Params<B, S, H>, protocol_id: ProtocolId) -> Result<Arc<Service<B, S, H>>, Error> {
pub fn new<I: 'static + ImportQueue<B>>(
params: Params<B, S, H>,
protocol_id: ProtocolId,
import_queue: I,
) -> Result<Arc<Service<B, S, H>>, Error> {
let chain = params.chain.clone();
// TODO: non-instant finality.
let import_queue = Arc::new(AsyncImportQueue::new(true));
let import_queue = Arc::new(import_queue);
let handler = Arc::new(Protocol::new(
params.config,
params.chain,
@@ -155,6 +143,7 @@ impl<B: BlockT + 'static, S: Specialization<B>, H: ExHashT> Service<B, S, H> {
let versions = [(protocol::CURRENT_VERSION as u8)];
let registered = RegisteredProtocol::new(protocol_id, &versions[..]);
let (thread, network) = start_thread(params.network_config, handler.clone(), registered)?;
let sync = Arc::new(Service {
network,
protocol_id,
+14 -9
View File
@@ -171,7 +171,13 @@ impl<B: BlockT> ChainSync<B> {
}
}
pub(crate) fn on_block_data(&mut self, protocol: &mut Context<B>, who: NodeIndex, _request: message::BlockRequest<B>, response: message::BlockResponse<B>) {
pub(crate) fn on_block_data(
&mut self,
protocol: &mut Context<B>,
who: NodeIndex,
_request: message::BlockRequest<B>,
response: message::BlockResponse<B>
) -> Option<(BlockOrigin, Vec<blocks::BlockData<B>>)> {
let new_blocks = if let Some(ref mut peer) = self.peers.get_mut(&who) {
match peer.state {
PeerSyncState::DownloadingNew(start_block) => {
@@ -184,7 +190,7 @@ impl<B: BlockT> ChainSync<B> {
PeerSyncState::DownloadingStale(_) => {
peer.state = PeerSyncState::Available;
response.blocks.into_iter().map(|b| blocks::BlockData {
origin: who,
origin: Some(who),
block: b
}).collect()
},
@@ -207,23 +213,23 @@ impl<B: BlockT> ChainSync<B> {
let n = n - As::sa(1);
peer.state = PeerSyncState::AncestorSearch(n);
Self::request_ancestry(protocol, who, n);
return;
return None;
},
Ok(_) => { // genesis mismatch
trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who);
protocol.report_peer(who, Severity::Bad("Ancestry search: genesis mismatch for peer"));
return;
return None;
},
Err(e) => {
protocol.report_peer(who, Severity::Useless(&format!("Error answering legitimate blockchain query: {:?}", e)));
return;
return None;
}
}
},
None => {
trace!(target:"sync", "Invalid response when searching for ancestor from {}", who);
protocol.report_peer(who, Severity::Bad("Invalid response when searching for ancestor"));
return;
return None;
}
}
},
@@ -236,7 +242,6 @@ impl<B: BlockT> ChainSync<B> {
let best_seen = self.best_seen_block();
let is_best = new_blocks.first().and_then(|b| b.block.header.as_ref()).map(|h| best_seen.as_ref().map_or(false, |n| h.number() >= n));
let origin = if is_best.unwrap_or_default() { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync };
let import_queue = self.import_queue.clone();
if let Some((hash, number)) = new_blocks.last()
.and_then(|b| b.block.header.as_ref().map(|h|(b.block.hash.clone(), *h.number())))
{
@@ -245,8 +250,8 @@ impl<B: BlockT> ChainSync<B> {
self.best_queued_hash = hash;
}
}
import_queue.import_blocks(self, protocol, (origin, new_blocks));
self.maintain_sync(protocol);
Some((origin, new_blocks))
}
pub fn maintain_sync(&mut self, protocol: &mut Context<B>) {
@@ -263,7 +268,7 @@ impl<B: BlockT> ChainSync<B> {
}
// Update common blocks
for (_, peer) in self.peers.iter_mut() {
trace!("Updating peer info ours={}, theirs={}", number, peer.best_number);
trace!(target: "sync", "Updating peer info ours={}, theirs={}", number, peer.best_number);
if peer.best_number >= number {
peer.common_number = number;
peer.common_hash = *hash;
+51 -27
View File
@@ -27,34 +27,40 @@ use client;
use client::block_builder::BlockBuilder;
use runtime_primitives::generic::BlockId;
use io::SyncIo;
use protocol::{Context, Protocol};
use protocol::{Context, Protocol, ProtocolContext};
use primitives::{Blake2Hasher};
use config::ProtocolConfig;
use service::TransactionPool;
use network_libp2p::{NodeIndex, PeerId, Severity};
use keyring::Keyring;
use codec::{Encode, Decode};
use import_queue::SyncImportQueue;
use codec::Encode;
use import_queue::{SyncImportQueue, PassThroughVerifier};
use test_client::{self, TestClient};
use specialization::Specialization;
use consensus_gossip::ConsensusGossip;
use import_queue::ImportQueue;
use service::ExecuteInContext;
pub use test_client::runtime::{Block, Hash, Transfer, Extrinsic};
/// Test-only executor: wraps the shared protocol handle (`.0`) and the
/// outbound packet queue (`.1`) so closures can be run against a fresh
/// `ProtocolContext`.
struct DummyContextExecutor(Arc<Protocol<Block, DummySpecialization, Hash>>, Arc<RwLock<VecDeque<TestPacket>>>);
// SAFETY(review): these unconditional impls bypass the compiler's auto-trait
// analysis; presumably acceptable for the test harness's usage — TODO confirm
// no cross-thread access to non-Sync interior state actually occurs.
unsafe impl Send for DummyContextExecutor {}
unsafe impl Sync for DummyContextExecutor {}

impl ExecuteInContext<Block> for DummyContextExecutor {
	/// Build a `TestIo`/`ProtocolContext` pair over the wrapped queue and
	/// protocol, then hand the context to `closure`.
	fn execute_in_context<F: Fn(&mut Context<Block>)>(&self, closure: F) {
		let mut io = TestIo::new(&self.1, None);
		let mut context = ProtocolContext::new(&self.0.context_data(), &mut io);
		closure(&mut context);
	}
}
/// The test specialization.
pub struct DummySpecialization {
	/// Consensus gossip handle.
	pub gossip: ConsensusGossip<Block>,
}

/// Envelope for gossip payloads exchanged in tests (codec-encoded).
#[derive(Encode, Decode)]
pub struct GossipMessage {
	/// The topic to classify under.
	pub topic: Hash,
	/// The data to send.
	pub data: Vec<u8>,
}
impl Specialization<Block> for DummySpecialization {
fn status(&self) -> Vec<u8> { vec![] }
@@ -66,11 +72,14 @@ impl Specialization<Block> for DummySpecialization {
self.gossip.peer_disconnected(ctx, peer_id);
}
fn on_message(&mut self, ctx: &mut Context<Block>, peer_id: NodeIndex, message: &mut Option<::message::Message<Block>>) {
if let Some(::message::generic::Message::ChainSpecific(data)) = message.take() {
let gossip_message = GossipMessage::decode(&mut &data[..])
.expect("gossip messages all in known format; qed");
self.gossip.on_chain_specific(ctx, peer_id, data, gossip_message.topic)
	/// Handle an incoming protocol message: consensus payloads are forwarded
	/// to the gossip handler; anything else is dropped.
	fn on_message(
		&mut self,
		ctx: &mut Context<Block>,
		peer_id: NodeIndex,
		message: &mut Option<::message::Message<Block>>
	) {
		// Note: `take()` always clears `message`, even when the variant does
		// not match — non-consensus messages are consumed and discarded here.
		if let Some(::message::generic::Message::Consensus(topic, data)) = message.take() {
			self.gossip.on_incoming(ctx, peer_id, topic, data);
		}
	}
}
@@ -128,16 +137,31 @@ pub struct TestPacket {
pub struct Peer {
client: Arc<client::Client<test_client::Backend, test_client::Executor, Block>>,
pub sync: Protocol<Block, DummySpecialization, Hash>,
pub queue: RwLock<VecDeque<TestPacket>>,
pub sync: Arc<Protocol<Block, DummySpecialization, Hash>>,
pub queue: Arc<RwLock<VecDeque<TestPacket>>>,
import_queue: Arc<SyncImportQueue<Block, PassThroughVerifier>>,
executor: Arc<DummyContextExecutor>,
}
impl Peer {
	/// Build a `Peer`, wiring a `DummyContextExecutor` around the shared
	/// protocol handle and packet queue so the import queue can later be
	/// started against them (see `start`).
	fn new(
		client: Arc<client::Client<test_client::Backend, test_client::Executor, Block>>,
		sync: Arc<Protocol<Block, DummySpecialization, Hash>>,
		queue: Arc<RwLock<VecDeque<TestPacket>>>,
		import_queue: Arc<SyncImportQueue<Block, PassThroughVerifier>>,
	) -> Self {
		let executor = Arc::new(DummyContextExecutor(sync.clone(), queue.clone()));
		Peer { client, sync, queue, import_queue, executor}
	}
	/// Called after blockchain has been populated to update current state.
	fn start(&self) {
		// Update the sync state to the latest chain state.
		let info = self.client.info().expect("In-mem client does not fail");
		let header = self.client.header(&BlockId::Hash(info.chain.best_hash)).unwrap().unwrap();
		// Start the import queue with weak handles so it does not keep the
		// sync state, executor, or client alive beyond the test.
		self.import_queue.start(
			Arc::downgrade(&self.sync.sync()),
			Arc::downgrade(&self.executor),
			Arc::downgrade(&self.sync.context_data().chain)).expect("Test ImportQueue always starts");
		// Notify the protocol of the current best block.
		self.sync.on_block_imported(&mut TestIo::new(&self.queue, None), info.chain.best_hash, &header);
	}
@@ -189,8 +213,7 @@ impl Peer {
/// `TestNet::sync_step` needs to be called to ensure it's propagated.
pub fn gossip_message(&self, topic: Hash, data: Vec<u8>) {
self.sync.with_spec(&mut TestIo::new(&self.queue, None), |spec, ctx| {
let message = GossipMessage { topic, data }.encode();
spec.gossip.multicast_chain_specific(ctx, message, topic);
spec.gossip.multicast(ctx, topic, data);
})
}
@@ -284,24 +307,25 @@ impl TestNet {
pub fn add_peer(&mut self, config: &ProtocolConfig) {
let client = Arc::new(test_client::new());
let tx_pool = Arc::new(EmptyTransactionPool);
let import_queue = Arc::new(SyncImportQueue(false));
let import_queue = Arc::new(SyncImportQueue::new(Arc::new(PassThroughVerifier(false))));
let specialization = DummySpecialization {
gossip: ConsensusGossip::new(),
};
let sync = Protocol::new(
config.clone(),
client.clone(),
import_queue,
import_queue.clone(),
None,
tx_pool,
specialization
).unwrap();
self.peers.push(Arc::new(Peer {
sync: sync,
client: client,
queue: RwLock::new(VecDeque::new()),
}));
self.peers.push(Arc::new(Peer::new(
client,
Arc::new(sync),
Arc::new(RwLock::new(VecDeque::new())),
import_queue
)));
}
/// Get reference to peer.
+4 -2
View File
@@ -47,7 +47,7 @@ build_rpc_trait! {
/// Get header and body of a relay chain block.
#[rpc(name = "chain_getBlock")]
fn block(&self, Trailing<Hash>) -> Result<Option<SignedBlock<Header, Extrinsic, Hash>>>;
fn block(&self, Trailing<Hash>) -> Result<Option<SignedBlock<Header, Extrinsic>>>;
/// Get hash of the n-th block in the canon chain.
///
@@ -114,7 +114,9 @@ impl<B, E, Block> ChainApi<Block::Hash, Block::Header, NumberFor<Block>, Block::
Ok(self.client.header(&BlockId::Hash(hash))?)
}
fn block(&self, hash: Trailing<Block::Hash>) -> Result<Option<SignedBlock<Block::Header, Block::Extrinsic, Block::Hash>>> {
fn block(&self, hash: Trailing<Block::Hash>)
-> Result<Option<SignedBlock<Block::Header, Block::Extrinsic>>>
{
let hash = self.unwrap_or_best(hash)?;
Ok(self.client.block(&BlockId::Hash(hash))?)
}
+31 -21
View File
@@ -22,8 +22,10 @@ use serde_json;
use client::BlockOrigin;
use runtime_primitives::generic::{SignedBlock, BlockId};
use runtime_primitives::traits::{As};
use components::{ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis};
use runtime_primitives::traits::{As, Block, Header};
use network::import_queue::{ImportQueue, BlockData};
use network::message;
use components::{self, Components, ServiceFactory, FactoryFullConfiguration, FactoryBlockNumber, RuntimeGenesis};
use new_client;
use codec::{Decode, Encode};
use error;
@@ -33,7 +35,7 @@ use chain_spec::ChainSpec;
pub fn export_blocks<F, E, W>(config: FactoryFullConfiguration<F>, exit: E, mut output: W, from: FactoryBlockNumber<F>, to: Option<FactoryBlockNumber<F>>, json: bool) -> error::Result<()>
where F: ServiceFactory, E: Future<Item=(),Error=()> + Send + 'static, W: Write,
{
let client = new_client::<F>(config)?;
let client = new_client::<F>(&config)?;
let mut block = from;
let last = match to {
@@ -85,7 +87,8 @@ pub fn export_blocks<F, E, W>(config: FactoryFullConfiguration<F>, exit: E, mut
pub fn import_blocks<F, E, R>(config: FactoryFullConfiguration<F>, exit: E, mut input: R) -> error::Result<()>
where F: ServiceFactory, E: Future<Item=(),Error=()> + Send + 'static, R: Read,
{
let client = new_client::<F>(config)?;
let client = new_client::<F>(&config)?;
let queue = components::FullComponents::<F>::build_import_queue(&config, client.clone())?;
let (exit_send, exit_recv) = std::sync::mpsc::channel();
::std::thread::spawn(move || {
@@ -95,28 +98,35 @@ pub fn import_blocks<F, E, R>(config: FactoryFullConfiguration<F>, exit: E, mut
let count: u32 = Decode::decode(&mut input).ok_or("Error reading file")?;
info!("Importing {} blocks", count);
let mut block = 0;
for _ in 0 .. count {
let mut block_count = 0;
for b in 0 .. count {
if exit_recv.try_recv().is_ok() {
break;
}
match SignedBlock::decode(&mut input) {
Some(block) => {
// TODO: non-instant finality.
let header = client.check_justification(block.block.header, block.justification.into())?;
client.import_block(BlockOrigin::File, header, Some(block.block.extrinsics), true)?;
},
None => {
warn!("Error reading block data.");
break;
}
if let Some(signed) = SignedBlock::<<F::Block as Block>::Header, <F::Block as Block>::Extrinsic>::decode(&mut input) {
let header = signed.block.header;
let hash = header.hash();
let block = message::BlockData::<F::Block> {
hash: hash,
justification: Some(signed.justification),
header: Some(header),
body: Some(signed.block.extrinsics),
receipt: None,
message_queue: None
};
// import queue handles verification and importing it into the client
queue.import_blocks(BlockOrigin::File, vec![BlockData::<F::Block> { block, origin: None }]);
} else {
warn!("Error reading block data at {}.", b);
break;
}
block += 1;
if block % 1000 == 0 {
info!("#{}", block);
block_count = b;
if b % 1000 == 0 {
info!("#{}", b);
}
}
info!("Imported {} blocks. Best: #{}", block, client.info()?.chain.best_number);
info!("Imported {} blocks. Best: #{}", block_count, client.info()?.chain.best_number);
Ok(())
}
@@ -125,7 +135,7 @@ pub fn import_blocks<F, E, R>(config: FactoryFullConfiguration<F>, exit: E, mut
pub fn revert_chain<F>(config: FactoryFullConfiguration<F>, blocks: FactoryBlockNumber<F>) -> error::Result<()>
where F: ServiceFactory,
{
let client = new_client::<F>(config)?;
let client = new_client::<F>(&config)?;
let reverted = client.revert(blocks)?;
let info = client.info()?.chain;
info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash);
+7
View File
@@ -87,6 +87,7 @@ struct ChainSpecFile {
pub boot_nodes: Vec<String>,
pub telemetry_url: Option<String>,
pub protocol_id: Option<String>,
pub consensus_engine: Option<String>,
}
/// A configuration of a chain. Can be used to build a genesis block.
@@ -125,6 +126,10 @@ impl<G: RuntimeGenesis> ChainSpec<G> {
self.spec.protocol_id.as_ref().map(String::as_str)
}
pub fn consensus_engine(&self) -> Option<&str> {
self.spec.consensus_engine.as_ref().map(String::as_str)
}
/// Parse json content into a `ChainSpec`
pub fn from_embedded(json: &'static [u8]) -> Result<Self, String> {
let spec = json::from_slice(json).map_err(|e| format!("Error parsing spec file: {}", e))?;
@@ -152,6 +157,7 @@ impl<G: RuntimeGenesis> ChainSpec<G> {
boot_nodes: Vec<String>,
telemetry_url: Option<&str>,
protocol_id: Option<&str>,
consensus_engine: Option<&str>,
) -> Self
{
let spec = ChainSpecFile {
@@ -160,6 +166,7 @@ impl<G: RuntimeGenesis> ChainSpec<G> {
boot_nodes: boot_nodes,
telemetry_url: telemetry_url.map(str::to_owned),
protocol_id: protocol_id.map(str::to_owned),
consensus_engine: consensus_engine.map(str::to_owned),
};
ChainSpec {
spec,
+52
View File
@@ -139,6 +139,8 @@ pub trait ServiceFactory: 'static + Sized {
type FullService: Deref<Target = Service<FullComponents<Self>>> + Send + Sync + 'static;
/// Extended light service type.
type LightService: Deref<Target = Service<LightComponents<Self>>> + Send + Sync + 'static;
/// ImportQueue
type ImportQueue: network::import_queue::ImportQueue<Self::Block> + 'static;
//TODO: replace these with a constructor trait. that TransactionPool implements.
/// Extrinsic pool constructor for the full client.
@@ -158,6 +160,36 @@ pub trait ServiceFactory: 'static + Sized {
/// Build light service.
fn new_light(config: FactoryFullConfiguration<Self>, executor: TaskExecutor)
-> Result<Self::LightService, error::Error>;
/// ImportQueue for a full client
fn build_full_import_queue(
config: &FactoryFullConfiguration<Self>,
_client: Arc<FullClient<Self>>
) -> Result<Self::ImportQueue, error::Error> {
if let Some(name) = config.chain_spec.consensus_engine() {
match name {
_ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into())
}
} else {
Err("Chain Specification doesn't contain any consensus_engine name".into())
}
}
/// ImportQueue for a light client
fn build_light_import_queue(
config: &FactoryFullConfiguration<Self>,
_client: Arc<LightClient<Self>>
) -> Result<Self::ImportQueue, error::Error> {
if let Some(name) = config.chain_spec.consensus_engine() {
match name {
_ => Err(format!("Chain Specification defines unknown consensus engine '{}'", name).into())
}
} else {
Err("Chain Specification doesn't contain any consensus_engine name".into())
}
}
}
/// A collection of types and function to generalise over full / light client type.
@@ -187,6 +219,12 @@ pub trait Components: 'static {
/// Create extrinsic pool.
fn build_transaction_pool(config: TransactionPoolOptions, client: Arc<ComponentClient<Self>>)
-> Result<TransactionPool<Self::TransactionPoolApi>, error::Error>;
	/// Build the import-queue instance used by this set of components.
	fn build_import_queue(
		config: &FactoryFullConfiguration<Self::Factory>,
		client: Arc<ComponentClient<Self>>
	) -> Result<<Self::Factory as ServiceFactory>::ImportQueue, error::Error>;
}
/// A struct that implement `Components` for the full client.
@@ -228,6 +266,13 @@ impl<Factory: ServiceFactory> Components for FullComponents<Factory> {
{
Factory::build_full_transaction_pool(config, client)
}
	/// Delegate import-queue construction to the factory's full-client builder.
	fn build_import_queue(
		config: &FactoryFullConfiguration<Self::Factory>,
		client: Arc<ComponentClient<Self>>
	) -> Result<<Self::Factory as ServiceFactory>::ImportQueue, error::Error> {
		Factory::build_full_import_queue(config, client)
	}
}
/// A struct that implement `Components` for the light client.
@@ -270,4 +315,11 @@ impl<Factory: ServiceFactory> Components for LightComponents<Factory> {
{
Factory::build_light_transaction_pool(config, client)
}
	/// Delegate import-queue construction to the factory's light-client builder.
	fn build_import_queue(
		config: &FactoryFullConfiguration<Self::Factory>,
		client: Arc<ComponentClient<Self>>
	) -> Result<<Self::Factory as ServiceFactory>::ImportQueue, error::Error> {
		Factory::build_light_import_queue(config, client)
	}
}
+4 -3
View File
@@ -102,12 +102,12 @@ pub struct Service<Components: components::Components> {
}
/// Creates bare client without any networking.
pub fn new_client<Factory: components::ServiceFactory>(config: FactoryFullConfiguration<Factory>)
pub fn new_client<Factory: components::ServiceFactory>(config: &FactoryFullConfiguration<Factory>)
-> Result<Arc<ComponentClient<components::FullComponents<Factory>>>, error::Error>
{
let executor = NativeExecutor::new();
let (client, _) = components::FullComponents::<Factory>::build_client(
&config,
config,
executor,
)?;
Ok(client)
@@ -149,6 +149,7 @@ impl<Components> Service<Components>
};
let (client, on_demand) = Components::build_client(&config, executor)?;
let import_queue = Components::build_import_queue(&config, client.clone())?;
let best_header = client.best_block_header()?;
let version = config.full_version();
@@ -185,7 +186,7 @@ impl<Components> Service<Components>
let id_len = protocol_id_full.len().min(protocol_id.len());
&mut protocol_id[0..id_len].copy_from_slice(&protocol_id_full[0..id_len]);
let network = network::Service::new(network_params, protocol_id)?;
let network = network::Service::new(network_params, protocol_id, import_queue)?;
on_demand.map(|on_demand| on_demand.set_service_link(Arc::downgrade(&network)));
{
+4 -4
View File
@@ -47,7 +47,7 @@ use service::{
FactoryExtrinsic,
};
use network::{NetworkConfiguration, NonReservedPeerMode, Protocol, SyncProvider, ManageNetwork};
use client::{BlockOrigin, JustifiedHeader};
use client::ImportBlock;
use sr_primitives::traits::As;
use sr_primitives::generic::BlockId;
@@ -216,7 +216,7 @@ pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) {
pub fn sync<F, B, E>(spec: FactoryChainSpec<F>, block_factory: B, extrinsic_factory: E)
where
F: ServiceFactory,
B: Fn(&F::FullService) -> (JustifiedHeader<F::Block>, Option<Vec<FactoryExtrinsic<F>>>),
B: Fn(&F::FullService) -> ImportBlock<F::Block>,
E: Fn(&F::FullService) -> FactoryExtrinsic<F>,
{
const NUM_NODES: u32 = 10;
@@ -230,8 +230,8 @@ where
if i % 128 == 0 {
info!("Generating #{}", i);
}
let (header, body) = block_factory(&first_service);
first_service.client().import_block(BlockOrigin::File, header, body, true).expect("Error importing test block");
let import_data = block_factory(&first_service);
first_service.client().import_block(import_data, None).expect("Error importing test block");
}
first_service.network().node_id().unwrap()
};
-195
View File
@@ -1,195 +0,0 @@
// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Message formats for the BFT consensus layer.
use rstd::prelude::*;
use codec::{Decode, Encode, Input, Output};
use substrate_primitives::{AuthorityId, Signature};
/// Type alias for extracting message type from block.
pub type ActionFor<B> = Action<B, <B as ::traits::Block>::Hash>;

/// Actions which can be taken during the BFT process.
///
/// NOTE(review): the leading `u32` of each variant is presumably the round
/// number the action belongs to — TODO confirm against the consensus driver.
#[derive(Clone, PartialEq, Eq, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub enum Action<Block, H> {
	/// Proposal of a block candidate.
	#[codec(index = "1")]
	Propose(u32, Block),
	/// Proposal header of a block candidate. Accompanies any proposal,
	/// but is used for misbehavior reporting since blocks themselves are big.
	#[codec(index = "2")]
	ProposeHeader(u32, H),
	/// Preparation to commit for a candidate.
	#[codec(index = "3")]
	Prepare(u32, H),
	/// Vote to commit to a candidate.
	#[codec(index = "4")]
	Commit(u32, H),
	/// Vote to advance round after inactive primary.
	#[codec(index = "5")]
	AdvanceRound(u32),
}
/// Type alias for extracting message type from block.
pub type MessageFor<B> = Message<B, <B as ::traits::Block>::Hash>;

/// Messages exchanged between participants in the BFT consensus.
///
/// A message is always anchored to a parent header hash, scoping the
/// contained `Action` to one consensus instance.
#[derive(Clone, PartialEq, Eq, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub struct Message<Block, Hash> {
	/// The parent header hash this action is relative to.
	pub parent: Hash,
	/// The action being broadcasted.
	pub action: Action<Block, Hash>,
}
/// Justification of a block.
#[derive(Clone, PartialEq, Eq, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub struct Justification<H> {
	/// The round consensus was reached in.
	pub round_number: u32,
	/// The hash of the header justified.
	pub hash: H,
	/// The signatures and signers of the hash.
	///
	/// Each entry pairs an authority with its signature; no signature
	/// threshold is enforced by this type itself.
	pub signatures: Vec<(AuthorityId, Signature)>
}
// Single-byte wire code representing a misbehavior kind.
#[repr(i8)]
enum MisbehaviorCode {
	/// BFT: double prepare.
	BftDoublePrepare = 0x11,
	/// BFT: double commit.
	BftDoubleCommit = 0x12,
}

impl MisbehaviorCode {
	/// Map a raw wire byte back to its `MisbehaviorCode`; unrecognized
	/// bytes yield `None`.
	fn from_i8(x: i8) -> Option<Self> {
		if x == MisbehaviorCode::BftDoublePrepare as i8 {
			Some(MisbehaviorCode::BftDoublePrepare)
		} else if x == MisbehaviorCode::BftDoubleCommit as i8 {
			Some(MisbehaviorCode::BftDoubleCommit)
		} else {
			None
		}
	}
}
/// Misbehavior kinds.
///
/// Fields follow the wire layout of the manual `Encode`/`Decode` impls:
/// round number, then the two conflicting (hash, signature) pairs —
/// presumably produced by the same authority (see `MisbehaviorReport::target`)
/// — TODO confirm.
#[derive(Clone, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub enum MisbehaviorKind<Hash> {
	/// BFT: double prepare.
	BftDoublePrepare(u32, (Hash, Signature), (Hash, Signature)),
	/// BFT: double commit.
	BftDoubleCommit(u32, (Hash, Signature), (Hash, Signature)),
}
impl<Hash: Encode> Encode for MisbehaviorKind<Hash> {
	/// Wire format: one discriminant byte (`MisbehaviorCode`), the round
	/// number, then the two conflicting (hash, signature) pairs in order.
	/// Both variants share this layout, so the arms only pick the code.
	fn encode_to<T: Output>(&self, dest: &mut T) {
		let (code, round, first, second) = match *self {
			MisbehaviorKind::BftDoublePrepare(ref round, ref a, ref b) =>
				(MisbehaviorCode::BftDoublePrepare, round, a, b),
			MisbehaviorKind::BftDoubleCommit(ref round, ref a, ref b) =>
				(MisbehaviorCode::BftDoubleCommit, round, a, b),
		};
		dest.push(&(code as i8));
		dest.push(round);
		dest.push(&first.0);
		dest.push(&first.1);
		dest.push(&second.0);
		dest.push(&second.1);
	}
}
impl<Hash: Decode> Decode for MisbehaviorKind<Hash> {
	/// Decode the wire layout written by `encode_to`: discriminant byte,
	/// round number, then the two (hash, signature) pairs. Returns `None`
	/// on truncated input or an unknown discriminant.
	fn decode<I: Input>(input: &mut I) -> Option<Self> {
		Some(match i8::decode(input).and_then(MisbehaviorCode::from_i8)? {
			MisbehaviorCode::BftDoublePrepare => {
				MisbehaviorKind::BftDoublePrepare(
					u32::decode(input)?,
					(Hash::decode(input)?, Signature::decode(input)?),
					(Hash::decode(input)?, Signature::decode(input)?),
				)
			}
			MisbehaviorCode::BftDoubleCommit => {
				MisbehaviorKind::BftDoubleCommit(
					u32::decode(input)?,
					(Hash::decode(input)?, Signature::decode(input)?),
					(Hash::decode(input)?, Signature::decode(input)?),
				)
			}
		})
	}
}
/// A report of misbehavior by an authority.
#[derive(Clone, PartialEq, Eq, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub struct MisbehaviorReport<Hash, Number> {
	/// The parent hash of the block where the misbehavior occurred.
	pub parent_hash: Hash,
	/// The parent number of the block where the misbehavior occurred.
	pub parent_number: Number,
	/// The authority who misbehaved.
	pub target: AuthorityId,
	/// The misbehavior kind.
	pub misbehavior: MisbehaviorKind<Hash>,
}
#[cfg(test)]
mod test {
	use super::*;
	use substrate_primitives::H256;

	/// Encode the report and assert it decodes back to an equal value.
	fn assert_roundtrip(report: MisbehaviorReport<H256, u64>) {
		let encoded = report.encode();
		assert_eq!(MisbehaviorReport::<H256, u64>::decode(&mut &encoded[..]).unwrap(), report);
	}

	#[test]
	fn misbehavior_report_roundtrip() {
		assert_roundtrip(MisbehaviorReport {
			parent_hash: [0; 32].into(),
			parent_number: 999,
			target: [1; 32].into(),
			misbehavior: MisbehaviorKind::BftDoubleCommit(
				511,
				([2; 32].into(), [3; 64].into()),
				([4; 32].into(), [5; 64].into()),
			),
		});

		assert_roundtrip(MisbehaviorReport {
			parent_hash: [0; 32].into(),
			parent_number: 999,
			target: [1; 32].into(),
			misbehavior: MisbehaviorKind::BftDoublePrepare(
				511,
				([2; 32].into(), [3; 64].into()),
				([4; 32].into(), [5; 64].into()),
			),
		});
	}
}
@@ -22,7 +22,7 @@ use std::fmt;
use rstd::prelude::*;
use codec::Codec;
use traits::{self, Member, Block as BlockT, Header as HeaderT};
use bft::Justification;
use ::Justification;
/// Something to identify a block.
#[derive(PartialEq, Eq, Clone)]
@@ -97,9 +97,9 @@ where
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "std", serde(deny_unknown_fields))]
pub struct SignedBlock<Header, Extrinsic, Hash> {
pub struct SignedBlock<H, E> {
/// Full block.
pub block: Block<Header, Extrinsic>,
/// Block header justification.
pub justification: Justification<Hash>,
}
pub block: Block<H, E>,
/// Block justification.
pub justification: Justification,
}
+2 -1
View File
@@ -59,9 +59,10 @@ pub mod testing;
pub mod traits;
pub mod generic;
pub mod bft;
pub mod transaction_validity;
pub type Justification = Vec<u8>;
use traits::{Verify, Lazy};
#[cfg(feature = "std")]
+1 -1
View File
@@ -22,7 +22,7 @@ use codec::Codec;
use traits::{self, Checkable, Applyable, BlakeTwo256};
use generic::DigestItem as GenDigestItem;
pub use substrate_primitives::H256;
pub use substrate_primitives::{H256, AuthorityId};
pub type DigestItem = GenDigestItem<H256, u64>;
-2
View File
@@ -4,8 +4,6 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
rhododendron = "0.3"
substrate-bft = { path = "../bft" }
substrate-client = { path = "../client" }
parity-codec = "2.0"
substrate-executor = { path = "../executor" }
+11 -40
View File
@@ -16,14 +16,10 @@
//! Client extension for tests.
use client::{self, Client};
use keyring::Keyring;
use primitives::ed25519;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT};
use client::{self, ImportBlock, Client};
use runtime_primitives::generic::BlockId;
use primitives::Blake2Hasher;
use runtime;
use bft;
/// Extension trait for a test client.
pub trait TestClient {
@@ -43,15 +39,16 @@ impl<B, E> TestClient for Client<B, E, runtime::Block>
E: client::CallExecutor<runtime::Block, Blake2Hasher>
{
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> {
let authorities: [ed25519::Pair; 3] = [
Keyring::Alice.into(),
Keyring::Bob.into(),
Keyring::Charlie.into(),
];
let keys: Vec<&ed25519::Pair> = authorities.iter().collect();
let justification = fake_justify::<runtime::Block>(&block.header, &keys);
let justified = self.check_justification(block.header, justification)?;
self.import_block(origin, justified, Some(block.extrinsics), false)?;
let import = ImportBlock {
origin,
header: block.header,
external_justification: vec![],
internal_justification: vec![],
body: Some(block.extrinsics),
finalized: false,
auxiliary: Vec::new(),
};
self.import_block(import, None)?;
Ok(())
}
@@ -64,29 +61,3 @@ impl<B, E> TestClient for Client<B, E, runtime::Block>
self.block_hash(0).unwrap().unwrap()
}
}
/// Prepare fake justification for the header.
///
/// since we are in the client module we can create falsely justified
/// headers.
/// TODO: remove this in favor of custom verification pipelines for the
/// client
pub fn fake_justify<Block: BlockT>(header: &Block::Header, authorities: &[&ed25519::Pair]) -> bft::UncheckedJustification<Block::Hash> {
let hash = header.hash();
bft::UncheckedJustification::new(
hash,
authorities.iter().map(|key| {
let msg = bft::sign_message::<Block>(
::rhododendron::Vote::Commit(1, hash).into(),
key,
header.parent_hash().clone(),
);
match msg {
::rhododendron::LocalizedMessage::Vote(vote) => vote.signature,
_ => panic!("signing vote leads to signed vote"),
}
}).collect(),
1,
)
}
+1 -3
View File
@@ -20,8 +20,6 @@
#![warn(missing_docs)]
extern crate rhododendron;
extern crate substrate_bft as bft;
extern crate parity_codec as codec;
extern crate substrate_primitives as primitives;
extern crate sr_primitives as runtime_primitives;
@@ -35,7 +33,7 @@ pub mod client_ext;
pub mod trait_tests;
mod block_builder_ext;
pub use client_ext::{TestClient, fake_justify};
pub use client_ext::TestClient;
pub use block_builder_ext::BlockBuilderExt;
pub use client::blockchain;
pub use client::backend;
-1
View File
@@ -15,7 +15,6 @@ parking_lot = "0.4"
rhododendron = "0.3"
sr-primitives = { path = "../../core/sr-primitives" }
srml-system = { path = "../../srml/system" }
substrate-bft = { path = "../../core/bft" }
substrate-client = { path = "../../core/client" }
substrate-primitives = { path = "../../core/primitives" }
substrate-transaction-pool = { path = "../../core/transaction-pool" }
+2 -1
View File
@@ -16,6 +16,8 @@
//! This service uses BFT consensus provided by the substrate.
#![cfg(feature="rhd")]
extern crate node_runtime;
extern crate node_primitives;
@@ -68,7 +70,6 @@ pub use service::Service;
mod evaluation;
mod error;
mod offline_tracker;
mod service;
/// Shared offline validator tracker.
+5 -5
View File
@@ -22,7 +22,7 @@ use std::thread;
use std::time::{Duration, Instant};
use std::sync::Arc;
use bft::{self, BftService};
use rhd::{self, BftService};
use client::{BlockchainEvents, ChainHead, BlockBody};
use ed25519;
use futures::prelude::*;
@@ -47,11 +47,11 @@ fn start_bft<F, C, Block>(
header: <Block as BlockT>::Header,
bft_service: Arc<BftService<Block, F, C>>,
) where
F: bft::Environment<Block> + 'static,
C: bft::BlockImport<Block> + bft::Authorities<Block> + 'static,
F: rhd::Environment<Block> + 'static,
C: rhd::BlockImport<Block> + rhd::Authorities<Block> + 'static,
F::Error: ::std::fmt::Debug,
<F::Proposer as bft::Proposer<Block>>::Error: ::std::fmt::Display + Into<error::Error>,
<F as bft::Environment<Block>>::Error: ::std::fmt::Display,
<F::Proposer as rhd::Proposer<Block>>::Error: ::std::fmt::Display + Into<error::Error>,
<F as rhd::Environment<Block>>::Error: ::std::fmt::Display,
Block: BlockT,
{
let mut handle = LocalThreadHandle::current();
+1 -1
View File
@@ -7,7 +7,7 @@ description = "Substrate node networking protocol"
[dependencies]
node-consensus = { path = "../consensus" }
node-primitives = { path = "../primitives" }
substrate-bft = { path = "../../core/bft" }
substrate-consensus-rhd = { path = "../../core/consensus/rhd" }
substrate-network = { path = "../../core/network" }
substrate-primitives = { path = "../../core/primitives" }
futures = "0.1"
-13
View File
@@ -20,23 +20,10 @@
#![warn(unused_extern_crates)]
extern crate substrate_bft as bft;
#[macro_use]
extern crate substrate_network;
extern crate substrate_primitives;
extern crate node_consensus;
extern crate node_primitives;
extern crate futures;
extern crate tokio;
extern crate rhododendron;
#[macro_use]
extern crate log;
pub mod consensus;
use node_primitives::{Block, Hash};
use substrate_network::consensus_gossip::ConsensusGossip;
+2
View File
@@ -239,6 +239,8 @@ pub type Address = balances::Address<Runtime>;
pub type Header = generic::Header<BlockNumber, BlakeTwo256, Log>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// A Block signed with a Justification
pub type SignedBlock = generic::SignedBlock<Header, UncheckedExtrinsic>;
/// BlockId type as expected by this runtime.
pub type BlockId = generic::BlockId<Block>;
/// Unchecked extrinsic type as expected by this runtime.
File diff suppressed because it is too large Load Diff
-1
View File
@@ -28,7 +28,6 @@ tokio = "0.1.7"
[dev-dependencies]
substrate-service-test = { path = "../../core/service/test" }
substrate-bft = { path = "../../core/bft" }
substrate-test-client = { path = "../../core/test-client" }
substrate-keyring = { path = "../../core/keyring" }
rhododendron = "0.3"
+4 -3
View File
@@ -132,6 +132,7 @@ pub fn staging_testnet_config() -> ChainSpec<GenesisConfig> {
boot_nodes,
Some(STAGING_TELEMETRY_URL.into()),
None,
None,
)
}
@@ -227,7 +228,7 @@ fn development_config_genesis() -> GenesisConfig {
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Development", "development", development_config_genesis, vec![], None, None)
ChainSpec::from_genesis("Development", "development", development_config_genesis, vec![], None, None, None)
}
fn local_testnet_genesis() -> GenesisConfig {
@@ -245,10 +246,10 @@ fn local_testnet_genesis_instant() -> GenesisConfig {
/// Local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None)
ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None, None)
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn integration_test_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None)
ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None, None)
}
+72 -86
View File
@@ -22,44 +22,33 @@ extern crate node_primitives;
extern crate node_runtime;
extern crate node_executor;
extern crate node_network;
extern crate node_consensus as consensus;
extern crate substrate_client as client;
extern crate substrate_network as network;
extern crate substrate_primitives as primitives;
extern crate substrate_service as service;
extern crate substrate_transaction_pool as transaction_pool;
extern crate parity_codec as codec;
extern crate tokio;
#[cfg(test)]
extern crate substrate_service_test as service_test;
#[macro_use]
extern crate log;
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate parking_lot;
#[cfg(test)]
extern crate substrate_bft as bft;
#[cfg(test)]
extern crate substrate_test_client;
#[cfg(test)]
extern crate substrate_keyring as keyring;
#[cfg(test)]
#[cfg(all(test, feature="rhd"))]
extern crate rhododendron as rhd;
extern crate sr_primitives as runtime_primitives;
pub mod chain_spec;
use std::sync::Arc;
use codec::Decode;
use transaction_pool::txpool::{Pool as TransactionPool};
use node_primitives::{Block, Hash, Timestamp, BlockId};
use node_runtime::{GenesisConfig, BlockPeriod, StorageValue, Runtime};
use node_primitives::{Block, Hash};
use node_runtime::GenesisConfig;
use client::Client;
use consensus::AuthoringApi;
use node_network::{Protocol as DemoProtocol, consensus::ConsensusNetwork};
use node_network::Protocol as DemoProtocol;
use tokio::runtime::TaskExecutor;
use service::FactoryFullConfiguration;
use primitives::{Blake2Hasher, storage::StorageKey, twox_128};
use network::import_queue::{BasicQueue, BlockOrigin, ImportBlock, Verifier};
use runtime_primitives::{traits::Block as BlockT};
use primitives::{Blake2Hasher, AuthorityId};
pub use service::{Roles, PruningMode, TransactionPoolOptions, ServiceFactory,
ErrorKind, Error, ComponentBlock, LightComponents, FullComponents};
@@ -71,10 +60,33 @@ pub type ChainSpec = service::ChainSpec<GenesisConfig>;
pub type ComponentClient<C> = Client<<C as Components>::Backend, <C as Components>::Executor, Block>;
pub type NetworkService = network::Service<Block, <Factory as service::ServiceFactory>::NetworkProtocol, Hash>;
/// A verifier that doesn't actually do any checks
pub struct NoneVerifier;
/// This Verifiyer accepts all data as valid
impl<B: BlockT> Verifier<B> for NoneVerifier {
fn verify(
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
Ok((ImportBlock {
origin,
header,
body,
finalized: true,
external_justification: justification,
internal_justification: vec![],
auxiliary: Vec::new(),
}, None))
}
}
/// A collection of type to generalise specific components over full / light client.
pub trait Components: service::Components {
/// Demo API.
type Api: 'static + AuthoringApi + Send + Sync;
type Api: 'static + Send + Sync;
/// Client backend.
type Backend: 'static + client::backend::Backend<Block, Blake2Hasher>;
/// Client executor.
@@ -114,6 +126,8 @@ impl service::ServiceFactory for Factory {
type Configuration = CustomConfiguration;
type FullService = Service<service::FullComponents<Self>>;
type LightService = Service<service::LightComponents<Self>>;
/// instance of import queue for clients
type ImportQueue = BasicQueue<Block, NoneVerifier>;
fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc<service::FullClient<Self>>)
-> Result<TransactionPool<Self::FullTransactionPoolApi>, Error>
@@ -133,6 +147,20 @@ impl service::ServiceFactory for Factory {
Ok(DemoProtocol::new())
}
fn build_full_import_queue(
_config: &FactoryFullConfiguration<Self>,
_client: Arc<service::FullClient<Self>>,
) -> Result<BasicQueue<Block, NoneVerifier>, service::Error> {
Ok(BasicQueue::new(Arc::new(NoneVerifier {})))
}
fn build_light_import_queue(
_config: &FactoryFullConfiguration<Self>,
_client: Arc<service::LightClient<Self>>,
) -> Result<BasicQueue<Block, NoneVerifier>, service::Error> {
Ok(BasicQueue::new(Arc::new(NoneVerifier {})))
}
fn new_light(config: Configuration, executor: TaskExecutor)
-> Result<Service<LightComponents<Factory>>, Error>
{
@@ -146,85 +174,39 @@ impl service::ServiceFactory for Factory {
fn new_full(config: Configuration, executor: TaskExecutor)
-> Result<Service<FullComponents<Factory>>, Error>
{
let is_validator = (config.roles & Roles::AUTHORITY) == Roles::AUTHORITY;
let service = service::Service::<FullComponents<Factory>>::new(config, executor.clone())?;
// Spin consensus service if configured
let consensus = if is_validator {
// Load the first available key
let key = service.keystore().load(&service.keystore().contents()?[0], "")?;
info!("Using authority key {}", key.public());
let client = service.client();
let consensus_net = ConsensusNetwork::new(service.network(), client.clone());
let block_id = BlockId::number(client.info().unwrap().chain.best_number);
// TODO: this needs to be dynamically adjustable
let block_delay = client.storage(&block_id, &StorageKey(twox_128(BlockPeriod::<Runtime>::key()).to_vec()))?
.and_then(|data| Timestamp::decode(&mut data.0.as_slice()))
.unwrap_or_else(|| {
warn!("Block period is missing in the storage.");
5
});
Some(consensus::Service::new(
client.clone(),
client.clone(),
consensus_net,
service.transaction_pool(),
executor,
key,
block_delay,
))
} else {
None
};
// FIXME: Spin consensus service if configured
let consensus = None;
Ok(Service {
inner: service,
_consensus: consensus,
})
}
}
/// Demo service.
pub struct Service<C: Components> {
inner: service::Service<C>,
_consensus: Option<consensus::Service>,
}
/// Creates bare client without any networking.
pub fn new_client(config: Configuration)
-> Result<Arc<service::ComponentClient<FullComponents<Factory>>>, Error>
{
service::new_client::<Factory>(config)
_consensus: Option<bool>, // FIXME: add actual consensus engine
}
impl<C: Components> ::std::ops::Deref for Service<C> {
type Target = service::Service<C>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
/// Creates bare client without any networking.
pub fn new_client(config: Configuration)
-> Result<Arc<service::ComponentClient<FullComponents<Factory>>>, Error>
{
service::new_client::<Factory>(&config)
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use parking_lot::RwLock;
use {service, service_test, Factory, chain_spec};
use consensus::{self, OfflineTracker};
use primitives::ed25519;
use runtime_primitives::traits::BlockNumberToHash;
use runtime_primitives::generic::Era;
use node_primitives::Block;
use bft::{Proposer, Environment};
use node_network::consensus::ConsensusNetwork;
use substrate_test_client::fake_justify;
use node_primitives::BlockId;
use keyring::Keyring;
use node_runtime::{UncheckedExtrinsic, Call, BalancesCall};
use node_primitives::UncheckedExtrinsic as OpaqueExtrinsic;
use codec::{Decode, Encode};
use node_runtime::RawAddress;
use {service_test, Factory, chain_spec};
#[test]
fn test_connectivity() {
@@ -232,7 +214,10 @@ mod tests {
}
#[test]
#[cfg(feature = "rhd")]
fn test_sync() {
use client::{ImportBlock, BlockOrigin};
let alice: Arc<ed25519::Pair> = Arc::new(Keyring::Alice.into());
let bob: Arc<ed25519::Pair> = Arc::new(Keyring::Bob.into());
let validators = vec![alice.public().0.into(), bob.public().0.into()];
@@ -253,9 +238,15 @@ mod tests {
};
let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap();
let block = proposer.propose().expect("Error making test block");
let justification = fake_justify::<Block>(&block.header, &keys);
let justification = service.client().check_justification(block.header, justification).unwrap();
(justification, Some(block.extrinsics))
ImportBlock {
origin: BlockOrigin::File,
external_justification: Vec::new(),
internal_justification: Vec::new(),
finalized: true,
body: Some(block.extrinsics),
header: block.header,
auxiliary: Vec::new(),
}
};
let extrinsic_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69)), Era::immortal(), service.client().genesis_hash());
@@ -271,9 +262,4 @@ mod tests {
service_test::sync::<Factory, _, _>(chain_spec::integration_test_config(), block_factory, extrinsic_factory);
}
#[test]
fn test_consensus() {
service_test::consensus::<Factory>(chain_spec::integration_test_config(), vec!["Alice".into(), "Bob".into()]);
}
}
-1
View File
@@ -22,7 +22,6 @@ std = [
"serde/std",
"serde_derive",
"parity-codec/std",
"parity-codec-derive/std",
"substrate-primitives/std",
"sr-std/std",
"sr-io/std",
+2 -3
View File
@@ -46,7 +46,6 @@ use runtime_support::dispatch::Result;
use runtime_support::storage::StorageValue;
use runtime_support::storage::unhashed::StorageVec;
use primitives::traits::{MaybeSerializeDebug, OnFinalise, Member};
use primitives::bft::MisbehaviorReport;
use substrate_primitives::storage::well_known_keys;
use system::{ensure_signed, ensure_inherent};
@@ -140,7 +139,7 @@ decl_storage! {
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn report_misbehavior(origin, report: MisbehaviorReport<T::Hash, T::BlockNumber>) -> Result;
fn report_misbehavior(origin, report: Vec<u8>) -> Result;
fn note_offline(origin, offline_val_indices: Vec<u32>) -> Result;
fn remark(origin, remark: Vec<u8>) -> Result;
fn set_code(new: Vec<u8>) -> Result;
@@ -169,7 +168,7 @@ impl<T: Trait> Module<T> {
}
/// Report some misbehaviour.
fn report_misbehavior(origin: T::Origin, _report: MisbehaviorReport<T::Hash, T::BlockNumber>) -> Result {
fn report_misbehavior(origin: T::Origin, _report: Vec<u8>) -> Result {
ensure_signed(origin)?;
// TODO.
Ok(())
-2
View File
@@ -15,7 +15,6 @@ sr-std = { path = "../../core/sr-std", default-features = false }
sr-io = { path = "../../core/sr-io", default-features = false }
sr-primitives = { path = "../../core/sr-primitives", default-features = false }
srml-support = { path = "../support", default-features = false }
srml-consensus = { path = "../consensus", default-features = false }
srml-balances = { path = "../balances", default-features = false }
srml-democracy = { path = "../democracy", default-features = false }
srml-system = { path = "../system", default-features = false }
@@ -33,7 +32,6 @@ std = [
"sr-io/std",
"srml-support/std",
"sr-primitives/std",
"srml-consensus/std",
"srml-balances/std",
"srml-democracy/std",
"srml-system/std",
-2
View File
@@ -16,7 +16,6 @@ sr-io = { path = "../../core/sr-io", default-features = false }
sr-primitives = { path = "../../core/sr-primitives", default-features = false }
srml-support = { path = "../support", default-features = false }
srml-balances = { path = "../balances", default-features = false }
srml-consensus = { path = "../consensus", default-features = false }
srml-system = { path = "../system", default-features = false }
[features]
@@ -31,7 +30,6 @@ std = [
"sr-io/std",
"srml-support/std",
"sr-primitives/std",
"srml-consensus/std",
"srml-balances/std",
"srml-system/std",
]