mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-01 13:37:57 +00:00
Minimal parachains part 2: Parachain statement and data routing (#173)
* dynamic inclusion threshold calculator * collators interface * collation helpers * initial proposal-creation future * create proposer when asked to propose * remove local_availability duty * statement table tracks includable parachain count * beginnings of timing future * finish proposal logic * remove stray println * extract shared table to separate module * change ordering * includability tracking * fix doc * initial changes to parachains module * initialise dummy block before API calls * give polkadot control over round proposer based on random seed * propose only after enough candidates * flesh out parachains module a bit more * set_heads * actually introduce set_heads to runtime * update block_builder to accept parachains * split block validity errors from real errors in evaluation * update WASM runtimes * polkadot-api methods for parachains additions * delay evaluation until candidates are ready * comments * fix dynamic inclusion with zero initial * test for includability tracker * wasm validation of parachain candidates * move primitives to primitives crate * remove runtime-std dependency from codec * adjust doc * polkadot-parachain-primitives * kill legacy polkadot-validator crate * basic-add test chain * test for basic_add parachain * move to test-chains dir * use wasm-build * new wasm directory layout * reorganize a bit more * Fix for rh-minimal-parachain (#141) * Remove extern "C" We already encountered such behavior (bug?) in pwasm-std, I believe. * Fix `panic_fmt` signature by adding `_col` Wrong `panic_fmt` signature can inhibit some optimizations in LTO mode. * Add linker flags and use wasm-gc in build script Pass --import-memory to LLD to emit wasm binary with imported memory. Also use wasm-gc instead of wasm-build. * Fix effective_max. I'm not sure why it was the way it was actually. * Recompile wasm. 
* Fix indent * more basic_add tests * validate parachain WASM * produce statements on receiving statements * tests for reactive statement production * fix build * add OOM lang item to runtime-io * use dynamic_inclusion when evaluating as well * fix update_includable_count * remove dead code * grumbles * actually defer round_proposer logic * update wasm * address a few more grumbles * schedule collation work as soon as BFT is started * impl future in collator * fix comment * governance proposals for adding and removing parachains * bump protocol version * tear out polkadot-specific pieces of substrate-network * extract out polkadot-specific stuff from substrate-network * begin polkadot network subsystem * grumbles * update WASM checkins * parse status from polkadot peer * allow invoke of network specialization * begin statement router implementation * remove dependency on tokio-timer * fix sanity check and have proposer factory create communication streams * pull out statement routing from consensus library * fix comments * adjust typedefs * extract consensus_gossip out of main network protocol handler * port substrate-bft to new tokio * port polkadot-consensus to new tokio * fix typo * start message processing task * initial consensus network implementation * remove known tracking from statement-table crate * extract router into separate module * defer statements until later * double signature is invalid * propagating statements * grumbles * request block data * fix compilation * embed new consensus network into service * port demo CLI to tokio * all test crates compile * some tests for fetching block data * whitespace * adjusting some tokio stuff * update exit-future * remove overly noisy warning * clean up collation work a bit * address review grumbles * fix lock order in protocol handler * rebuild wasm artifacts * tag AuthorityId::from_slice for std only * address formatting grumbles * rename event_loop to executor * some more docs for polkadot-network crate
This commit is contained in:
committed by
GitHub
parent
b8115b257f
commit
6bfcbd6d59
@@ -18,13 +18,14 @@ lazy_static = "1.0"
|
||||
triehash = "0.1"
|
||||
ed25519 = { path = "../../substrate/ed25519" }
|
||||
app_dirs = "1.2"
|
||||
tokio-core = "0.1.12"
|
||||
tokio = "0.1.7"
|
||||
futures = "0.1.17"
|
||||
ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
|
||||
fdlimit = "0.1"
|
||||
parking_lot = "0.4"
|
||||
serde_json = "1.0"
|
||||
serde = "1.0"
|
||||
exit-future = "0.1"
|
||||
substrate-client = { path = "../../substrate/client" }
|
||||
substrate-codec = { path = "../../substrate/codec" }
|
||||
substrate-network = { path = "../../substrate/network" }
|
||||
|
||||
@@ -17,9 +17,10 @@
|
||||
//! Console informant. Prints sync progress and block events. Runs on the calling thread.
|
||||
|
||||
use std::time::{Duration, Instant};
|
||||
use futures::stream::Stream;
|
||||
use futures::{Future, Stream};
|
||||
use service::{Service, Components};
|
||||
use tokio_core::reactor;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use tokio::timer::Interval;
|
||||
use network::{SyncState, SyncProvider};
|
||||
use polkadot_primitives::Block;
|
||||
use state_machine;
|
||||
@@ -28,13 +29,12 @@ use client::{self, BlockchainEvents};
|
||||
const TIMER_INTERVAL_MS: u64 = 5000;
|
||||
|
||||
/// Spawn informant on the event loop
|
||||
pub fn start<C>(service: &Service<C>, handle: reactor::Handle)
|
||||
pub fn start<C>(service: &Service<C>, exit: ::exit_future::Exit, handle: TaskExecutor)
|
||||
where
|
||||
C: Components,
|
||||
client::error::Error: From<<<<C as Components>::Backend as client::backend::Backend<Block>>::State as state_machine::Backend>::Error>,
|
||||
{
|
||||
let interval = reactor::Interval::new_at(Instant::now(), Duration::from_millis(TIMER_INTERVAL_MS), &handle)
|
||||
.expect("Error creating informant timer");
|
||||
let interval = Interval::new(Instant::now(), Duration::from_millis(TIMER_INTERVAL_MS));
|
||||
|
||||
let network = service.network();
|
||||
let client = service.client();
|
||||
@@ -73,8 +73,8 @@ pub fn start<C>(service: &Service<C>, handle: reactor::Handle)
|
||||
telemetry!("txpool.import"; "mem_usage" => status.mem_usage, "count" => status.transaction_count, "sender" => status.senders);
|
||||
Ok(())
|
||||
});
|
||||
handle.spawn(display_notifications);
|
||||
handle.spawn(display_block_import);
|
||||
handle.spawn(display_txpool_import);
|
||||
|
||||
let informant_work = display_notifications.join3(display_block_import, display_txpool_import);
|
||||
handle.spawn(exit.until(informant_work).map(|_| ()));
|
||||
}
|
||||
|
||||
|
||||
+31
-16
@@ -25,7 +25,7 @@ extern crate ansi_term;
|
||||
extern crate regex;
|
||||
extern crate time;
|
||||
extern crate futures;
|
||||
extern crate tokio_core;
|
||||
extern crate tokio;
|
||||
extern crate ctrlc;
|
||||
extern crate fdlimit;
|
||||
extern crate ed25519;
|
||||
@@ -50,6 +50,7 @@ extern crate slog; // needed until we can reexport `slog_info` from `substrate_t
|
||||
#[macro_use]
|
||||
extern crate substrate_telemetry;
|
||||
extern crate polkadot_transaction_pool as txpool;
|
||||
extern crate exit_future;
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
@@ -76,9 +77,8 @@ use codec::Slicable;
|
||||
use client::BlockOrigin;
|
||||
use runtime_primitives::generic::SignedBlock;
|
||||
|
||||
use futures::sync::mpsc;
|
||||
use futures::{Sink, Future, Stream};
|
||||
use tokio_core::reactor;
|
||||
use futures::Future;
|
||||
use tokio::runtime::Runtime;
|
||||
use service::PruningMode;
|
||||
|
||||
const DEFAULT_TELEMETRY_URL: &str = "ws://telemetry.polkadot.io:1024";
|
||||
@@ -188,13 +188,14 @@ pub fn run<I, T>(args: I) -> error::Result<()> where
|
||||
let role =
|
||||
if matches.is_present("collator") {
|
||||
info!("Starting collator");
|
||||
service::Role::COLLATOR
|
||||
// TODO [rob]: collation node implementation
|
||||
service::Role::FULL
|
||||
} else if matches.is_present("light") {
|
||||
info!("Starting (light)");
|
||||
service::Role::LIGHT
|
||||
} else if matches.is_present("validator") || matches.is_present("dev") {
|
||||
info!("Starting validator");
|
||||
service::Role::VALIDATOR
|
||||
service::Role::AUTHORITY
|
||||
} else {
|
||||
info!("Starting (heavy)");
|
||||
service::Role::FULL
|
||||
@@ -231,6 +232,9 @@ pub fn run<I, T>(args: I) -> error::Result<()> where
|
||||
chain_name: config.chain_spec.name().to_owned(),
|
||||
};
|
||||
|
||||
let mut runtime = Runtime::new()?;
|
||||
let executor = runtime.executor();
|
||||
|
||||
let _guard = if matches.is_present("telemetry") || matches.value_of("telemetry-url").is_some() {
|
||||
let name = config.name.clone();
|
||||
let chain_name = config.chain_spec.name().to_owned();
|
||||
@@ -250,11 +254,14 @@ pub fn run<I, T>(args: I) -> error::Result<()> where
|
||||
None
|
||||
};
|
||||
|
||||
let core = reactor::Core::new().expect("tokio::Core could not be created");
|
||||
match role == service::Role::LIGHT {
|
||||
true => run_until_exit(core, service::new_light(config)?, &matches, sys_conf),
|
||||
false => run_until_exit(core, service::new_full(config)?, &matches, sys_conf),
|
||||
true => run_until_exit(&mut runtime, service::new_light(config, executor)?, &matches, sys_conf)?,
|
||||
false => run_until_exit(&mut runtime, service::new_full(config, executor)?, &matches, sys_conf)?,
|
||||
}
|
||||
|
||||
// TODO: hard exit if this stalls?
|
||||
runtime.shutdown_on_idle().wait().expect("failed to shut down event loop");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_spec(matches: &clap::ArgMatches) -> error::Result<()> {
|
||||
@@ -370,29 +377,37 @@ fn import_blocks(matches: &clap::ArgMatches) -> error::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn run_until_exit<C>(mut core: reactor::Core, service: service::Service<C>, matches: &clap::ArgMatches, sys_conf: SystemConfiguration) -> error::Result<()>
|
||||
fn run_until_exit<C>(runtime: &mut Runtime, service: service::Service<C>, matches: &clap::ArgMatches, sys_conf: SystemConfiguration) -> error::Result<()>
|
||||
where
|
||||
C: service::Components,
|
||||
client::error::Error: From<<<<C as service::Components>::Backend as client::backend::Backend<Block>>::State as state_machine::Backend>::Error>,
|
||||
{
|
||||
let exit = {
|
||||
// can't use signal directly here because CtrlC takes only `Fn`.
|
||||
let (exit_send, exit) = mpsc::channel(1);
|
||||
let (exit_send, exit) = exit_future::signal();
|
||||
let exit_send = ::std::cell::RefCell::new(Some(exit_send));
|
||||
ctrlc::CtrlC::set_handler(move || {
|
||||
exit_send.clone().send(()).wait().expect("Error sending exit notification");
|
||||
let exit_send = exit_send
|
||||
.try_borrow_mut()
|
||||
.expect("only borrowed in non-reetrant signal handler; qed")
|
||||
.take();
|
||||
|
||||
if let Some(signal) = exit_send {
|
||||
signal.fire();
|
||||
}
|
||||
});
|
||||
|
||||
exit
|
||||
};
|
||||
|
||||
informant::start(&service, core.handle());
|
||||
let executor = runtime.executor();
|
||||
informant::start(&service, exit.clone(), executor.clone());
|
||||
|
||||
let _rpc_servers = {
|
||||
let http_address = parse_address("127.0.0.1:9933", "rpc-port", matches)?;
|
||||
let ws_address = parse_address("127.0.0.1:9944", "ws-port", matches)?;
|
||||
|
||||
let handler = || {
|
||||
let chain = rpc::apis::chain::Chain::new(service.client(), core.remote());
|
||||
let chain = rpc::apis::chain::Chain::new(service.client(), executor.clone());
|
||||
let author = rpc::apis::author::Author::new(service.client(), service.transaction_pool());
|
||||
rpc::rpc_handler::<Block, _, _, _, _>(
|
||||
service.client(),
|
||||
@@ -407,7 +422,7 @@ fn run_until_exit<C>(mut core: reactor::Core, service: service::Service<C>, matc
|
||||
)
|
||||
};
|
||||
|
||||
core.run(exit.into_future()).expect("Error running informant event loop");
|
||||
let _ = exit.wait();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -9,5 +9,5 @@ futures = "0.1.17"
|
||||
substrate-codec = { path = "../../substrate/codec", version = "0.1" }
|
||||
substrate-primitives = { path = "../../substrate/primitives", version = "0.1" }
|
||||
polkadot-runtime = { path = "../runtime", version = "0.1" }
|
||||
polkadot-parachain = { path = "../parachain", version = "0.1" }
|
||||
polkadot-primitives = { path = "../primitives", version = "0.1" }
|
||||
polkadot-parachain = { path = "../parachain", version = "0.1" }
|
||||
|
||||
@@ -85,7 +85,7 @@ pub trait RelayChainContext {
|
||||
|
||||
/// Collate the necessary ingress queue using the given context.
|
||||
pub fn collate_ingress<'a, R>(relay_context: R)
|
||||
-> Box<Future<Item=ConsolidatedIngress, Error=R::Error> + 'a>
|
||||
-> impl Future<Item=ConsolidatedIngress, Error=R::Error> + 'a
|
||||
where
|
||||
R: RelayChainContext,
|
||||
R::Error: 'a,
|
||||
@@ -106,7 +106,7 @@ pub fn collate_ingress<'a, R>(relay_context: R)
|
||||
// and then by the parachain ID.
|
||||
//
|
||||
// then transform that into the consolidated egress queue.
|
||||
Box::new(stream::futures_unordered(egress_fetch)
|
||||
stream::futures_unordered(egress_fetch)
|
||||
.fold(BTreeMap::new(), |mut map, (routing_id, egresses)| {
|
||||
for (depth, egress) in egresses.into_iter().rev().enumerate() {
|
||||
let depth = -(depth as i64);
|
||||
@@ -117,19 +117,19 @@ pub fn collate_ingress<'a, R>(relay_context: R)
|
||||
})
|
||||
.map(|ordered| ordered.into_iter().map(|((_, id), egress)| (id, egress)))
|
||||
.map(|i| i.collect::<Vec<_>>())
|
||||
.map(ConsolidatedIngress))
|
||||
.map(ConsolidatedIngress)
|
||||
}
|
||||
|
||||
/// Produce a candidate for the parachain.
|
||||
pub fn collate<'a, R, P>(local_id: ParaId, relay_context: R, para_context: P)
|
||||
-> Box<Future<Item=parachain::Candidate, Error=R::Error> + 'a>
|
||||
pub fn collate<'a, R: 'a, P>(local_id: ParaId, relay_context: R, para_context: P)
|
||||
-> impl Future<Item=parachain::Candidate, Error=R::Error> + 'a
|
||||
where
|
||||
R: RelayChainContext,
|
||||
R::Error: 'a,
|
||||
R::FutureEgress: 'a,
|
||||
P: ParachainContext + 'a,
|
||||
{
|
||||
Box::new(collate_ingress(relay_context).map(move |ingress| {
|
||||
collate_ingress(relay_context).map(move |ingress| {
|
||||
let (block_data, _, signature) = para_context.produce_candidate(
|
||||
ingress.0.iter().flat_map(|&(id, ref msgs)| msgs.iter().cloned().map(move |msg| (id, msg)))
|
||||
);
|
||||
@@ -140,7 +140,7 @@ pub fn collate<'a, R, P>(local_id: ParaId, relay_context: R, para_context: P)
|
||||
block: block_data,
|
||||
unprocessed_ingress: ingress,
|
||||
}
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
[dependencies]
|
||||
futures = "0.1.17"
|
||||
parking_lot = "0.4"
|
||||
tokio-core = "0.1.12"
|
||||
tokio = "0.1.7"
|
||||
ed25519 = { path = "../../substrate/ed25519" }
|
||||
error-chain = "0.12"
|
||||
log = "0.3"
|
||||
@@ -22,7 +22,8 @@ substrate-bft = { path = "../../substrate/bft" }
|
||||
substrate-codec = { path = "../../substrate/codec" }
|
||||
substrate-primitives = { path = "../../substrate/primitives" }
|
||||
substrate-runtime-support = { path = "../../substrate/runtime-support" }
|
||||
substrate-network = { path = "../../substrate/network" }
|
||||
substrate-keyring = { path = "../../substrate/keyring" }
|
||||
substrate-client = { path = "../../substrate/client" }
|
||||
substrate-runtime-primitives = { path = "../../substrate/runtime/primitives" }
|
||||
|
||||
[dev-dependencies]
|
||||
substrate-keyring = { path = "../../substrate/keyring" }
|
||||
|
||||
@@ -23,7 +23,7 @@ use std::sync::Arc;
|
||||
|
||||
use polkadot_api::PolkadotApi;
|
||||
use polkadot_primitives::{Hash, AccountId, BlockId};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, Chain, BlockData, Extrinsic, CandidateReceipt};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt};
|
||||
|
||||
use futures::prelude::*;
|
||||
|
||||
@@ -55,7 +55,7 @@ pub trait Collators: Clone {
|
||||
///
|
||||
/// This future is fused.
|
||||
pub struct CollationFetch<C: Collators, P: PolkadotApi> {
|
||||
parachain: Option<ParaId>,
|
||||
parachain: ParaId,
|
||||
relay_parent_hash: Hash,
|
||||
relay_parent: BlockId,
|
||||
collators: C,
|
||||
@@ -65,16 +65,13 @@ pub struct CollationFetch<C: Collators, P: PolkadotApi> {
|
||||
|
||||
impl<C: Collators, P: PolkadotApi> CollationFetch<C, P> {
|
||||
/// Create a new collation fetcher for the given chain.
|
||||
pub fn new(parachain: Chain, relay_parent: BlockId, relay_parent_hash: Hash, collators: C, client: Arc<P>) -> Self {
|
||||
pub fn new(parachain: ParaId, relay_parent: BlockId, relay_parent_hash: Hash, collators: C, client: Arc<P>) -> Self {
|
||||
CollationFetch {
|
||||
relay_parent_hash,
|
||||
relay_parent,
|
||||
collators,
|
||||
client,
|
||||
parachain: match parachain {
|
||||
Chain::Parachain(id) => Some(id),
|
||||
Chain::Relay => None,
|
||||
},
|
||||
parachain,
|
||||
live_fetch: None,
|
||||
}
|
||||
}
|
||||
@@ -85,26 +82,19 @@ impl<C: Collators, P: PolkadotApi> Future for CollationFetch<C, P> {
|
||||
type Error = C::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<(Collation, Extrinsic), C::Error> {
|
||||
let parachain = match self.parachain.as_ref() {
|
||||
Some(p) => p.clone(),
|
||||
None => return Ok(Async::NotReady),
|
||||
};
|
||||
|
||||
loop {
|
||||
let x = {
|
||||
let parachain = self.parachain.clone();
|
||||
let (r, c) = (self.relay_parent_hash, &self.collators);
|
||||
let poll = self.live_fetch
|
||||
.get_or_insert_with(move || c.collate(parachain, r).into_future())
|
||||
.poll();
|
||||
|
||||
if let Err(_) = poll { self.parachain = None }
|
||||
try_ready!(poll)
|
||||
};
|
||||
|
||||
match validate_collation(&*self.client, &self.relay_parent, &x) {
|
||||
Ok(()) => {
|
||||
self.parachain = None;
|
||||
|
||||
// TODO: generate extrinsic while verifying.
|
||||
return Ok(Async::Ready((x, Extrinsic)));
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ impl DynamicInclusion {
|
||||
/// would be enough, or `None` if it is sufficient now.
|
||||
///
|
||||
/// Panics if `now` is earlier than the `start`.
|
||||
pub fn acceptable_in(&self, now: Instant, included: usize) -> Option<Duration> {
|
||||
pub fn acceptable_in(&self, now: Instant, included: usize) -> Option<Instant> {
|
||||
let elapsed = now.duration_since(self.start);
|
||||
let elapsed = duration_to_micros(&elapsed);
|
||||
|
||||
@@ -70,7 +70,8 @@ impl DynamicInclusion {
|
||||
if elapsed >= valid_after {
|
||||
None
|
||||
} else {
|
||||
Some(Duration::from_millis((valid_after - elapsed) as u64 / 1000))
|
||||
let until = Duration::from_millis((valid_after - elapsed) as u64 / 1000);
|
||||
Some(now + until)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -104,7 +105,7 @@ mod tests {
|
||||
Duration::from_millis(4000),
|
||||
);
|
||||
|
||||
assert_eq!(dynamic.acceptable_in(now, 5), Some(Duration::from_millis(2000)));
|
||||
assert_eq!(dynamic.acceptable_in(now, 5), Some(now + Duration::from_millis(2000)));
|
||||
assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 5).is_none());
|
||||
assert!(dynamic.acceptable_in(now + Duration::from_millis(3000), 5).is_none());
|
||||
assert!(dynamic.acceptable_in(now + Duration::from_millis(4000), 5).is_none());
|
||||
|
||||
@@ -37,7 +37,7 @@ error_chain! {
|
||||
description("Proposer destroyed before finishing proposing or evaluating"),
|
||||
display("Proposer destroyed before finishing proposing or evaluating"),
|
||||
}
|
||||
Timer(e: String) {
|
||||
Timer(e: ::tokio::timer::Error) {
|
||||
description("Failed to register or resolve async timer."),
|
||||
display("Timer failed: {}", e),
|
||||
}
|
||||
|
||||
+192
-167
@@ -44,11 +44,10 @@ extern crate substrate_codec as codec;
|
||||
extern crate substrate_primitives as primitives;
|
||||
extern crate substrate_runtime_support as runtime_support;
|
||||
extern crate substrate_runtime_primitives as runtime_primitives;
|
||||
extern crate substrate_network;
|
||||
extern crate substrate_client as client;
|
||||
|
||||
extern crate exit_future;
|
||||
extern crate tokio_core;
|
||||
extern crate substrate_client as client;
|
||||
extern crate tokio;
|
||||
|
||||
#[macro_use]
|
||||
extern crate error_chain;
|
||||
@@ -67,33 +66,32 @@ use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use codec::Slicable;
|
||||
use table::generic::Statement as GenericStatement;
|
||||
use runtime_support::Hashable;
|
||||
use polkadot_api::PolkadotApi;
|
||||
use polkadot_primitives::{Hash, Block, BlockId, BlockNumber, Header, Timestamp};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, Chain, DutyRoster, BlockData, Extrinsic as ParachainExtrinsic, CandidateReceipt};
|
||||
use polkadot_runtime::BareExtrinsic;
|
||||
use polkadot_primitives::{Hash, Block, BlockId, BlockNumber, Header, Timestamp, SessionKey};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, Chain, DutyRoster, BlockData, Extrinsic as ParachainExtrinsic, CandidateReceipt, CandidateSignature};
|
||||
use primitives::AuthorityId;
|
||||
use transaction_pool::{TransactionPool};
|
||||
use tokio_core::reactor::{Handle, Timeout, Interval};
|
||||
use transaction_pool::TransactionPool;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use tokio::timer::{Delay, Interval};
|
||||
|
||||
use futures::prelude::*;
|
||||
use futures::future::{self, Shared};
|
||||
use futures::future;
|
||||
use collation::CollationFetch;
|
||||
use dynamic_inclusion::DynamicInclusion;
|
||||
|
||||
pub use self::collation::{Collators, Collation};
|
||||
pub use self::collation::{validate_collation, Collators, Collation};
|
||||
pub use self::error::{ErrorKind, Error};
|
||||
pub use self::shared_table::{SharedTable, StatementSource, StatementProducer, ProducedStatements};
|
||||
pub use self::shared_table::{SharedTable, StatementProducer, ProducedStatements, Statement, SignedStatement, GenericStatement};
|
||||
pub use service::Service;
|
||||
|
||||
mod collation;
|
||||
mod dynamic_inclusion;
|
||||
mod evaluation;
|
||||
mod error;
|
||||
mod service;
|
||||
mod shared_table;
|
||||
|
||||
pub mod collation;
|
||||
|
||||
// block size limit.
|
||||
const MAX_TRANSACTIONS_SIZE: usize = 4 * 1024 * 1024;
|
||||
|
||||
@@ -108,8 +106,9 @@ pub trait TableRouter: Clone {
|
||||
/// Future that resolves when extrinsic candidate data is fetched.
|
||||
type FetchExtrinsic: IntoFuture<Item=ParachainExtrinsic,Error=Self::Error>;
|
||||
|
||||
/// Note local candidate data, making it available on the network to other validators.
|
||||
fn local_candidate_data(&self, hash: Hash, block_data: BlockData, extrinsic: ParachainExtrinsic);
|
||||
/// Call with local candidate data. This will make the data available on the network,
|
||||
/// and sign, import, and broadcast a statement about the candidate.
|
||||
fn local_candidate(&self, candidate: CandidateReceipt, block_data: BlockData, extrinsic: ParachainExtrinsic);
|
||||
|
||||
/// Fetch block data for a specific candidate.
|
||||
fn fetch_block_data(&self, candidate: &CandidateReceipt) -> Self::FetchCandidate;
|
||||
@@ -118,23 +117,28 @@ pub trait TableRouter: Clone {
|
||||
fn fetch_extrinsic_data(&self, candidate: &CandidateReceipt) -> Self::FetchExtrinsic;
|
||||
}
|
||||
|
||||
/// A long-lived network which can create statement table routing instances.
|
||||
/// A long-lived network which can create parachain statement and BFT message routing processes on demand.
|
||||
pub trait Network {
|
||||
/// The table router type. This should handle importing of any statements,
|
||||
/// routing statements to peers, and driving completion of any `StatementProducers`.
|
||||
type TableRouter: TableRouter;
|
||||
/// The input stream of BFT messages. Should never logically conclude.
|
||||
type Input: Stream<Item=bft::Communication<Block>,Error=Error>;
|
||||
/// The output sink of BFT messages. Messages sent here should eventually pass to all
|
||||
/// current authorities.
|
||||
type Output: Sink<SinkItem=bft::Communication<Block>,SinkError=Error>;
|
||||
|
||||
/// Instantiate a table router using the given shared table.
|
||||
fn table_router(&self, table: Arc<SharedTable>) -> Self::TableRouter;
|
||||
/// Instantiate a table router using the given shared table and task executor.
|
||||
fn communication_for(&self, validators: &[SessionKey], table: Arc<SharedTable>, task_executor: TaskExecutor) -> (Self::TableRouter, Self::Input, Self::Output);
|
||||
}
|
||||
|
||||
/// Information about a specific group.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct GroupInfo {
|
||||
/// Authorities meant to check validity of candidates.
|
||||
pub validity_guarantors: HashSet<AuthorityId>,
|
||||
pub validity_guarantors: HashSet<SessionKey>,
|
||||
/// Authorities meant to check availability of candidate data.
|
||||
pub availability_guarantors: HashSet<AuthorityId>,
|
||||
pub availability_guarantors: HashSet<SessionKey>,
|
||||
/// Number of votes needed for validity.
|
||||
pub needed_validity: usize,
|
||||
/// Number of votes needed for availability.
|
||||
@@ -144,20 +148,21 @@ pub struct GroupInfo {
|
||||
/// Sign a table statement against a parent hash.
|
||||
/// The actual message signed is the encoded statement concatenated with the
|
||||
/// parent hash.
|
||||
pub fn sign_table_statement(statement: &table::Statement, key: &ed25519::Pair, parent_hash: &Hash) -> ed25519::Signature {
|
||||
use polkadot_primitives::parachain::Statement as RawStatement;
|
||||
|
||||
let raw = match *statement {
|
||||
GenericStatement::Candidate(ref c) => RawStatement::Candidate(c.clone()),
|
||||
GenericStatement::Valid(h) => RawStatement::Valid(h),
|
||||
GenericStatement::Invalid(h) => RawStatement::Invalid(h),
|
||||
GenericStatement::Available(h) => RawStatement::Available(h),
|
||||
};
|
||||
|
||||
let mut encoded = raw.encode();
|
||||
pub fn sign_table_statement(statement: &Statement, key: &ed25519::Pair, parent_hash: &Hash) -> CandidateSignature {
|
||||
let mut encoded = statement.encode();
|
||||
encoded.extend(&parent_hash.0);
|
||||
|
||||
key.sign(&encoded)
|
||||
key.sign(&encoded).into()
|
||||
}
|
||||
|
||||
/// Check signature on table statement.
|
||||
pub fn check_statement(statement: &Statement, signature: &CandidateSignature, signer: SessionKey, parent_hash: &Hash) -> bool {
|
||||
use runtime_primitives::traits::Verify;
|
||||
|
||||
let mut encoded = statement.encode();
|
||||
encoded.extend(&parent_hash.0);
|
||||
|
||||
signature.verify(&encoded[..], &signer.into())
|
||||
}
|
||||
|
||||
fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: AuthorityId) -> Result<(HashMap<ParaId, GroupInfo>, LocalDuty), Error> {
|
||||
@@ -217,41 +222,43 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: Au
|
||||
}
|
||||
}
|
||||
|
||||
fn timer_error(e: &::std::io::Error) -> Error {
|
||||
ErrorKind::Timer(format!("{}", e)).into()
|
||||
}
|
||||
|
||||
/// Polkadot proposer factory.
|
||||
pub struct ProposerFactory<C, N, P> {
|
||||
/// The client instance.
|
||||
pub client: Arc<C>,
|
||||
pub client: Arc<P>,
|
||||
/// The transaction pool.
|
||||
pub transaction_pool: Arc<TransactionPool<C>>,
|
||||
pub transaction_pool: Arc<TransactionPool<P>>,
|
||||
/// The backing network handle.
|
||||
pub network: N,
|
||||
/// Parachain collators.
|
||||
pub collators: P,
|
||||
/// The timer used to schedule proposal intervals.
|
||||
pub handle: Handle,
|
||||
pub collators: C,
|
||||
/// handle to remote task executor
|
||||
pub handle: TaskExecutor,
|
||||
/// The duration after which parachain-empty blocks will be allowed.
|
||||
pub parachain_empty_duration: Duration,
|
||||
}
|
||||
|
||||
impl<C, N, P> bft::ProposerFactory<Block> for ProposerFactory<C, N, P>
|
||||
impl<C, N, P> bft::Environment<Block> for ProposerFactory<C, N, P>
|
||||
where
|
||||
C: PolkadotApi + Send + Sync,
|
||||
C: Collators + Send + 'static,
|
||||
N: Network,
|
||||
P: Collators,
|
||||
P: PolkadotApi + Send + Sync + 'static,
|
||||
<C::Collation as IntoFuture>::Future: Send + 'static,
|
||||
N::TableRouter: Send + 'static,
|
||||
{
|
||||
type Proposer = Proposer<C, N::TableRouter, P>;
|
||||
type Proposer = Proposer<P>;
|
||||
type Input = N::Input;
|
||||
type Output = N::Output;
|
||||
type Error = Error;
|
||||
|
||||
fn init(&self, parent_header: &Header, authorities: &[AuthorityId], sign_with: Arc<ed25519::Pair>) -> Result<Self::Proposer, Error> {
|
||||
use std::time::Duration;
|
||||
|
||||
fn init(&self,
|
||||
parent_header: &Header,
|
||||
authorities: &[AuthorityId],
|
||||
sign_with: Arc<ed25519::Pair>
|
||||
) -> Result<(Self::Proposer, Self::Input, Self::Output), Error> {
|
||||
const DELAY_UNTIL: Duration = Duration::from_millis(5000);
|
||||
|
||||
let parent_hash = parent_header.blake2_256().into();
|
||||
let parent_hash = parent_header.hash().into();
|
||||
|
||||
let id = BlockId::hash(parent_hash);
|
||||
let duty_roster = self.client.duty_roster(&id)?;
|
||||
@@ -267,70 +274,116 @@ impl<C, N, P> bft::ProposerFactory<Block> for ProposerFactory<C, N, P>
|
||||
|
||||
let n_parachains = active_parachains.len();
|
||||
let table = Arc::new(SharedTable::new(group_info, sign_with.clone(), parent_hash));
|
||||
let router = self.network.table_router(table.clone());
|
||||
let dynamic_inclusion = DynamicInclusion::new(
|
||||
n_parachains,
|
||||
Instant::now(),
|
||||
self.parachain_empty_duration.clone(),
|
||||
let (router, input, output) = self.network.communication_for(
|
||||
authorities,
|
||||
table.clone(),
|
||||
self.handle.clone()
|
||||
);
|
||||
|
||||
let timeout = Timeout::new(DELAY_UNTIL, &self.handle)
|
||||
.map_err(|e| timer_error(&e))?;
|
||||
let now = Instant::now();
|
||||
let dynamic_inclusion = DynamicInclusion::new(
|
||||
n_parachains,
|
||||
now,
|
||||
self.parachain_empty_duration.clone(),
|
||||
);
|
||||
|
||||
debug!(target: "bft", "Initialising consensus proposer. Refusing to evaluate for {:?} from now.",
|
||||
DELAY_UNTIL);
|
||||
|
||||
// TODO [PoC-2]: kick off collation process.
|
||||
Ok(Proposer {
|
||||
let validation_para = match local_duty.validation {
|
||||
Chain::Relay => None,
|
||||
Chain::Parachain(id) => Some(id),
|
||||
};
|
||||
|
||||
let collation_work = validation_para.map(|para| CollationFetch::new(
|
||||
para,
|
||||
id.clone(),
|
||||
parent_hash.clone(),
|
||||
self.collators.clone(),
|
||||
self.client.clone(),
|
||||
));
|
||||
let drop_signal = dispatch_collation_work(
|
||||
router.clone(),
|
||||
&self.handle,
|
||||
collation_work,
|
||||
);
|
||||
|
||||
let proposer = Proposer {
|
||||
client: self.client.clone(),
|
||||
collators: self.collators.clone(),
|
||||
delay: timeout.shared(),
|
||||
handle: self.handle.clone(),
|
||||
dynamic_inclusion,
|
||||
local_duty,
|
||||
local_key: sign_with,
|
||||
minimum_delay: now + DELAY_UNTIL,
|
||||
parent_hash,
|
||||
parent_id: id,
|
||||
parent_number: parent_header.number,
|
||||
random_seed,
|
||||
router,
|
||||
table,
|
||||
transaction_pool: self.transaction_pool.clone(),
|
||||
})
|
||||
_drop_signal: drop_signal,
|
||||
};
|
||||
|
||||
Ok((proposer, input, output))
|
||||
}
|
||||
}
|
||||
|
||||
// dispatch collation work to be done in the background. returns a signal object
|
||||
// that should fire when the collation work is no longer necessary (e.g. when the proposer object is dropped)
|
||||
fn dispatch_collation_work<R, C, P>(
|
||||
router: R,
|
||||
handle: &TaskExecutor,
|
||||
work: Option<CollationFetch<C, P>>,
|
||||
) -> exit_future::Signal where
|
||||
C: Collators + Send + 'static,
|
||||
P: PolkadotApi + Send + Sync + 'static,
|
||||
<C::Collation as IntoFuture>::Future: Send + 'static,
|
||||
R: TableRouter + Send + 'static,
|
||||
{
|
||||
let (signal, exit) = exit_future::signal();
|
||||
let handled_work = work.then(move |result| match result {
|
||||
Ok(Some((collation, extrinsic))) => {
|
||||
router.local_candidate(collation.receipt, collation.block_data, extrinsic);
|
||||
Ok(())
|
||||
}
|
||||
Ok(None) => Ok(()),
|
||||
Err(_e) => {
|
||||
warn!(target: "consensus", "Failed to collate candidate");
|
||||
Ok(())
|
||||
}
|
||||
});
|
||||
|
||||
let cancellable_work = handled_work.select(exit).then(|_| Ok(()));
|
||||
|
||||
// spawn onto thread pool.
|
||||
handle.spawn(cancellable_work);
|
||||
signal
|
||||
}
|
||||
|
||||
struct LocalDuty {
|
||||
validation: Chain,
|
||||
}
|
||||
|
||||
/// The Polkadot proposer logic.
|
||||
pub struct Proposer<C: PolkadotApi, R, P> {
|
||||
pub struct Proposer<C: PolkadotApi> {
|
||||
client: Arc<C>,
|
||||
collators: P,
|
||||
delay: Shared<Timeout>,
|
||||
dynamic_inclusion: DynamicInclusion,
|
||||
handle: Handle,
|
||||
local_duty: LocalDuty,
|
||||
local_key: Arc<ed25519::Pair>,
|
||||
minimum_delay: Instant,
|
||||
parent_hash: Hash,
|
||||
parent_id: BlockId,
|
||||
parent_number: BlockNumber,
|
||||
random_seed: Hash,
|
||||
router: R,
|
||||
table: Arc<SharedTable>,
|
||||
transaction_pool: Arc<TransactionPool<C>>,
|
||||
_drop_signal: exit_future::Signal,
|
||||
}
|
||||
|
||||
impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
impl<C> bft::Proposer<Block> for Proposer<C>
|
||||
where
|
||||
C: PolkadotApi + Send + Sync,
|
||||
R: TableRouter,
|
||||
P: Collators,
|
||||
{
|
||||
type Error = Error;
|
||||
type Create = future::Either<
|
||||
CreateProposal<C, R, P>,
|
||||
CreateProposal<C>,
|
||||
future::FutureResult<Block, Error>,
|
||||
>;
|
||||
type Evaluate = Box<Future<Item=bool, Error=Error>>;
|
||||
@@ -339,32 +392,24 @@ impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
const ATTEMPT_PROPOSE_EVERY: Duration = Duration::from_millis(100);
|
||||
|
||||
let initial_included = self.table.includable_count();
|
||||
let now = Instant::now();
|
||||
let enough_candidates = self.dynamic_inclusion.acceptable_in(
|
||||
Instant::now(),
|
||||
now,
|
||||
initial_included,
|
||||
).unwrap_or_default();
|
||||
).unwrap_or_else(|| now + Duration::from_millis(1));
|
||||
|
||||
let timing = {
|
||||
let delay = self.delay.clone();
|
||||
let dynamic_inclusion = self.dynamic_inclusion.clone();
|
||||
let make_timing = move |handle| -> Result<ProposalTiming, ::std::io::Error> {
|
||||
let attempt_propose = Interval::new(ATTEMPT_PROPOSE_EVERY, handle)?;
|
||||
let enough_candidates = Timeout::new(enough_candidates, handle)?;
|
||||
Ok(ProposalTiming {
|
||||
attempt_propose,
|
||||
enough_candidates,
|
||||
dynamic_inclusion,
|
||||
minimum_delay: Some(delay),
|
||||
last_included: initial_included,
|
||||
})
|
||||
};
|
||||
let minimum_delay = if self.minimum_delay > now + ATTEMPT_PROPOSE_EVERY {
|
||||
Some(Delay::new(self.minimum_delay))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match make_timing(&self.handle) {
|
||||
Ok(timing) => timing,
|
||||
Err(e) => {
|
||||
return future::Either::B(future::err(timer_error(&e)));
|
||||
}
|
||||
}
|
||||
let timing = ProposalTiming {
|
||||
attempt_propose: Interval::new(now + ATTEMPT_PROPOSE_EVERY, ATTEMPT_PROPOSE_EVERY),
|
||||
enough_candidates: Delay::new(enough_candidates),
|
||||
dynamic_inclusion: self.dynamic_inclusion.clone(),
|
||||
minimum_delay,
|
||||
last_included: initial_included,
|
||||
};
|
||||
|
||||
future::Either::A(CreateProposal {
|
||||
@@ -373,15 +418,7 @@ impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
parent_id: self.parent_id.clone(),
|
||||
client: self.client.clone(),
|
||||
transaction_pool: self.transaction_pool.clone(),
|
||||
collation: CollationFetch::new(
|
||||
self.local_duty.validation,
|
||||
self.parent_id.clone(),
|
||||
self.parent_hash.clone(),
|
||||
self.collators.clone(),
|
||||
self.client.clone()
|
||||
),
|
||||
table: self.table.clone(),
|
||||
router: self.router.clone(),
|
||||
timing,
|
||||
})
|
||||
}
|
||||
@@ -415,9 +452,7 @@ impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
};
|
||||
|
||||
let vote_delays = {
|
||||
// delay casting vote until able (according to minimum block time)
|
||||
let minimum_delay = self.delay.clone()
|
||||
.map_err(|e| timer_error(&*e));
|
||||
let now = Instant::now();
|
||||
|
||||
let included_candidate_hashes = proposal
|
||||
.parachain_heads()
|
||||
@@ -431,33 +466,35 @@ impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
|
||||
// the duration at which the given number of parachains is acceptable.
|
||||
let count_delay = self.dynamic_inclusion.acceptable_in(
|
||||
Instant::now(),
|
||||
now,
|
||||
proposal.parachain_heads().len(),
|
||||
);
|
||||
|
||||
// the duration until the given timestamp is current
|
||||
let proposed_timestamp = proposal.timestamp();
|
||||
let timestamp_delay = if proposed_timestamp > current_timestamp {
|
||||
Some(Duration::from_secs(proposed_timestamp - current_timestamp))
|
||||
Some(now + Duration::from_secs(proposed_timestamp - current_timestamp))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// delay casting vote until able according to minimum block time,
|
||||
// timestamp delay, and count delay.
|
||||
// construct a future from the maximum of the two durations.
|
||||
let temporary_delay = match ::std::cmp::max(timestamp_delay, count_delay) {
|
||||
Some(duration) => {
|
||||
let maybe_timeout = Timeout::new(duration, &self.handle);
|
||||
let max_delay = [timestamp_delay, count_delay, Some(self.minimum_delay)]
|
||||
.iter()
|
||||
.cloned()
|
||||
.max()
|
||||
.expect("iterator not empty; thus max returns `Some`; qed");
|
||||
|
||||
let f = future::result(maybe_timeout)
|
||||
.and_then(|timeout| timeout)
|
||||
.map_err(|e| timer_error(&e));
|
||||
|
||||
future::Either::A(f)
|
||||
}
|
||||
let temporary_delay = match max_delay {
|
||||
Some(duration) => future::Either::A(
|
||||
Delay::new(duration).map_err(|e| Error::from(ErrorKind::Timer(e)))
|
||||
),
|
||||
None => future::Either::B(future::ok(())),
|
||||
};
|
||||
|
||||
minimum_delay.join3(includability_tracker, temporary_delay)
|
||||
includability_tracker.join(temporary_delay)
|
||||
};
|
||||
|
||||
// evaluate whether the block is actually valid.
|
||||
@@ -497,7 +534,7 @@ impl<C, R, P> bft::Proposer<Block> for Proposer<C, R, P>
|
||||
use bft::generic::Misbehavior as GenericMisbehavior;
|
||||
use runtime_primitives::bft::{MisbehaviorKind, MisbehaviorReport};
|
||||
use runtime_primitives::MaybeUnsigned;
|
||||
use polkadot_runtime::{Call, Extrinsic, UncheckedExtrinsic, ConsensusCall};
|
||||
use polkadot_runtime::{Call, Extrinsic, BareExtrinsic, UncheckedExtrinsic, ConsensusCall};
|
||||
|
||||
let local_id = self.local_key.public().0.into();
|
||||
let mut next_index = {
|
||||
@@ -569,71 +606,59 @@ fn current_timestamp() -> Timestamp {
|
||||
struct ProposalTiming {
|
||||
attempt_propose: Interval,
|
||||
dynamic_inclusion: DynamicInclusion,
|
||||
enough_candidates: Timeout,
|
||||
minimum_delay: Option<Shared<Timeout>>,
|
||||
enough_candidates: Delay,
|
||||
minimum_delay: Option<Delay>,
|
||||
last_included: usize,
|
||||
}
|
||||
|
||||
impl ProposalTiming {
|
||||
// whether it's time to attempt a proposal.
|
||||
// shouldn't be called outside of the context of a task.
|
||||
fn poll(&mut self, included: usize) -> Poll<(), Error> {
|
||||
fn poll(&mut self, included: usize) -> Poll<(), ErrorKind> {
|
||||
// first drain from the interval so when the minimum delay is up
|
||||
// we don't have any notifications built up.
|
||||
//
|
||||
// this interval is just meant to produce periodic task wakeups
|
||||
// that lead to the `dynamic_inclusion` getting updated as necessary.
|
||||
if let Async::Ready(x) = self.attempt_propose.poll()
|
||||
.map_err(|e| timer_error(&e))?
|
||||
{
|
||||
if let Async::Ready(x) = self.attempt_propose.poll().map_err(ErrorKind::Timer)? {
|
||||
x.expect("timer still alive; intervals never end; qed");
|
||||
}
|
||||
|
||||
if let Some(ref mut min) = self.minimum_delay {
|
||||
try_ready!(min.poll().map_err(|e| timer_error(&*e)));
|
||||
try_ready!(min.poll().map_err(ErrorKind::Timer));
|
||||
}
|
||||
|
||||
self.minimum_delay = None; // after this point, the future must have completed.
|
||||
|
||||
if included == self.last_included {
|
||||
return self.enough_candidates.poll().map_err(|e| timer_error(&e));
|
||||
return self.enough_candidates.poll().map_err(ErrorKind::Timer);
|
||||
}
|
||||
|
||||
// the amount of includable candidates has changed. schedule a wakeup
|
||||
// if it's not sufficient anymore.
|
||||
let now = Instant::now();
|
||||
match self.dynamic_inclusion.acceptable_in(now, included) {
|
||||
Some(duration) => {
|
||||
match self.dynamic_inclusion.acceptable_in(Instant::now(), included) {
|
||||
Some(instant) => {
|
||||
self.last_included = included;
|
||||
self.enough_candidates.reset(now + duration);
|
||||
self.enough_candidates.poll().map_err(|e| timer_error(&e))
|
||||
}
|
||||
None => {
|
||||
Ok(Async::Ready(()))
|
||||
self.enough_candidates.reset(instant);
|
||||
self.enough_candidates.poll().map_err(ErrorKind::Timer)
|
||||
}
|
||||
None => Ok(Async::Ready(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Future which resolves upon the creation of a proposal.
|
||||
pub struct CreateProposal<C: PolkadotApi, R, P: Collators> {
|
||||
pub struct CreateProposal<C: PolkadotApi> {
|
||||
parent_hash: Hash,
|
||||
parent_number: BlockNumber,
|
||||
parent_id: BlockId,
|
||||
client: Arc<C>,
|
||||
transaction_pool: Arc<TransactionPool<C>>,
|
||||
collation: CollationFetch<P, C>,
|
||||
router: R,
|
||||
table: Arc<SharedTable>,
|
||||
timing: ProposalTiming,
|
||||
}
|
||||
|
||||
impl<C, R, P> CreateProposal<C, R, P>
|
||||
where
|
||||
C: PolkadotApi,
|
||||
R: TableRouter,
|
||||
P: Collators,
|
||||
{
|
||||
impl<C> CreateProposal<C> where C: PolkadotApi {
|
||||
fn propose_with(&self, candidates: Vec<CandidateReceipt>) -> Result<Block, Error> {
|
||||
use polkadot_api::BlockBuilder;
|
||||
use runtime_primitives::traits::{Hashing, BlakeTwo256};
|
||||
@@ -702,35 +727,17 @@ impl<C, R, P> CreateProposal<C, R, P>
|
||||
}
|
||||
}
|
||||
|
||||
impl<C, R, P> Future for CreateProposal<C, R, P>
|
||||
where
|
||||
C: PolkadotApi,
|
||||
R: TableRouter,
|
||||
P: Collators,
|
||||
{
|
||||
impl<C> Future for CreateProposal<C> where C: PolkadotApi {
|
||||
type Item = Block;
|
||||
type Error = Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Block, Error> {
|
||||
// 1. poll local collation future.
|
||||
match self.collation.poll() {
|
||||
Ok(Async::Ready((collation, extrinsic))) => {
|
||||
let hash = collation.receipt.hash();
|
||||
self.router.local_candidate_data(hash, collation.block_data, extrinsic);
|
||||
|
||||
// TODO: if we are an availability guarantor also, we should produce an availability statement.
|
||||
self.table.sign_and_import(&self.router, GenericStatement::Candidate(collation.receipt));
|
||||
}
|
||||
Ok(Async::NotReady) => {},
|
||||
Err(_) => {}, // TODO: handle this failure to collate.
|
||||
}
|
||||
|
||||
// 2. try to propose if we have enough includable candidates and other
|
||||
// 1. try to propose if we have enough includable candidates and other
|
||||
// delays have concluded.
|
||||
let included = self.table.includable_count();
|
||||
try_ready!(self.timing.poll(included));
|
||||
|
||||
// 3. propose
|
||||
// 2. propose
|
||||
let proposed_candidates = self.table.with_proposal(|proposed_set| {
|
||||
proposed_set.into_iter().cloned().collect()
|
||||
});
|
||||
@@ -738,3 +745,21 @@ impl<C, R, P> Future for CreateProposal<C, R, P>
|
||||
self.propose_with(proposed_candidates).map(Async::Ready)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use substrate_keyring::Keyring;
|
||||
|
||||
#[test]
|
||||
fn sign_and_check_statement() {
|
||||
let statement: Statement = GenericStatement::Valid([1; 32].into());
|
||||
let parent_hash = [2; 32].into();
|
||||
|
||||
let sig = sign_table_statement(&statement, &Keyring::Alice.pair(), &parent_hash);
|
||||
|
||||
assert!(check_statement(&statement, &sig, Keyring::Alice.to_raw_public().into(), &parent_hash));
|
||||
assert!(!check_statement(&statement, &sig, Keyring::Alice.to_raw_public().into(), &[0xff; 32].into()));
|
||||
assert!(!check_statement(&statement, &sig, Keyring::Bob.to_raw_public().into(), &parent_hash));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,10 @@
|
||||
|
||||
/// Consensus service. A long runnung service that manages BFT agreement and parachain
|
||||
/// candidate agreement over the network.
|
||||
///
|
||||
/// This uses a handle to an underlying thread pool to dispatch heavy work
|
||||
/// such as candidate verification while performing event-driven work
|
||||
/// on a local event loop.
|
||||
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
@@ -27,197 +31,37 @@ use bft::{self, BftService};
|
||||
use client::{BlockchainEvents, ChainHead};
|
||||
use ed25519;
|
||||
use futures::prelude::*;
|
||||
use futures::{future, Canceled};
|
||||
use polkadot_api::LocalPolkadotApi;
|
||||
use polkadot_primitives::{BlockId, Block, Header, Hash, AccountId};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt};
|
||||
use primitives::AuthorityId;
|
||||
use runtime_support::Hashable;
|
||||
use substrate_network as net;
|
||||
use tokio_core::reactor;
|
||||
use polkadot_primitives::{Block, Header};
|
||||
use transaction_pool::TransactionPool;
|
||||
|
||||
use super::{TableRouter, SharedTable, ProposerFactory};
|
||||
use tokio::executor::current_thread::TaskExecutor as LocalThreadHandle;
|
||||
use tokio::runtime::TaskExecutor as ThreadPoolHandle;
|
||||
use tokio::runtime::current_thread::Runtime as LocalRuntime;
|
||||
use tokio::timer::Interval;
|
||||
|
||||
use super::{Network, Collators, ProposerFactory};
|
||||
use error;
|
||||
|
||||
const TIMER_DELAY_MS: u64 = 5000;
|
||||
const TIMER_INTERVAL_MS: u64 = 500;
|
||||
|
||||
struct BftSink<E> {
|
||||
network: Arc<net::ConsensusService<Block>>,
|
||||
parent_hash: Hash,
|
||||
_e: ::std::marker::PhantomData<E>,
|
||||
}
|
||||
|
||||
struct Messages {
|
||||
network_stream: net::BftMessageStream<Block>,
|
||||
local_id: AuthorityId,
|
||||
authorities: Vec<AuthorityId>,
|
||||
}
|
||||
|
||||
impl Stream for Messages {
|
||||
type Item = bft::Communication<Block>;
|
||||
type Error = bft::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// check the network
|
||||
loop {
|
||||
match self.network_stream.poll() {
|
||||
Err(_) => return Err(bft::InputStreamConcluded.into()),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(None)) => return Ok(Async::NotReady), // the input stream for agreements is never meant to logically end.
|
||||
Ok(Async::Ready(Some(message))) => {
|
||||
match process_message(message, &self.local_id, &self.authorities) {
|
||||
Ok(Some(message)) => return Ok(Async::Ready(Some(message))),
|
||||
Ok(None) => {} // ignored local message.
|
||||
Err(e) => {
|
||||
debug!("Message validation failed: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_message(msg: net::LocalizedBftMessage<Block>, local_id: &AuthorityId, authorities: &[AuthorityId]) -> Result<Option<bft::Communication<Block>>, bft::Error> {
|
||||
Ok(Some(match msg.message {
|
||||
net::generic_message::BftMessage::Consensus(c) => bft::generic::Communication::Consensus(match c {
|
||||
net::generic_message::SignedConsensusMessage::Propose(proposal) => bft::generic::LocalizedMessage::Propose({
|
||||
if &proposal.sender == local_id { return Ok(None) }
|
||||
let proposal = bft::generic::LocalizedProposal {
|
||||
round_number: proposal.round_number as usize,
|
||||
proposal: proposal.proposal,
|
||||
digest: proposal.digest,
|
||||
sender: proposal.sender,
|
||||
digest_signature: ed25519::LocalizedSignature {
|
||||
signature: proposal.digest_signature,
|
||||
signer: proposal.sender.into(),
|
||||
},
|
||||
full_signature: ed25519::LocalizedSignature {
|
||||
signature: proposal.full_signature,
|
||||
signer: proposal.sender.into(),
|
||||
}
|
||||
};
|
||||
bft::check_proposal(authorities, &msg.parent_hash, &proposal)?;
|
||||
|
||||
trace!(target: "bft", "importing proposal message for round {} from {}", proposal.round_number, proposal.sender);
|
||||
proposal
|
||||
}),
|
||||
net::generic_message::SignedConsensusMessage::Vote(vote) => bft::generic::LocalizedMessage::Vote({
|
||||
if &vote.sender == local_id { return Ok(None) }
|
||||
let vote = bft::generic::LocalizedVote {
|
||||
sender: vote.sender,
|
||||
signature: ed25519::LocalizedSignature {
|
||||
signature: vote.signature,
|
||||
signer: vote.sender.into(),
|
||||
},
|
||||
vote: match vote.vote {
|
||||
net::generic_message::ConsensusVote::Prepare(r, h) => bft::generic::Vote::Prepare(r as usize, h),
|
||||
net::generic_message::ConsensusVote::Commit(r, h) => bft::generic::Vote::Commit(r as usize, h),
|
||||
net::generic_message::ConsensusVote::AdvanceRound(r) => bft::generic::Vote::AdvanceRound(r as usize),
|
||||
}
|
||||
};
|
||||
bft::check_vote::<Block>(authorities, &msg.parent_hash, &vote)?;
|
||||
|
||||
trace!(target: "bft", "importing vote {:?} from {}", vote.vote, vote.sender);
|
||||
vote
|
||||
}),
|
||||
}),
|
||||
net::generic_message::BftMessage::Auxiliary(a) => {
|
||||
let justification = bft::UncheckedJustification::<Hash>::from(a);
|
||||
// TODO: get proper error
|
||||
let justification: Result<_, bft::Error> = bft::check_prepare_justification::<Block>(authorities, msg.parent_hash, justification)
|
||||
.map_err(|_| bft::ErrorKind::InvalidJustification.into());
|
||||
bft::generic::Communication::Auxiliary(justification?)
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
impl<E> Sink for BftSink<E> {
|
||||
type SinkItem = bft::Communication<Block>;
|
||||
// TODO: replace this with the ! type when that's stabilized
|
||||
type SinkError = E;
|
||||
|
||||
fn start_send(&mut self, message: bft::Communication<Block>) -> ::futures::StartSend<bft::Communication<Block>, E> {
|
||||
let network_message = net::generic_message::LocalizedBftMessage {
|
||||
message: match message {
|
||||
bft::generic::Communication::Consensus(c) => net::generic_message::BftMessage::Consensus(match c {
|
||||
bft::generic::LocalizedMessage::Propose(proposal) => net::generic_message::SignedConsensusMessage::Propose(net::generic_message::SignedConsensusProposal {
|
||||
round_number: proposal.round_number as u32,
|
||||
proposal: proposal.proposal,
|
||||
digest: proposal.digest,
|
||||
sender: proposal.sender,
|
||||
digest_signature: proposal.digest_signature.signature,
|
||||
full_signature: proposal.full_signature.signature,
|
||||
}),
|
||||
bft::generic::LocalizedMessage::Vote(vote) => net::generic_message::SignedConsensusMessage::Vote(net::generic_message::SignedConsensusVote {
|
||||
sender: vote.sender,
|
||||
signature: vote.signature.signature,
|
||||
vote: match vote.vote {
|
||||
bft::generic::Vote::Prepare(r, h) => net::generic_message::ConsensusVote::Prepare(r as u32, h),
|
||||
bft::generic::Vote::Commit(r, h) => net::generic_message::ConsensusVote::Commit(r as u32, h),
|
||||
bft::generic::Vote::AdvanceRound(r) => net::generic_message::ConsensusVote::AdvanceRound(r as u32),
|
||||
}
|
||||
}),
|
||||
}),
|
||||
bft::generic::Communication::Auxiliary(justification) => net::generic_message::BftMessage::Auxiliary(justification.uncheck().into()),
|
||||
},
|
||||
parent_hash: self.parent_hash,
|
||||
};
|
||||
self.network.send_bft_message(network_message);
|
||||
Ok(::futures::AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> ::futures::Poll<(), E> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
struct Network(Arc<net::ConsensusService<Block>>);
|
||||
|
||||
impl super::Network for Network {
|
||||
type TableRouter = Router;
|
||||
fn table_router(&self, _table: Arc<SharedTable>) -> Self::TableRouter {
|
||||
Router {
|
||||
network: self.0.clone()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// spin up an instance of BFT agreement on the current thread's executor.
|
||||
// panics if there is no current thread executor.
|
||||
fn start_bft<F, C>(
|
||||
header: &Header,
|
||||
handle: reactor::Handle,
|
||||
client: &bft::Authorities<Block>,
|
||||
network: Arc<net::ConsensusService<Block>>,
|
||||
bft_service: &BftService<Block, F, C>,
|
||||
) where
|
||||
F: bft::ProposerFactory<Block> + 'static,
|
||||
F: bft::Environment<Block> + 'static,
|
||||
C: bft::BlockImport<Block> + bft::Authorities<Block> + 'static,
|
||||
<F as bft::ProposerFactory<Block>>::Error: ::std::fmt::Debug,
|
||||
F::Error: ::std::fmt::Debug,
|
||||
<F::Proposer as bft::Proposer<Block>>::Error: ::std::fmt::Display + Into<error::Error>,
|
||||
{
|
||||
let parent_hash = header.hash();
|
||||
if bft_service.live_agreement().map_or(false, |h| h == parent_hash) {
|
||||
return;
|
||||
}
|
||||
let authorities = match client.authorities(&BlockId::hash(parent_hash)) {
|
||||
Ok(authorities) => authorities,
|
||||
Err(e) => {
|
||||
debug!("Error reading authorities: {:?}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let input = Messages {
|
||||
network_stream: network.bft_messages(parent_hash),
|
||||
local_id: bft_service.local_id(),
|
||||
authorities,
|
||||
};
|
||||
|
||||
let output = BftSink { network: network, parent_hash: parent_hash, _e: Default::default() };
|
||||
match bft_service.build_upon(&header, input.map_err(Into::into), output) {
|
||||
Ok(Some(bft)) => handle.spawn(bft),
|
||||
let mut handle = LocalThreadHandle::current();
|
||||
match bft_service.build_upon(&header) {
|
||||
Ok(Some(bft)) => if let Err(e) = handle.spawn_local(Box::new(bft)) {
|
||||
debug!(target: "bft", "Couldn't initialize BFT agreement: {:?}", e);
|
||||
},
|
||||
Ok(None) => {},
|
||||
Err(e) => debug!(target: "bft", "BFT agreement error: {:?}", e),
|
||||
}
|
||||
@@ -231,54 +75,56 @@ pub struct Service {
|
||||
|
||||
impl Service {
|
||||
/// Create and start a new instance.
|
||||
pub fn new<A, C>(
|
||||
pub fn new<A, C, N>(
|
||||
client: Arc<C>,
|
||||
api: Arc<A>,
|
||||
network: Arc<net::ConsensusService<Block>>,
|
||||
network: N,
|
||||
transaction_pool: Arc<TransactionPool<A>>,
|
||||
thread_pool: ThreadPoolHandle,
|
||||
parachain_empty_duration: Duration,
|
||||
key: ed25519::Pair,
|
||||
) -> Service
|
||||
where
|
||||
A: LocalPolkadotApi + Send + Sync + 'static,
|
||||
C: BlockchainEvents<Block> + ChainHead<Block> + bft::BlockImport<Block> + bft::Authorities<Block> + Send + Sync + 'static,
|
||||
N: Network + Collators + Send + 'static,
|
||||
N::TableRouter: Send + 'static,
|
||||
<N::Collation as IntoFuture>::Future: Send + 'static,
|
||||
{
|
||||
let (signal, exit) = ::exit_future::signal();
|
||||
let thread = thread::spawn(move || {
|
||||
let mut core = reactor::Core::new().expect("tokio::Core could not be created");
|
||||
let mut runtime = LocalRuntime::new().expect("Could not create local runtime");
|
||||
let key = Arc::new(key);
|
||||
|
||||
let factory = ProposerFactory {
|
||||
client: api.clone(),
|
||||
transaction_pool: transaction_pool.clone(),
|
||||
network: Network(network.clone()),
|
||||
collators: NoCollators,
|
||||
collators: network.clone(),
|
||||
network,
|
||||
parachain_empty_duration,
|
||||
handle: core.handle(),
|
||||
handle: thread_pool,
|
||||
};
|
||||
let bft_service = Arc::new(BftService::new(client.clone(), key, factory));
|
||||
|
||||
let notifications = {
|
||||
let handle = core.handle();
|
||||
let network = network.clone();
|
||||
let client = client.clone();
|
||||
let bft_service = bft_service.clone();
|
||||
|
||||
client.import_notification_stream().for_each(move |notification| {
|
||||
if notification.is_new_best {
|
||||
start_bft(¬ification.header, handle.clone(), &*client, network.clone(), &*bft_service);
|
||||
start_bft(¬ification.header, &*bft_service);
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
};
|
||||
|
||||
let interval = reactor::Interval::new_at(
|
||||
let interval = Interval::new(
|
||||
Instant::now() + Duration::from_millis(TIMER_DELAY_MS),
|
||||
Duration::from_millis(TIMER_INTERVAL_MS),
|
||||
&core.handle(),
|
||||
).expect("it is always possible to create an interval with valid params");
|
||||
);
|
||||
|
||||
let mut prev_best = match client.best_block_header() {
|
||||
Ok(header) => header.blake2_256(),
|
||||
Ok(header) => header.hash(),
|
||||
Err(e) => {
|
||||
warn!("Cant's start consensus service. Error reading best block header: {:?}", e);
|
||||
return;
|
||||
@@ -288,15 +134,13 @@ impl Service {
|
||||
let timed = {
|
||||
let c = client.clone();
|
||||
let s = bft_service.clone();
|
||||
let n = network.clone();
|
||||
let handle = core.handle();
|
||||
|
||||
interval.map_err(|e| debug!("Timer error: {:?}", e)).for_each(move |_| {
|
||||
if let Ok(best_block) = c.best_block_header() {
|
||||
let hash = best_block.blake2_256();
|
||||
let hash = best_block.hash();
|
||||
if hash == prev_best {
|
||||
debug!("Starting consensus round after a timeout");
|
||||
start_bft(&best_block, handle.clone(), &*c, n.clone(), &*s);
|
||||
start_bft(&best_block, &*s);
|
||||
}
|
||||
prev_best = hash;
|
||||
}
|
||||
@@ -304,9 +148,9 @@ impl Service {
|
||||
})
|
||||
};
|
||||
|
||||
core.handle().spawn(notifications);
|
||||
core.handle().spawn(timed);
|
||||
if let Err(e) = core.run(exit) {
|
||||
runtime.spawn(notifications);
|
||||
runtime.spawn(timed);
|
||||
if let Err(e) = runtime.block_on(exit) {
|
||||
debug!("BFT event loop error {:?}", e);
|
||||
}
|
||||
});
|
||||
@@ -328,42 +172,3 @@ impl Drop for Service {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collators implementation which never collates anything.
|
||||
// TODO: do a real implementation.
|
||||
#[derive(Clone, Copy)]
|
||||
struct NoCollators;
|
||||
|
||||
impl ::collation::Collators for NoCollators {
|
||||
type Error = ();
|
||||
type Collation = future::Empty<::collation::Collation, ()>;
|
||||
|
||||
fn collate(&self, _parachain: ParaId, _relay_parent: Hash) -> Self::Collation {
|
||||
future::empty()
|
||||
}
|
||||
|
||||
fn note_bad_collator(&self, _collator: AccountId) { }
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Router {
|
||||
network: Arc<net::ConsensusService<Block>>,
|
||||
}
|
||||
|
||||
impl TableRouter for Router {
|
||||
type Error = Canceled;
|
||||
type FetchCandidate = future::Empty<BlockData, Self::Error>;
|
||||
type FetchExtrinsic = future::FutureResult<Extrinsic, Self::Error>;
|
||||
|
||||
fn local_candidate_data(&self, _hash: Hash, _block_data: BlockData, _extrinsic: Extrinsic) {
|
||||
// TODO
|
||||
}
|
||||
|
||||
fn fetch_block_data(&self, _candidate: &CandidateReceipt) -> Self::FetchCandidate {
|
||||
future::empty()
|
||||
}
|
||||
|
||||
fn fetch_extrinsic_data(&self, _candidate: &CandidateReceipt) -> Self::FetchExtrinsic {
|
||||
future::ok(Extrinsic)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,11 +21,9 @@ use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use table::{self, Table, Context as TableContextTrait};
|
||||
use table::generic::Statement as GenericStatement;
|
||||
use collation::Collation;
|
||||
use polkadot_primitives::Hash;
|
||||
use polkadot_primitives::{Hash, SessionKey};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt};
|
||||
use primitives::AuthorityId;
|
||||
|
||||
use parking_lot::Mutex;
|
||||
use futures::{future, prelude::*};
|
||||
@@ -36,6 +34,8 @@ use self::includable::IncludabilitySender;
|
||||
mod includable;
|
||||
|
||||
pub use self::includable::Includable;
|
||||
pub use table::{SignedStatement, Statement};
|
||||
pub use table::generic::Statement as GenericStatement;
|
||||
|
||||
struct TableContext {
|
||||
parent_hash: Hash,
|
||||
@@ -44,11 +44,11 @@ struct TableContext {
|
||||
}
|
||||
|
||||
impl table::Context for TableContext {
|
||||
fn is_member_of(&self, authority: &AuthorityId, group: &ParaId) -> bool {
|
||||
fn is_member_of(&self, authority: &SessionKey, group: &ParaId) -> bool {
|
||||
self.groups.get(group).map_or(false, |g| g.validity_guarantors.contains(authority))
|
||||
}
|
||||
|
||||
fn is_availability_guarantor_of(&self, authority: &AuthorityId, group: &ParaId) -> bool {
|
||||
fn is_availability_guarantor_of(&self, authority: &SessionKey, group: &ParaId) -> bool {
|
||||
self.groups.get(group).map_or(false, |g| g.availability_guarantors.contains(authority))
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ impl table::Context for TableContext {
|
||||
}
|
||||
|
||||
impl TableContext {
|
||||
fn local_id(&self) -> AuthorityId {
|
||||
fn local_id(&self) -> SessionKey {
|
||||
self.key.public().into()
|
||||
}
|
||||
|
||||
@@ -76,14 +76,6 @@ impl TableContext {
|
||||
}
|
||||
}
|
||||
|
||||
/// Source of statements
|
||||
pub enum StatementSource {
|
||||
/// Locally produced statement.
|
||||
Local,
|
||||
/// Received statement from remote source, with optional sender.
|
||||
Remote(Option<AuthorityId>),
|
||||
}
|
||||
|
||||
// A shared table object.
|
||||
struct SharedTableInner {
|
||||
table: Table<TableContext>,
|
||||
@@ -96,28 +88,21 @@ struct SharedTableInner {
|
||||
impl SharedTableInner {
|
||||
// Import a single statement. Provide a handle to a table router and a function
|
||||
// used to determine if a referenced candidate is valid.
|
||||
fn import_statement<R: TableRouter, C: FnMut(Collation) -> bool>(
|
||||
//
|
||||
// the statement producer, if any, will produce only statements concerning the same candidate
|
||||
// as the one just imported
|
||||
fn import_remote_statement<R: TableRouter>(
|
||||
&mut self,
|
||||
context: &TableContext,
|
||||
router: &R,
|
||||
statement: table::SignedStatement,
|
||||
statement_source: StatementSource,
|
||||
check_candidate: C,
|
||||
) -> StatementProducer<
|
||||
) -> Option<StatementProducer<
|
||||
<R::FetchCandidate as IntoFuture>::Future,
|
||||
<R::FetchExtrinsic as IntoFuture>::Future,
|
||||
C,
|
||||
> {
|
||||
// this blank producer does nothing until we attach some futures
|
||||
// and set a candidate digest.
|
||||
let received_from = match statement_source {
|
||||
StatementSource::Local => return Default::default(),
|
||||
StatementSource::Remote(from) => from,
|
||||
};
|
||||
|
||||
let summary = match self.table.import_statement(context, statement, received_from) {
|
||||
>> {
|
||||
let summary = match self.table.import_statement(context, statement) {
|
||||
Some(summary) => summary,
|
||||
None => return Default::default(),
|
||||
None => return None,
|
||||
};
|
||||
|
||||
self.update_trackers(&summary.candidate, context);
|
||||
@@ -159,7 +144,6 @@ impl SharedTableInner {
|
||||
fetch_block_data,
|
||||
fetch_extrinsic,
|
||||
evaluate: checking_validity,
|
||||
check_candidate,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -167,10 +151,10 @@ impl SharedTableInner {
|
||||
None
|
||||
};
|
||||
|
||||
StatementProducer {
|
||||
work.map(|work| StatementProducer {
|
||||
produced_statements: Default::default(),
|
||||
work,
|
||||
}
|
||||
work
|
||||
})
|
||||
}
|
||||
|
||||
fn update_trackers(&mut self, candidate: &Hash, context: &TableContext) {
|
||||
@@ -199,71 +183,78 @@ pub struct ProducedStatements {
|
||||
}
|
||||
|
||||
/// Future that produces statements about a specific candidate.
|
||||
pub struct StatementProducer<D: Future, E: Future, C> {
|
||||
pub struct StatementProducer<D: Future, E: Future> {
|
||||
produced_statements: ProducedStatements,
|
||||
work: Option<Work<D, E, C>>,
|
||||
work: Work<D, E>,
|
||||
}
|
||||
|
||||
struct Work<D: Future, E: Future, C> {
|
||||
candidate_receipt: CandidateReceipt,
|
||||
fetch_block_data: future::Fuse<D>,
|
||||
fetch_extrinsic: Option<future::Fuse<E>>,
|
||||
evaluate: bool,
|
||||
check_candidate: C
|
||||
}
|
||||
|
||||
impl<D: Future, E: Future, C> Default for StatementProducer<D, E, C> {
|
||||
fn default() -> Self {
|
||||
StatementProducer {
|
||||
produced_statements: Default::default(),
|
||||
work: None,
|
||||
impl<D: Future, E: Future> StatementProducer<D, E> {
|
||||
/// Attach a function for verifying fetched collation to the statement producer.
|
||||
/// This will transform it into a future.
|
||||
///
|
||||
/// The collation-checking function should return `true` if known to be valid,
|
||||
/// `false` if known to be invalid, and `None` if unable to determine.
|
||||
pub fn prime<C: FnMut(Collation) -> Option<bool>>(self, check_candidate: C) -> PrimedStatementProducer<D, E, C> {
|
||||
PrimedStatementProducer {
|
||||
inner: self,
|
||||
check_candidate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D, E, C, Err> Future for StatementProducer<D, E, C>
|
||||
struct Work<D: Future, E: Future> {
|
||||
candidate_receipt: CandidateReceipt,
|
||||
fetch_block_data: future::Fuse<D>,
|
||||
fetch_extrinsic: Option<future::Fuse<E>>,
|
||||
evaluate: bool,
|
||||
}
|
||||
|
||||
/// Primed statement producer.
|
||||
pub struct PrimedStatementProducer<D: Future, E: Future, C> {
|
||||
inner: StatementProducer<D, E>,
|
||||
check_candidate: C,
|
||||
}
|
||||
|
||||
impl<D, E, C, Err> Future for PrimedStatementProducer<D, E, C>
|
||||
where
|
||||
D: Future<Item=BlockData,Error=Err>,
|
||||
E: Future<Item=Extrinsic,Error=Err>,
|
||||
C: FnMut(Collation) -> bool,
|
||||
C: FnMut(Collation) -> Option<bool>,
|
||||
{
|
||||
type Item = ProducedStatements;
|
||||
type Error = Err;
|
||||
|
||||
fn poll(&mut self) -> Poll<ProducedStatements, Err> {
|
||||
let work = match self.work {
|
||||
Some(ref mut work) => work,
|
||||
None => return Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))),
|
||||
};
|
||||
let work = &mut self.inner.work;
|
||||
|
||||
if let Async::Ready(block_data) = work.fetch_block_data.poll()? {
|
||||
self.produced_statements.block_data = Some(block_data.clone());
|
||||
self.inner.produced_statements.block_data = Some(block_data.clone());
|
||||
if work.evaluate {
|
||||
let is_good = (work.check_candidate)(Collation {
|
||||
let is_good = (self.check_candidate)(Collation {
|
||||
block_data,
|
||||
receipt: work.candidate_receipt.clone(),
|
||||
});
|
||||
|
||||
let hash = work.candidate_receipt.hash();
|
||||
self.produced_statements.validity = Some(if is_good {
|
||||
GenericStatement::Valid(hash)
|
||||
} else {
|
||||
GenericStatement::Invalid(hash)
|
||||
});
|
||||
self.inner.produced_statements.validity = match is_good {
|
||||
Some(true) => Some(GenericStatement::Valid(hash)),
|
||||
Some(false) => Some(GenericStatement::Invalid(hash)),
|
||||
None => None,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref mut fetch_extrinsic) = work.fetch_extrinsic {
|
||||
if let Async::Ready(extrinsic) = fetch_extrinsic.poll()? {
|
||||
self.produced_statements.extrinsic = Some(extrinsic);
|
||||
self.inner.produced_statements.extrinsic = Some(extrinsic);
|
||||
}
|
||||
}
|
||||
|
||||
let done = self.produced_statements.block_data.is_some() && {
|
||||
let done = self.inner.produced_statements.block_data.is_some() && {
|
||||
if work.evaluate {
|
||||
true
|
||||
} else if self.produced_statements.extrinsic.is_some() {
|
||||
self.produced_statements.availability =
|
||||
} else if self.inner.produced_statements.extrinsic.is_some() {
|
||||
self.inner.produced_statements.availability =
|
||||
Some(GenericStatement::Available(work.candidate_receipt.hash()));
|
||||
|
||||
true
|
||||
@@ -273,7 +264,7 @@ impl<D, E, C, Err> Future for StatementProducer<D, E, C>
|
||||
};
|
||||
|
||||
if done {
|
||||
Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default())))
|
||||
Ok(Async::Ready(::std::mem::replace(&mut self.inner.produced_statements, Default::default())))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
@@ -313,29 +304,60 @@ impl SharedTable {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the parent hash this table should hold statements localized to.
|
||||
pub fn consensus_parent_hash(&self) -> &Hash {
|
||||
&self.context.parent_hash
|
||||
}
|
||||
|
||||
/// Get the local validator session key.
|
||||
pub fn session_key(&self) -> SessionKey {
|
||||
self.context.local_id()
|
||||
}
|
||||
|
||||
/// Get group info.
|
||||
pub fn group_info(&self) -> &HashMap<ParaId, GroupInfo> {
|
||||
&self.context.groups
|
||||
}
|
||||
|
||||
/// Import a single statement. Provide a handle to a table router
|
||||
/// for dispatching any other requests which come up.
|
||||
pub fn import_statement<R: TableRouter, C: FnMut(Collation) -> bool>(
|
||||
/// Import a single statement with remote source, whose signature has already been checked.
|
||||
///
|
||||
/// The statement producer, if any, will produce only statements concerning the same candidate
|
||||
/// as the one just imported
|
||||
pub fn import_remote_statement<R: TableRouter>(
|
||||
&self,
|
||||
router: &R,
|
||||
statement: table::SignedStatement,
|
||||
received_from: StatementSource,
|
||||
check_candidate: C,
|
||||
) -> StatementProducer<<R::FetchCandidate as IntoFuture>::Future, <R::FetchExtrinsic as IntoFuture>::Future, C> {
|
||||
self.inner.lock().import_statement(&*self.context, router, statement, received_from, check_candidate)
|
||||
) -> Option<StatementProducer<
|
||||
<R::FetchCandidate as IntoFuture>::Future,
|
||||
<R::FetchExtrinsic as IntoFuture>::Future,
|
||||
>> {
|
||||
self.inner.lock().import_remote_statement(&*self.context, router, statement)
|
||||
}
|
||||
|
||||
/// Import many statements at once.
|
||||
///
|
||||
/// Provide an iterator yielding remote, pre-checked statements.
|
||||
///
|
||||
/// The statement producer, if any, will produce only statements concerning the same candidate
|
||||
/// as the one just imported
|
||||
pub fn import_remote_statements<R, I, U>(&self, router: &R, iterable: I) -> U
|
||||
where
|
||||
R: TableRouter,
|
||||
I: IntoIterator<Item=table::SignedStatement>,
|
||||
U: ::std::iter::FromIterator<Option<StatementProducer<
|
||||
<R::FetchCandidate as IntoFuture>::Future,
|
||||
<R::FetchExtrinsic as IntoFuture>::Future,
|
||||
>>>,
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
iterable.into_iter().map(move |statement| {
|
||||
inner.import_remote_statement(&*self.context, router, statement)
|
||||
}).collect()
|
||||
}
|
||||
|
||||
/// Sign and import a local statement.
|
||||
pub fn sign_and_import<R: TableRouter>(
|
||||
&self,
|
||||
router: &R,
|
||||
statement: table::Statement,
|
||||
) {
|
||||
pub fn sign_and_import(&self, statement: table::Statement) -> SignedStatement {
|
||||
let proposed_digest = match statement {
|
||||
GenericStatement::Candidate(ref c) => Some(c.hash()),
|
||||
_ => None,
|
||||
@@ -348,36 +370,8 @@ impl SharedTable {
|
||||
inner.proposed_digest = proposed_digest;
|
||||
}
|
||||
|
||||
let producer = inner.import_statement(
|
||||
&*self.context,
|
||||
router,
|
||||
signed_statement,
|
||||
StatementSource::Local,
|
||||
|_| true,
|
||||
);
|
||||
|
||||
assert!(producer.work.is_none(), "local statement import never leads to additional work; qed");
|
||||
}
|
||||
|
||||
/// Import many statements at once.
|
||||
///
|
||||
/// Provide an iterator yielding pairs of (statement, statement_source).
|
||||
pub fn import_statements<R, I, C, U>(&self, router: &R, iterable: I) -> U
|
||||
where
|
||||
R: TableRouter,
|
||||
I: IntoIterator<Item=(table::SignedStatement, StatementSource, C)>,
|
||||
C: FnMut(Collation) -> bool,
|
||||
U: ::std::iter::FromIterator<StatementProducer<
|
||||
<R::FetchCandidate as IntoFuture>::Future,
|
||||
<R::FetchExtrinsic as IntoFuture>::Future,
|
||||
C,
|
||||
>>,
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
iterable.into_iter().map(move |(statement, statement_source, check_candidate)| {
|
||||
inner.import_statement(&*self.context, router, statement, statement_source, check_candidate)
|
||||
}).collect()
|
||||
inner.table.import_statement(&*self.context, signed_statement.clone());
|
||||
signed_statement
|
||||
}
|
||||
|
||||
/// Execute a closure using a specific candidate.
|
||||
@@ -406,15 +400,10 @@ impl SharedTable {
|
||||
}
|
||||
|
||||
/// Get all witnessed misbehavior.
|
||||
pub fn get_misbehavior(&self) -> HashMap<AuthorityId, table::Misbehavior> {
|
||||
pub fn get_misbehavior(&self) -> HashMap<SessionKey, table::Misbehavior> {
|
||||
self.inner.lock().table.get_misbehavior().clone()
|
||||
}
|
||||
|
||||
/// Fill a statement batch.
|
||||
pub fn fill_batch<B: table::StatementBatch>(&self, batch: &mut B) {
|
||||
self.inner.lock().table.fill_batch(batch);
|
||||
}
|
||||
|
||||
/// Track includability of a given set of candidate hashes.
|
||||
pub fn track_includability<I>(&self, iterable: I) -> Includable
|
||||
where I: IntoIterator<Item=Hash>
|
||||
@@ -446,17 +435,12 @@ mod tests {
|
||||
type FetchCandidate = ::futures::future::Empty<BlockData,()>;
|
||||
type FetchExtrinsic = ::futures::future::Empty<Extrinsic,()>;
|
||||
|
||||
/// Note local candidate data, making it available on the network to other validators.
|
||||
fn local_candidate_data(&self, _hash: Hash, _block_data: BlockData, _extrinsic: Extrinsic) {
|
||||
fn local_candidate(&self, _candidate: CandidateReceipt, _block_data: BlockData, _extrinsic: Extrinsic) {
|
||||
|
||||
}
|
||||
|
||||
/// Fetch block data for a specific candidate.
|
||||
fn fetch_block_data(&self, _candidate: &CandidateReceipt) -> Self::FetchCandidate {
|
||||
::futures::future::empty()
|
||||
}
|
||||
|
||||
/// Fetch extrinsic data for a specific candidate.
|
||||
fn fetch_extrinsic_data(&self, _candidate: &CandidateReceipt) -> Self::FetchExtrinsic {
|
||||
::futures::future::empty()
|
||||
}
|
||||
@@ -490,6 +474,7 @@ mod tests {
|
||||
balance_uploads: Vec::new(),
|
||||
egress_queue_roots: Vec::new(),
|
||||
fees: 1_000_000,
|
||||
block_data_hash: [2; 32].into(),
|
||||
};
|
||||
|
||||
let candidate_statement = GenericStatement::Candidate(candidate);
|
||||
@@ -501,15 +486,13 @@ mod tests {
|
||||
sender: validity_other,
|
||||
};
|
||||
|
||||
let producer = shared_table.import_statement(
|
||||
let producer = shared_table.import_remote_statement(
|
||||
&DummyRouter,
|
||||
signed_statement,
|
||||
StatementSource::Remote(None),
|
||||
|_| true,
|
||||
);
|
||||
).expect("candidate and local validity group are same");
|
||||
|
||||
assert!(producer.work.is_some(), "candidate and local validity group are same");
|
||||
assert!(producer.work.as_ref().unwrap().evaluate, "should evaluate validity");
|
||||
assert!(producer.work.evaluate, "should evaluate validity");
|
||||
assert!(producer.work.fetch_extrinsic.is_none(), "should not fetch extrinsic");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -540,6 +523,7 @@ mod tests {
|
||||
balance_uploads: Vec::new(),
|
||||
egress_queue_roots: Vec::new(),
|
||||
fees: 1_000_000,
|
||||
block_data_hash: [2; 32].into(),
|
||||
};
|
||||
|
||||
let candidate_statement = GenericStatement::Candidate(candidate);
|
||||
@@ -551,15 +535,12 @@ mod tests {
|
||||
sender: validity_other,
|
||||
};
|
||||
|
||||
let producer = shared_table.import_statement(
|
||||
let producer = shared_table.import_remote_statement(
|
||||
&DummyRouter,
|
||||
signed_statement,
|
||||
StatementSource::Remote(None),
|
||||
|_| true,
|
||||
);
|
||||
).expect("should produce work");
|
||||
|
||||
assert!(producer.work.is_some(), "candidate and local availability group are same");
|
||||
assert!(producer.work.as_ref().unwrap().fetch_extrinsic.is_some(), "should fetch extrinsic when guaranteeing availability");
|
||||
assert!(!producer.work.as_ref().unwrap().evaluate, "should not evaluate validity");
|
||||
assert!(producer.work.fetch_extrinsic.is_some(), "should fetch extrinsic when guaranteeing availability");
|
||||
assert!(!producer.work.evaluate, "should not evaluate validity");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
[package]
|
||||
name = "polkadot-network"
|
||||
version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
description = "Polkadot-specific networking protocol"
|
||||
|
||||
[dependencies]
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
parking_lot = "0.4"
|
||||
polkadot-api = { path = "../api" }
|
||||
polkadot-consensus = { path = "../consensus" }
|
||||
polkadot-primitives = { path = "../primitives" }
|
||||
substrate-bft = { path = "../../substrate/bft" }
|
||||
substrate-codec = { path = "../../substrate/codec" }
|
||||
substrate-network = { path = "../../substrate/network" }
|
||||
substrate-primitives = { path = "../../substrate/primitives" }
|
||||
ed25519 = { path = "../../substrate/ed25519" }
|
||||
futures = "0.1"
|
||||
tokio = "0.1.7"
|
||||
log = "0.4"
|
||||
@@ -0,0 +1,313 @@
|
||||
// Copyright 2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Polkadot.
|
||||
|
||||
// Polkadot is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! The "consensus" networking code built on top of the base network service.
|
||||
//! This fulfills the `polkadot_consensus::Network` trait, providing a hook to be called
|
||||
//! each time consensus begins on a new chain head.
|
||||
|
||||
use bft;
|
||||
use ed25519;
|
||||
use substrate_network::{self as net, generic_message as msg};
|
||||
use substrate_network::consensus_gossip::ConsensusMessage;
|
||||
use polkadot_api::{PolkadotApi, LocalPolkadotApi};
|
||||
use polkadot_consensus::{Network, SharedTable, Collators, Collation};
|
||||
use polkadot_primitives::{AccountId, Block, Hash, SessionKey};
|
||||
use polkadot_primitives::parachain::Id as ParaId;
|
||||
|
||||
use futures::{future, prelude::*};
|
||||
use futures::sync::mpsc;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use super::{Message, NetworkService, Knowledge, CurrentConsensus};
|
||||
use router::Router;
|
||||
|
||||
/// Sink for output BFT messages.
|
||||
pub struct BftSink<E> {
|
||||
network: Arc<NetworkService>,
|
||||
parent_hash: Hash,
|
||||
_marker: ::std::marker::PhantomData<E>,
|
||||
}
|
||||
|
||||
impl<E> Sink for BftSink<E> {
|
||||
type SinkItem = bft::Communication<Block>;
|
||||
// TODO: replace this with the ! type when that's stabilized
|
||||
type SinkError = E;
|
||||
|
||||
fn start_send(&mut self, message: bft::Communication<Block>) -> ::futures::StartSend<bft::Communication<Block>, E> {
|
||||
let network_message = net::LocalizedBftMessage {
|
||||
message: match message {
|
||||
bft::generic::Communication::Consensus(c) => msg::BftMessage::Consensus(match c {
|
||||
bft::generic::LocalizedMessage::Propose(proposal) => msg::SignedConsensusMessage::Propose(msg::SignedConsensusProposal {
|
||||
round_number: proposal.round_number as u32,
|
||||
proposal: proposal.proposal,
|
||||
digest: proposal.digest,
|
||||
sender: proposal.sender,
|
||||
digest_signature: proposal.digest_signature.signature,
|
||||
full_signature: proposal.full_signature.signature,
|
||||
}),
|
||||
bft::generic::LocalizedMessage::Vote(vote) => msg::SignedConsensusMessage::Vote(msg::SignedConsensusVote {
|
||||
sender: vote.sender,
|
||||
signature: vote.signature.signature,
|
||||
vote: match vote.vote {
|
||||
bft::generic::Vote::Prepare(r, h) => msg::ConsensusVote::Prepare(r as u32, h),
|
||||
bft::generic::Vote::Commit(r, h) => msg::ConsensusVote::Commit(r as u32, h),
|
||||
bft::generic::Vote::AdvanceRound(r) => msg::ConsensusVote::AdvanceRound(r as u32),
|
||||
}
|
||||
}),
|
||||
}),
|
||||
bft::generic::Communication::Auxiliary(justification) => msg::BftMessage::Auxiliary(justification.uncheck().into()),
|
||||
},
|
||||
parent_hash: self.parent_hash,
|
||||
};
|
||||
self.network.with_spec(
|
||||
move |spec, ctx| spec.consensus_gossip.multicast_bft_message(ctx, network_message)
|
||||
);
|
||||
Ok(::futures::AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> ::futures::Poll<(), E> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
// check signature and authority validity of message.
|
||||
fn process_bft_message(msg: msg::LocalizedBftMessage<Block, Hash>, local_id: &SessionKey, authorities: &[SessionKey]) -> Result<Option<bft::Communication<Block>>, bft::Error> {
|
||||
Ok(Some(match msg.message {
|
||||
msg::BftMessage::Consensus(c) => bft::generic::Communication::Consensus(match c {
|
||||
msg::SignedConsensusMessage::Propose(proposal) => bft::generic::LocalizedMessage::Propose({
|
||||
if &proposal.sender == local_id { return Ok(None) }
|
||||
let proposal = bft::generic::LocalizedProposal {
|
||||
round_number: proposal.round_number as usize,
|
||||
proposal: proposal.proposal,
|
||||
digest: proposal.digest,
|
||||
sender: proposal.sender,
|
||||
digest_signature: ed25519::LocalizedSignature {
|
||||
signature: proposal.digest_signature,
|
||||
signer: ed25519::Public(proposal.sender.into()),
|
||||
},
|
||||
full_signature: ed25519::LocalizedSignature {
|
||||
signature: proposal.full_signature,
|
||||
signer: ed25519::Public(proposal.sender.into()),
|
||||
}
|
||||
};
|
||||
bft::check_proposal(authorities, &msg.parent_hash, &proposal)?;
|
||||
|
||||
trace!(target: "bft", "importing proposal message for round {} from {}", proposal.round_number, Hash::from(proposal.sender.0));
|
||||
proposal
|
||||
}),
|
||||
msg::SignedConsensusMessage::Vote(vote) => bft::generic::LocalizedMessage::Vote({
|
||||
if &vote.sender == local_id { return Ok(None) }
|
||||
let vote = bft::generic::LocalizedVote {
|
||||
sender: vote.sender,
|
||||
signature: ed25519::LocalizedSignature {
|
||||
signature: vote.signature,
|
||||
signer: ed25519::Public(vote.sender.0),
|
||||
},
|
||||
vote: match vote.vote {
|
||||
msg::ConsensusVote::Prepare(r, h) => bft::generic::Vote::Prepare(r as usize, h),
|
||||
msg::ConsensusVote::Commit(r, h) => bft::generic::Vote::Commit(r as usize, h),
|
||||
msg::ConsensusVote::AdvanceRound(r) => bft::generic::Vote::AdvanceRound(r as usize),
|
||||
}
|
||||
};
|
||||
bft::check_vote::<Block>(authorities, &msg.parent_hash, &vote)?;
|
||||
|
||||
trace!(target: "bft", "importing vote {:?} from {}", vote.vote, Hash::from(vote.sender.0));
|
||||
vote
|
||||
}),
|
||||
}),
|
||||
msg::BftMessage::Auxiliary(a) => {
|
||||
let justification = bft::UncheckedJustification::from(a);
|
||||
// TODO: get proper error
|
||||
let justification: Result<_, bft::Error> = bft::check_prepare_justification::<Block>(authorities, msg.parent_hash, justification)
|
||||
.map_err(|_| bft::ErrorKind::InvalidJustification.into());
|
||||
bft::generic::Communication::Auxiliary(justification?)
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
// task that processes all gossipped consensus messages,
|
||||
// checking signatures
|
||||
struct MessageProcessTask<P: PolkadotApi> {
|
||||
inner_stream: mpsc::UnboundedReceiver<ConsensusMessage<Block>>,
|
||||
bft_messages: mpsc::UnboundedSender<bft::Communication<Block>>,
|
||||
validators: Vec<SessionKey>,
|
||||
table_router: Router<P>,
|
||||
}
|
||||
|
||||
impl<P: LocalPolkadotApi + Send + Sync + 'static> MessageProcessTask<P> {
|
||||
fn process_message(&self, msg: ConsensusMessage<Block>) -> Option<Async<()>> {
|
||||
match msg {
|
||||
ConsensusMessage::Bft(msg) => {
|
||||
let local_id = self.table_router.session_key();
|
||||
match process_bft_message(msg, &local_id, &self.validators[..]) {
|
||||
Ok(Some(msg)) => {
|
||||
if let Err(_) = self.bft_messages.unbounded_send(msg) {
|
||||
// if the BFT receiving stream has ended then
|
||||
// we should just bail.
|
||||
trace!(target: "bft", "BFT message stream appears to have closed");
|
||||
return Some(Async::Ready(()));
|
||||
}
|
||||
}
|
||||
Ok(None) => {} // ignored local message
|
||||
Err(e) => {
|
||||
debug!("Message validation failed: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
ConsensusMessage::ChainSpecific(msg, _) => {
|
||||
if let Ok(Message::Statement(parent_hash, statement)) = ::serde_json::from_slice(&msg) {
|
||||
if ::polkadot_consensus::check_statement(&statement.statement, &statement.signature, statement.sender, &parent_hash) {
|
||||
self.table_router.import_statement(statement);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: LocalPolkadotApi + Send + Sync + 'static> Future for MessageProcessTask<P> {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
loop {
|
||||
match self.inner_stream.poll() {
|
||||
Ok(Async::Ready(Some(val))) => if let Some(async) = self.process_message(val) {
|
||||
return Ok(async);
|
||||
},
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(())),
|
||||
Ok(Async::NotReady) => (),
|
||||
Err(e) => debug!(target: "p_net", "Error getting consensus message: {:?}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Input stream from the consensus network.
|
||||
pub struct InputAdapter {
|
||||
input: mpsc::UnboundedReceiver<bft::Communication<Block>>,
|
||||
}
|
||||
|
||||
impl Stream for InputAdapter {
|
||||
type Item = bft::Communication<Block>;
|
||||
type Error = ::polkadot_consensus::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
match self.input.poll() {
|
||||
Err(_) | Ok(Async::Ready(None)) => Err(bft::InputStreamConcluded.into()),
|
||||
Ok(x) => Ok(x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper around the network service
|
||||
pub struct ConsensusNetwork<P> {
|
||||
network: Arc<NetworkService>,
|
||||
api: Arc<P>,
|
||||
}
|
||||
|
||||
impl<P> ConsensusNetwork<P> {
|
||||
/// Create a new consensus networking object.
|
||||
pub fn new(network: Arc<NetworkService>, api: Arc<P>) -> Self {
|
||||
ConsensusNetwork { network, api }
|
||||
}
|
||||
}
|
||||
|
||||
impl<P> Clone for ConsensusNetwork<P> {
|
||||
fn clone(&self) -> Self {
|
||||
ConsensusNetwork {
|
||||
network: self.network.clone(),
|
||||
api: self.api.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A long-lived network which can create parachain statement and BFT message routing processes on demand.
|
||||
impl<P: LocalPolkadotApi + Send + Sync + 'static> Network for ConsensusNetwork<P> {
|
||||
type TableRouter = Router<P>;
|
||||
/// The input stream of BFT messages. Should never logically conclude.
|
||||
type Input = InputAdapter;
|
||||
/// The output sink of BFT messages. Messages sent here should eventually pass to all
|
||||
/// current validators.
|
||||
type Output = BftSink<::polkadot_consensus::Error>;
|
||||
|
||||
/// Instantiate a table router using the given shared table.
|
||||
fn communication_for(&self, validators: &[SessionKey], table: Arc<SharedTable>, task_executor: TaskExecutor) -> (Self::TableRouter, Self::Input, Self::Output) {
|
||||
let parent_hash = table.consensus_parent_hash().clone();
|
||||
|
||||
let sink = BftSink {
|
||||
network: self.network.clone(),
|
||||
parent_hash,
|
||||
_marker: Default::default(),
|
||||
};
|
||||
|
||||
let (bft_send, bft_recv) = mpsc::unbounded();
|
||||
|
||||
let knowledge = Arc::new(Mutex::new(Knowledge::new()));
|
||||
|
||||
let local_session_key = table.session_key();
|
||||
let table_router = Router::new(
|
||||
table,
|
||||
self.network.clone(),
|
||||
self.api.clone(),
|
||||
task_executor.clone(),
|
||||
parent_hash,
|
||||
knowledge.clone(),
|
||||
);
|
||||
|
||||
// spin up a task in the background that processes all incoming statements
|
||||
// TODO: propagate statements on a timer?
|
||||
let process_task = self.network.with_spec(|spec, ctx| {
|
||||
spec.new_consensus(ctx, CurrentConsensus {
|
||||
knowledge,
|
||||
parent_hash,
|
||||
local_session_key,
|
||||
session_keys: Default::default(),
|
||||
});
|
||||
|
||||
MessageProcessTask {
|
||||
inner_stream: spec.consensus_gossip.messages_for(parent_hash),
|
||||
bft_messages: bft_send,
|
||||
validators: validators.to_vec(),
|
||||
table_router: table_router.clone(),
|
||||
}
|
||||
});
|
||||
|
||||
match process_task {
|
||||
Some(task) => task_executor.spawn(task),
|
||||
None => warn!(target: "p_net", "Cannot process incoming messages: network appears to be down"),
|
||||
}
|
||||
|
||||
(table_router, InputAdapter { input: bft_recv }, sink)
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: LocalPolkadotApi + Send + Sync + 'static> Collators for ConsensusNetwork<P> {
|
||||
type Error = ();
|
||||
type Collation = future::Empty<Collation, ()>;
|
||||
|
||||
fn collate(&self, _parachain: ParaId, _relay_parent: Hash) -> Self::Collation {
|
||||
future::empty()
|
||||
}
|
||||
|
||||
fn note_bad_collator(&self, _collator: AccountId) { }
|
||||
}
|
||||
@@ -0,0 +1,488 @@
|
||||
// Copyright 2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Polkadot.
|
||||
|
||||
// Polkadot is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Polkadot-specific network implementation.
|
||||
//!
|
||||
//! This manages gossip of consensus messages for BFT and for parachain statements,
|
||||
//! parachain block and extrinsic data fetching, communication between collators and validators,
|
||||
//! and more.
|
||||
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
extern crate serde_json;
|
||||
|
||||
extern crate substrate_bft as bft;
|
||||
extern crate substrate_codec as codec;
|
||||
extern crate substrate_network;
|
||||
extern crate substrate_primitives;
|
||||
|
||||
extern crate polkadot_api;
|
||||
extern crate polkadot_consensus;
|
||||
extern crate polkadot_primitives;
|
||||
|
||||
extern crate ed25519;
|
||||
extern crate futures;
|
||||
extern crate parking_lot;
|
||||
extern crate tokio;
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
mod router;
|
||||
pub mod consensus;
|
||||
|
||||
use codec::Slicable;
|
||||
use futures::sync::oneshot;
|
||||
use parking_lot::Mutex;
|
||||
use polkadot_consensus::{Statement, SignedStatement, GenericStatement};
|
||||
use polkadot_primitives::{Block, SessionKey, Hash};
|
||||
use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt};
|
||||
use substrate_network::{PeerId, RequestId, Context};
|
||||
use substrate_network::consensus_gossip::ConsensusGossip;
|
||||
use substrate_network::{message, generic_message};
|
||||
use substrate_network::specialization::Specialization;
|
||||
use substrate_network::StatusMessage as GenericFullStatus;
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
/// Polkadot protocol id.
|
||||
pub const DOT_PROTOCOL_ID: ::substrate_network::ProtocolId = *b"dot";
|
||||
|
||||
type FullStatus = GenericFullStatus<Block>;
|
||||
|
||||
/// Specialization of the network service for the polkadot protocol.
|
||||
pub type NetworkService = ::substrate_network::Service<Block, PolkadotProtocol>;
|
||||
|
||||
/// Status of a Polkadot node.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
pub struct Status {
|
||||
collating_for: Option<ParaId>,
|
||||
}
|
||||
|
||||
impl Slicable for Status {
|
||||
fn encode(&self) -> Vec<u8> {
|
||||
let mut v = Vec::new();
|
||||
match self.collating_for {
|
||||
Some(ref id) => {
|
||||
v.push(1);
|
||||
id.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
None => {
|
||||
v.push(0);
|
||||
}
|
||||
}
|
||||
v
|
||||
}
|
||||
|
||||
fn decode<I: ::codec::Input>(input: &mut I) -> Option<Self> {
|
||||
let collating_for = match input.read_byte()? {
|
||||
0 => None,
|
||||
1 => Some(ParaId::decode(input)?),
|
||||
_ => return None,
|
||||
};
|
||||
Some(Status { collating_for })
|
||||
}
|
||||
}
|
||||
|
||||
struct BlockDataRequest {
|
||||
attempted_peers: HashSet<SessionKey>,
|
||||
consensus_parent: Hash,
|
||||
candidate_hash: Hash,
|
||||
block_data_hash: Hash,
|
||||
sender: oneshot::Sender<BlockData>,
|
||||
}
|
||||
|
||||
struct PeerInfo {
|
||||
status: Status,
|
||||
validator: bool,
|
||||
session_keys: HashMap<Hash, SessionKey>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct KnowledgeEntry {
|
||||
knows_block_data: Vec<SessionKey>,
|
||||
knows_extrinsic: Vec<SessionKey>,
|
||||
block_data: Option<BlockData>,
|
||||
extrinsic: Option<Extrinsic>,
|
||||
}
|
||||
|
||||
/// Tracks knowledge of peers.
|
||||
struct Knowledge {
|
||||
candidates: HashMap<Hash, KnowledgeEntry>,
|
||||
}
|
||||
|
||||
impl Knowledge {
|
||||
pub fn new() -> Self {
|
||||
Knowledge {
|
||||
candidates: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn note_statement(&mut self, from: SessionKey, statement: &Statement) {
|
||||
match *statement {
|
||||
GenericStatement::Candidate(ref c) => {
|
||||
let mut entry = self.candidates.entry(c.hash()).or_insert_with(Default::default);
|
||||
entry.knows_block_data.push(from);
|
||||
entry.knows_extrinsic.push(from);
|
||||
}
|
||||
GenericStatement::Available(ref hash) => {
|
||||
let mut entry = self.candidates.entry(*hash).or_insert_with(Default::default);
|
||||
entry.knows_block_data.push(from);
|
||||
entry.knows_extrinsic.push(from);
|
||||
}
|
||||
GenericStatement::Valid(ref hash) | GenericStatement::Invalid(ref hash) => self.candidates.entry(*hash)
|
||||
.or_insert_with(Default::default)
|
||||
.knows_block_data
|
||||
.push(from),
|
||||
}
|
||||
}
|
||||
|
||||
fn note_candidate(&mut self, hash: Hash, block_data: Option<BlockData>, extrinsic: Option<Extrinsic>) {
|
||||
let entry = self.candidates.entry(hash).or_insert_with(Default::default);
|
||||
entry.block_data = entry.block_data.take().or(block_data);
|
||||
entry.extrinsic = entry.extrinsic.take().or(extrinsic);
|
||||
}
|
||||
}
|
||||
|
||||
struct CurrentConsensus {
|
||||
knowledge: Arc<Mutex<Knowledge>>,
|
||||
parent_hash: Hash,
|
||||
session_keys: HashMap<SessionKey, PeerId>,
|
||||
local_session_key: SessionKey,
|
||||
}
|
||||
|
||||
impl CurrentConsensus {
|
||||
// get locally stored block data for a candidate.
|
||||
fn block_data(&self, hash: &Hash) -> Option<BlockData> {
|
||||
self.knowledge.lock().candidates.get(hash)
|
||||
.and_then(|entry| entry.block_data.clone())
|
||||
}
|
||||
|
||||
fn peer_disconnected(&mut self, peer: &PeerInfo) {
|
||||
if let Some(key) = peer.session_keys.get(&self.parent_hash) {
|
||||
self.session_keys.remove(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Polkadot-specific messages, carried on the generic substrate protocol
/// as JSON-encoded `ChainSpecific` payloads.
#[derive(Serialize, Deserialize)]
pub enum Message {
	/// Signed statement, scoped to the given relay-chain parent hash.
	Statement(Hash, SignedStatement),
	/// Tell the peer your session key for the session at the given
	/// relay-chain parent hash.
	// TODO: do this with a random challenge protocol
	SessionKey(Hash, SessionKey),
	/// Requesting parachain block data by candidate hash.
	RequestBlockData(RequestId, Hash),
	/// Provide block data by candidate hash or nothing if unknown.
	BlockData(RequestId, Option<BlockData>),
}
|
||||
|
||||
fn send_polkadot_message(ctx: &mut Context<Block>, to: PeerId, message: Message) {
|
||||
let encoded = ::serde_json::to_vec(&message).expect("serialization of messages infallible; qed");
|
||||
ctx.send_message(to, generic_message::Message::ChainSpecific(encoded))
|
||||
}
|
||||
|
||||
/// Polkadot protocol attachment for substrate.
pub struct PolkadotProtocol {
	// Per-peer state for all connected peers.
	peers: HashMap<PeerId, PeerInfo>,
	// Gossip handler for BFT/consensus messages.
	consensus_gossip: ConsensusGossip<Block>,
	// Known collator peers, indexed by the parachain they collate for.
	collators: HashMap<ParaId, Vec<PeerId>>,
	// The parachain we ourselves collate for, if any (advertised in `Status`).
	collating_for: Option<ParaId>,
	// State of the consensus session currently in progress, if any.
	live_consensus: Option<CurrentConsensus>,
	// Block-data requests currently assigned to a peer, keyed by (request id, peer).
	in_flight: HashMap<(RequestId, PeerId), BlockDataRequest>,
	// Block-data requests not yet assigned to any peer.
	pending: Vec<BlockDataRequest>,
	// Next request id to allocate; monotonically increasing.
	next_req_id: u64,
}
|
||||
|
||||
impl PolkadotProtocol {
	/// Instantiate a polkadot protocol handler.
	pub fn new() -> Self {
		PolkadotProtocol {
			peers: HashMap::new(),
			consensus_gossip: ConsensusGossip::new(),
			collators: HashMap::new(),
			collating_for: None,
			live_consensus: None,
			in_flight: HashMap::new(),
			pending: Vec::new(),
			next_req_id: 1,
		}
	}

	/// Send a statement to a validator.
	///
	/// Currently multicast via gossip rather than sent point-to-point,
	/// so the `_val` target key is unused for now.
	fn send_statement(&mut self, ctx: &mut Context<Block>, _val: SessionKey, parent_hash: Hash, statement: SignedStatement) {
		// TODO: something more targeted than gossip.
		let raw = ::serde_json::to_vec(&Message::Statement(parent_hash, statement))
			.expect("message serialization infallible; qed");

		self.consensus_gossip.multicast_chain_specific(ctx, raw, parent_hash);
	}

	/// Fetch block data by candidate receipt.
	///
	/// Queues a request to be served by peers that have signalled knowledge
	/// of the candidate; the returned receiver resolves once a peer answers
	/// with data matching `block_data_hash` (see `on_block_data`).
	fn fetch_block_data(&mut self, ctx: &mut Context<Block>, candidate: &CandidateReceipt, relay_parent: Hash) -> oneshot::Receiver<BlockData> {
		let (tx, rx) = oneshot::channel();

		self.pending.push(BlockDataRequest {
			attempted_peers: Default::default(),
			consensus_parent: relay_parent,
			candidate_hash: candidate.hash(),
			block_data_hash: candidate.block_data_hash,
			sender: tx,
		});

		self.dispatch_pending_requests(ctx);
		rx
	}

	/// Note new consensus session.
	///
	/// Broadcasts our session key to all validator peers, seeds the new
	/// session's key->peer map from keys peers already sent for this parent
	/// hash, and forgets peer keys belonging to the previous session.
	fn new_consensus(&mut self, ctx: &mut Context<Block>, mut consensus: CurrentConsensus) {
		let parent_hash = consensus.parent_hash;
		let old_parent = self.live_consensus.as_ref().map(|c| c.parent_hash);

		for (id, info) in self.peers.iter_mut().filter(|&(_, ref info)| info.validator) {
			send_polkadot_message(
				ctx,
				*id,
				Message::SessionKey(parent_hash, consensus.local_session_key)
			);

			if let Some(key) = info.session_keys.get(&parent_hash) {
				consensus.session_keys.insert(*key, *id);
			}

			if let Some(ref old_parent) = old_parent {
				info.session_keys.remove(old_parent);
			}
		}

		self.live_consensus = Some(consensus);
		self.consensus_gossip.collect_garbage(old_parent.as_ref());
	}

	// Try to assign each pending block-data request: answer locally when we
	// hold the data, otherwise ask the next known holder we haven't tried.
	// Requests for stale consensus sessions are dropped.
	fn dispatch_pending_requests(&mut self, ctx: &mut Context<Block>) {
		let consensus = match self.live_consensus {
			Some(ref mut c) => c,
			None => {
				// no live session: every queued request is stale.
				self.pending.clear();
				return;
			}
		};

		let knowledge = consensus.knowledge.lock();
		let mut new_pending = Vec::new();
		for mut pending in ::std::mem::replace(&mut self.pending, Vec::new()) {
			// drop requests belonging to an older session.
			if pending.consensus_parent != consensus.parent_hash { continue }

			if let Some(entry) = knowledge.candidates.get(&pending.candidate_hash) {
				// answer locally
				if let Some(ref data) = entry.block_data {
					let _ = pending.sender.send(data.clone());
					continue;
				}

				// first connected holder we have not yet attempted; the
				// `insert` both tests and records the attempt.
				let next_peer = entry.knows_block_data.iter()
					.filter_map(|x| consensus.session_keys.get(x).map(|id| (*x, *id)))
					.find(|&(ref key, _)| pending.attempted_peers.insert(*key))
					.map(|(_, id)| id);

				// dispatch to peer
				if let Some(peer_id) = next_peer {
					let req_id = self.next_req_id;
					self.next_req_id += 1;

					send_polkadot_message(
						ctx,
						peer_id,
						Message::RequestBlockData(req_id, pending.candidate_hash)
					);

					self.in_flight.insert((req_id, peer_id), pending);

					continue;
				}
			}

			// nobody to ask right now; keep the request queued.
			new_pending.push(pending);
		}

		self.pending = new_pending;
	}

	// Handle a decoded polkadot-specific message from a peer.
	fn on_polkadot_message(&mut self, ctx: &mut Context<Block>, peer_id: PeerId, raw: Vec<u8>, msg: Message) {
		match msg {
			Message::Statement(parent_hash, _statement) =>
				self.consensus_gossip.on_chain_specific(ctx, peer_id, raw, parent_hash),
			Message::SessionKey(parent_hash, key) => {
				{
					let info = match self.peers.get_mut(&peer_id) {
						Some(peer) => peer,
						None => return,
					};

					// only validators may claim session keys.
					if !info.validator {
						ctx.disable_peer(peer_id);
						return;
					}

					match self.live_consensus {
						Some(ref mut consensus) if consensus.parent_hash == parent_hash => {
							consensus.session_keys.insert(key, peer_id);
						}
						_ => {}
					}

					// remember the key even when it is not for the live session.
					info.session_keys.insert(parent_hash, key);
				}
				// the new key may unblock queued block-data requests.
				self.dispatch_pending_requests(ctx);
			}
			Message::RequestBlockData(req_id, hash) => {
				let block_data = self.live_consensus.as_ref()
					.and_then(|c| c.block_data(&hash));

				send_polkadot_message(ctx, peer_id, Message::BlockData(req_id, block_data));
			}
			Message::BlockData(req_id, data) => self.on_block_data(ctx, peer_id, req_id, data),
		}
	}

	// Handle a block-data response. An unsolicited response disables the
	// peer; missing or hash-mismatched data re-queues the request so another
	// peer can be tried.
	fn on_block_data(&mut self, ctx: &mut Context<Block>, peer_id: PeerId, req_id: RequestId, data: Option<BlockData>) {
		match self.in_flight.remove(&(req_id, peer_id)) {
			Some(req) => {
				if let Some(data) = data {
					// integrity check against the receipt's block-data hash.
					if data.hash() == req.block_data_hash {
						let _ = req.sender.send(data);
						return
					}
				}

				self.pending.push(req);
				self.dispatch_pending_requests(ctx);
			}
			None => ctx.disable_peer(peer_id),
		}
	}
}
|
||||
|
||||
impl Specialization<Block> for PolkadotProtocol {
	// Chain-specific status advertised to newly-connected peers: which
	// parachain we collate for, if any.
	fn status(&self) -> Vec<u8> {
		Status { collating_for: self.collating_for.clone() }.encode()
	}

	fn on_connect(&mut self, ctx: &mut Context<Block>, peer_id: PeerId, status: FullStatus) {
		// a peer with an undecodable chain status is misbehaving.
		let local_status = match Status::decode(&mut &status.chain_status[..]) {
			Some(status) => status,
			None => {
				ctx.disable_peer(peer_id);
				return;
			}
		};

		// register collators by the parachain they serve.
		if let Some(ref para_id) = local_status.collating_for {
			self.collators.entry(para_id.clone())
				.or_insert_with(Vec::new)
				.push(peer_id);
		}

		let validator = status.roles.iter().any(|r| *r == message::Role::Authority);
		self.peers.insert(peer_id, PeerInfo {
			status: local_status,
			session_keys: Default::default(),
			validator,
		});

		self.consensus_gossip.new_peer(ctx, peer_id, &status.roles);

		// a validator joining mid-session immediately learns our session key.
		if let (true, &Some(ref consensus)) = (validator, &self.live_consensus) {
			send_polkadot_message(
				ctx,
				peer_id,
				Message::SessionKey(consensus.parent_hash, consensus.local_session_key)
			);
		}

		self.dispatch_pending_requests(ctx);
	}

	fn on_disconnect(&mut self, ctx: &mut Context<Block>, peer_id: PeerId) {
		if let Some(info) = self.peers.remove(&peer_id) {
			// drop the peer from the collator registry.
			if let Some(collators) = info.status.collating_for.and_then(|id| self.collators.get_mut(&id)) {
				if let Some(pos) = collators.iter().position(|x| x == &peer_id) {
					collators.swap_remove(pos);
				}
			}

			if let (true, &mut Some(ref mut consensus)) = (info.validator, &mut self.live_consensus) {
				consensus.peer_disconnected(&info);
			}

			{
				// re-queue block-data requests that were assigned to this
				// peer so they can be retried elsewhere.
				let pending = &mut self.pending;
				self.in_flight.retain(|&(_, ref peer), val| {
					let retain = peer != &peer_id;
					if !retain {
						// swap a dummy request into the map entry so the real
						// one can be moved out during `retain`; the entry is
						// then discarded anyway.
						let (sender, _) = oneshot::channel();
						pending.push(::std::mem::replace(val, BlockDataRequest {
							attempted_peers: Default::default(),
							consensus_parent: Default::default(),
							candidate_hash: Default::default(),
							block_data_hash: Default::default(),
							sender,
						}));
					}

					retain
				});
			}
			self.consensus_gossip.peer_disconnected(ctx, peer_id);
			self.dispatch_pending_requests(ctx);
		}
	}

	fn on_message(&mut self, ctx: &mut Context<Block>, peer_id: PeerId, message: message::Message<Block>) {
		match message {
			generic_message::Message::BftMessage(msg) => {
				// TODO: check signature here? what if relevant block is unknown?
				self.consensus_gossip.on_bft_message(ctx, peer_id, msg)
			}
			generic_message::Message::ChainSpecific(raw) => {
				match serde_json::from_slice(&raw) {
					Ok(msg) => self.on_polkadot_message(ctx, peer_id, raw, msg),
					Err(e) => {
						trace!(target: "p_net", "Bad message from {}: {}", peer_id, e);
						ctx.disable_peer(peer_id);
					}
				}
			}
			_ => {}
		}
	}

	fn on_abort(&mut self) {
		self.consensus_gossip.abort();
	}

	fn maintain_peers(&mut self, ctx: &mut Context<Block>) {
		self.consensus_gossip.collect_garbage(None);
		self.dispatch_pending_requests(ctx);
	}
}
|
||||
@@ -0,0 +1,349 @@
|
||||
// Copyright 2017 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Polkadot.
|
||||
|
||||
// Polkadot is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Statement routing and consensus table router implementation.
|
||||
//!
|
||||
//! During the consensus process, validators exchange statements on validity and availability
|
||||
//! of parachain candidates.
|
||||
//! The `Router` in this file hooks into the underlying network to fulfill
|
||||
//! the `TableRouter` trait from `polkadot-consensus`, which is expected to call into a shared statement table
|
||||
//! and dispatch evaluation work as necessary when new statements come in.
|
||||
|
||||
use polkadot_api::{PolkadotApi, LocalPolkadotApi};
|
||||
use polkadot_consensus::{SharedTable, TableRouter, SignedStatement, GenericStatement, StatementProducer};
|
||||
use polkadot_primitives::{Hash, BlockId, SessionKey};
|
||||
use polkadot_primitives::parachain::{BlockData, Extrinsic, CandidateReceipt, Id as ParaId};
|
||||
|
||||
use futures::prelude::*;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use parking_lot::Mutex;
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::{NetworkService, Knowledge};
|
||||
|
||||
/// Table routing implementation.
///
/// Hooks the shared statement table into the network for one consensus
/// session: imports remote statements, defers statements about unknown
/// candidates, and dispatches validation/availability work.
pub struct Router<P: PolkadotApi> {
	// Shared statement table for this consensus session.
	table: Arc<SharedTable>,
	// Network service handle for sending statements and fetching data.
	network: Arc<NetworkService>,
	// Polkadot API handle, used to validate collations.
	api: Arc<P>,
	// Executor on which produced validation work is spawned.
	task_executor: TaskExecutor,
	// Relay-chain parent hash this router is scoped to.
	parent_hash: Hash,
	// Shared candidate knowledge, also used by the network protocol.
	knowledge: Arc<Mutex<Knowledge>>,
	// Statements held back until their candidate becomes known.
	deferred_statements: Arc<Mutex<DeferredStatements>>,
}
|
||||
|
||||
impl<P: PolkadotApi> Router<P> {
	/// Construct a router for one consensus session.
	pub(crate) fn new(
		table: Arc<SharedTable>,
		network: Arc<NetworkService>,
		api: Arc<P>,
		task_executor: TaskExecutor,
		parent_hash: Hash,
		knowledge: Arc<Mutex<Knowledge>>,
	) -> Self {
		Router {
			table,
			network,
			api,
			task_executor,
			parent_hash,
			knowledge,
			deferred_statements: Arc::new(Mutex::new(DeferredStatements::new())),
		}
	}

	/// Our local session key, as held by the statement table.
	pub(crate) fn session_key(&self) -> SessionKey {
		self.table.session_key()
	}
}
|
||||
|
||||
impl<P: PolkadotApi> Clone for Router<P> {
|
||||
fn clone(&self) -> Self {
|
||||
Router {
|
||||
table: self.table.clone(),
|
||||
network: self.network.clone(),
|
||||
api: self.api.clone(),
|
||||
task_executor: self.task_executor.clone(),
|
||||
parent_hash: self.parent_hash.clone(),
|
||||
deferred_statements: self.deferred_statements.clone(),
|
||||
knowledge: self.knowledge.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: LocalPolkadotApi + Send + Sync + 'static> Router<P> {
|
||||
/// Import a statement whose signature has been checked already.
|
||||
pub(crate) fn import_statement(&self, statement: SignedStatement) {
|
||||
// defer any statements for which we haven't imported the candidate yet
|
||||
let (c_hash, parachain_index) = {
|
||||
let candidate_data = match statement.statement {
|
||||
GenericStatement::Candidate(ref c) => Some((c.hash(), c.parachain_index)),
|
||||
GenericStatement::Valid(ref hash)
|
||||
| GenericStatement::Invalid(ref hash)
|
||||
| GenericStatement::Available(ref hash)
|
||||
=> self.table.with_candidate(hash, |c| c.map(|c| (*hash, c.parachain_index))),
|
||||
};
|
||||
match candidate_data {
|
||||
Some(x) => x,
|
||||
None => {
|
||||
self.deferred_statements.lock().push(statement);
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// import all statements pending on this candidate
|
||||
let (mut statements, _traces) = if let GenericStatement::Candidate(_) = statement.statement {
|
||||
self.deferred_statements.lock().get_deferred(&c_hash)
|
||||
} else {
|
||||
(Vec::new(), Vec::new())
|
||||
};
|
||||
|
||||
// prepend the candidate statement.
|
||||
statements.insert(0, statement);
|
||||
let producers: Vec<_> = self.table.import_remote_statements(
|
||||
self,
|
||||
statements.iter().cloned(),
|
||||
);
|
||||
// dispatch future work as necessary.
|
||||
for (producer, statement) in producers.into_iter().zip(statements) {
|
||||
let producer = match producer {
|
||||
Some(p) => p,
|
||||
None => continue, // statement redundant
|
||||
};
|
||||
|
||||
self.knowledge.lock().note_statement(statement.sender, &statement.statement);
|
||||
self.dispatch_work(c_hash, producer, parachain_index);
|
||||
}
|
||||
}
|
||||
|
||||
fn dispatch_work<D, E>(&self, candidate_hash: Hash, producer: StatementProducer<D, E>, parachain: ParaId) where
|
||||
D: Future<Item=BlockData,Error=()> + Send + 'static,
|
||||
E: Future<Item=Extrinsic,Error=()> + Send + 'static,
|
||||
{
|
||||
let parent_hash = self.parent_hash.clone();
|
||||
|
||||
let api = self.api.clone();
|
||||
let validate = move |collation| -> Option<bool> {
|
||||
let id = BlockId::hash(parent_hash);
|
||||
match ::polkadot_consensus::validate_collation(&*api, &id, &collation) {
|
||||
Ok(()) => Some(true),
|
||||
Err(e) => {
|
||||
debug!(target: "p_net", "Encountered bad collation: {}", e);
|
||||
Some(false)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let table = self.table.clone();
|
||||
let network = self.network.clone();
|
||||
let knowledge = self.knowledge.clone();
|
||||
|
||||
let work = producer.prime(validate).map(move |produced| {
|
||||
// store the data before broadcasting statements, so other peers can fetch.
|
||||
knowledge.lock().note_candidate(candidate_hash, produced.block_data, produced.extrinsic);
|
||||
|
||||
// propagate the statements
|
||||
if let Some(validity) = produced.validity {
|
||||
let signed = table.sign_and_import(validity.clone());
|
||||
route_statement(&*network, &*table, parachain, parent_hash, signed);
|
||||
}
|
||||
|
||||
if let Some(availability) = produced.availability {
|
||||
let signed = table.sign_and_import(availability);
|
||||
route_statement(&*network, &*table, parachain, parent_hash, signed);
|
||||
}
|
||||
});
|
||||
|
||||
self.task_executor.spawn(work);
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: LocalPolkadotApi + Send> TableRouter for Router<P> {
	type Error = ();
	type FetchCandidate = BlockDataReceiver;
	type FetchExtrinsic = Result<Extrinsic, Self::Error>;

	// Sign and route a statement about a candidate we authored or received
	// locally, after making its data fetchable by other peers.
	fn local_candidate(&self, receipt: CandidateReceipt, block_data: BlockData, extrinsic: Extrinsic) {
		// give to network to make available.
		let hash = receipt.hash();
		let para_id = receipt.parachain_index;
		let signed = self.table.sign_and_import(GenericStatement::Candidate(receipt));

		self.knowledge.lock().note_candidate(hash, Some(block_data), Some(extrinsic));
		route_statement(&*self.network, &*self.table, para_id, self.parent_hash, signed);
	}

	// Request candidate block data over the network, scoped to our parent hash.
	fn fetch_block_data(&self, candidate: &CandidateReceipt) -> BlockDataReceiver {
		let parent_hash = self.parent_hash;
		let rx = self.network.with_spec(|spec, ctx| { spec.fetch_block_data(ctx, candidate, parent_hash) });
		BlockDataReceiver { inner: rx }
	}

	// Extrinsic data is not yet routed; `Extrinsic` is currently a unit value.
	fn fetch_extrinsic_data(&self, _candidate: &CandidateReceipt) -> Self::FetchExtrinsic {
		Ok(Extrinsic)
	}
}
|
||||
|
||||
/// Receiver for block data.
pub struct BlockDataReceiver {
	// Channel receiving the fetched block data.
	// NOTE(review): presumably `None` when the network service could not
	// accept the request (see `with_spec` in `fetch_block_data`) — confirm;
	// the future resolves to an error in that case.
	inner: Option<::futures::sync::oneshot::Receiver<BlockData>>,
}
|
||||
|
||||
impl Future for BlockDataReceiver {
|
||||
type Item = BlockData;
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<BlockData, ()> {
|
||||
match self.inner {
|
||||
Some(ref mut inner) => inner.poll().map_err(|_| ()),
|
||||
None => return Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get statement to relevant validators.
//
// Candidate statements go to both the validity and availability guarantors
// of the parachain's group; validity/invalidity votes only to validity
// guarantors; availability notes only to availability guarantors. Our own
// session key is always skipped.
fn route_statement(network: &NetworkService, table: &SharedTable, para_id: ParaId, parent_hash: Hash, statement: SignedStatement) {
	let broadcast = |i: &mut Iterator<Item=&SessionKey>| {
		let local_key = table.session_key();
		network.with_spec(|spec, ctx| {
			for val in i.filter(|&x| x != &local_key) {
				spec.send_statement(ctx, *val, parent_hash, statement.clone());
			}
		});
	};

	let g_info = table
		.group_info()
		.get(&para_id)
		.expect("statements only produced about groups which exist");

	match statement.statement {
		GenericStatement::Candidate(_) =>
			broadcast(&mut g_info.validity_guarantors.iter().chain(g_info.availability_guarantors.iter())),
		GenericStatement::Valid(_) | GenericStatement::Invalid(_) =>
			broadcast(&mut g_info.validity_guarantors.iter()),
		GenericStatement::Available(_) =>
			broadcast(&mut g_info.availability_guarantors.iter()),
	}
}
|
||||
|
||||
// A unique trace for valid statements issued by a validator.
//
// Used to deduplicate deferred statements: each (validator session key,
// candidate hash) pair contributes at most one statement of each kind.
#[derive(Hash, PartialEq, Eq, Clone, Debug)]
enum StatementTrace {
	Valid(SessionKey, Hash),
	Invalid(SessionKey, Hash),
	Available(SessionKey, Hash),
}
|
||||
|
||||
// helper for deferring statements whose associated candidate is unknown.
struct DeferredStatements {
	// statements waiting on a candidate, keyed by candidate hash.
	deferred: HashMap<Hash, Vec<SignedStatement>>,
	// traces of all currently-deferred statements, for deduplication.
	known_traces: HashSet<StatementTrace>,
}
|
||||
|
||||
impl DeferredStatements {
|
||||
fn new() -> Self {
|
||||
DeferredStatements {
|
||||
deferred: HashMap::new(),
|
||||
known_traces: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn push(&mut self, statement: SignedStatement) {
|
||||
let (hash, trace) = match statement.statement {
|
||||
GenericStatement::Candidate(_) => return,
|
||||
GenericStatement::Valid(hash) => (hash, StatementTrace::Valid(statement.sender, hash)),
|
||||
GenericStatement::Invalid(hash) => (hash, StatementTrace::Invalid(statement.sender, hash)),
|
||||
GenericStatement::Available(hash) => (hash, StatementTrace::Available(statement.sender, hash)),
|
||||
};
|
||||
|
||||
if self.known_traces.insert(trace) {
|
||||
self.deferred.entry(hash).or_insert_with(Vec::new).push(statement);
|
||||
}
|
||||
}
|
||||
|
||||
fn get_deferred(&mut self, hash: &Hash) -> (Vec<SignedStatement>, Vec<StatementTrace>) {
|
||||
match self.deferred.remove(hash) {
|
||||
None => (Vec::new(), Vec::new()),
|
||||
Some(deferred) => {
|
||||
let mut traces = Vec::new();
|
||||
for statement in deferred.iter() {
|
||||
let trace = match statement.statement {
|
||||
GenericStatement::Candidate(_) => continue,
|
||||
GenericStatement::Valid(hash) => StatementTrace::Valid(statement.sender, hash),
|
||||
GenericStatement::Invalid(hash) => StatementTrace::Invalid(statement.sender, hash),
|
||||
GenericStatement::Available(hash) => StatementTrace::Available(statement.sender, hash),
|
||||
};
|
||||
|
||||
self.known_traces.remove(&trace);
|
||||
traces.push(trace);
|
||||
}
|
||||
|
||||
(deferred, traces)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;
	use substrate_primitives::H512;

	// Deferral should deduplicate by trace, drain on request, and forget
	// traces after draining.
	#[test]
	fn deferred_statements_works() {
		let mut deferred = DeferredStatements::new();
		let hash = [1; 32].into();
		let sig = H512([2; 64]).into();
		let sender = [255; 32].into();

		let statement = SignedStatement {
			statement: GenericStatement::Valid(hash),
			sender,
			signature: sig,
		};

		// pre-push.
		{
			let (signed, traces) = deferred.get_deferred(&hash);
			assert!(signed.is_empty());
			assert!(traces.is_empty());
		}

		// push the same statement twice; the duplicate must be ignored.
		deferred.push(statement.clone());
		deferred.push(statement.clone());

		// draining: second push should have been ignored.
		{
			let (signed, traces) = deferred.get_deferred(&hash);
			assert_eq!(signed.len(), 1);

			assert_eq!(traces.len(), 1);
			assert_eq!(signed[0].clone(), statement);
			assert_eq!(traces[0].clone(), StatementTrace::Valid(sender, hash));
		}

		// after draining
		{
			let (signed, traces) = deferred.get_deferred(&hash);
			assert!(signed.is_empty());
			assert!(traces.is_empty());
		}
	}
}
|
||||
@@ -0,0 +1,204 @@
|
||||
// Copyright 2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Polkadot.
|
||||
|
||||
// Polkadot is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Polkadot is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Tests for polkadot and consensus network.
|
||||
|
||||
use super::{PolkadotProtocol, Status, CurrentConsensus, Knowledge, Message, FullStatus};
|
||||
|
||||
use parking_lot::Mutex;
|
||||
use polkadot_consensus::GenericStatement;
|
||||
use polkadot_primitives::{Block, Hash, SessionKey};
|
||||
use polkadot_primitives::parachain::{CandidateReceipt, HeadData, BlockData};
|
||||
use codec::Slicable;
|
||||
use substrate_network::{PeerId, PeerInfo, ClientHandle, Context, message::Message as SubstrateMessage, message::Role, specialization::Specialization, generic_message::Message as GenericMessage};
|
||||
|
||||
use std::sync::Arc;
|
||||
use futures::Future;
|
||||
|
||||
// Mock network context that records the effects of protocol calls so tests
// can assert on them.
#[derive(Default)]
struct TestContext {
	// peers disabled (banned) during the test.
	disabled: Vec<PeerId>,
	// peers disconnected during the test.
	disconnected: Vec<PeerId>,
	// every message sent, together with its destination peer.
	messages: Vec<(PeerId, SubstrateMessage<Block>)>,
}
|
||||
|
||||
impl Context<Block> for TestContext {
	// Not exercised by these tests.
	fn client(&self) -> &ClientHandle<Block> {
		unimplemented!()
	}

	fn disable_peer(&mut self, peer: PeerId) {
		self.disabled.push(peer);
	}

	fn disconnect_peer(&mut self, peer: PeerId) {
		self.disconnected.push(peer);
	}

	// Not exercised by these tests.
	fn peer_info(&self, _peer: PeerId) -> Option<PeerInfo<Block>> {
		unimplemented!()
	}

	fn send_message(&mut self, peer_id: PeerId, data: SubstrateMessage<Block>) {
		self.messages.push((peer_id, data))
	}
}
|
||||
|
||||
impl TestContext {
|
||||
fn has_message(&self, to: PeerId, message: Message) -> bool {
|
||||
use substrate_network::generic_message::Message as GenericMessage;
|
||||
|
||||
let encoded = ::serde_json::to_vec(&message).unwrap();
|
||||
self.messages.iter().any(|&(ref peer, ref msg)| match msg {
|
||||
GenericMessage::ChainSpecific(ref data) => peer == &to && data == &encoded,
|
||||
_ => false,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Build a `FullStatus` wrapping the polkadot-specific `Status` with dummy
// chain-sync fields.
fn make_status(status: &Status, roles: Vec<Role>) -> FullStatus {
	FullStatus {
		version: 1,
		roles,
		best_number: 0,
		best_hash: Default::default(),
		genesis_hash: Default::default(),
		chain_status: status.encode(),
	}
}
|
||||
|
||||
// Build a consensus session for tests, returning it along with a handle to
// its shared knowledge so tests can seed candidate information directly.
fn make_consensus(parent_hash: Hash, local_key: SessionKey) -> (CurrentConsensus, Arc<Mutex<Knowledge>>) {
	let knowledge = Arc::new(Mutex::new(Knowledge::new()));
	let c = CurrentConsensus {
		knowledge: knowledge.clone(),
		parent_hash,
		session_keys: Default::default(),
		local_session_key: local_key,
	};

	(c, knowledge)
}
|
||||
|
||||
// Deliver a polkadot message to the protocol as it would arrive off the
// wire (JSON-encoded, wrapped as a chain-specific substrate message).
fn on_message(protocol: &mut PolkadotProtocol, ctx: &mut TestContext, from: PeerId, message: Message) {
	let encoded = ::serde_json::to_vec(&message).unwrap();
	protocol.on_message(ctx, from, GenericMessage::ChainSpecific(encoded));
}
|
||||
|
||||
#[test]
fn sends_session_key() {
	let mut protocol = PolkadotProtocol::new();

	let peer_a = 1;
	let peer_b = 2;
	let parent_hash = [0; 32].into();
	let local_key = [1; 32].into();

	let status = Status { collating_for: None };

	// no live session yet: connecting a validator sends nothing.
	{
		let mut ctx = TestContext::default();
		protocol.on_connect(&mut ctx, peer_a, make_status(&status, vec![Role::Authority]));
		assert!(ctx.messages.is_empty());
	}

	// starting a session broadcasts our key to already-connected validators.
	{
		let mut ctx = TestContext::default();
		let (consensus, _knowledge) = make_consensus(parent_hash, local_key);
		protocol.new_consensus(&mut ctx, consensus);

		assert!(ctx.has_message(peer_a, Message::SessionKey(parent_hash, local_key)));
	}

	// a validator connecting mid-session gets the key immediately.
	{
		let mut ctx = TestContext::default();
		protocol.on_connect(&mut ctx, peer_b, make_status(&status, vec![Role::Authority]));
		assert!(ctx.has_message(peer_b, Message::SessionKey(parent_hash, local_key)));
	}
}
|
||||
|
||||
#[test]
fn fetches_from_those_with_knowledge() {
	let mut protocol = PolkadotProtocol::new();

	let peer_a = 1;
	let peer_b = 2;
	let parent_hash = [0; 32].into();
	let local_key = [1; 32].into();

	let block_data = BlockData(vec![1, 2, 3, 4]);
	let block_data_hash = block_data.hash();
	let candidate_receipt = CandidateReceipt {
		parachain_index: 5.into(),
		collator: [255; 32].into(),
		head_data: HeadData(vec![9, 9, 9]),
		balance_uploads: Vec::new(),
		egress_queue_roots: Vec::new(),
		fees: 1_000_000,
		block_data_hash,
	};

	let candidate_hash = candidate_receipt.hash();
	let a_key = [3; 32].into();
	let b_key = [4; 32].into();

	let status = Status { collating_for: None };

	let (consensus, knowledge) = make_consensus(parent_hash, local_key);
	protocol.new_consensus(&mut TestContext::default(), consensus);

	// a validity statement from `a_key` marks it as knowing the block data,
	// so the fetch below should be assignable to whichever peer claims it.
	knowledge.lock().note_statement(a_key, &GenericStatement::Valid(candidate_hash));
	let recv = protocol.fetch_block_data(&mut TestContext::default(), &candidate_receipt, parent_hash);

	// connect peer A
	{
		let mut ctx = TestContext::default();
		protocol.on_connect(&mut ctx, peer_a, make_status(&status, vec![Role::Authority]));
		assert!(ctx.has_message(peer_a, Message::SessionKey(parent_hash, local_key)));
	}

	// peer A gives session key and gets asked for data.
	{
		let mut ctx = TestContext::default();
		on_message(&mut protocol, &mut ctx, peer_a, Message::SessionKey(parent_hash, a_key));
		assert!(ctx.has_message(peer_a, Message::RequestBlockData(1, candidate_hash)));
	}

	knowledge.lock().note_statement(b_key, &GenericStatement::Valid(candidate_hash));

	// peer B connects and sends session key. request already assigned to A
	{
		let mut ctx = TestContext::default();
		protocol.on_connect(&mut ctx, peer_b, make_status(&status, vec![Role::Authority]));
		on_message(&mut protocol, &mut ctx, peer_b, Message::SessionKey(parent_hash, b_key));
		assert!(!ctx.has_message(peer_b, Message::RequestBlockData(2, candidate_hash)));

	}

	// peer A disconnects, triggering reassignment
	{
		let mut ctx = TestContext::default();
		protocol.on_disconnect(&mut ctx, peer_a);
		assert!(ctx.has_message(peer_b, Message::RequestBlockData(2, candidate_hash)));
	}

	// peer B comes back with block data.
	{
		let mut ctx = TestContext::default();
		on_message(&mut protocol, &mut ctx, peer_b, Message::BlockData(2, Some(block_data.clone())));
		// the receiver must already hold the data; drop the protocol to
		// release the sender side before waiting.
		drop(protocol);
		assert_eq!(recv.wait().unwrap(), block_data);
	}
}
|
||||
@@ -172,6 +172,8 @@ pub struct CandidateReceipt {
|
||||
pub egress_queue_roots: Vec<(Id, Hash)>,
|
||||
/// Fees paid from the chain to the relay chain validators
|
||||
pub fees: u64,
|
||||
/// blake2-256 Hash of block data.
|
||||
pub block_data_hash: Hash,
|
||||
}
|
||||
|
||||
impl Slicable for CandidateReceipt {
|
||||
@@ -184,6 +186,7 @@ impl Slicable for CandidateReceipt {
|
||||
self.balance_uploads.using_encoded(|s| v.extend(s));
|
||||
self.egress_queue_roots.using_encoded(|s| v.extend(s));
|
||||
self.fees.using_encoded(|s| v.extend(s));
|
||||
self.block_data_hash.using_encoded(|s| v.extend(s));
|
||||
|
||||
v
|
||||
}
|
||||
@@ -196,6 +199,7 @@ impl Slicable for CandidateReceipt {
|
||||
balance_uploads: Slicable::decode(input)?,
|
||||
egress_queue_roots: Slicable::decode(input)?,
|
||||
fees: Slicable::decode(input)?,
|
||||
block_data_hash: Slicable::decode(input)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -243,6 +247,15 @@ pub struct ConsolidatedIngress(pub Vec<(Id, Vec<Message>)>);
|
||||
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
|
||||
pub struct BlockData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
|
||||
|
||||
impl BlockData {
|
||||
/// Compute hash of block data.
|
||||
#[cfg(feature = "std")]
|
||||
pub fn hash(&self) -> Hash {
|
||||
use runtime_primitives::traits::{BlakeTwo256, Hashing};
|
||||
BlakeTwo256::hash(&self.0[..])
|
||||
}
|
||||
}
|
||||
|
||||
/// Parachain header raw bytes wrapper type.
|
||||
#[derive(PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
|
||||
@@ -273,19 +286,9 @@ impl Slicable for Activity {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "std", derive(Debug))]
|
||||
#[repr(u8)]
|
||||
enum StatementKind {
|
||||
Candidate = 1,
|
||||
Valid = 2,
|
||||
Invalid = 3,
|
||||
Available = 4,
|
||||
}
|
||||
|
||||
/// Statements which can be made about parachain candidates.
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "std", derive(Debug))]
|
||||
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
|
||||
pub enum Statement {
|
||||
/// Proposal of a parachain candidate.
|
||||
Candidate(CandidateReceipt),
|
||||
@@ -296,47 +299,3 @@ pub enum Statement {
|
||||
/// Vote to advance round after inactive primary.
|
||||
Available(Hash),
|
||||
}
|
||||
|
||||
impl Slicable for Statement {
|
||||
fn encode(&self) -> Vec<u8> {
|
||||
let mut v = Vec::new();
|
||||
match *self {
|
||||
Statement::Candidate(ref candidate) => {
|
||||
v.push(StatementKind::Candidate as u8);
|
||||
candidate.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Valid(ref hash) => {
|
||||
v.push(StatementKind::Valid as u8);
|
||||
hash.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Invalid(ref hash) => {
|
||||
v.push(StatementKind::Invalid as u8);
|
||||
hash.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Available(ref hash) => {
|
||||
v.push(StatementKind::Available as u8);
|
||||
hash.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
}
|
||||
|
||||
v
|
||||
}
|
||||
|
||||
fn decode<I: Input>(value: &mut I) -> Option<Self> {
|
||||
match value.read_byte() {
|
||||
Some(x) if x == StatementKind::Candidate as u8 => {
|
||||
Slicable::decode(value).map(Statement::Candidate)
|
||||
}
|
||||
Some(x) if x == StatementKind::Valid as u8 => {
|
||||
Slicable::decode(value).map(Statement::Valid)
|
||||
}
|
||||
Some(x) if x == StatementKind::Invalid as u8 => {
|
||||
Slicable::decode(value).map(Statement::Invalid)
|
||||
}
|
||||
Some(x) if x == StatementKind::Available as u8 => {
|
||||
Slicable::decode(value).map(Statement::Available)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -218,6 +218,7 @@ impl_outer_dispatch! {
|
||||
Democracy = 5,
|
||||
Council = 6,
|
||||
CouncilVoting = 7,
|
||||
Parachains = 8,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -395,4 +396,10 @@ mod tests {
|
||||
let v = Slicable::encode(&xt);
|
||||
assert_eq!(Extrinsic::decode(&mut &v[..]).unwrap(), xt);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parachain_calls_are_privcall() {
|
||||
let _register = PrivCall::Parachains(parachains::PrivCall::register_parachain(0.into(), vec![1, 2, 3], vec![]));
|
||||
let _deregister = PrivCall::Parachains(parachains::PrivCall::deregister_parachain(0.into()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,15 +16,14 @@
|
||||
|
||||
//! Main parachains logic. For now this is just the determination of which validators do what.
|
||||
|
||||
use primitives;
|
||||
use rstd::prelude::*;
|
||||
use codec::{Slicable, Joiner};
|
||||
use codec::Slicable;
|
||||
|
||||
use runtime_primitives::traits::{Executable, RefInto, MaybeEmpty};
|
||||
use runtime_primitives::traits::{Hashing, BlakeTwo256, Executable, RefInto, MaybeEmpty};
|
||||
use primitives::parachain::{Id, Chain, DutyRoster, CandidateReceipt};
|
||||
use {system, session};
|
||||
|
||||
use substrate_runtime_support::{Hashable, StorageValue, StorageMap};
|
||||
use substrate_runtime_support::{StorageValue, StorageMap};
|
||||
use substrate_runtime_support::dispatch::Result;
|
||||
|
||||
#[cfg(any(feature = "std", test))]
|
||||
@@ -33,7 +32,7 @@ use rstd::marker::PhantomData;
|
||||
#[cfg(any(feature = "std", test))]
|
||||
use {runtime_io, runtime_primitives};
|
||||
|
||||
pub trait Trait: session::Trait<Hash = primitives::Hash> {
|
||||
pub trait Trait: system::Trait<Hash = ::primitives::Hash> + session::Trait {
|
||||
/// The position of the set_heads call in the block.
|
||||
const SET_POSITION: u32;
|
||||
|
||||
@@ -43,13 +42,19 @@ pub trait Trait: session::Trait<Hash = primitives::Hash> {
|
||||
decl_module! {
|
||||
/// Parachains module.
|
||||
pub struct Module<T: Trait>;
|
||||
|
||||
/// Call type for parachains.
|
||||
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
|
||||
pub enum Call where aux: <T as Trait>::PublicAux {
|
||||
// provide candidate receipts for parachains, in ascending order by id.
|
||||
fn set_heads(aux, heads: Vec<CandidateReceipt>) -> Result = 0;
|
||||
}
|
||||
|
||||
/// Private calls for parachains.
|
||||
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
|
||||
pub enum PrivCall {
|
||||
fn register_parachain(id: Id, code: Vec<u8>, initial_head_data: Vec<u8>) -> Result = 0;
|
||||
fn deregister_parachain(id: Id) -> Result = 1;
|
||||
}
|
||||
}
|
||||
|
||||
decl_storage! {
|
||||
@@ -83,8 +88,9 @@ impl<T: Trait> Module<T> {
|
||||
|
||||
let mut roles_gua = roles_val.clone();
|
||||
|
||||
let random_seed = system::Module::<T>::random_seed();
|
||||
let mut seed = random_seed.to_vec().and(b"validator_role_pairs").blake2_256();
|
||||
let mut random_seed = system::Module::<T>::random_seed().to_vec();
|
||||
random_seed.extend(b"validator_role_pairs");
|
||||
let mut seed = BlakeTwo256::hash(&random_seed);
|
||||
|
||||
// shuffle
|
||||
for i in 0..(validator_count - 1) {
|
||||
@@ -100,7 +106,7 @@ impl<T: Trait> Module<T> {
|
||||
|
||||
if offset == 24 {
|
||||
// into the last 8 bytes - rehash to gather new entropy
|
||||
seed = seed.blake2_256();
|
||||
seed = BlakeTwo256::hash(&seed);
|
||||
}
|
||||
|
||||
// exchange last item with randomly chosen first.
|
||||
@@ -116,20 +122,22 @@ impl<T: Trait> Module<T> {
|
||||
|
||||
/// Register a parachain with given code.
|
||||
/// Fails if given ID is already used.
|
||||
pub fn register_parachain(id: Id, code: Vec<u8>, initial_head_data: Vec<u8>) {
|
||||
pub fn register_parachain(id: Id, code: Vec<u8>, initial_head_data: Vec<u8>) -> Result {
|
||||
let mut parachains = Self::active_parachains();
|
||||
match parachains.binary_search(&id) {
|
||||
Ok(_) => panic!("Parachain with id {} already exists", id.into_inner()),
|
||||
Ok(_) => fail!("Parachain already exists"),
|
||||
Err(idx) => parachains.insert(idx, id),
|
||||
}
|
||||
|
||||
<Code<T>>::insert(id, code);
|
||||
<Parachains<T>>::put(parachains);
|
||||
<Heads<T>>::insert(id, initial_head_data);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deregister a parachain with given id
|
||||
pub fn deregister_parachain(id: Id) {
|
||||
pub fn deregister_parachain(id: Id) -> Result {
|
||||
let mut parachains = Self::active_parachains();
|
||||
match parachains.binary_search(&id) {
|
||||
Ok(idx) => { parachains.remove(idx); }
|
||||
@@ -139,6 +147,7 @@ impl<T: Trait> Module<T> {
|
||||
<Code<T>>::remove(id);
|
||||
<Heads<T>>::remove(id);
|
||||
<Parachains<T>>::put(parachains);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_heads(aux: &<T as Trait>::PublicAux, heads: Vec<CandidateReceipt>) -> Result {
|
||||
@@ -317,12 +326,12 @@ mod tests {
|
||||
assert_eq!(Parachains::parachain_code(&5u32.into()), Some(vec![1,2,3]));
|
||||
assert_eq!(Parachains::parachain_code(&100u32.into()), Some(vec![4,5,6]));
|
||||
|
||||
Parachains::register_parachain(99u32.into(), vec![7,8,9], vec![1, 1, 1]);
|
||||
Parachains::register_parachain(99u32.into(), vec![7,8,9], vec![1, 1, 1]).unwrap();
|
||||
|
||||
assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 99u32.into(), 100u32.into()]);
|
||||
assert_eq!(Parachains::parachain_code(&99u32.into()), Some(vec![7,8,9]));
|
||||
|
||||
Parachains::deregister_parachain(5u32.into());
|
||||
Parachains::deregister_parachain(5u32.into()).unwrap();
|
||||
|
||||
assert_eq!(Parachains::active_parachains(), vec![99u32.into(), 100u32.into()]);
|
||||
assert_eq!(Parachains::parachain_code(&5u32.into()), None);
|
||||
|
||||
BIN
Binary file not shown.
Binary file not shown.
@@ -6,13 +6,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
||||
[dependencies]
|
||||
futures = "0.1.17"
|
||||
parking_lot = "0.4"
|
||||
tokio-timer = "0.1.2"
|
||||
error-chain = "0.12"
|
||||
lazy_static = "1.0"
|
||||
log = "0.3"
|
||||
slog = "^2"
|
||||
clap = "2.27"
|
||||
tokio-core = "0.1.12"
|
||||
tokio = "0.1.7"
|
||||
exit-future = "0.1"
|
||||
serde = "1.0"
|
||||
serde_json = "1.0"
|
||||
@@ -25,6 +24,7 @@ polkadot-consensus = { path = "../consensus" }
|
||||
polkadot-executor = { path = "../executor" }
|
||||
polkadot-api = { path = "../api" }
|
||||
polkadot-transaction-pool = { path = "../transaction-pool" }
|
||||
polkadot-network = { path = "../network" }
|
||||
substrate-keystore = { path = "../../substrate/keystore" }
|
||||
substrate-runtime-io = { path = "../../substrate/runtime-io" }
|
||||
substrate-runtime-primitives = { path = "../../substrate/runtime/primitives" }
|
||||
|
||||
@@ -18,20 +18,22 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use client::{self, Client};
|
||||
use chain_spec::ChainSpec;
|
||||
use client_db;
|
||||
use client::{self, Client};
|
||||
use codec::{self, Slicable};
|
||||
use consensus;
|
||||
use error;
|
||||
use keystore::Store as Keystore;
|
||||
use network;
|
||||
use network::{self, OnDemand};
|
||||
use polkadot_api;
|
||||
use polkadot_executor::Executor as LocalDispatch;
|
||||
use polkadot_network::NetworkService;
|
||||
use polkadot_primitives::{Block, BlockId, Hash};
|
||||
use state_machine;
|
||||
use substrate_executor::NativeExecutor;
|
||||
use transaction_pool::{self, TransactionPool};
|
||||
use error;
|
||||
use chain_spec::ChainSpec;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
|
||||
/// Code executor.
|
||||
pub type CodeExecutor = NativeExecutor<LocalDispatch>;
|
||||
@@ -49,7 +51,7 @@ pub trait Components {
|
||||
|
||||
/// Create client.
|
||||
fn build_client(&self, settings: client_db::DatabaseSettings, executor: CodeExecutor, chain_spec: &ChainSpec)
|
||||
-> Result<(Arc<Client<Self::Backend, Self::Executor, Block>>, Option<Arc<network::OnDemand<Block, network::Service<Block>>>>), error::Error>;
|
||||
-> Result<(Arc<Client<Self::Backend, Self::Executor, Block>>, Option<Arc<OnDemand<Block, NetworkService>>>), error::Error>;
|
||||
|
||||
/// Create api.
|
||||
fn build_api(&self, client: Arc<Client<Self::Backend, Self::Executor, Block>>) -> Arc<Self::Api>;
|
||||
@@ -59,7 +61,14 @@ pub trait Components {
|
||||
-> Arc<network::TransactionPool<Block>>;
|
||||
|
||||
/// Create consensus service.
|
||||
fn build_consensus(&self, client: Arc<Client<Self::Backend, Self::Executor, Block>>, network: Arc<network::Service<Block>>, tx_pool: Arc<TransactionPool<Self::Api>>, keystore: &Keystore)
|
||||
fn build_consensus(
|
||||
&self,
|
||||
client: Arc<Client<Self::Backend, Self::Executor, Block>>,
|
||||
network: Arc<NetworkService>,
|
||||
tx_pool: Arc<TransactionPool<Self::Api>>,
|
||||
keystore: &Keystore,
|
||||
task_executor: TaskExecutor,
|
||||
)
|
||||
-> Result<Option<consensus::Service>, error::Error>;
|
||||
}
|
||||
|
||||
@@ -75,7 +84,7 @@ impl Components for FullComponents {
|
||||
type Executor = client::LocalCallExecutor<client_db::Backend<Block>, NativeExecutor<LocalDispatch>>;
|
||||
|
||||
fn build_client(&self, db_settings: client_db::DatabaseSettings, executor: CodeExecutor, chain_spec: &ChainSpec)
|
||||
-> Result<(Arc<client::Client<Self::Backend, Self::Executor, Block>>, Option<Arc<network::OnDemand<Block, network::Service<Block>>>>), error::Error> {
|
||||
-> Result<(Arc<client::Client<Self::Backend, Self::Executor, Block>>, Option<Arc<OnDemand<Block, NetworkService>>>), error::Error> {
|
||||
Ok((Arc::new(client_db::new_client(db_settings, executor, chain_spec)?), None))
|
||||
}
|
||||
|
||||
@@ -92,20 +101,31 @@ impl Components for FullComponents {
|
||||
})
|
||||
}
|
||||
|
||||
fn build_consensus(&self, client: Arc<client::Client<Self::Backend, Self::Executor, Block>>, network: Arc<network::Service<Block>>, tx_pool: Arc<TransactionPool<Self::Api>>, keystore: &Keystore)
|
||||
-> Result<Option<consensus::Service>, error::Error> {
|
||||
fn build_consensus(
|
||||
&self,
|
||||
client: Arc<client::Client<Self::Backend, Self::Executor, Block>>,
|
||||
network: Arc<NetworkService>,
|
||||
tx_pool: Arc<TransactionPool<Self::Api>>,
|
||||
keystore: &Keystore,
|
||||
task_executor: TaskExecutor,
|
||||
)
|
||||
-> Result<Option<consensus::Service>, error::Error>
|
||||
{
|
||||
if !self.is_validator {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Load the first available key
|
||||
let key = keystore.load(&keystore.contents()?[0], "")?;
|
||||
info!("Using authority key: {}", key.public());
|
||||
info!("Using authority key {}", key.public());
|
||||
|
||||
let consensus_net = ::polkadot_network::consensus::ConsensusNetwork::new(network, client.clone());
|
||||
Ok(Some(consensus::Service::new(
|
||||
client.clone(),
|
||||
client.clone(),
|
||||
network.clone(),
|
||||
consensus_net,
|
||||
tx_pool.clone(),
|
||||
task_executor,
|
||||
::std::time::Duration::from_millis(4000), // TODO: dynamic
|
||||
key,
|
||||
)))
|
||||
@@ -116,14 +136,14 @@ impl Components for FullComponents {
|
||||
pub struct LightComponents;
|
||||
|
||||
impl Components for LightComponents {
|
||||
type Backend = client::light::backend::Backend<client_db::light::LightStorage<Block>, network::OnDemand<Block, network::Service<Block>>>;
|
||||
type Backend = client::light::backend::Backend<client_db::light::LightStorage<Block>, network::OnDemand<Block, NetworkService>>;
|
||||
type Api = polkadot_api::light::RemotePolkadotApiWrapper<Self::Backend, Self::Executor>;
|
||||
type Executor = client::light::call_executor::RemoteCallExecutor<
|
||||
client::light::blockchain::Blockchain<client_db::light::LightStorage<Block>, network::OnDemand<Block, network::Service<Block>>>,
|
||||
network::OnDemand<Block, network::Service<Block>>>;
|
||||
client::light::blockchain::Blockchain<client_db::light::LightStorage<Block>, network::OnDemand<Block, NetworkService>>,
|
||||
network::OnDemand<Block, NetworkService>>;
|
||||
|
||||
fn build_client(&self, db_settings: client_db::DatabaseSettings, executor: CodeExecutor, spec: &ChainSpec)
|
||||
-> Result<(Arc<client::Client<Self::Backend, Self::Executor, Block>>, Option<Arc<network::OnDemand<Block, network::Service<Block>>>>), error::Error> {
|
||||
-> Result<(Arc<client::Client<Self::Backend, Self::Executor, Block>>, Option<Arc<OnDemand<Block, NetworkService>>>), error::Error> {
|
||||
let db_storage = client_db::light::LightStorage::new(db_settings)?;
|
||||
let light_blockchain = client::light::new_light_blockchain(db_storage);
|
||||
let fetch_checker = Arc::new(client::light::new_fetch_checker(light_blockchain.clone(), executor));
|
||||
@@ -146,8 +166,16 @@ impl Components for LightComponents {
|
||||
})
|
||||
}
|
||||
|
||||
fn build_consensus(&self, _client: Arc<client::Client<Self::Backend, Self::Executor, Block>>, _network: Arc<network::Service<Block>>, _tx_pool: Arc<TransactionPool<Self::Api>>, _keystore: &Keystore)
|
||||
-> Result<Option<consensus::Service>, error::Error> {
|
||||
fn build_consensus(
|
||||
&self,
|
||||
_client: Arc<client::Client<Self::Backend, Self::Executor, Block>>,
|
||||
_network: Arc<NetworkService>,
|
||||
_tx_pool: Arc<TransactionPool<Self::Api>>,
|
||||
_keystore: &Keystore,
|
||||
_task_executor: TaskExecutor,
|
||||
)
|
||||
-> Result<Option<consensus::Service>, error::Error>
|
||||
{
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
+52
-65
@@ -21,7 +21,6 @@ extern crate futures;
|
||||
extern crate ed25519;
|
||||
extern crate clap;
|
||||
extern crate exit_future;
|
||||
extern crate tokio_timer;
|
||||
extern crate serde;
|
||||
extern crate serde_json;
|
||||
extern crate polkadot_primitives;
|
||||
@@ -30,18 +29,17 @@ extern crate polkadot_executor;
|
||||
extern crate polkadot_api;
|
||||
extern crate polkadot_consensus as consensus;
|
||||
extern crate polkadot_transaction_pool as transaction_pool;
|
||||
extern crate polkadot_network;
|
||||
extern crate substrate_keystore as keystore;
|
||||
extern crate substrate_runtime_io as runtime_io;
|
||||
extern crate substrate_primitives as primitives;
|
||||
extern crate substrate_runtime_primitives as runtime_primitives;
|
||||
extern crate substrate_network as network;
|
||||
extern crate substrate_codec as codec;
|
||||
extern crate substrate_executor;
|
||||
extern crate substrate_state_machine as state_machine;
|
||||
|
||||
extern crate tokio_core;
|
||||
extern crate substrate_client as client;
|
||||
extern crate substrate_client_db as client_db;
|
||||
extern crate tokio;
|
||||
|
||||
#[macro_use]
|
||||
extern crate substrate_telemetry;
|
||||
@@ -62,9 +60,7 @@ mod config;
|
||||
mod chain_spec;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use futures::prelude::*;
|
||||
use tokio_core::reactor::Core;
|
||||
use transaction_pool::TransactionPool;
|
||||
use keystore::Store as Keystore;
|
||||
use polkadot_api::PolkadotApi;
|
||||
@@ -72,6 +68,8 @@ use polkadot_primitives::{Block, BlockId, Hash};
|
||||
use client::{Client, BlockchainEvents};
|
||||
use network::ManageNetwork;
|
||||
use exit_future::Signal;
|
||||
use polkadot_network::{NetworkService, PolkadotProtocol};
|
||||
use tokio::runtime::TaskExecutor;
|
||||
|
||||
pub use self::error::{ErrorKind, Error};
|
||||
pub use self::components::{Components, FullComponents, LightComponents};
|
||||
@@ -80,23 +78,22 @@ pub use chain_spec::ChainSpec;
|
||||
|
||||
/// Polkadot service.
|
||||
pub struct Service<Components: components::Components> {
|
||||
thread: Option<thread::JoinHandle<()>>,
|
||||
client: Arc<Client<Components::Backend, Components::Executor, Block>>,
|
||||
network: Arc<network::Service<Block>>,
|
||||
network: Arc<NetworkService>,
|
||||
transaction_pool: Arc<TransactionPool<Components::Api>>,
|
||||
signal: Option<Signal>,
|
||||
_consensus: Option<consensus::Service>,
|
||||
}
|
||||
|
||||
/// Creates light client and register protocol with the network service
|
||||
pub fn new_light(config: Configuration) -> Result<Service<components::LightComponents>, error::Error> {
|
||||
Service::new(components::LightComponents, config)
|
||||
pub fn new_light(config: Configuration, executor: TaskExecutor) -> Result<Service<components::LightComponents>, error::Error> {
|
||||
Service::new(components::LightComponents, config, executor)
|
||||
}
|
||||
|
||||
/// Creates full client and register protocol with the network service
|
||||
pub fn new_full(config: Configuration) -> Result<Service<components::FullComponents>, error::Error> {
|
||||
let is_validator = (config.roles & Role::VALIDATOR) == Role::VALIDATOR;
|
||||
Service::new(components::FullComponents { is_validator }, config)
|
||||
pub fn new_full(config: Configuration, executor: TaskExecutor) -> Result<Service<components::FullComponents>, error::Error> {
|
||||
let is_validator = (config.roles & Role::AUTHORITY) == Role::AUTHORITY;
|
||||
Service::new(components::FullComponents { is_validator }, config, executor)
|
||||
}
|
||||
|
||||
/// Creates bare client without any networking.
|
||||
@@ -112,7 +109,7 @@ pub fn new_client(config: Configuration) -> Result<Arc<Client<
|
||||
pruning: config.pruning,
|
||||
};
|
||||
let executor = polkadot_executor::Executor::new();
|
||||
let is_validator = (config.roles & Role::VALIDATOR) == Role::VALIDATOR;
|
||||
let is_validator = (config.roles & Role::AUTHORITY) == Role::AUTHORITY;
|
||||
let components = components::FullComponents { is_validator };
|
||||
let (client, _) = components.build_client(db_settings, executor, &config.chain_spec)?;
|
||||
Ok(client)
|
||||
@@ -124,9 +121,7 @@ impl<Components> Service<Components>
|
||||
client::error::Error: From<<<<Components as components::Components>::Backend as client::backend::Backend<Block>>::State as state_machine::Backend>::Error>,
|
||||
{
|
||||
/// Creates and register protocol with the network service
|
||||
fn new(components: Components, config: Configuration) -> Result<Self, error::Error> {
|
||||
use std::sync::Barrier;
|
||||
|
||||
fn new(components: Components, config: Configuration, task_executor: TaskExecutor) -> Result<Self, error::Error> {
|
||||
let (signal, exit) = ::exit_future::signal();
|
||||
|
||||
// Create client
|
||||
@@ -165,61 +160,55 @@ impl<Components> Service<Components>
|
||||
chain: client.clone(),
|
||||
on_demand: on_demand.clone().map(|d| d as Arc<network::OnDemandService<Block>>),
|
||||
transaction_pool: transaction_pool_adapter,
|
||||
specialization: PolkadotProtocol::new(),
|
||||
};
|
||||
let network = network::Service::new(network_params)?;
|
||||
let barrier = ::std::sync::Arc::new(Barrier::new(2));
|
||||
|
||||
let network = network::Service::new(network_params, ::polkadot_network::DOT_PROTOCOL_ID)?;
|
||||
on_demand.map(|on_demand| on_demand.set_service_link(Arc::downgrade(&network)));
|
||||
|
||||
let thread = {
|
||||
let client = client.clone();
|
||||
network.start_network();
|
||||
|
||||
{
|
||||
// block notifications
|
||||
let network = network.clone();
|
||||
let txpool = transaction_pool.clone();
|
||||
|
||||
let thread_barrier = barrier.clone();
|
||||
thread::spawn(move || {
|
||||
network.start_network();
|
||||
let events = client.import_notification_stream()
|
||||
.for_each(move |notification| {
|
||||
network.on_block_imported(notification.hash, ¬ification.header);
|
||||
prune_imported(&*txpool, notification.hash);
|
||||
Ok(())
|
||||
})
|
||||
.select(exit.clone())
|
||||
.then(|_| Ok(()));
|
||||
task_executor.spawn(events);
|
||||
}
|
||||
|
||||
thread_barrier.wait();
|
||||
let mut core = Core::new().expect("tokio::Core could not be created");
|
||||
{
|
||||
// transaction notifications
|
||||
let network = network.clone();
|
||||
let events = transaction_pool.import_notification_stream()
|
||||
// TODO [ToDr] Consider throttling?
|
||||
.for_each(move |_| {
|
||||
network.trigger_repropagate();
|
||||
Ok(())
|
||||
})
|
||||
.select(exit.clone())
|
||||
.then(|_| Ok(()));
|
||||
|
||||
// block notifications
|
||||
let network1 = network.clone();
|
||||
let txpool1 = txpool.clone();
|
||||
|
||||
let events = client.import_notification_stream()
|
||||
.for_each(move |notification| {
|
||||
network1.on_block_imported(notification.hash, ¬ification.header);
|
||||
prune_imported(&*txpool1, notification.hash);
|
||||
|
||||
Ok(())
|
||||
});
|
||||
core.handle().spawn(events);
|
||||
|
||||
// transaction notifications
|
||||
let events = txpool.import_notification_stream()
|
||||
// TODO [ToDr] Consider throttling?
|
||||
.for_each(move |_| {
|
||||
network.trigger_repropagate();
|
||||
Ok(())
|
||||
});
|
||||
core.handle().spawn(events);
|
||||
|
||||
if let Err(e) = core.run(exit) {
|
||||
debug!("Polkadot service event loop shutdown with {:?}", e);
|
||||
}
|
||||
debug!("Polkadot service shutdown");
|
||||
})
|
||||
};
|
||||
|
||||
// wait for the network to start up before starting the consensus
|
||||
// service.
|
||||
barrier.wait();
|
||||
task_executor.spawn(events);
|
||||
}
|
||||
|
||||
// Spin consensus service if configured
|
||||
let consensus_service = components.build_consensus(client.clone(), network.clone(), transaction_pool.clone(), &keystore)?;
|
||||
let consensus_service = components.build_consensus(
|
||||
client.clone(),
|
||||
network.clone(),
|
||||
transaction_pool.clone(),
|
||||
&keystore,
|
||||
task_executor,
|
||||
)?;
|
||||
|
||||
Ok(Service {
|
||||
thread: Some(thread),
|
||||
client: client,
|
||||
network: network,
|
||||
transaction_pool: transaction_pool,
|
||||
@@ -234,7 +223,7 @@ impl<Components> Service<Components>
|
||||
}
|
||||
|
||||
/// Get shared network instance.
|
||||
pub fn network(&self) -> Arc<network::Service<Block>> {
|
||||
pub fn network(&self) -> Arc<NetworkService> {
|
||||
self.network.clone()
|
||||
}
|
||||
|
||||
@@ -260,14 +249,12 @@ pub fn prune_imported<A>(pool: &TransactionPool<A>, hash: Hash)
|
||||
|
||||
impl<Components> Drop for Service<Components> where Components: components::Components {
|
||||
fn drop(&mut self) {
|
||||
debug!(target: "service", "Polkadot service shutdown");
|
||||
|
||||
self.network.stop_network();
|
||||
|
||||
if let Some(signal) = self.signal.take() {
|
||||
signal.fire();
|
||||
}
|
||||
|
||||
if let Some(thread) = self.thread.take() {
|
||||
thread.join().expect("The service thread has panicked");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,5 +4,8 @@ version = "0.1.0"
|
||||
authors = ["Parity Technologies <admin@parity.io>"]
|
||||
|
||||
[dependencies]
|
||||
substrate-codec = { path = "../../substrate/codec" }
|
||||
substrate-primitives = { path = "../../substrate/primitives" }
|
||||
polkadot-primitives = { path = "../primitives" }
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
|
||||
@@ -27,26 +27,11 @@
|
||||
//! propose and attest to validity of candidates, and those who can only attest
|
||||
//! to availability.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::collections::hash_map::{HashMap, Entry};
|
||||
use std::hash::Hash;
|
||||
use std::fmt::Debug;
|
||||
|
||||
/// A batch of statements to send out.
|
||||
pub trait StatementBatch<V, T> {
|
||||
/// Get the target authorities of these statements.
|
||||
fn targets(&self) -> &[V];
|
||||
|
||||
/// If the batch is empty.
|
||||
fn is_empty(&self) -> bool;
|
||||
|
||||
/// Push a statement onto the batch. Returns false when the batch is full.
|
||||
///
|
||||
/// This is meant to do work like incrementally serializing the statements
|
||||
/// into a vector of bytes while making sure the length is below a certain
|
||||
/// amount.
|
||||
fn push(&mut self, statement: T) -> bool;
|
||||
}
|
||||
use codec::{Slicable, Input};
|
||||
|
||||
/// Context for the statement table.
|
||||
pub trait Context {
|
||||
@@ -85,7 +70,7 @@ pub trait Context {
|
||||
}
|
||||
|
||||
/// Statements circulated among peers.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum Statement<C, D> {
|
||||
/// Broadcast by a authority to indicate that this is his candidate for
|
||||
/// inclusion.
|
||||
@@ -103,8 +88,61 @@ pub enum Statement<C, D> {
|
||||
Invalid(D),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
enum StatementKind {
|
||||
Candidate = 1,
|
||||
Valid = 2,
|
||||
Invalid = 3,
|
||||
Available = 4,
|
||||
}
|
||||
|
||||
impl<C: Slicable, D: Slicable> Slicable for Statement<C, D> {
|
||||
fn encode(&self) -> Vec<u8> {
|
||||
let mut v = Vec::new();
|
||||
match *self {
|
||||
Statement::Candidate(ref candidate) => {
|
||||
v.push(StatementKind::Candidate as u8);
|
||||
candidate.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Valid(ref digest) => {
|
||||
v.push(StatementKind::Valid as u8);
|
||||
digest.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Invalid(ref digest) => {
|
||||
v.push(StatementKind::Invalid as u8);
|
||||
digest.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
Statement::Available(ref digest) => {
|
||||
v.push(StatementKind::Available as u8);
|
||||
digest.using_encoded(|s| v.extend(s));
|
||||
}
|
||||
}
|
||||
|
||||
v
|
||||
}
|
||||
|
||||
fn decode<I: Input>(value: &mut I) -> Option<Self> {
|
||||
match value.read_byte() {
|
||||
Some(x) if x == StatementKind::Candidate as u8 => {
|
||||
Slicable::decode(value).map(Statement::Candidate)
|
||||
}
|
||||
Some(x) if x == StatementKind::Valid as u8 => {
|
||||
Slicable::decode(value).map(Statement::Valid)
|
||||
}
|
||||
Some(x) if x == StatementKind::Invalid as u8 => {
|
||||
Slicable::decode(value).map(Statement::Invalid)
|
||||
}
|
||||
Some(x) if x == StatementKind::Available as u8 => {
|
||||
Slicable::decode(value).map(Statement::Available)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A signed statement.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SignedStatement<C, D, V, S> {
|
||||
/// The statement.
|
||||
pub statement: Statement<C, D>,
|
||||
@@ -114,26 +152,6 @@ pub struct SignedStatement<C, D, V, S> {
|
||||
pub sender: V,
|
||||
}
|
||||
|
||||
// A unique trace for a class of valid statements issued by a authority.
|
||||
//
|
||||
// We keep track of which statements we have received or sent to other authorities
|
||||
// in order to prevent relaying the same data multiple times.
|
||||
//
|
||||
// The signature of the statement is replaced by the authority because the authority
|
||||
// is unique while signatures are not (at least under common schemes like
|
||||
// Schnorr or ECDSA).
|
||||
#[derive(Hash, PartialEq, Eq, Clone)]
|
||||
enum StatementTrace<V, D> {
|
||||
/// The candidate proposed by the authority.
|
||||
Candidate(V),
|
||||
/// A validity statement from that authority about the given digest.
|
||||
Valid(V, D),
|
||||
/// An invalidity statement from that authority about the given digest.
|
||||
Invalid(V, D),
|
||||
/// An availability statement from that authority about the given digest.
|
||||
Available(V, D),
|
||||
}
|
||||
|
||||
/// Misbehavior: voting more than one way on candidate validity.
|
||||
///
|
||||
/// Since there are three possible ways to vote, a double vote is possible in
|
||||
@@ -148,6 +166,19 @@ pub enum ValidityDoubleVote<C, D, S> {
|
||||
ValidityAndInvalidity(D, S, S),
|
||||
}
|
||||
|
||||
/// Misbehavior: multiple signatures on same statement.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub enum DoubleSign<C, D, S> {
|
||||
/// On candidate.
|
||||
Candidate(C, S, S),
|
||||
/// On validity.
|
||||
Validity(D, S, S),
|
||||
/// On invalidity.
|
||||
Invalidity(D, S, S),
|
||||
/// On availability.
|
||||
Availability(D, S, S),
|
||||
}
|
||||
|
||||
/// Misbehavior: declaring multiple candidates.
|
||||
#[derive(PartialEq, Eq, Debug, Clone)]
|
||||
pub struct MultipleCandidates<C, S> {
|
||||
@@ -172,20 +203,14 @@ pub enum Misbehavior<C, D, V, S> {
|
||||
ValidityDoubleVote(ValidityDoubleVote<C, D, S>),
|
||||
/// Submitted multiple candidates.
|
||||
MultipleCandidates(MultipleCandidates<C, S>),
|
||||
/// Submitted a message withou
|
||||
/// Submitted a message that was unauthorized.
|
||||
UnauthorizedStatement(UnauthorizedStatement<C, D, V, S>),
|
||||
/// Submitted two valid signatures for the same message.
|
||||
DoubleSign(DoubleSign<C, D, S>),
|
||||
}
|
||||
|
||||
/// Fancy work-around for a type alias of context-based misbehavior
|
||||
/// without producing compiler warnings.
|
||||
pub trait ResolveMisbehavior {
|
||||
/// The misbehavior type.
|
||||
type Misbehavior;
|
||||
}
|
||||
|
||||
impl<C: Context + ?Sized> ResolveMisbehavior for C {
|
||||
type Misbehavior = Misbehavior<C::Candidate, C::Digest, C::AuthorityId, C::Signature>;
|
||||
}
|
||||
/// Type alias for misbehavior corresponding to context type.
|
||||
pub type MisbehaviorFor<C> = Misbehavior<<C as Context>::Candidate, <C as Context>::Digest, <C as Context>::AuthorityId, <C as Context>::Signature>;
|
||||
|
||||
// kinds of votes for validity
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
@@ -251,22 +276,26 @@ impl<C: Context> CandidateData<C> {
|
||||
// authority metadata
|
||||
struct AuthorityData<C: Context> {
|
||||
proposal: Option<(C::Digest, C::Signature)>,
|
||||
known_statements: HashSet<StatementTrace<C::AuthorityId, C::Digest>>,
|
||||
}
|
||||
|
||||
impl<C: Context> Default for AuthorityData<C> {
|
||||
fn default() -> Self {
|
||||
AuthorityData {
|
||||
proposal: None,
|
||||
known_statements: HashSet::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Type alias for the result of a statement import.
|
||||
pub type ImportResult<C> = Result<
|
||||
Option<Summary<<C as Context>::Digest, <C as Context>::GroupId>>,
|
||||
MisbehaviorFor<C>
|
||||
>;
|
||||
|
||||
/// Stores votes
|
||||
pub struct Table<C: Context> {
|
||||
authority_data: HashMap<C::AuthorityId, AuthorityData<C>>,
|
||||
detected_misbehavior: HashMap<C::AuthorityId, <C as ResolveMisbehavior>::Misbehavior>,
|
||||
detected_misbehavior: HashMap<C::AuthorityId, MisbehaviorFor<C>>,
|
||||
candidate_votes: HashMap<C::Digest, CandidateData<C>>,
|
||||
includable_count: HashMap<C::GroupId, usize>,
|
||||
}
|
||||
@@ -328,26 +357,17 @@ impl<C: Context> Table<C> {
|
||||
}
|
||||
|
||||
/// Import a signed statement. Signatures should be checked for validity, and the
|
||||
/// sender should be checked to actually be a authority.
|
||||
/// sender should be checked to actually be an authority.
|
||||
///
|
||||
/// This can note the origin of the statement to indicate that he has
|
||||
/// seen it already.
|
||||
/// If this returns `None`, the statement was either duplicate or invalid.
|
||||
pub fn import_statement(
|
||||
&mut self,
|
||||
context: &C,
|
||||
statement: SignedStatement<C::Candidate, C::Digest, C::AuthorityId, C::Signature>,
|
||||
from: Option<C::AuthorityId>
|
||||
) -> Option<Summary<C::Digest, C::GroupId>> {
|
||||
let SignedStatement { statement, signature, sender: signer } = statement;
|
||||
|
||||
let trace = match statement {
|
||||
Statement::Candidate(_) => StatementTrace::Candidate(signer.clone()),
|
||||
Statement::Valid(ref d) => StatementTrace::Valid(signer.clone(), d.clone()),
|
||||
Statement::Invalid(ref d) => StatementTrace::Invalid(signer.clone(), d.clone()),
|
||||
Statement::Available(ref d) => StatementTrace::Available(signer.clone(), d.clone()),
|
||||
};
|
||||
|
||||
let (maybe_misbehavior, maybe_summary) = match statement {
|
||||
let res = match statement {
|
||||
Statement::Candidate(candidate) => self.import_candidate(
|
||||
context,
|
||||
signer.clone(),
|
||||
@@ -374,19 +394,15 @@ impl<C: Context> Table<C> {
|
||||
),
|
||||
};
|
||||
|
||||
if let Some(misbehavior) = maybe_misbehavior {
|
||||
// all misbehavior in agreement is provable and actively malicious.
|
||||
// punishments are not cumulative.
|
||||
self.detected_misbehavior.insert(signer, misbehavior);
|
||||
} else {
|
||||
if let Some(from) = from {
|
||||
self.note_trace_seen(trace.clone(), from);
|
||||
match res {
|
||||
Ok(maybe_summary) => maybe_summary,
|
||||
Err(misbehavior) => {
|
||||
// all misbehavior in agreement is provable and actively malicious.
|
||||
// punishments are not cumulative.
|
||||
self.detected_misbehavior.insert(signer, misbehavior);
|
||||
None
|
||||
}
|
||||
|
||||
self.note_trace_seen(trace, signer);
|
||||
}
|
||||
|
||||
maybe_summary
|
||||
}
|
||||
|
||||
/// Get a candidate by digest.
|
||||
@@ -396,7 +412,7 @@ impl<C: Context> Table<C> {
|
||||
|
||||
/// Access all witnessed misbehavior.
|
||||
pub fn get_misbehavior(&self)
|
||||
-> &HashMap<C::AuthorityId, <C as ResolveMisbehavior>::Misbehavior>
|
||||
-> &HashMap<C::AuthorityId, MisbehaviorFor<C>>
|
||||
{
|
||||
&self.detected_misbehavior
|
||||
}
|
||||
@@ -406,155 +422,22 @@ impl<C: Context> Table<C> {
|
||||
self.includable_count.len()
|
||||
}
|
||||
|
||||
/// Fill a statement batch and note messages as seen by the targets.
|
||||
pub fn fill_batch<B>(&mut self, batch: &mut B)
|
||||
where B: StatementBatch<
|
||||
C::AuthorityId,
|
||||
SignedStatement<C::Candidate, C::Digest, C::AuthorityId, C::Signature>,
|
||||
>
|
||||
{
|
||||
// naively iterate all statements so far, taking any that
|
||||
// at least one of the targets has not seen.
|
||||
|
||||
// workaround for the fact that it's inconvenient to borrow multiple
|
||||
// entries out of a hashmap mutably -- we just move them out and
|
||||
// replace them when we're done.
|
||||
struct SwappedTargetData<'a, C: 'a + Context> {
|
||||
authority_data: &'a mut HashMap<C::AuthorityId, AuthorityData<C>>,
|
||||
target_data: Vec<(C::AuthorityId, AuthorityData<C>)>,
|
||||
}
|
||||
|
||||
impl<'a, C: 'a + Context> Drop for SwappedTargetData<'a, C> {
|
||||
fn drop(&mut self) {
|
||||
for (id, data) in self.target_data.drain(..) {
|
||||
self.authority_data.insert(id, data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// pre-fetch authority data for all the targets.
|
||||
let mut target_data = {
|
||||
let authority_data = &mut self.authority_data;
|
||||
let mut target_data = Vec::with_capacity(batch.targets().len());
|
||||
for target in batch.targets() {
|
||||
let active_data = match authority_data.get_mut(target) {
|
||||
None => Default::default(),
|
||||
Some(x) => ::std::mem::replace(x, Default::default()),
|
||||
};
|
||||
|
||||
target_data.push((target.clone(), active_data));
|
||||
}
|
||||
|
||||
SwappedTargetData {
|
||||
authority_data,
|
||||
target_data
|
||||
}
|
||||
};
|
||||
|
||||
let target_data = &mut target_data.target_data;
|
||||
|
||||
macro_rules! attempt_send {
|
||||
($trace:expr, sender=$sender:expr, sig=$sig:expr, statement=$statement:expr) => {{
|
||||
let trace = $trace;
|
||||
let can_send = target_data.iter()
|
||||
.any(|t| !t.1.known_statements.contains(&trace));
|
||||
|
||||
if can_send {
|
||||
let statement = SignedStatement {
|
||||
statement: $statement,
|
||||
signature: $sig,
|
||||
sender: $sender,
|
||||
};
|
||||
|
||||
if batch.push(statement) {
|
||||
for target in target_data.iter_mut() {
|
||||
target.1.known_statements.insert(trace.clone());
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}}
|
||||
}
|
||||
|
||||
// reconstruct statements for anything whose trace passes the filter.
|
||||
for (digest, candidate) in self.candidate_votes.iter() {
|
||||
let issuance_iter = candidate.validity_votes.iter()
|
||||
.filter(|&(_, x)| if let ValidityVote::Issued(_) = *x { true } else { false });
|
||||
|
||||
let validity_iter = candidate.validity_votes.iter()
|
||||
.filter(|&(_, x)| if let ValidityVote::Issued(_) = *x { false } else { true });
|
||||
|
||||
// send issuance statements before votes.
|
||||
for (sender, vote) in issuance_iter.chain(validity_iter) {
|
||||
match *vote {
|
||||
ValidityVote::Issued(ref sig) => {
|
||||
attempt_send!(
|
||||
StatementTrace::Candidate(sender.clone()),
|
||||
sender = sender.clone(),
|
||||
sig = sig.clone(),
|
||||
statement = Statement::Candidate(candidate.candidate.clone())
|
||||
)
|
||||
}
|
||||
ValidityVote::Valid(ref sig) => {
|
||||
attempt_send!(
|
||||
StatementTrace::Valid(sender.clone(), digest.clone()),
|
||||
sender = sender.clone(),
|
||||
sig = sig.clone(),
|
||||
statement = Statement::Valid(digest.clone())
|
||||
)
|
||||
}
|
||||
ValidityVote::Invalid(ref sig) => {
|
||||
attempt_send!(
|
||||
StatementTrace::Invalid(sender.clone(), digest.clone()),
|
||||
sender = sender.clone(),
|
||||
sig = sig.clone(),
|
||||
statement = Statement::Invalid(digest.clone())
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// and lastly send availability.
|
||||
for (sender, sig) in candidate.availability_votes.iter() {
|
||||
attempt_send!(
|
||||
StatementTrace::Available(sender.clone(), digest.clone()),
|
||||
sender = sender.clone(),
|
||||
sig = sig.clone(),
|
||||
statement = Statement::Available(digest.clone())
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
fn note_trace_seen(&mut self, trace: StatementTrace<C::AuthorityId, C::Digest>, known_by: C::AuthorityId) {
|
||||
self.authority_data.entry(known_by).or_insert_with(|| AuthorityData {
|
||||
proposal: None,
|
||||
known_statements: HashSet::default(),
|
||||
}).known_statements.insert(trace);
|
||||
}
|
||||
|
||||
fn import_candidate(
|
||||
&mut self,
|
||||
context: &C,
|
||||
from: C::AuthorityId,
|
||||
candidate: C::Candidate,
|
||||
signature: C::Signature,
|
||||
) -> (Option<<C as ResolveMisbehavior>::Misbehavior>, Option<Summary<C::Digest, C::GroupId>>) {
|
||||
) -> ImportResult<C> {
|
||||
let group = C::candidate_group(&candidate);
|
||||
if !context.is_member_of(&from, &group) {
|
||||
return (
|
||||
Some(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature,
|
||||
statement: Statement::Candidate(candidate),
|
||||
sender: from,
|
||||
},
|
||||
})),
|
||||
None,
|
||||
);
|
||||
return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature,
|
||||
statement: Statement::Candidate(candidate),
|
||||
sender: from,
|
||||
},
|
||||
}));
|
||||
}
|
||||
|
||||
// check that authority hasn't already specified another candidate.
|
||||
@@ -578,13 +461,10 @@ impl<C: Context> Table<C> {
|
||||
.candidate
|
||||
.clone();
|
||||
|
||||
return (
|
||||
Some(Misbehavior::MultipleCandidates(MultipleCandidates {
|
||||
first: (old_candidate, old_sig.clone()),
|
||||
second: (candidate, signature.clone()),
|
||||
})),
|
||||
None,
|
||||
);
|
||||
return Err(Misbehavior::MultipleCandidates(MultipleCandidates {
|
||||
first: (old_candidate, old_sig.clone()),
|
||||
second: (candidate, signature.clone()),
|
||||
}));
|
||||
}
|
||||
|
||||
false
|
||||
@@ -596,7 +476,6 @@ impl<C: Context> Table<C> {
|
||||
Entry::Vacant(vacant) => {
|
||||
vacant.insert(AuthorityData {
|
||||
proposal: Some((digest.clone(), signature.clone())),
|
||||
known_statements: HashSet::new(),
|
||||
});
|
||||
true
|
||||
}
|
||||
@@ -628,9 +507,9 @@ impl<C: Context> Table<C> {
|
||||
from: C::AuthorityId,
|
||||
digest: C::Digest,
|
||||
vote: ValidityVote<C::Signature>,
|
||||
) -> (Option<<C as ResolveMisbehavior>::Misbehavior>, Option<Summary<C::Digest, C::GroupId>>) {
|
||||
) -> ImportResult<C> {
|
||||
let votes = match self.candidate_votes.get_mut(&digest) {
|
||||
None => return (None, None), // TODO: queue up but don't get DoS'ed
|
||||
None => return Ok(None),
|
||||
Some(votes) => votes,
|
||||
};
|
||||
|
||||
@@ -647,50 +526,56 @@ impl<C: Context> Table<C> {
|
||||
checking group membership of issuer; qed"),
|
||||
};
|
||||
|
||||
return (
|
||||
Some(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature: sig,
|
||||
sender: from,
|
||||
statement: if valid {
|
||||
Statement::Valid(digest)
|
||||
} else {
|
||||
Statement::Invalid(digest)
|
||||
}
|
||||
return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature: sig,
|
||||
sender: from,
|
||||
statement: if valid {
|
||||
Statement::Valid(digest)
|
||||
} else {
|
||||
Statement::Invalid(digest)
|
||||
}
|
||||
})),
|
||||
None,
|
||||
);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
// check for double votes.
|
||||
match votes.validity_votes.entry(from.clone()) {
|
||||
Entry::Occupied(occ) => {
|
||||
if occ.get() != &vote {
|
||||
let double_vote_proof = match (occ.get().clone(), vote) {
|
||||
let make_vdv = |v| Misbehavior::ValidityDoubleVote(v);
|
||||
let make_ds = |ds| Misbehavior::DoubleSign(ds);
|
||||
return if occ.get() != &vote {
|
||||
Err(match (occ.get().clone(), vote) {
|
||||
// valid vote conflicting with candidate statement
|
||||
(ValidityVote::Issued(iss), ValidityVote::Valid(good)) |
|
||||
(ValidityVote::Valid(good), ValidityVote::Issued(iss)) =>
|
||||
ValidityDoubleVote::IssuedAndValidity((votes.candidate.clone(), iss), (digest, good)),
|
||||
make_vdv(ValidityDoubleVote::IssuedAndValidity((votes.candidate.clone(), iss), (digest, good))),
|
||||
|
||||
// invalid vote conflicting with candidate statement
|
||||
(ValidityVote::Issued(iss), ValidityVote::Invalid(bad)) |
|
||||
(ValidityVote::Invalid(bad), ValidityVote::Issued(iss)) =>
|
||||
ValidityDoubleVote::IssuedAndInvalidity((votes.candidate.clone(), iss), (digest, bad)),
|
||||
make_vdv(ValidityDoubleVote::IssuedAndInvalidity((votes.candidate.clone(), iss), (digest, bad))),
|
||||
|
||||
// valid vote conflicting with invalid vote
|
||||
(ValidityVote::Valid(good), ValidityVote::Invalid(bad)) |
|
||||
(ValidityVote::Invalid(bad), ValidityVote::Valid(good)) =>
|
||||
ValidityDoubleVote::ValidityAndInvalidity(digest, good, bad),
|
||||
_ => {
|
||||
// this would occur if two different but valid signatures
|
||||
// on the same kind of vote occurred.
|
||||
return (None, None);
|
||||
}
|
||||
};
|
||||
make_vdv(ValidityDoubleVote::ValidityAndInvalidity(digest, good, bad)),
|
||||
|
||||
return (
|
||||
Some(Misbehavior::ValidityDoubleVote(double_vote_proof)),
|
||||
None,
|
||||
)
|
||||
// two signatures on same candidate
|
||||
(ValidityVote::Issued(a), ValidityVote::Issued(b)) =>
|
||||
make_ds(DoubleSign::Candidate(votes.candidate.clone(), a, b)),
|
||||
|
||||
// two signatures on same validity vote
|
||||
(ValidityVote::Valid(a), ValidityVote::Valid(b)) =>
|
||||
make_ds(DoubleSign::Validity(digest, a, b)),
|
||||
|
||||
// two signature on same invalidity vote
|
||||
(ValidityVote::Invalid(a), ValidityVote::Invalid(b)) =>
|
||||
make_ds(DoubleSign::Invalidity(digest, a, b)),
|
||||
})
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
return (None, None);
|
||||
}
|
||||
Entry::Vacant(vacant) => {
|
||||
if let ValidityVote::Invalid(_) = vote {
|
||||
@@ -704,7 +589,7 @@ impl<C: Context> Table<C> {
|
||||
let is_includable = votes.can_be_included(v_threshold, a_threshold);
|
||||
update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable);
|
||||
|
||||
(None, Some(votes.summary(digest)))
|
||||
Ok(Some(votes.summary(digest)))
|
||||
}
|
||||
|
||||
fn availability_vote(
|
||||
@@ -713,9 +598,9 @@ impl<C: Context> Table<C> {
|
||||
from: C::AuthorityId,
|
||||
digest: C::Digest,
|
||||
signature: C::Signature,
|
||||
) -> (Option<<C as ResolveMisbehavior>::Misbehavior>, Option<Summary<C::Digest, C::GroupId>>) {
|
||||
) -> ImportResult<C> {
|
||||
let votes = match self.candidate_votes.get_mut(&digest) {
|
||||
None => return (None, None), // TODO: queue up but don't get DoS'ed
|
||||
None => return Ok(None),
|
||||
Some(votes) => votes,
|
||||
};
|
||||
|
||||
@@ -724,24 +609,26 @@ impl<C: Context> Table<C> {
|
||||
|
||||
// check that this authority actually can vote in this group.
|
||||
if !context.is_availability_guarantor_of(&from, &votes.group_id) {
|
||||
return (
|
||||
Some(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature: signature.clone(),
|
||||
statement: Statement::Available(digest),
|
||||
sender: from,
|
||||
}
|
||||
})),
|
||||
None
|
||||
);
|
||||
return Err(Misbehavior::UnauthorizedStatement(UnauthorizedStatement {
|
||||
statement: SignedStatement {
|
||||
signature: signature,
|
||||
statement: Statement::Available(digest),
|
||||
sender: from,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
votes.availability_votes.insert(from, signature);
|
||||
match votes.availability_votes.entry(from) {
|
||||
Entry::Occupied(ref occ) if occ.get() != &signature => return Err(
|
||||
Misbehavior::DoubleSign(DoubleSign::Availability(digest, signature, occ.get().clone()))
|
||||
),
|
||||
entry => { let _ = entry.or_insert(signature); },
|
||||
}
|
||||
|
||||
let is_includable = votes.can_be_included(v_threshold, a_threshold);
|
||||
update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable);
|
||||
|
||||
(None, Some(votes.summary(digest)))
|
||||
Ok(Some(votes.summary(digest)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -765,26 +652,6 @@ mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct VecBatch<V, T> {
|
||||
pub max_len: usize,
|
||||
pub targets: Vec<V>,
|
||||
pub items: Vec<T>,
|
||||
}
|
||||
|
||||
impl<V, T> ::generic::StatementBatch<V, T> for VecBatch<V, T> {
|
||||
fn targets(&self) -> &[V] { &self.targets }
|
||||
fn is_empty(&self) -> bool { self.items.is_empty() }
|
||||
fn push(&mut self, item: T) -> bool {
|
||||
if self.items.len() == self.max_len {
|
||||
false
|
||||
} else {
|
||||
self.items.push(item);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create<C: Context>() -> Table<C> {
|
||||
Table::default()
|
||||
}
|
||||
@@ -878,10 +745,10 @@ mod tests {
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement_a, None);
|
||||
table.import_statement(&context, statement_a);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
table.import_statement(&context, statement_b, None);
|
||||
table.import_statement(&context, statement_b);
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(1)).unwrap(),
|
||||
&Misbehavior::MultipleCandidates(MultipleCandidates {
|
||||
@@ -908,7 +775,7 @@ mod tests {
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(1)).unwrap(),
|
||||
@@ -949,8 +816,8 @@ mod tests {
|
||||
};
|
||||
let candidate_b_digest = Digest(987);
|
||||
|
||||
table.import_statement(&context, candidate_a, None);
|
||||
table.import_statement(&context, candidate_b, None);
|
||||
table.import_statement(&context, candidate_a);
|
||||
table.import_statement(&context, candidate_b);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
|
||||
@@ -960,7 +827,7 @@ mod tests {
|
||||
signature: Signature(1),
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
table.import_statement(&context, bad_availability_vote, None);
|
||||
table.import_statement(&context, bad_availability_vote);
|
||||
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(1)).unwrap(),
|
||||
@@ -979,7 +846,7 @@ mod tests {
|
||||
signature: Signature(2),
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
table.import_statement(&context, bad_validity_vote, None);
|
||||
table.import_statement(&context, bad_validity_vote);
|
||||
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(2)).unwrap(),
|
||||
@@ -1012,7 +879,7 @@ mod tests {
|
||||
};
|
||||
let candidate_digest = Digest(100);
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
let valid_statement = SignedStatement {
|
||||
@@ -1027,10 +894,10 @@ mod tests {
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
table.import_statement(&context, valid_statement, None);
|
||||
table.import_statement(&context, valid_statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
|
||||
table.import_statement(&context, invalid_statement, None);
|
||||
table.import_statement(&context, invalid_statement);
|
||||
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(2)).unwrap(),
|
||||
@@ -1042,6 +909,102 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn candidate_double_signature_is_misbehavior() {
|
||||
let context = TestContext {
|
||||
authorities: {
|
||||
let mut map = HashMap::new();
|
||||
map.insert(AuthorityId(1), (GroupId(2), GroupId(455)));
|
||||
map.insert(AuthorityId(2), (GroupId(2), GroupId(246)));
|
||||
map
|
||||
}
|
||||
};
|
||||
|
||||
let mut table = create();
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Candidate(Candidate(2, 100)),
|
||||
signature: Signature(1),
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
let invalid_statement = SignedStatement {
|
||||
statement: Statement::Candidate(Candidate(2, 100)),
|
||||
signature: Signature(999),
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, invalid_statement);
|
||||
assert!(table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validity_invalidity_double_signature_is_misbehavior() {
|
||||
let context = TestContext {
|
||||
authorities: {
|
||||
let mut map = HashMap::new();
|
||||
map.insert(AuthorityId(1), (GroupId(2), GroupId(455)));
|
||||
map.insert(AuthorityId(2), (GroupId(2), GroupId(246)));
|
||||
map.insert(AuthorityId(3), (GroupId(2), GroupId(222)));
|
||||
map
|
||||
}
|
||||
};
|
||||
|
||||
let mut table = create();
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Candidate(Candidate(2, 100)),
|
||||
signature: Signature(1),
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
// insert two validity votes from authority 2 with different signatures
|
||||
{
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Valid(Digest(100)),
|
||||
signature: Signature(2),
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
|
||||
let invalid_statement = SignedStatement {
|
||||
statement: Statement::Valid(Digest(100)),
|
||||
signature: Signature(222),
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
table.import_statement(&context, invalid_statement);
|
||||
assert!(table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
}
|
||||
|
||||
// insert two invalidity votes from authority 2 with different signatures
|
||||
{
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Invalid(Digest(100)),
|
||||
signature: Signature(3),
|
||||
sender: AuthorityId(3),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(3)));
|
||||
|
||||
let invalid_statement = SignedStatement {
|
||||
statement: Statement::Invalid(Digest(100)),
|
||||
signature: Signature(333),
|
||||
sender: AuthorityId(3),
|
||||
};
|
||||
|
||||
table.import_statement(&context, invalid_statement);
|
||||
assert!(table.detected_misbehavior.contains_key(&AuthorityId(3)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn issue_and_vote_is_misbehavior() {
|
||||
let context = TestContext {
|
||||
@@ -1060,7 +1023,7 @@ mod tests {
|
||||
};
|
||||
let candidate_digest = Digest(100);
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
let extra_vote = SignedStatement {
|
||||
@@ -1069,7 +1032,7 @@ mod tests {
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, extra_vote, None);
|
||||
table.import_statement(&context, extra_vote);
|
||||
assert_eq!(
|
||||
table.detected_misbehavior.get(&AuthorityId(1)).unwrap(),
|
||||
&Misbehavior::ValidityDoubleVote(ValidityDoubleVote::IssuedAndValidity(
|
||||
@@ -1133,7 +1096,7 @@ mod tests {
|
||||
};
|
||||
let candidate_digest = Digest(100);
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
assert!(!table.candidate_includable(&candidate_digest, &context));
|
||||
assert!(table.includable_count.is_empty());
|
||||
@@ -1144,7 +1107,7 @@ mod tests {
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
table.import_statement(&context, vote, None);
|
||||
table.import_statement(&context, vote);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
assert!(!table.candidate_includable(&candidate_digest, &context));
|
||||
assert!(table.includable_count.is_empty());
|
||||
@@ -1156,7 +1119,7 @@ mod tests {
|
||||
sender: AuthorityId(4),
|
||||
};
|
||||
|
||||
table.import_statement(&context, vote, None);
|
||||
table.import_statement(&context, vote);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(4)));
|
||||
assert!(table.candidate_includable(&candidate_digest, &context));
|
||||
assert!(table.includable_count.get(&GroupId(2)).is_some());
|
||||
@@ -1168,7 +1131,7 @@ mod tests {
|
||||
sender: AuthorityId(3),
|
||||
};
|
||||
|
||||
table.import_statement(&context, vote, None);
|
||||
table.import_statement(&context, vote);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
assert!(!table.candidate_includable(&candidate_digest, &context));
|
||||
assert!(table.includable_count.is_empty());
|
||||
@@ -1191,7 +1154,7 @@ mod tests {
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
let summary = table.import_statement(&context, statement, None)
|
||||
let summary = table.import_statement(&context, statement)
|
||||
.expect("candidate import to give summary");
|
||||
|
||||
assert_eq!(summary.candidate, Digest(100));
|
||||
@@ -1219,7 +1182,7 @@ mod tests {
|
||||
};
|
||||
let candidate_digest = Digest(100);
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
let vote = SignedStatement {
|
||||
@@ -1228,7 +1191,7 @@ mod tests {
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
let summary = table.import_statement(&context, vote, None)
|
||||
let summary = table.import_statement(&context, vote)
|
||||
.expect("candidate vote to give summary");
|
||||
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
@@ -1258,7 +1221,7 @@ mod tests {
|
||||
};
|
||||
let candidate_digest = Digest(100);
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
table.import_statement(&context, statement);
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1)));
|
||||
|
||||
let vote = SignedStatement {
|
||||
@@ -1267,7 +1230,7 @@ mod tests {
|
||||
sender: AuthorityId(2),
|
||||
};
|
||||
|
||||
let summary = table.import_statement(&context, vote, None)
|
||||
let summary = table.import_statement(&context, vote)
|
||||
.expect("candidate vote to give summary");
|
||||
|
||||
assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2)));
|
||||
@@ -1277,55 +1240,4 @@ mod tests {
|
||||
assert_eq!(summary.validity_votes, 1);
|
||||
assert_eq!(summary.availability_votes, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn filling_batch_sets_known_flag() {
|
||||
let context = TestContext {
|
||||
authorities: {
|
||||
let mut map = HashMap::new();
|
||||
for i in 1..10 {
|
||||
map.insert(AuthorityId(i), (GroupId(2), GroupId(400 + i)));
|
||||
}
|
||||
map
|
||||
}
|
||||
};
|
||||
|
||||
let mut table = create();
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Candidate(Candidate(2, 100)),
|
||||
signature: Signature(1),
|
||||
sender: AuthorityId(1),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
|
||||
for i in 2..10 {
|
||||
let statement = SignedStatement {
|
||||
statement: Statement::Valid(Digest(100)),
|
||||
signature: Signature(i),
|
||||
sender: AuthorityId(i),
|
||||
};
|
||||
|
||||
table.import_statement(&context, statement, None);
|
||||
}
|
||||
|
||||
let mut batch = VecBatch {
|
||||
max_len: 5,
|
||||
targets: (1..10).map(AuthorityId).collect(),
|
||||
items: Vec::new(),
|
||||
};
|
||||
|
||||
// 9 statements in the table, each seen by one.
|
||||
table.fill_batch(&mut batch);
|
||||
assert_eq!(batch.items.len(), 5);
|
||||
|
||||
// 9 statements in the table, 5 of which seen by all targets.
|
||||
batch.items.clear();
|
||||
table.fill_batch(&mut batch);
|
||||
assert_eq!(batch.items.len(), 4);
|
||||
|
||||
batch.items.clear();
|
||||
table.fill_batch(&mut batch);
|
||||
assert!(batch.items.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,9 +14,14 @@
|
||||
//! propose and attest to validity of candidates, and those who can only attest
|
||||
//! to availability.
|
||||
|
||||
extern crate substrate_codec as codec;
|
||||
extern crate substrate_primitives;
|
||||
extern crate polkadot_primitives as primitives;
|
||||
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
|
||||
pub mod generic;
|
||||
|
||||
pub use generic::Table;
|
||||
@@ -82,27 +87,3 @@ impl<C: Context> generic::Context for C {
|
||||
Context::requisite_votes(self, group)
|
||||
}
|
||||
}
|
||||
|
||||
/// A batch of statements to send out.
|
||||
pub trait StatementBatch {
|
||||
/// Get the target authorities of these statements.
|
||||
fn targets(&self) -> &[SessionKey];
|
||||
|
||||
/// If the batch is empty.
|
||||
fn is_empty(&self) -> bool;
|
||||
|
||||
/// Push a statement onto the batch. Returns false when the batch is full.
|
||||
///
|
||||
/// This is meant to do work like incrementally serializing the statements
|
||||
/// into a vector of bytes while making sure the length is below a certain
|
||||
/// amount.
|
||||
fn push(&mut self, statement: SignedStatement) -> bool;
|
||||
}
|
||||
|
||||
impl<T: StatementBatch> generic::StatementBatch<SessionKey, SignedStatement> for T {
|
||||
fn targets(&self) -> &[SessionKey] { StatementBatch::targets(self ) }
|
||||
fn is_empty(&self) -> bool { StatementBatch::is_empty(self) }
|
||||
fn push(&mut self, statement: SignedStatement) -> bool {
|
||||
StatementBatch::push(self, statement)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user