Reduce network bandwidth, improve parablock times: optimize approval-distribution (#5164)

* gossip-support: be explicit about dimensions

* some guide updates

* update network-bridge to distinguish x and y dimensions

* get everything to compile

* beginnings

* some TODOs

* polkadot runtime: use relevant_authorities

* make gossip topologies per-session

* better formatting

* gossip support: use current session validators

* expand in comment

* adjust tests and fix index bug

* add past/present/future connection test and clean up code

* fmt

* network bridge: updated types

* update protocols to new gossip topology message

* guide updates

* add session to BlockApprovalMeta

* add session to block info

* refactor knowledge and remove most unify logic

* start replacing gossip_peers with new SessionTopologies

* add routing information to message state

* add some utilities to SessionTopology

* implement new gossip topology logic

* re-implement unify_with_peer

* distribute assignments according to topology

* finish grid topology implementation

* refactor network bridge slightly

* issue connection requests on all past/present/future

* fmt

* address grumbles

* tighten invariants in unify_with_peer

* implement random propagation

* refactor: extract required routing adjustment logic

* some block-age logic

* aggressively propagate messages when finality is slow

* overhaul aggression system to have 3 levels

* add aggression metrics

* remove aggression L3

* reduce random circulation

* remove PeerData

* get approval tests compiling

* use btree_map in known_by to make deterministic

* Revert "use btree_map in known_by to make deterministic"

This reverts commit 330d65343a7bb6fe4dd0f24bd8dbc15c0cbdbd9d.

* test XY grid propagation

* remove stray println

* test unshared dimension propagation

* add random gossip check

* test unify_with_peer better

* test sending after getting gossip topology

* test L1 aggression on originator

* test L1 aggression for non-originators

* test non-originator aggression L2

* fmt

* ~spellcheck

* fix statement-distribution tests

* fix flaky test

* fix metrics typo

* re-send periodically

* test resending

* typo

Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>

* add more metrics about apd messages

* add back unify_with_peer logs

* make Resend an enum

* be more explicit when resending

* fmt

* fix error

* add a TODO for refactoring

* remove debug metrics

* add some guide stuff

* fmt

* update runtime API in test-runtime

Co-authored-by: Bernhard Schuster <bernhard@ahoi.io>
This commit is contained in:
asynchronous rob
2022-04-19 13:26:55 -05:00
committed by GitHub
parent edfa24bbc5
commit 79ecc53801
25 changed files with 2563 additions and 499 deletions
+5
View File
@@ -6333,8 +6333,11 @@ dependencies = [
"polkadot-node-subsystem-test-helpers",
"polkadot-node-subsystem-util",
"polkadot-primitives",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rand_core 0.5.1",
"schnorrkel",
"sp-authority-discovery",
"sp-core",
"tracing-gum",
]
@@ -6587,6 +6590,7 @@ dependencies = [
"rand_chacha 0.3.1",
"sc-network",
"sp-application-crypto",
"sp-authority-discovery",
"sp-consensus-babe",
"sp-core",
"sp-keyring",
@@ -7615,6 +7619,7 @@ dependencies = [
"sc-keystore",
"sc-network",
"sp-application-crypto",
"sp-authority-discovery",
"sp-core",
"sp-keyring",
"sp-keystore",
@@ -574,6 +574,7 @@ pub(crate) async fn handle_new_head(
parent_hash: block_header.parent_hash,
candidates: included_candidates.iter().map(|(hash, _, _, _)| *hash).collect(),
slot,
session: session_index,
});
imported_candidates.push(BlockImportedCandidates {
@@ -1003,6 +1003,7 @@ fn distribution_messages_for_activation(
parent_hash: block_entry.parent_hash(),
candidates: block_entry.candidates().iter().map(|(_, c_hash)| *c_hash).collect(),
slot: block_entry.slot(),
session: block_entry.session(),
});
for (i, (_, candidate_hash)) in block_entry.candidates().iter().enumerate() {
@@ -10,11 +10,13 @@ polkadot-node-network-protocol = { path = "../protocol" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-primitives = { path = "../../../primitives" }
rand = "0.8"
futures = "0.3.21"
gum = { package = "tracing-gum", path = "../../gum" }
[dev-dependencies]
sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
@@ -23,5 +25,6 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
assert_matches = "1.4.0"
schnorrkel = { version = "0.9.1", default-features = false }
rand_core = "0.5.1" # should match schnorrkel
rand_chacha = "0.3.1"
env_logger = "0.9.0"
log = "0.4.16"
File diff suppressed because it is too large Load Diff
@@ -25,6 +25,8 @@ struct MetricsInner {
assignments_imported_total: prometheus::Counter<prometheus::U64>,
approvals_imported_total: prometheus::Counter<prometheus::U64>,
unified_with_peer_total: prometheus::Counter<prometheus::U64>,
aggression_l1_messages_total: prometheus::Counter<prometheus::U64>,
aggression_l2_messages_total: prometheus::Counter<prometheus::U64>,
time_unify_with_peer: prometheus::Histogram,
time_import_pending_now_known: prometheus::Histogram,
@@ -69,6 +71,18 @@ impl Metrics {
.as_ref()
.map(|metrics| metrics.time_awaiting_approval_voting.start_timer())
}
pub(crate) fn on_aggression_l1(&self) {
if let Some(metrics) = &self.0 {
metrics.aggression_l1_messages_total.inc();
}
}
pub(crate) fn on_aggression_l2(&self) {
if let Some(metrics) = &self.0 {
metrics.aggression_l2_messages_total.inc();
}
}
}
impl MetricsTrait for Metrics {
@@ -95,6 +109,20 @@ impl MetricsTrait for Metrics {
)?,
registry,
)?,
aggression_l1_messages_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_approval_distribution_aggression_l1_messages_total",
"Number of messages in approval distribution for which aggression L1 has been triggered",
)?,
registry,
)?,
aggression_l2_messages_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_approval_distribution_aggression_l2_messages_total",
"Number of messages in approval distribution for which aggression L2 has been triggered",
)?,
registry,
)?,
time_unify_with_peer: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_time_unify_with_peer",
File diff suppressed because it is too large Load Diff
@@ -523,7 +523,14 @@ async fn handle_network_msg<Context>(
// get rid of superfluous data
state.peer_views.remove(&peer);
},
NetworkBridgeEvent::NewGossipTopology(peers) => {
NetworkBridgeEvent::NewGossipTopology(topology) => {
// Combine all peers in the x & y direction as we don't make any distinction.
let peers: HashSet<PeerId> = topology
.our_neighbors_x
.values()
.chain(topology.our_neighbors_y.values())
.flat_map(|peer_info| peer_info.peer_ids.iter().cloned())
.collect();
let newly_added: Vec<PeerId> = peers.difference(&state.gossip_peers).cloned().collect();
state.gossip_peers = peers;
for new_peer in newly_added {
+49 -17
View File
@@ -31,10 +31,13 @@ use polkadot_node_network_protocol::{
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_overseer::gen::{OverseerError, Subsystem};
use polkadot_primitives::v2::{BlockNumber, Hash};
use polkadot_primitives::v2::{AuthorityDiscoveryId, BlockNumber, Hash, ValidatorIndex};
use polkadot_subsystem::{
errors::{SubsystemError, SubsystemResult},
messages::{AllMessages, CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage},
messages::{
network_bridge_event::{NewGossipTopology, TopologyPeerInfo},
AllMessages, CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage,
},
overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
SubsystemContext, SubsystemSender,
};
@@ -45,7 +48,8 @@ use polkadot_subsystem::{
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority};
use std::{
collections::{hash_map, HashMap, HashSet},
collections::{hash_map, HashMap},
iter::ExactSizeIterator,
sync::Arc,
};
@@ -590,30 +594,36 @@ where
).await;
}
NetworkBridgeMessage::NewGossipTopology {
our_neighbors,
session,
our_neighbors_x,
our_neighbors_y,
} => {
gum::debug!(
target: LOG_TARGET,
action = "NewGossipTopology",
neighbors = our_neighbors.len(),
neighbors_x = our_neighbors_x.len(),
neighbors_y = our_neighbors_y.len(),
"Gossip topology has changed",
);
let ads = &mut authority_discovery_service;
let mut gossip_peers = HashSet::with_capacity(our_neighbors.len());
for authority in our_neighbors {
let addr = get_peer_id_by_authority_id(
ads,
authority.clone(),
).await;
let gossip_peers_x = update_gossip_peers_1d(
&mut authority_discovery_service,
our_neighbors_x,
).await;
if let Some(peer_id) = addr {
gossip_peers.insert(peer_id);
}
}
let gossip_peers_y = update_gossip_peers_1d(
&mut authority_discovery_service,
our_neighbors_y,
).await;
dispatch_validation_event_to_all_unbounded(
NetworkBridgeEvent::NewGossipTopology(gossip_peers),
NetworkBridgeEvent::NewGossipTopology(
NewGossipTopology {
session,
our_neighbors_x: gossip_peers_x,
our_neighbors_y: gossip_peers_y,
}
),
ctx.sender(),
);
}
@@ -624,6 +634,28 @@ where
}
}
async fn update_gossip_peers_1d<AD, N>(
ads: &mut AD,
neighbors: N,
) -> HashMap<AuthorityDiscoveryId, TopologyPeerInfo>
where
AD: validator_discovery::AuthorityDiscovery,
N: IntoIterator<Item = (AuthorityDiscoveryId, ValidatorIndex)>,
N::IntoIter: std::iter::ExactSizeIterator,
{
let neighbors = neighbors.into_iter();
let mut peers = HashMap::with_capacity(neighbors.len());
for (authority, validator_index) in neighbors {
let addr = get_peer_id_by_authority_id(ads, authority.clone()).await;
if let Some(peer_id) = addr {
peers.insert(authority, TopologyPeerInfo { peer_ids: vec![peer_id], validator_index });
}
}
peers
}
async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
mut sender: impl SubsystemSender,
mut network_service: impl Network,
@@ -971,7 +971,7 @@ where
PeerMessage(remote, msg) => {
handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?;
},
NewGossipTopology(..) => {
NewGossipTopology { .. } => {
// impossible!
},
}
@@ -1090,7 +1090,7 @@ where
state.peer_data.remove(&peer_id);
state.metrics.note_collator_peer_count(state.peer_data.len());
},
NewGossipTopology(..) => {
NewGossipTopology { .. } => {
// impossible!
},
PeerViewChange(peer_id, view) => {
@@ -25,6 +25,7 @@ gum = { package = "tracing-gum", path = "../../gum" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
+125 -52
View File
@@ -49,10 +49,12 @@ use polkadot_node_subsystem::{
RuntimeApiRequest,
},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
SubsystemError, SubsystemSender,
SubsystemError,
};
use polkadot_node_subsystem_util as util;
use polkadot_primitives::v2::{AuthorityDiscoveryId, Hash, SessionIndex};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, Hash, SessionIndex, SessionInfo, ValidatorIndex,
};
#[cfg(test)]
mod tests;
@@ -213,6 +215,24 @@ where
if force_request { leaf_session } else { maybe_new_session };
if let Some((session_index, relay_parent)) = maybe_issue_connection {
let session_info =
util::request_session_info(leaf, session_index, ctx.sender()).await.await??;
let session_info = match session_info {
Some(s) => s,
None => {
gum::warn!(
relay_parent = ?leaf,
session_index = self.last_session_index,
"Failed to get session info.",
);
continue
},
};
// Note: we only update `last_session_index` once we've
// successfully gotten the `SessionInfo`.
let is_new_session = maybe_new_session.is_some();
if is_new_session {
gum::debug!(
@@ -223,45 +243,52 @@ where
self.last_session_index = Some(session_index);
}
let all_authorities = determine_relevant_authorities(ctx, relay_parent).await?;
let our_index = ensure_i_am_an_authority(&self.keystore, &all_authorities).await?;
let other_authorities = {
let mut authorities = all_authorities.clone();
authorities.swap_remove(our_index);
authorities
};
// Connect to authorities from the past/present/future.
//
// This is maybe not the right place for this logic to live,
// but at the moment we're limited by the network bridge's ability
// to handle connection requests (it only allows one, globally).
//
// Certain network protocols - mostly req/res, but some gossip,
// will require being connected to past/future validators as well
// as current. That is, the old authority sets are not made obsolete
// by virtue of a new session being entered. Therefore we maintain
// connections to a much broader set of validators.
{
let mut connections = authorities_past_present_future(ctx, leaf).await?;
self.issue_connection_request(ctx, other_authorities).await;
// Remove all of our locally controlled validator indices so we don't connect to ourself.
// If we control none of them, don't issue connection requests - we're outside
// of the 'clique' of recent validators.
if remove_all_controlled(&self.keystore, &mut connections).await != 0 {
self.issue_connection_request(ctx, connections).await;
}
}
// Gossip topology is only relevant for authorities in the current session.
let our_index =
ensure_i_am_an_authority(&self.keystore, &session_info.discovery_keys).await?;
if is_new_session {
update_gossip_topology(ctx, our_index, all_authorities, relay_parent).await?;
self.update_authority_status_metrics(leaf, ctx.sender()).await?;
self.update_authority_status_metrics(&session_info).await;
update_gossip_topology(
ctx,
our_index,
session_info.discovery_keys,
relay_parent,
session_index,
)
.await?;
}
}
}
Ok(())
}
async fn update_authority_status_metrics(
&mut self,
leaf: Hash,
sender: &mut impl SubsystemSender,
) -> Result<(), util::Error> {
if let Some(session_info) = util::request_session_info(
leaf,
self.last_session_index
.expect("Last session index is always set on every session index change"),
sender,
)
.await
.await??
{
let maybe_index = match ensure_i_am_an_authority(
&self.keystore,
&session_info.discovery_keys,
)
.await
{
async fn update_authority_status_metrics(&mut self, session_info: &SessionInfo) {
let maybe_index =
match ensure_i_am_an_authority(&self.keystore, &session_info.discovery_keys).await {
Ok(index) => {
self.metrics.on_is_authority();
Some(index)
@@ -275,21 +302,19 @@ where
Err(_) => None,
};
if let Some(validator_index) = maybe_index {
// The subset of authorities participating in parachain consensus.
let parachain_validators_this_session = session_info.validators;
if let Some(validator_index) = maybe_index {
// The subset of authorities participating in parachain consensus.
let parachain_validators_this_session = session_info.validators.len();
// First `maxValidators` entries are the parachain validators. We'll check
// if our index is in this set to avoid searching for the keys.
// https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148
if validator_index < parachain_validators_this_session.len() {
self.metrics.on_is_parachain_validator();
} else {
self.metrics.on_is_not_parachain_validator();
}
// First `maxValidators` entries are the parachain validators. We'll check
// if our index is in this set to avoid searching for the keys.
// https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148
if validator_index < parachain_validators_this_session {
self.metrics.on_is_parachain_validator();
} else {
self.metrics.on_is_not_parachain_validator();
}
}
Ok(())
}
async fn issue_connection_request<Context>(
@@ -378,7 +403,7 @@ where
},
NetworkBridgeEvent::OurViewChange(_) => {},
NetworkBridgeEvent::PeerViewChange(_, _) => {},
NetworkBridgeEvent::NewGossipTopology(_) => {},
NetworkBridgeEvent::NewGossipTopology { .. } => {},
NetworkBridgeEvent::PeerMessage(_, v) => {
match v {};
},
@@ -416,7 +441,8 @@ where
}
}
async fn determine_relevant_authorities<Context>(
// Get the authorities of the past, present, and future.
async fn authorities_past_present_future<Context>(
ctx: &mut Context,
relay_parent: Hash,
) -> Result<Vec<AuthorityDiscoveryId>, util::Error>
@@ -428,7 +454,7 @@ where
gum::debug!(
target: LOG_TARGET,
authority_count = ?authorities.len(),
"Determined relevant authorities",
"Determined past/present/future authorities",
);
Ok(authorities)
}
@@ -447,6 +473,25 @@ async fn ensure_i_am_an_authority(
Err(util::Error::NotAValidator)
}
/// Filter out all controlled keys in the given set. Returns the number of keys removed.
async fn remove_all_controlled(
keystore: &SyncCryptoStorePtr,
authorities: &mut Vec<AuthorityDiscoveryId>,
) -> usize {
let mut to_remove = Vec::new();
for (i, v) in authorities.iter().enumerate() {
if CryptoStore::has_keys(&**keystore, &[(v.to_raw_vec(), AuthorityDiscoveryId::ID)]).await {
to_remove.push(i);
}
}
for i in to_remove.iter().rev().copied() {
authorities.remove(i);
}
to_remove.len()
}
/// We partition the list of all sorted `authorities` into `sqrt(len)` groups of `sqrt(len)` size
/// and form a matrix where each validator is connected to all validators in its row and column.
/// This is similar to `[web3]` research proposed topology, except for the groups are not parachain
@@ -460,6 +505,7 @@ async fn update_gossip_topology<Context>(
our_index: usize,
authorities: Vec<AuthorityDiscoveryId>,
relay_parent: Hash,
session_index: SessionIndex,
) -> Result<(), util::Error>
where
Context: SubsystemContext<Message = GossipSupportMessage>,
@@ -469,6 +515,8 @@ where
let random_seed = {
let (tx, rx) = oneshot::channel();
// TODO https://github.com/paritytech/polkadot/issues/5316:
// get the random seed from the `SessionInfo` instead.
ctx.send_message(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::CurrentBabeEpoch(tx),
@@ -493,16 +541,38 @@ where
.expect("our_index < len; indices contains it; qed");
let neighbors = matrix_neighbors(our_shuffled_position, len);
let our_neighbors = neighbors.map(|i| authorities[indices[i]].clone()).collect();
let row_neighbors = neighbors
.row_neighbors
.map(|i| indices[i])
.map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32)))
.collect();
ctx.send_message(NetworkBridgeMessage::NewGossipTopology { our_neighbors })
.await;
let column_neighbors = neighbors
.column_neighbors
.map(|i| indices[i])
.map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32)))
.collect();
ctx.send_message(NetworkBridgeMessage::NewGossipTopology {
session: session_index,
our_neighbors_x: row_neighbors,
our_neighbors_y: column_neighbors,
})
.await;
Ok(())
}
struct MatrixNeighbors<R, C> {
row_neighbors: R,
column_neighbors: C,
}
/// Compute our row and column neighbors in a matrix
fn matrix_neighbors(our_index: usize, len: usize) -> impl Iterator<Item = usize> {
fn matrix_neighbors(
our_index: usize,
len: usize,
) -> MatrixNeighbors<impl Iterator<Item = usize>, impl Iterator<Item = usize>> {
assert!(our_index < len, "our_index is computed using `enumerate`; qed");
// e.g. for size 11 the matrix would be
@@ -520,7 +590,10 @@ fn matrix_neighbors(our_index: usize, len: usize) -> impl Iterator<Item = usize>
let row_neighbors = our_row * sqrt..std::cmp::min(our_row * sqrt + sqrt, len);
let column_neighbors = (our_column..len).step_by(sqrt);
row_neighbors.chain(column_neighbors).filter(move |i| *i != our_index)
MatrixNeighbors {
row_neighbors: row_neighbors.filter(move |i| *i != our_index),
column_neighbors: column_neighbors.filter(move |i| *i != our_index),
}
}
impl<Context, AD> overseer::Subsystem<Context, SubsystemError> for GossipSupport<AD>
+218 -78
View File
@@ -24,7 +24,9 @@ use futures::{executor, future, Future};
use lazy_static::lazy_static;
use sc_network::multiaddr::Protocol;
use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair;
use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch};
use sp_core::crypto::Pair as PairT;
use sp_keyring::Sr25519Keyring;
use polkadot_node_subsystem::{
@@ -38,25 +40,46 @@ use test_helpers::mock::make_ferdie_keystore;
use super::*;
const AUTHORITY_KEYRINGS: &[Sr25519Keyring] = &[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
Sr25519Keyring::Eve,
Sr25519Keyring::One,
Sr25519Keyring::Two,
Sr25519Keyring::Ferdie,
];
lazy_static! {
static ref MOCK_AUTHORITY_DISCOVERY: MockAuthorityDiscovery = MockAuthorityDiscovery::new();
static ref AUTHORITIES: Vec<AuthorityDiscoveryId> = {
let mut authorities = OTHER_AUTHORITIES.clone();
authorities.push(Sr25519Keyring::Ferdie.public().into());
authorities
static ref AUTHORITIES: Vec<AuthorityDiscoveryId> =
AUTHORITY_KEYRINGS.iter().map(|k| k.public().into()).collect();
static ref AUTHORITIES_WITHOUT_US: Vec<AuthorityDiscoveryId> = {
let mut a = AUTHORITIES.clone();
a.pop(); // remove FERDIE.
a
};
static ref OTHER_AUTHORITIES: Vec<AuthorityDiscoveryId> = vec![
Sr25519Keyring::Alice.public().into(),
Sr25519Keyring::Bob.public().into(),
Sr25519Keyring::Charlie.public().into(),
Sr25519Keyring::Eve.public().into(),
Sr25519Keyring::One.public().into(),
Sr25519Keyring::Two.public().into(),
static ref PAST_PRESENT_FUTURE_AUTHORITIES: Vec<AuthorityDiscoveryId> = {
(0..50)
.map(|_| AuthorityDiscoveryPair::generate().0.public())
.chain(AUTHORITIES.clone())
.collect()
};
// [2 6]
// [4 5]
// [1 3]
// [0 ]
static ref ROW_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![
(Sr25519Keyring::Charlie.public().into(), ValidatorIndex::from(2)),
];
static ref NEIGHBORS: Vec<AuthorityDiscoveryId> = vec![
Sr25519Keyring::Two.public().into(),
Sr25519Keyring::Charlie.public().into(),
Sr25519Keyring::Eve.public().into(),
static ref COLUMN_NEIGHBORS: Vec<(AuthorityDiscoveryId, ValidatorIndex)> = vec![
(Sr25519Keyring::Two.public().into(), ValidatorIndex::from(5)),
(Sr25519Keyring::Eve.public().into(), ValidatorIndex::from(3)),
];
}
@@ -70,8 +93,11 @@ struct MockAuthorityDiscovery {
impl MockAuthorityDiscovery {
fn new() -> Self {
let authorities: HashMap<_, _> =
AUTHORITIES.clone().into_iter().map(|a| (PeerId::random(), a)).collect();
let authorities: HashMap<_, _> = PAST_PRESENT_FUTURE_AUTHORITIES
.clone()
.into_iter()
.map(|a| (PeerId::random(), a))
.collect();
let addrs = authorities
.clone()
.into_iter()
@@ -103,10 +129,10 @@ impl AuthorityDiscovery for MockAuthorityDiscovery {
}
}
async fn get_other_authorities_addrs() -> Vec<HashSet<Multiaddr>> {
let mut addrs = Vec::with_capacity(OTHER_AUTHORITIES.len());
async fn get_multiaddrs(authorities: Vec<AuthorityDiscoveryId>) -> Vec<HashSet<Multiaddr>> {
let mut addrs = Vec::with_capacity(authorities.len());
let mut discovery = MOCK_AUTHORITY_DISCOVERY.clone();
for authority in OTHER_AUTHORITIES.iter().cloned() {
for authority in authorities.into_iter() {
if let Some(addr) = discovery.get_addresses_by_authority_id(authority).await {
addrs.push(addr);
}
@@ -114,10 +140,12 @@ async fn get_other_authorities_addrs() -> Vec<HashSet<Multiaddr>> {
addrs
}
async fn get_other_authorities_addrs_map() -> HashMap<AuthorityDiscoveryId, HashSet<Multiaddr>> {
let mut addrs = HashMap::with_capacity(OTHER_AUTHORITIES.len());
async fn get_address_map(
authorities: Vec<AuthorityDiscoveryId>,
) -> HashMap<AuthorityDiscoveryId, HashSet<Multiaddr>> {
let mut addrs = HashMap::with_capacity(authorities.len());
let mut discovery = MOCK_AUTHORITY_DISCOVERY.clone();
for authority in OTHER_AUTHORITIES.iter().cloned() {
for authority in authorities.into_iter() {
if let Some(addr) = discovery.get_addresses_by_authority_id(authority.clone()).await {
addrs.insert(authority, addr);
}
@@ -179,13 +207,32 @@ async fn overseer_signal_active_leaves(overseer: &mut VirtualOverseer, leaf: Has
.expect("signal send timeout");
}
fn make_session_info() -> SessionInfo {
let all_validator_indices: Vec<_> = (0..6).map(ValidatorIndex::from).collect();
SessionInfo {
active_validator_indices: all_validator_indices.clone(),
random_seed: [0; 32],
dispute_period: 6,
validators: AUTHORITY_KEYRINGS.iter().map(|k| k.public().into()).collect(),
discovery_keys: AUTHORITIES.clone(),
assignment_keys: AUTHORITY_KEYRINGS.iter().map(|k| k.public().into()).collect(),
validator_groups: vec![all_validator_indices],
n_cores: 1,
zeroth_delay_tranche_width: 1,
relay_vrf_modulo_samples: 1,
n_delay_tranches: 1,
no_show_slots: 1,
needed_approvals: 1,
}
}
async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
let msg = overseer.recv().timeout(TIMEOUT).await.expect("msg recv timeout");
msg
}
async fn test_neighbors(overseer: &mut VirtualOverseer) {
async fn test_neighbors(overseer: &mut VirtualOverseer, expected_session: SessionIndex) {
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -209,11 +256,17 @@ async fn test_neighbors(overseer: &mut VirtualOverseer) {
assert_matches!(
overseer_recv(overseer).await,
AllMessages::NetworkBridge(NetworkBridgeMessage::NewGossipTopology {
our_neighbors,
session: got_session,
our_neighbors_x,
our_neighbors_y,
}) => {
let mut got: Vec<_> = our_neighbors.into_iter().collect();
got.sort();
assert_eq!(got, NEIGHBORS.clone());
assert_eq!(expected_session, got_session);
let mut got_row: Vec<_> = our_neighbors_x.into_iter().collect();
let mut got_column: Vec<_> = our_neighbors_y.into_iter().collect();
got_row.sort();
got_column.sort();
assert_eq!(got_row, ROW_NEIGHBORS.clone());
assert_eq!(got_column, COLUMN_NEIGHBORS.clone());
}
);
}
@@ -235,6 +288,18 @@ fn issues_a_connection_request_on_new_session() {
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(s, tx),
)) => {
assert_eq!(relay_parent, hash);
assert_eq!(s, 1);
tx.send(Ok(Some(make_session_info()))).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -252,23 +317,12 @@ fn issues_a_connection_request_on_new_session() {
validator_addrs,
peer_set,
}) => {
assert_eq!(validator_addrs, get_other_authorities_addrs().await);
assert_eq!(validator_addrs, get_multiaddrs(AUTHORITIES_WITHOUT_US.clone()).await);
assert_eq!(peer_set, PeerSet::Validation);
}
);
test_neighbors(overseer).await;
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(1, sender),
)) => {
assert_eq!(relay_parent, hash);
sender.send(Ok(None)).unwrap();
}
);
test_neighbors(overseer, 1).await;
virtual_overseer
});
@@ -313,6 +367,18 @@ fn issues_a_connection_request_on_new_session() {
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(s, tx),
)) => {
assert_eq!(relay_parent, hash);
assert_eq!(s, 2);
tx.send(Ok(Some(make_session_info()))).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -330,23 +396,12 @@ fn issues_a_connection_request_on_new_session() {
validator_addrs,
peer_set,
}) => {
assert_eq!(validator_addrs, get_other_authorities_addrs().await);
assert_eq!(validator_addrs, get_multiaddrs(AUTHORITIES_WITHOUT_US.clone()).await);
assert_eq!(peer_set, PeerSet::Validation);
}
);
test_neighbors(overseer).await;
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(2, sender),
)) => {
assert_eq!(relay_parent, hash);
sender.send(Ok(None)).unwrap();
}
);
test_neighbors(overseer, 2).await;
virtual_overseer
});
@@ -354,6 +409,72 @@ fn issues_a_connection_request_on_new_session() {
assert!(state.last_failure.is_none());
}
#[test]
fn issues_connection_request_to_past_present_future() {
let hash = Hash::repeat_byte(0xAA);
test_harness(make_subsystem(), |mut virtual_overseer| async move {
let overseer = &mut virtual_overseer;
overseer_signal_active_leaves(overseer, hash).await;
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionIndexForChild(tx),
)) => {
assert_eq!(relay_parent, hash);
tx.send(Ok(1)).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(s, tx),
)) => {
assert_eq!(relay_parent, hash);
assert_eq!(s, 1);
tx.send(Ok(Some(make_session_info()))).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::Authorities(tx),
)) => {
assert_eq!(relay_parent, hash);
tx.send(Ok(PAST_PRESENT_FUTURE_AUTHORITIES.clone())).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::NetworkBridge(NetworkBridgeMessage::ConnectToResolvedValidators {
validator_addrs,
peer_set,
}) => {
let all_without_ferdie: Vec<_> = PAST_PRESENT_FUTURE_AUTHORITIES
.iter()
.cloned()
.filter(|p| p != &Sr25519Keyring::Ferdie.public().into())
.collect();
let addrs = get_multiaddrs(all_without_ferdie).await;
assert_eq!(validator_addrs, addrs);
assert_eq!(peer_set, PeerSet::Validation);
}
);
// Ensure neighbors are unaffected
test_neighbors(overseer, 1).await;
virtual_overseer
});
}
#[test]
fn test_log_output() {
sp_tracing::try_init_simple();
@@ -407,6 +528,18 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(s, tx),
)) => {
assert_eq!(relay_parent, hash);
assert_eq!(s, 1);
tx.send(Ok(Some(make_session_info()))).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -424,7 +557,7 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
validator_addrs,
peer_set,
}) => {
let mut expected = get_other_authorities_addrs_map().await;
let mut expected = get_address_map(AUTHORITIES_WITHOUT_US.clone()).await;
expected.remove(&alice);
expected.remove(&bob);
let expected: HashSet<Multiaddr> = expected.into_iter().map(|(_,v)| v.into_iter()).flatten().collect();
@@ -433,18 +566,7 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
}
);
test_neighbors(overseer).await;
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(1, sender),
)) => {
assert_eq!(relay_parent, hash);
sender.send(Ok(None)).unwrap();
}
);
test_neighbors(overseer, 1).await;
virtual_overseer
})
@@ -470,6 +592,19 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
tx.send(Ok(1)).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::SessionInfo(s, tx),
)) => {
assert_eq!(relay_parent, hash);
assert_eq!(s, 1);
tx.send(Ok(Some(make_session_info()))).unwrap();
}
);
assert_matches!(
overseer_recv(overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(
@@ -487,7 +622,7 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
validator_addrs,
peer_set,
}) => {
let mut expected = get_other_authorities_addrs_map().await;
let mut expected = get_address_map(AUTHORITIES_WITHOUT_US.clone()).await;
expected.remove(&bob);
let expected: HashSet<Multiaddr> = expected.into_iter().map(|(_,v)| v.into_iter()).flatten().collect();
assert_eq!(validator_addrs.into_iter().map(|v| v.into_iter()).flatten().collect::<HashSet<_>>(), expected);
@@ -504,18 +639,23 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() {
#[test]
fn test_matrix_neighbors() {
	// Each case is (our_index, validator_count, expected row neighbors,
	// expected column neighbors). Validators are laid out in a near-square
	// grid; neighbors are the other occupants of our row and our column.
	for (our_index, len, expected_row, expected_column) in vec![
		// A lone validator has no neighbors in either dimension.
		(0usize, 1usize, vec![], vec![]),
		(1, 2, vec![], vec![0usize]),
		(0, 9, vec![1, 2], vec![3, 6]),
		// Index 9 sits alone on the last (partial) row of a 10-validator grid.
		(9, 10, vec![], vec![0, 3, 6]),
		(10, 11, vec![9], vec![1, 4, 7]),
		(7, 11, vec![6, 8], vec![1, 4, 10]),
	]
	.into_iter()
	{
		let matrix = matrix_neighbors(our_index, len);
		let mut row_result: Vec<_> = matrix.row_neighbors.collect();
		let mut column_result: Vec<_> = matrix.column_neighbors.collect();
		// Sort so the comparison doesn't depend on iteration order.
		row_result.sort();
		column_result.sort();
		assert_eq!(row_result, expected_row);
		assert_eq!(column_result, expected_column);
	}
}
@@ -24,6 +24,7 @@ fatality = "0.0.6"
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
assert_matches = "1.4.0"
sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -1632,7 +1632,14 @@ async fn handle_network_update(
});
}
},
NetworkBridgeEvent::NewGossipTopology(new_peers) => {
NetworkBridgeEvent::NewGossipTopology(topology) => {
// Combine all peers in the x & y direction as we don't make any distinction.
let new_peers: HashSet<PeerId> = topology
.our_neighbors_x
.values()
.chain(topology.our_neighbors_y.values())
.flat_map(|peer_info| peer_info.peer_ids.iter().cloned())
.collect();
let _ = metrics.time_network_bridge_update_v1("new_gossip_topology");
let newly_added: Vec<PeerId> = new_peers.difference(gossip_peers).cloned().collect();
*gossip_peers = new_peers;
@@ -34,11 +34,12 @@ use polkadot_primitives_test_helpers::{
};
use polkadot_subsystem::{
jaeger,
messages::{RuntimeApiMessage, RuntimeApiRequest},
messages::{network_bridge_event, RuntimeApiMessage, RuntimeApiRequest},
ActivatedLeaf, LeafStatus,
};
use sc_keystore::LocalKeystore;
use sp_application_crypto::{sr25519::Pair, AppKey, Pair as TraitPair};
use sp_authority_discovery::AuthorityPair;
use sp_keyring::Sr25519Keyring;
use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr};
use std::{iter::FromIterator as _, sync::Arc, time::Duration};
@@ -1964,12 +1965,34 @@ fn handle_multiple_seconded_statements() {
// Explicitly add all `lucky` peers to the gossip peers to ensure that neither `peerA` not `peerB`
// receive statements
let gossip_topology = {
let mut t = network_bridge_event::NewGossipTopology {
session: 1,
our_neighbors_x: HashMap::new(),
our_neighbors_y: HashMap::new(),
};
// This is relying on the fact that statement distribution
// just extracts the peer IDs from this struct and does nothing else
// with it.
for (i, peer) in lucky_peers.iter().enumerate() {
let authority_id = AuthorityPair::generate().0.public();
t.our_neighbors_x.insert(
authority_id,
network_bridge_event::TopologyPeerInfo {
peer_ids: vec![peer.clone()],
validator_index: (i as u32).into(),
},
);
}
t
};
handle
.send(FromOverseer::Communication {
msg: StatementDistributionMessage::NetworkBridgeUpdateV1(
NetworkBridgeEvent::NewGossipTopology(
lucky_peers.iter().cloned().collect::<HashSet<_>>(),
),
NetworkBridgeEvent::NewGossipTopology(gossip_topology),
),
})
.await;
+4 -2
View File
@@ -21,8 +21,8 @@ pub use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof};
use parity_scale_codec::{Decode, Encode};
use polkadot_primitives::v2::{
BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, ValidatorIndex,
ValidatorSignature,
BlockNumber, CandidateHash, CandidateIndex, CoreIndex, Hash, Header, SessionIndex,
ValidatorIndex, ValidatorSignature,
};
use sp_application_crypto::ByteArray;
use sp_consensus_babe as babe_primitives;
@@ -128,6 +128,8 @@ pub struct BlockApprovalMeta {
pub candidates: Vec<CandidateHash>,
/// The consensus slot of the block.
pub slot: Slot,
/// The session of the block.
pub session: SessionIndex,
}
/// Errors that can occur during the approvals protocol.
+15 -4
View File
@@ -49,7 +49,7 @@ use polkadot_primitives::v2::{
};
use polkadot_statement_table::v2::Misbehavior;
use std::{
collections::{BTreeMap, HashSet},
collections::{BTreeMap, HashMap, HashSet},
sync::Arc,
time::Duration,
};
@@ -378,9 +378,20 @@ pub enum NetworkBridgeMessage {
/// Inform the distribution subsystems about the new
/// gossip network topology formed.
NewGossipTopology {
/// Ids of our neighbors in the new gossip topology.
/// We're not necessarily connected to all of them, but we should.
our_neighbors: HashSet<AuthorityDiscoveryId>,
/// The session info this gossip topology is concerned with.
session: SessionIndex,
/// Ids of our neighbors in the X dimension of the new gossip topology,
/// along with their validator indices within the session.
///
/// We're not necessarily connected to all of them, but we should
/// try to be.
our_neighbors_x: HashMap<AuthorityDiscoveryId, ValidatorIndex>,
/// Ids of our neighbors in the Y dimension of the new gossip topology,
/// along with their validator indices within the session.
///
/// We're not necessarily connected to all of them, but we should
/// try to be.
our_neighbors_y: HashMap<AuthorityDiscoveryId, ValidatorIndex>,
},
}
@@ -14,12 +14,36 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashSet;
use std::{
collections::{HashMap, HashSet},
convert::TryFrom,
};
pub use sc_network::{PeerId, ReputationChange};
use polkadot_node_network_protocol::{ObservedRole, OurView, View, WrongVariant};
use polkadot_primitives::v2::AuthorityDiscoveryId;
use polkadot_primitives::v2::{AuthorityDiscoveryId, SessionIndex, ValidatorIndex};
/// Information about a peer in the gossip topology for a session.
#[derive(Debug, Clone, PartialEq)]
pub struct TopologyPeerInfo {
	/// The validator's known peer IDs.
	///
	/// A `Vec` because a single validator may be known under more than
	/// one peer ID.
	pub peer_ids: Vec<PeerId>,
	/// The index of the validator in the discovery keys of the corresponding
	/// `SessionInfo`. This can extend _beyond_ the set of active parachain validators.
	pub validator_index: ValidatorIndex,
}
/// A struct indicating new gossip topology.
///
/// Describes our neighbors along the two dimensions ('X' and 'Y') of the
/// per-session grid topology. We're not necessarily connected to all of
/// the listed peers.
#[derive(Debug, Clone, PartialEq)]
pub struct NewGossipTopology {
	/// The session index this topology corresponds to.
	pub session: SessionIndex,
	/// Neighbors in the 'X' dimension of the grid.
	pub our_neighbors_x: HashMap<AuthorityDiscoveryId, TopologyPeerInfo>,
	/// Neighbors in the 'Y' dimension of the grid.
	pub our_neighbors_y: HashMap<AuthorityDiscoveryId, TopologyPeerInfo>,
}
/// Events from network.
#[derive(Debug, Clone, PartialEq)]
@@ -30,14 +54,14 @@ pub enum NetworkBridgeEvent<M> {
/// A peer has disconnected.
PeerDisconnected(PeerId),
/// Our neighbors in the new gossip topology.
/// Our neighbors in the new gossip topology for the session.
/// We're not necessarily connected to all of them.
///
/// This message is issued only on the validation peer set.
///
/// Note, that the distribution subsystems need to handle the last
/// view update of the newly added gossip peers manually.
NewGossipTopology(HashSet<PeerId>),
NewGossipTopology(NewGossipTopology),
/// Peer has sent a message.
PeerMessage(PeerId, M),
@@ -77,8 +101,8 @@ impl<M> NetworkBridgeEvent<M> {
NetworkBridgeEvent::PeerConnected(peer.clone(), role.clone(), authority_id.clone()),
NetworkBridgeEvent::PeerDisconnected(ref peer) =>
NetworkBridgeEvent::PeerDisconnected(peer.clone()),
NetworkBridgeEvent::NewGossipTopology(ref peers) =>
NetworkBridgeEvent::NewGossipTopology(peers.clone()),
NetworkBridgeEvent::NewGossipTopology(ref topology) =>
NetworkBridgeEvent::NewGossipTopology(topology.clone()),
NetworkBridgeEvent::PeerViewChange(ref peer, ref view) =>
NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()),
NetworkBridgeEvent::OurViewChange(ref view) =>
@@ -22,6 +22,16 @@ For assignments, what we need to be checking is whether we are aware of the (blo
However, awareness on its own of a (block, candidate) pair would imply that even ancient candidates all the way back to the genesis are relevant. We are actually not interested in anything before finality.
We gossip assignments along a grid topology produced by the [Gossip Support Subsystem](../utility/gossip-support.md) and also to a few random peers. The first time we accept an assignment or approval, regardless of the source, which originates from a validator peer in a shared dimension of the grid, we propagate the message to validator peers in the unshared dimension as well as a few random peers.
But, in case these mechanisms don't work on their own, we need to trade bandwidth for protocol liveness by introducing aggression.
Aggression has 3 levels:
* Aggression Level 0: The basic behaviors described above.
* Aggression Level 1: The originator of a message sends to all peers. Other peers follow the rules above.
* Aggression Level 2: All peers send all messages to all their row and column neighbors. This means that each validator will, on average, receive each message approximately `2*sqrt(n)` times.
These aggression levels are chosen based on how long a block has taken to finalize: assignments and approvals related to the unfinalized block will be propagated with more aggression. In particular, it's only the earliest unfinalized blocks that aggression should be applied to, because descendants may be unfinalized only by virtue of being descendants.
## Protocol
@@ -142,6 +142,23 @@ enum CollationProtocolV1 {
These updates are posted from the [Network Bridge Subsystem](../node/utility/network-bridge.md) to other subsystems based on registered listeners.
```rust
struct NewGossipTopology {
/// The session index this topology corresponds to.
session: SessionIndex,
/// Neighbors in the 'X' dimension of the grid.
our_neighbors_x: HashMap<AuthorityDiscoveryId, TopologyPeerInfo>,
/// Neighbors in the 'Y' dimension of the grid.
our_neighbors_y: HashMap<AuthorityDiscoveryId, TopologyPeerInfo>,
}
struct TopologyPeerInfo {
/// The validator's known peer IDs.
peer_ids: Vec<PeerId>,
/// The index of the validator in the discovery keys of the corresponding
/// `SessionInfo`. This can extend _beyond_ the set of active parachain validators.
validator_index: ValidatorIndex,
}
enum NetworkBridgeEvent<M> {
/// A peer with given ID is now connected.
PeerConnected(PeerId, ObservedRole, Option<HashSet<AuthorityDiscoveryId>>),
@@ -154,7 +171,7 @@ enum NetworkBridgeEvent<M> {
///
/// Note, that the distribution subsystems need to handle the last
/// view update of the newly added gossip peers manually.
NewGossipTopology(HashSet<PeerId>),
NewGossipTopology(NewGossipTopology),
/// We received a message from the given peer.
PeerMessage(PeerId, M),
/// The given peer has updated its description of its view.
@@ -175,6 +175,8 @@ struct BlockApprovalMeta {
candidates: Vec<CandidateHash>,
/// The consensus slot of the block.
slot: Slot,
/// The session of the block.
session: SessionIndex,
}
enum ApprovalDistributionMessage {
@@ -553,9 +555,14 @@ enum NetworkBridgeMessage {
/// Inform the distribution subsystems about the new
/// gossip network topology formed.
NewGossipTopology {
/// Ids of our neighbors in the new gossip topology.
/// We're not necessarily connected to all of them, but we should.
our_neighbors: HashSet<AuthorityDiscoveryId>,
/// The session this topology corresponds to.
session: SessionIndex,
/// Ids of our neighbors in the X dimension of the new gossip topology.
/// We're not necessarily connected to all of them, but we should try to be.
our_neighbors_x: HashSet<AuthorityDiscoveryId>,
/// Ids of our neighbors in the Y dimension of the new gossip topology.
/// We're not necessarily connected to all of them, but we should try to be.
our_neighbors_y: HashSet<AuthorityDiscoveryId>,
}
}
```
+1 -1
View File
@@ -2047,7 +2047,7 @@ sp_api::impl_runtime_apis! {
impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
fn authorities() -> Vec<AuthorityDiscoveryId> {
AuthorityDiscovery::authorities()
parachains_runtime_api_impl::relevant_authority_ids::<Runtime>()
}
}
+1 -1
View File
@@ -801,7 +801,7 @@ sp_api::impl_runtime_apis! {
impl authority_discovery_primitives::AuthorityDiscoveryApi<Block> for Runtime {
fn authorities() -> Vec<AuthorityDiscoveryId> {
AuthorityDiscovery::authorities()
runtime_impl::relevant_authority_ids::<Runtime>()
}
}