From 8db384bed3ac4cb378d9befb15abd00bc0b54139 Mon Sep 17 00:00:00 2001 From: Maciej Hirsz Date: Tue, 8 Jun 2021 12:17:00 +0200 Subject: [PATCH 001/134] Squashed diff from mh-backend-shard --- backend/Cargo.lock | 203 +++++++++++++-- backend/Cargo.toml | 2 + backend/core/Cargo.toml | 6 +- backend/core/src/aggregator.rs | 69 +++-- backend/core/src/chain.rs | 164 ++++++------ backend/core/src/feed.rs | 73 ++++-- backend/core/src/feed/connector.rs | 2 +- backend/core/src/{util => }/location.rs | 2 +- backend/core/src/main.rs | 18 +- backend/core/src/node.rs | 8 +- backend/core/src/node/connector.rs | 89 ++----- backend/core/src/shard.rs | 12 - backend/core/src/shard/connector.rs | 159 +++++++----- backend/core/src/util/hash.rs | 89 ------- backend/shard/Cargo.toml | 25 ++ backend/shard/src/aggregator.rs | 239 ++++++++++++++++++ backend/shard/src/main.rs | 125 +++++++++ backend/shard/src/node.rs | 201 +++++++++++++++ backend/shared/Cargo.toml | 24 ++ backend/shared/src/lib.rs | 5 + .../node/message.rs => shared/src/node.rs} | 100 +++++--- backend/shared/src/shard.rs | 38 +++ backend/{core => shared}/src/types.rs | 45 ++-- backend/{core => shared}/src/util.rs | 4 +- .../{core => shared}/src/util/dense_map.rs | 0 backend/shared/src/util/hash.rs | 209 +++++++++++++++ .../{core => shared}/src/util/mean_list.rs | 0 backend/shared/src/util/null.rs | 136 ++++++++++ .../{core => shared}/src/util/num_stats.rs | 0 backend/shared/src/ws.rs | 98 +++++++ 30 files changed, 1688 insertions(+), 457 deletions(-) rename backend/core/src/{util => }/location.rs (99%) delete mode 100644 backend/core/src/util/hash.rs create mode 100644 backend/shard/Cargo.toml create mode 100644 backend/shard/src/aggregator.rs create mode 100644 backend/shard/src/main.rs create mode 100644 backend/shard/src/node.rs create mode 100644 backend/shared/Cargo.toml create mode 100644 backend/shared/src/lib.rs rename backend/{core/src/node/message.rs => shared/src/node.rs} (61%) create mode 100644 
backend/shared/src/shard.rs rename backend/{core => shared}/src/types.rs (80%) rename backend/{core => shared}/src/util.rs (90%) rename backend/{core => shared}/src/util/dense_map.rs (100%) create mode 100644 backend/shared/src/util/hash.rs rename backend/{core => shared}/src/util/mean_list.rs (100%) create mode 100644 backend/shared/src/util/null.rs rename backend/{core => shared}/src/util/num_stats.rs (100%) create mode 100644 backend/shared/src/ws.rs diff --git a/backend/Cargo.lock b/backend/Cargo.lock index f3fca55..9c53118 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -11,7 +11,7 @@ dependencies = [ "actix-rt", "actix_derive", "bitflags", - "bytes", + "bytes 1.0.1", "crossbeam-channel", "futures-core", "futures-sink", @@ -33,7 +33,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90673465c6187bd0829116b02be465dc0195a74d7719f76ffff0effef934a92e" dependencies = [ "bitflags", - "bytes", + "bytes 1.0.1", "futures-core", "futures-sink", "log", @@ -54,9 +54,9 @@ dependencies = [ "actix-tls", "actix-utils", "ahash", - "base64", + "base64 0.13.0", "bitflags", - "bytes", + "bytes 1.0.1", "bytestring", "cfg-if 1.0.0", "derive_more", @@ -195,7 +195,7 @@ dependencies = [ "actix-web-codegen", "ahash", "awc", - "bytes", + "bytes 1.0.1", "derive_more", "either", "encoding_rs", @@ -224,7 +224,7 @@ dependencies = [ "actix-codec", "actix-http", "actix-web", - "bytes", + "bytes 1.0.1", "bytestring", "futures-core", "pin-project", @@ -273,6 +273,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "anyhow" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" + [[package]] name = "arc-swap" version = "0.4.7" @@ -281,9 +287,9 @@ checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] name = "arrayvec" -version = "0.5.2" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7" [[package]] name = "atty" @@ -312,8 +318,8 @@ dependencies = [ "actix-http", "actix-rt", "actix-service", - "base64", - "bytes", + "base64 0.13.0", + "bytes 1.0.1", "cfg-if 1.0.0", "derive_more", "futures-core", @@ -334,6 +340,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + [[package]] name = "base64" version = "0.13.0" @@ -394,6 +406,12 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +[[package]] +name = "bytes" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" + [[package]] name = "bytes" version = "1.0.1" @@ -406,7 +424,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" dependencies = [ - "bytes", + "bytes 1.0.1", ] [[package]] @@ -648,6 +666,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +[[package]] +name = "futures" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + 
"futures-sink", + "futures-task", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.7" @@ -655,6 +688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" dependencies = [ "futures-core", + "futures-sink", ] [[package]] @@ -663,12 +697,35 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" +[[package]] +name = "futures-executor" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + [[package]] name = "futures-io" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +[[package]] +name = "futures-macro" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "futures-sink" version = "0.3.7" @@ -690,13 +747,17 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" dependencies = [ + "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project", "pin-utils", + "proc-macro-hack", + "proc-macro-nested", "slab", ] @@ -738,7 +799,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "futures-core", "futures-sink", @@ 
-787,7 +848,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes", + "bytes 1.0.1", "fnv", "itoa", ] @@ -798,7 +859,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" dependencies = [ - "bytes", + "bytes 1.0.1", "http", "pin-project-lite 0.2.6", ] @@ -821,7 +882,7 @@ version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" dependencies = [ - "bytes", + "bytes 1.0.1", "futures-channel", "futures-core", "futures-util", @@ -845,7 +906,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes", + "bytes 1.0.1", "hyper", "native-tls", "tokio", @@ -1106,9 +1167,9 @@ checksum = "2ac6fe3538f701e339953a3ebbe4f39941aababa8a3f6964635b24ab526daeac" [[package]] name = "parity-scale-codec" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +checksum = "e0f518afaa5a47d0d6386229b0a6e01e86427291d643aa4cabb4992219f504f8" dependencies = [ "arrayvec", "bitvec", @@ -1240,6 +1301,12 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" +[[package]] +name = "proc-macro-nested" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" + [[package]] name = "proc-macro2" version = "1.0.24" @@ -1384,8 +1451,8 @@ version = "0.11.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" dependencies = [ - "base64", - "bytes", + "base64 0.13.0", + "bytes 1.0.1", "encoding_rs", "futures-core", "futures-util", @@ -1556,6 +1623,51 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +[[package]] +name = "shard" +version = "0.1.0" +dependencies = [ + "actix", + "actix-http", + "actix-web", + "actix-web-actors", + "anyhow", + "bincode", + "bytes 1.0.1", + "clap", + "log", + "rustc-hash", + "serde", + "serde_json", + "shared", + "simple_logger", + "soketto", + "tokio", + "tokio-stream", + "tokio-util", +] + +[[package]] +name = "shared" +version = "0.1.0" +dependencies = [ + "actix", + "actix-http", + "actix-web", + "actix-web-actors", + "bincode", + "bytes 1.0.1", + "fnv", + "hex", + "log", + "num-traits", + "primitive-types", + "rustc-hash", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "signal-hook-registry" version = "1.2.1" @@ -1602,6 +1714,21 @@ dependencies = [ "winapi", ] +[[package]] +name = "soketto" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" +dependencies = [ + "base64 0.12.3", + "bytes 0.5.6", + "futures", + "httparse", + "log", + "rand 0.7.3", + "sha-1", +] + [[package]] name = "standback" version = "0.2.17" @@ -1698,22 +1825,18 @@ dependencies = [ "actix-web", "actix-web-actors", "bincode", - "bytes", + "bytes 1.0.1", "chrono", "clap", "ctor", - "fnv", - "hex", "log", - "num-traits", "parking_lot", - "primitive-types", "reqwest", "rustc-hash", "serde", "serde_json", + "shared", "simple_logger", - "thiserror", ] [[package]] @@ -1839,7 +1962,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" dependencies = [ "autocfg", - "bytes", + "bytes 1.0.1", "libc", "memchr", "mio", @@ -1848,9 +1971,21 @@ dependencies = [ "parking_lot", "pin-project-lite 0.2.6", "signal-hook-registry", + "tokio-macros", "winapi", ] +[[package]] +name = "tokio-macros" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c49e3df43841dafb86046472506755d8501c5615673955f6aa17181125d13c37" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "tokio-native-tls" version = "0.3.0" @@ -1861,14 +1996,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.6", + "tokio", +] + [[package]] name = "tokio-util" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" dependencies = [ - "bytes", + "bytes 1.0.1", "futures-core", + "futures-io", "futures-sink", "log", "pin-project-lite 0.2.6", diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 2a6589f..e6a403c 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -1,6 +1,8 @@ [workspace] members = [ "core", + "shared", + "shard", ] [profile.dev] diff --git a/backend/core/Cargo.toml b/backend/core/Cargo.toml index 8ea9c50..bac723c 100644 --- a/backend/core/Cargo.toml +++ b/backend/core/Cargo.toml @@ -13,15 +13,11 @@ actix-http = "3.0.0-beta.4" bincode = "1.3.3" bytes = "1.0.1" chrono = { version = "0.4", features = ["serde"] } -fnv = "1.0.7" -hex = "0.4.3" serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0", features = ["raw_value"] } -thiserror = "1.0.24" -primitive-types = { version = "0.9.0", features = ["serde"] } +shared = { path = 
"../shared" } log = "0.4" simple_logger = "1.11.0" -num-traits = "0.2" parking_lot = "0.11" reqwest = { version = "0.11.1", features = ["blocking", "json"] } rustc-hash = "1.1.0" diff --git a/backend/core/src/aggregator.rs b/backend/core/src/aggregator.rs index 0694c8a..dd38321 100644 --- a/backend/core/src/aggregator.rs +++ b/backend/core/src/aggregator.rs @@ -1,14 +1,16 @@ use actix::prelude::*; -use actix_web_actors::ws::{CloseCode, CloseReason}; use ctor::ctor; use std::collections::{HashMap, HashSet}; +use crate::shard::connector::ShardConnector; use crate::chain::{self, Chain, ChainId, Label}; use crate::feed::connector::{Connected, FeedConnector, FeedId}; use crate::feed::{self, FeedMessageSerializer}; -use crate::node::connector::{Mute, NodeConnector}; -use crate::types::{ConnId, NodeDetails}; -use crate::util::{DenseMap, Hash}; +use crate::node::connector::NodeConnector; +use shared::ws::MuteReason; +use shared::shard::ShardConnId; +use shared::types::{ConnId, NodeDetails}; +use shared::util::{DenseMap, Hash}; pub struct Aggregator { genesis_hashes: HashMap, @@ -124,10 +126,24 @@ pub struct AddNode { pub node: NodeDetails, /// Genesis [`Hash`] of the chain the node is being added to. pub genesis_hash: Hash, - /// Connection id used by the node connector for multiplexing parachains - pub conn_id: ConnId, - /// Address of the NodeConnector actor - pub node_connector: Addr, + /// Source from which this node is being added (Direct | Shard) + pub source: NodeSource, +} + +pub enum NodeSource { + Direct { + /// Connection id used by the node connector for multiplexing parachains + conn_id: ConnId, + /// Address of the NodeConnector actor + node_connector: Addr, + }, + // TODO + Shard { + /// `ShardConnId` that identifies the node connection within a shard. 
+ sid: ShardConnId, + /// Address to the ShardConnector actor + shard_connector: Addr, + } } /// Message sent from the Chain to the Aggregator when the Chain loses all nodes @@ -183,25 +199,36 @@ pub struct NodeCount(pub ChainId, pub usize); #[rtype(result = "usize")] pub struct GetHealth; +impl NodeSource { + pub fn mute(&self, reason: MuteReason) { + match self { + NodeSource::Direct { node_connector, .. } => { + node_connector.do_send(reason); + }, + // TODO + NodeSource::Shard { shard_connector, .. } => { + // shard_connector.do_send(Mute { reason }); + }, + } + } +} + impl Handler for Aggregator { type Result = (); fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { if self.denylist.contains(&*msg.node.chain) { log::warn!(target: "Aggregator::AddNode", "'{}' is on the denylist.", msg.node.chain); - let AddNode { node_connector, .. } = msg; - let reason = CloseReason { - code: CloseCode::Abnormal, - description: Some("Denied".into()), - }; - node_connector.do_send(Mute { reason }); + + msg.source.mute(MuteReason::Denied); return; } let AddNode { node, genesis_hash, - conn_id, - node_connector, + source, + // conn_id, + // node_connector, } = msg; log::trace!(target: "Aggregator::AddNode", "New node connected. 
Chain '{}'", node.chain); @@ -213,16 +240,12 @@ impl Handler for Aggregator { if chain.nodes < chain.max_nodes { chain.addr.do_send(chain::AddNode { node, - conn_id, - node_connector, + source, }); } else { log::warn!(target: "Aggregator::AddNode", "Chain {} is over quota ({})", chain.label, chain.max_nodes); - let reason = CloseReason { - code: CloseCode::Again, - description: Some("Overquota".into()), - }; - node_connector.do_send(Mute { reason }); + + source.mute(MuteReason::Overquota); } } } diff --git a/backend/core/src/chain.rs b/backend/core/src/chain.rs index aa5488f..f975073 100644 --- a/backend/core/src/chain.rs +++ b/backend/core/src/chain.rs @@ -3,16 +3,13 @@ use rustc_hash::FxHashMap; use std::collections::HashMap; use std::sync::Arc; -use crate::aggregator::{Aggregator, DropChain, NodeCount, RenameChain}; +use crate::aggregator::{Aggregator, DropChain, NodeCount, NodeSource, RenameChain}; use crate::feed::connector::{FeedConnector, FeedId, Subscribed, Unsubscribed}; use crate::feed::{self, FeedMessageSerializer}; -use crate::node::{ - connector::{Initialize, NodeConnector}, - message::Payload, - Node, -}; -use crate::types::{Block, BlockNumber, ConnId, NodeDetails, NodeId, NodeLocation, Timestamp}; -use crate::util::{now, DenseMap, NumStats}; +use crate::node::Node; +use shared::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; +use shared::util::{now, DenseMap, NumStats}; +use shared::node::Payload; const STALE_TIMEOUT: u64 = 2 * 60 * 1000; // 2 minutes @@ -204,10 +201,8 @@ impl Actor for Chain { pub struct AddNode { /// Details of the node being added to the aggregator pub node: NodeDetails, - /// Connection id used by the node connector for multiplexing parachains - pub conn_id: ConnId, - /// Address of the NodeConnector actor to which we send [`Initialize`] or [`Mute`] messages. 
- pub node_connector: Addr, + /// Source from which this node is being added (Direct | Shard) + pub source: NodeSource, } /// Message sent from the NodeConnector to the Chain when it receives new telemetry data @@ -249,14 +244,38 @@ pub struct LocateNode { pub location: Arc, } +impl NodeSource { + pub fn init(self, nid: NodeId, chain: Addr) -> bool { + match self { + NodeSource::Direct { conn_id, node_connector } => { + node_connector + .try_send(crate::node::connector::Initialize { + nid, + conn_id, + chain, + }) + .is_ok() + }, + NodeSource::Shard { sid, shard_connector } => { + shard_connector + .try_send(crate::shard::connector::Initialize { + nid, + sid, + chain, + }) + .is_ok() + } + } + } +} + impl Handler for Chain { type Result = (); fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { let AddNode { node, - conn_id, - node_connector, + source, } = msg; log::trace!(target: "Chain::AddNode", "New node connected. Chain '{}', node count goes from {} to {}", node.chain, self.nodes.len(), self.nodes.len() + 1); self.increment_label_count(&node.chain); @@ -264,14 +283,7 @@ impl Handler for Chain { let nid = self.nodes.add(Node::new(node)); let chain = ctx.address(); - if node_connector - .try_send(Initialize { - nid, - conn_id, - chain, - }) - .is_err() - { + if source.init(nid, chain) { self.nodes.remove(nid); } else if let Some(node) = self.nodes.get(nid) { self.serializer.push(feed::AddedNode(nid, node)); @@ -355,60 +367,60 @@ impl Handler for Chain { self.serializer.push(feed::NodeIOUpdate(nid, io)); } } - Payload::AfgAuthoritySet(authority) => { - node.set_validator_address(authority.authority_id.clone()); - self.broadcast(); - return; - } - Payload::AfgFinalized(finalized) => { - if let Ok(finalized_number) = finalized.finalized_number.parse::() - { - if let Some(addr) = node.details().validator.clone() { - self.serializer.push(feed::AfgFinalized( - addr, - finalized_number, - finalized.finalized_hash, - )); - self.broadcast_finality(); - } - } - 
return; - } - Payload::AfgReceivedPrecommit(precommit) => { - if let Ok(finalized_number) = - precommit.received.target_number.parse::() - { - if let Some(addr) = node.details().validator.clone() { - let voter = precommit.received.voter.clone(); - self.serializer.push(feed::AfgReceivedPrecommit( - addr, - finalized_number, - precommit.received.target_hash, - voter, - )); - self.broadcast_finality(); - } - } - return; - } - Payload::AfgReceivedPrevote(prevote) => { - if let Ok(finalized_number) = - prevote.received.target_number.parse::() - { - if let Some(addr) = node.details().validator.clone() { - let voter = prevote.received.voter.clone(); - self.serializer.push(feed::AfgReceivedPrevote( - addr, - finalized_number, - prevote.received.target_hash, - voter, - )); - self.broadcast_finality(); - } - } - return; - } - Payload::AfgReceivedCommit(_) => {} + // Payload::AfgAuthoritySet(authority) => { + // node.set_validator_address(authority.authority_id.clone()); + // self.broadcast(); + // return; + // } + // Payload::AfgFinalized(finalized) => { + // if let Ok(finalized_number) = finalized.finalized_number.parse::() + // { + // if let Some(addr) = node.details().validator.clone() { + // self.serializer.push(feed::AfgFinalized( + // addr, + // finalized_number, + // finalized.finalized_hash, + // )); + // self.broadcast_finality(); + // } + // } + // return; + // } + // Payload::AfgReceivedPrecommit(precommit) => { + // if let Ok(finalized_number) = + // precommit.received.target_number.parse::() + // { + // if let Some(addr) = node.details().validator.clone() { + // let voter = precommit.received.voter.clone(); + // self.serializer.push(feed::AfgReceivedPrecommit( + // addr, + // finalized_number, + // precommit.received.target_hash, + // voter, + // )); + // self.broadcast_finality(); + // } + // } + // return; + // } + // Payload::AfgReceivedPrevote(prevote) => { + // if let Ok(finalized_number) = + // prevote.received.target_number.parse::() + // { + // if let 
Some(addr) = node.details().validator.clone() { + // let voter = prevote.received.voter.clone(); + // self.serializer.push(feed::AfgReceivedPrevote( + // addr, + // finalized_number, + // prevote.received.target_hash, + // voter, + // )); + // self.broadcast_finality(); + // } + // } + // return; + // } + // Payload::AfgReceivedCommit(_) => {} _ => (), } diff --git a/backend/core/src/feed.rs b/backend/core/src/feed.rs index e1915f4..90603a2 100644 --- a/backend/core/src/feed.rs +++ b/backend/core/src/feed.rs @@ -3,20 +3,33 @@ use serde::Serialize; use std::mem; use crate::node::Node; -use crate::types::{ - Address, BlockDetails, BlockHash, BlockNumber, NodeHardware, NodeIO, NodeId, NodeStats, - Timestamp, -}; use serde_json::to_writer; +use shared::types::{ + Address, BlockDetails, BlockHash, BlockNumber, NodeHardware, NodeIO, NodeId, NodeStats, + Timestamp, NodeDetails, +}; pub mod connector; use connector::Serialized; -pub trait FeedMessage: Serialize { +pub trait FeedMessage { const ACTION: u8; } +pub trait FeedMessageWrite: FeedMessage { + fn write_to_feed(&self, ser: &mut FeedMessageSerializer); +} + +impl FeedMessageWrite for T +where + T: FeedMessage + Serialize, +{ + fn write_to_feed(&self, ser: &mut FeedMessageSerializer) { + ser.write(self) + } +} + pub struct FeedMessageSerializer { /// Current buffer, buffer: Vec, @@ -33,7 +46,7 @@ impl FeedMessageSerializer { pub fn push(&mut self, msg: Message) where - Message: FeedMessage, + Message: FeedMessageWrite, { let glue = match self.buffer.len() { 0 => b'[', @@ -41,9 +54,16 @@ impl FeedMessageSerializer { }; self.buffer.push(glue); - let _ = to_writer(&mut self.buffer, &Message::ACTION); + self.write(&Message::ACTION); self.buffer.push(b','); - let _ = to_writer(&mut self.buffer, &msg); + msg.write_to_feed(self); + } + + fn write(&mut self, value: &S) + where + S: Serialize, + { + let _ = to_writer(&mut self.buffer, value); } pub fn finalize(&mut self) -> Option { @@ -175,21 +195,28 @@ pub struct 
AfgAuthoritySet( #[derive(Serialize)] pub struct StaleNode(pub NodeId); -impl Serialize for AddedNode<'_> { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { +impl FeedMessageWrite for AddedNode<'_> { + fn write_to_feed(&self, ser: &mut FeedMessageSerializer) { let AddedNode(nid, node) = self; - let mut tup = serializer.serialize_tuple(8)?; - tup.serialize_element(nid)?; - tup.serialize_element(node.details())?; - tup.serialize_element(node.stats())?; - tup.serialize_element(node.io())?; - tup.serialize_element(node.hardware())?; - tup.serialize_element(node.block_details())?; - tup.serialize_element(&node.location())?; - tup.serialize_element(&node.startup_time())?; - tup.end() + + let details = node.details(); + let details = ( + &details.name, + &details.implementation, + &details.version, + &details.validator, + &details.network_id, + ); + + ser.write(&( + nid, + details, + node.stats(), + node.io(), + node.hardware(), + node.block_details(), + &node.location(), + &node.startup_time(), + )); } } diff --git a/backend/core/src/feed/connector.rs b/backend/core/src/feed/connector.rs index 926a7f9..a75c3c5 100644 --- a/backend/core/src/feed/connector.rs +++ b/backend/core/src/feed/connector.rs @@ -1,10 +1,10 @@ use crate::aggregator::{Aggregator, Connect, Disconnect, NoMoreFinality, SendFinality, Subscribe}; use crate::chain::Unsubscribe; use crate::feed::{FeedMessageSerializer, Pong}; -use crate::util::fnv; use actix::prelude::*; use actix_web_actors::ws; use bytes::Bytes; +use shared::util::fnv; use std::time::{Duration, Instant}; pub type FeedId = usize; diff --git a/backend/core/src/util/location.rs b/backend/core/src/location.rs similarity index 99% rename from backend/core/src/util/location.rs rename to backend/core/src/location.rs index 777e0c1..3237f65 100644 --- a/backend/core/src/util/location.rs +++ b/backend/core/src/location.rs @@ -7,7 +7,7 @@ use rustc_hash::FxHashMap; use serde::Deserialize; use crate::chain::{Chain, 
LocateNode}; -use crate::types::{NodeId, NodeLocation}; +use shared::types::{NodeId, NodeLocation}; #[derive(Clone)] pub struct Locator { diff --git a/backend/core/src/main.rs b/backend/core/src/main.rs index 9e373f9..241a577 100644 --- a/backend/core/src/main.rs +++ b/backend/core/src/main.rs @@ -12,21 +12,20 @@ use simple_logger::SimpleLogger; mod aggregator; mod chain; mod feed; +mod location; mod node; mod shard; -mod types; -mod util; use aggregator::{Aggregator, GetHealth}; use feed::connector::FeedConnector; +use location::{Locator, LocatorFactory}; use node::connector::NodeConnector; use shard::connector::ShardConnector; -use util::{Locator, LocatorFactory}; const VERSION: &str = env!("CARGO_PKG_VERSION"); const AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); -const NAME: &str = "Substrate Telemetry Backend"; -const ABOUT: &str = "This is the Telemetry Backend that injects and provide the data sent by Substrate/Polkadot nodes"; +const NAME: &str = "Substrate Telemetry Backend Core"; +const ABOUT: &str = "This is the Telemetry Backend Core that injects and provide the data sent by Substrate/Polkadot nodes"; #[derive(Clap, Debug)] #[clap(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] @@ -109,17 +108,21 @@ async fn shard_route( req: HttpRequest, stream: web::Payload, aggregator: web::Data>, + locator: web::Data>, path: web::Path>, ) -> Result { let hash_str = path.into_inner(); let genesis_hash = hash_str.parse()?; + println!("Genesis hash {}", genesis_hash); + let mut res = ws::handshake(&req)?; let aggregator = aggregator.get_ref().clone(); + let locator = locator.get_ref().clone().recipient(); Ok(res.streaming(ws::WebsocketContext::with_codec( - ShardConnector::new(aggregator, genesis_hash), + ShardConnector::new(aggregator, locator, genesis_hash), stream, Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit ))) @@ -171,7 +174,7 @@ async fn main() -> std::io::Result<()> { let aggregator = Aggregator::new(denylist).start(); let factory 
= LocatorFactory::new(); let locator = SyncArbiter::start(4, move || factory.create()); - log::info!("Starting telemetry version: {}", env!("CARGO_PKG_VERSION")); + log::info!("Starting Telemetry Core version: {}", env!("CARGO_PKG_VERSION")); HttpServer::new(move || { App::new() .wrap(middleware::NormalizePath::default()) @@ -179,6 +182,7 @@ async fn main() -> std::io::Result<()> { .data(locator.clone()) .service(node_route) .service(feed_route) + .service(shard_route) .service(health) }) .bind(opts.socket)? diff --git a/backend/core/src/node.rs b/backend/core/src/node.rs index ac2ef2f..216753f 100644 --- a/backend/core/src/node.rs +++ b/backend/core/src/node.rs @@ -1,15 +1,13 @@ use std::sync::Arc; -use crate::types::{ +use shared::types::{ Block, BlockDetails, NodeDetails, NodeHardware, NodeIO, NodeId, NodeLocation, NodeStats, Timestamp, }; -use crate::util::now; +use shared::util::now; +use shared::node::SystemInterval; pub mod connector; -pub mod message; - -use message::SystemInterval; /// Minimum time between block below broadcasting updates to the browser gets throttled, in ms. 
const THROTTLE_THRESHOLD: u64 = 100; diff --git a/backend/core/src/node/connector.rs b/backend/core/src/node/connector.rs index 81fddd8..4256c1c 100644 --- a/backend/core/src/node/connector.rs +++ b/backend/core/src/node/connector.rs @@ -1,25 +1,22 @@ use std::collections::BTreeMap; -use std::mem; use std::net::Ipv4Addr; use std::time::{Duration, Instant}; -use crate::aggregator::{AddNode, Aggregator}; +use crate::aggregator::{AddNode, Aggregator, NodeSource}; use crate::chain::{Chain, RemoveNode, UpdateNode}; -use crate::node::message::{NodeMessage, Payload}; +use crate::location::LocateRequest; use crate::node::NodeId; -use crate::types::ConnId; -use crate::util::LocateRequest; use actix::prelude::*; -use actix_http::ws::Item; use actix_web_actors::ws::{self, CloseReason}; -use bytes::{Bytes, BytesMut}; +use bytes::Bytes; +use shared::types::ConnId; +use shared::ws::{MultipartHandler, WsMessage, MuteReason}; +use shared::node::{NodeMessage, Payload}; /// How often heartbeat pings are sent const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); /// How long before lack of client response causes a timeout const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); -/// Continuation buffer limit, 10mb -const CONT_BUF_LIMIT: usize = 10 * 1024 * 1024; pub struct NodeConnector { /// Multiplexing connections by id @@ -32,8 +29,8 @@ pub struct NodeConnector { ip: Option, /// Actix address of location services locator: Recipient, - /// Buffer for constructing continuation messages - contbuf: BytesMut, + /// Helper for handling continuation messages + multipart: MultipartHandler, } enum ConnMultiplex { @@ -85,7 +82,7 @@ impl NodeConnector { aggregator, ip, locator, - contbuf: BytesMut::new(), + multipart: MultipartHandler::default(), } } @@ -123,8 +120,10 @@ impl NodeConnector { self.aggregator.do_send(AddNode { node: connected.node, genesis_hash: connected.genesis_hash, - conn_id, - node_connector: ctx.address(), + source: NodeSource::Direct { + conn_id, + 
node_connector: ctx.address(), + }, }); } else { if backlog.len() >= 10 { @@ -136,42 +135,14 @@ impl NodeConnector { } } } - - fn start_frame(&mut self, bytes: &[u8]) { - if !self.contbuf.is_empty() { - log::error!("Unused continuation buffer"); - self.contbuf.clear(); - } - self.continue_frame(bytes); - } - - fn continue_frame(&mut self, bytes: &[u8]) { - if self.contbuf.len() + bytes.len() <= CONT_BUF_LIMIT { - self.contbuf.extend_from_slice(&bytes); - } else { - log::error!("Continuation buffer overflow"); - self.contbuf = BytesMut::new(); - } - } - - fn finish_frame(&mut self) -> Bytes { - mem::replace(&mut self.contbuf, BytesMut::new()).freeze() - } } -#[derive(Message)] -#[rtype(result = "()")] -pub struct Mute { - pub reason: CloseReason, -} - -impl Handler for NodeConnector { +impl Handler for NodeConnector { type Result = (); - fn handle(&mut self, msg: Mute, ctx: &mut Self::Context) { - let Mute { reason } = msg; - log::debug!(target: "NodeConnector::Mute", "Muting a node. Reason: {:?}", reason.description); + fn handle(&mut self, msg: MuteReason, ctx: &mut Self::Context) { + log::debug!(target: "NodeConnector::Mute", "Muting a node. 
Reason: {:?}", msg); - ctx.close(Some(reason)); + ctx.close(Some(msg.into())); ctx.stop(); } } @@ -221,34 +192,18 @@ impl StreamHandler> for NodeConnector { fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { self.hb = Instant::now(); - let data = match msg { - Ok(ws::Message::Ping(msg)) => { + let data = match msg.map(|msg| self.multipart.handle(msg)) { + Ok(WsMessage::Nop) => return, + Ok(WsMessage::Ping(msg)) => { ctx.pong(&msg); return; } - Ok(ws::Message::Pong(_)) => return, - Ok(ws::Message::Text(text)) => text.into_bytes(), - Ok(ws::Message::Binary(data)) => data, - Ok(ws::Message::Close(reason)) => { + Ok(WsMessage::Data(data)) => data, + Ok(WsMessage::Close(reason)) => { ctx.close(reason); ctx.stop(); return; } - Ok(ws::Message::Nop) => return, - Ok(ws::Message::Continuation(cont)) => match cont { - Item::FirstText(bytes) | Item::FirstBinary(bytes) => { - self.start_frame(&bytes); - return; - } - Item::Continue(bytes) => { - self.continue_frame(&bytes); - return; - } - Item::Last(bytes) => { - self.continue_frame(&bytes); - self.finish_frame() - } - }, Err(error) => { log::error!("{:?}", error); ctx.stop(); diff --git a/backend/core/src/shard.rs b/backend/core/src/shard.rs index 30c7678..77bba25 100644 --- a/backend/core/src/shard.rs +++ b/backend/core/src/shard.rs @@ -1,13 +1 @@ -use crate::node::message::Payload; -use serde::Deserialize; - pub mod connector; - -/// Alias for the ID of the node connection -type ShardConnId = usize; - -#[derive(Deserialize)] -pub struct ShardMessage { - pub conn_id: ShardConnId, - pub payload: Payload, -} diff --git a/backend/core/src/shard/connector.rs b/backend/core/src/shard/connector.rs index 111ad1f..f3e9a0c 100644 --- a/backend/core/src/shard/connector.rs +++ b/backend/core/src/shard/connector.rs @@ -1,23 +1,22 @@ -use std::mem; use std::time::{Duration, Instant}; +use std::collections::BTreeMap; +use std::net::Ipv4Addr; -use crate::aggregator::{AddNode, Aggregator}; +use crate::aggregator::{AddNode, 
Aggregator, NodeSource}; use crate::chain::{Chain, RemoveNode, UpdateNode}; -use crate::shard::ShardMessage; -use crate::types::NodeId; -use crate::util::{DenseMap, Hash}; +use crate::location::LocateRequest; use actix::prelude::*; -use actix_http::ws::Item; use actix_web_actors::ws::{self, CloseReason}; use bincode::Options; -use bytes::{Bytes, BytesMut}; +use shared::types::NodeId; +use shared::util::Hash; +use shared::ws::{MultipartHandler, WsMessage}; +use shared::shard::{ShardMessage, ShardConnId, BackendMessage}; /// How often heartbeat pings are sent const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); /// How long before lack of client response causes a timeout const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); -/// Continuation buffer limit, 10mb -const CONT_BUF_LIMIT: usize = 10 * 1024 * 1024; pub struct ShardConnector { /// Client must send ping at least once every 60 seconds (CLIENT_TIMEOUT), @@ -26,12 +25,16 @@ pub struct ShardConnector { aggregator: Addr, /// Genesis hash of the chain this connection will be submitting data for genesis_hash: Hash, - /// Chain address to which this multiplex connector is delegating messages + /// Chain address to which this shard connector is delegating messages chain: Option>, - /// Mapping `ShardConnId` to `NodeId` - nodes: DenseMap, - /// Buffer for constructing continuation messages - contbuf: BytesMut, + /// Transient mapping of `ShardConnId` to external IP address. + ips: BTreeMap, + /// Mapping of `ShardConnId` to initialized `NodeId`s. 
+ nodes: BTreeMap, + /// Actix address of location services + locator: Recipient, + /// Container for handling continuation messages + multipart: MultipartHandler, } impl Actor for ShardConnector { @@ -43,7 +46,7 @@ impl Actor for ShardConnector { fn stopped(&mut self, _: &mut Self::Context) { if let Some(ref chain) = self.chain { - for (_, nid) in self.nodes.iter() { + for nid in self.nodes.values() { chain.do_send(RemoveNode(*nid)) } } @@ -51,17 +54,31 @@ impl Actor for ShardConnector { } impl ShardConnector { - pub fn new(aggregator: Addr, genesis_hash: Hash) -> Self { + pub fn new( + aggregator: Addr, + locator: Recipient, + genesis_hash: Hash, + ) -> Self { Self { hb: Instant::now(), aggregator, genesis_hash, chain: None, - nodes: DenseMap::new(), - contbuf: BytesMut::new(), + ips: BTreeMap::new(), + nodes: BTreeMap::new(), + locator, + multipart: MultipartHandler::default(), } } + fn shard_send(msg: BackendMessage, ctx: &mut ::Context) { + let bytes = bincode::options().serialize(&msg).expect("Must be able to serialize to vec; qed"); + + println!("Sending back {} bytes", bytes.len()); + + ctx.binary(bytes); + } + fn heartbeat(&self, ctx: &mut ::Context) { ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { // check client heartbeats @@ -77,30 +94,66 @@ impl ShardConnector { } fn handle_message(&mut self, msg: ShardMessage, ctx: &mut ::Context) { - let ShardMessage { conn_id, payload } = msg; + println!("{:?}", msg); - // TODO: get `NodeId` for `ShardConnId` and proxy payload to `self.chain`. 
- } + match msg { + ShardMessage::AddNode { ip, node, sid } => { + if let Some(ip) = ip { + self.ips.insert(sid, ip); + } - fn start_frame(&mut self, bytes: &[u8]) { - if !self.contbuf.is_empty() { - log::error!("Unused continuation buffer"); - self.contbuf.clear(); - } - self.continue_frame(bytes); - } - - fn continue_frame(&mut self, bytes: &[u8]) { - if self.contbuf.len() + bytes.len() <= CONT_BUF_LIMIT { - self.contbuf.extend_from_slice(&bytes); - } else { - log::error!("Continuation buffer overflow"); - self.contbuf = BytesMut::new(); + self.aggregator.do_send(AddNode { + node, + genesis_hash: self.genesis_hash, + source: NodeSource::Shard { + sid, + shard_connector: ctx.address(), + } + }); + }, + ShardMessage::UpdateNode { nid, payload } => { + if let Some(chain) = self.chain.as_ref() { + chain.do_send(UpdateNode { + nid, + payload, + }); + } + }, } } +} - fn finish_frame(&mut self) -> Bytes { - mem::replace(&mut self.contbuf, BytesMut::new()).freeze() +#[derive(Message)] +#[rtype(result = "()")] +pub struct Initialize { + pub nid: NodeId, + pub sid: ShardConnId, + pub chain: Addr, +} + +impl Handler for ShardConnector { + type Result = (); + + fn handle(&mut self, msg: Initialize, ctx: &mut Self::Context) { + let Initialize { + nid, + sid, + chain, + } = msg; + log::trace!(target: "ShardConnector::Initialize", "Initializing a node, nid={}, on conn_id={}", nid, 0); + + if self.chain.is_none() { + self.chain = Some(chain.clone()); + } + + let be_msg = BackendMessage::Initialize { sid, nid }; + + Self::shard_send(be_msg, ctx); + + // Acquire the node's physical location + if let Some(ip) = self.ips.remove(&sid) { + let _ = self.locator.do_send(LocateRequest { ip, nid, chain }); + } } } @@ -108,34 +161,18 @@ impl StreamHandler> for ShardConnector { fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { self.hb = Instant::now(); - let data = match msg { - Ok(ws::Message::Ping(msg)) => { + let data = match msg.map(|msg| self.multipart.handle(msg)) { + 
Ok(WsMessage::Nop) => return, + Ok(WsMessage::Ping(msg)) => { ctx.pong(&msg); return; } - Ok(ws::Message::Pong(_)) => return, - Ok(ws::Message::Text(text)) => text.into_bytes(), - Ok(ws::Message::Binary(data)) => data, - Ok(ws::Message::Close(reason)) => { + Ok(WsMessage::Data(data)) => data, + Ok(WsMessage::Close(reason)) => { ctx.close(reason); ctx.stop(); return; } - Ok(ws::Message::Nop) => return, - Ok(ws::Message::Continuation(cont)) => match cont { - Item::FirstText(bytes) | Item::FirstBinary(bytes) => { - self.start_frame(&bytes); - return; - } - Item::Continue(bytes) => { - self.continue_frame(&bytes); - return; - } - Item::Last(bytes) => { - self.continue_frame(&bytes); - self.finish_frame() - } - }, Err(error) => { log::error!("{:?}", error); ctx.stop(); @@ -145,12 +182,12 @@ impl StreamHandler> for ShardConnector { match bincode::options().deserialize(&data) { Ok(msg) => self.handle_message(msg, ctx), - #[cfg(debug)] + // #[cfg(debug)] Err(err) => { log::warn!("Failed to parse shard message: {}", err,) } - #[cfg(not(debug))] - Err(_) => (), + // #[cfg(not(debug))] + // Err(_) => (), } } } diff --git a/backend/core/src/util/hash.rs b/backend/core/src/util/hash.rs deleted file mode 100644 index 727602d..0000000 --- a/backend/core/src/util/hash.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::fmt::{self, Debug, Display}; -use std::str::FromStr; - -use actix_web::error::ResponseError; -use serde::de::{self, Deserialize, Deserializer, Unexpected, Visitor}; - -const HASH_BYTES: usize = 32; - -/// Newtype wrapper for 32-byte hash values, implementing readable `Debug` and `serde::Deserialize`. -// We could use primitive_types::H256 here, but opted for a custom type to avoid more dependencies. 
-#[derive(Hash, PartialEq, Eq, Clone, Copy)] -pub struct Hash([u8; HASH_BYTES]); - -struct HashVisitor; - -impl<'de> Visitor<'de> for HashVisitor { - type Value = Hash; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("hexidecimal string of 32 bytes beginning with 0x") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - value - .parse() - .map_err(|_| de::Error::invalid_value(Unexpected::Str(value), &self)) - } -} - -impl FromStr for Hash { - type Err = HashParseError; - - fn from_str(value: &str) -> Result { - if !value.starts_with("0x") { - return Err(HashParseError::InvalidPrefix); - } - - let mut hash = [0; HASH_BYTES]; - - hex::decode_to_slice(&value[2..], &mut hash).map_err(HashParseError::HexError)?; - - Ok(Hash(hash)) - } -} - -impl<'de> Deserialize<'de> for Hash { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(HashVisitor) - } -} - -impl Display for Hash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("0x")?; - - let mut ascii = [0; HASH_BYTES * 2]; - - hex::encode_to_slice(self.0, &mut ascii) - .expect("Encoding 32 bytes into 64 bytes of ascii; qed"); - - f.write_str(std::str::from_utf8(&ascii).expect("ASCII hex encoded bytes canot fail; qed")) - } -} - -impl Debug for Hash { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(self, f) - } -} - -#[derive(thiserror::Error, Debug)] -pub enum HashParseError { - HexError(hex::FromHexError), - InvalidPrefix, -} - -impl Display for HashParseError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(self, f) - } -} - -impl ResponseError for HashParseError {} diff --git a/backend/shard/Cargo.toml b/backend/shard/Cargo.toml new file mode 100644 index 0000000..81162c1 --- /dev/null +++ b/backend/shard/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "shard" +version = "0.1.0" +authors = ["Parity Technologies Ltd. 
"] +edition = "2018" + +[dependencies] +actix = "0.11.1" +actix-web = { version = "4.0.0-beta.4", default-features = false } +actix-web-actors = "4.0.0-beta.3" +actix-http = "3.0.0-beta.4" +anyhow = "1.0.40" +bincode = "1.3.3" +bytes = "1.0.1" +clap = "3.0.0-beta.2" +log = "0.4" +rustc-hash = "1.1.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0", features = ["raw_value"] } +shared = { path = "../shared" } +simple_logger = "1.11.0" +soketto = "0.4.2" +tokio = { version = "1", features = ["full"] } +tokio-util = { version = "0.6", features = ["compat"] } +tokio-stream = { version = "0.1", features = ["net"] } diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs new file mode 100644 index 0000000..97c5ff9 --- /dev/null +++ b/backend/shard/src/aggregator.rs @@ -0,0 +1,239 @@ +use std::net::Ipv4Addr; +use std::fmt; +// use std::sync::mpsc::{self, Sender}; + +use actix::prelude::*; +use actix_http::http::Uri; +use bincode::Options; +use rustc_hash::FxHashMap; +use shared::util::{Hash, DenseMap}; +use shared::types::{ConnId, NodeDetails, NodeId}; +use shared::node::Payload; +use shared::shard::{ShardConnId, ShardMessage, BackendMessage}; +use soketto::handshake::{Client, ServerResponse}; +use crate::node::{NodeConnector, Initialize}; +use tokio::net::TcpStream; +use tokio::sync::mpsc::{self, UnboundedSender}; +use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; + +type WsSender = soketto::Sender>; +type WsReceiver = soketto::Receiver>; + +#[derive(Default)] +pub struct Aggregator { + url: Uri, + chains: FxHashMap>, +} + +impl Actor for Aggregator { + type Context = Context; +} + +impl Aggregator { + pub fn new(url: Uri) -> Self { + Aggregator { + url, + chains: Default::default(), + } + } +} + +pub struct Chain { + /// Base URL of Backend Core + url: Uri, + /// Genesis hash of the chain, required to construct the URL to connect to the Backend Core + genesis_hash: Hash, + /// Dense mapping of 
SharedConnId -> Addr + multiplexing ConnId sent from the node. + nodes: DenseMap<(Addr, ConnId)>, +} + +impl Chain { + pub fn new(url: Uri, genesis_hash: Hash) -> Self { + Chain { + url, + genesis_hash, + nodes: DenseMap::new(), + } + } + + pub fn spawn(mut self) -> UnboundedSender { + let (tx_ret, mut rx) = mpsc::unbounded_channel(); + + let tx = tx_ret.clone(); + + tokio::task::spawn(async move { + let mut sender = match self.connect(tx.clone()).await { + Ok(pair) => pair, + Err(err) => { + log::error!("Failed to connect to Backend Core: {:?}", err); + return; + } + }; + + // tokio::task::spawn(async move { + + // }); + + loop { + match rx.recv().await { + Some(ChainMessage::AddNode(msg)) => { + println!("Add node {:?}", msg); + + let AddNode { node, ip, conn_id, node_connector, .. } = msg; + let sid = self.nodes.add((node_connector, conn_id)) as ShardConnId; + + let bytes = bincode::options().serialize(&ShardMessage::AddNode { + ip, + node, + sid, + }).unwrap(); + + println!("Sending {} bytes", bytes.len()); + + let _ = sender.send_binary_mut(bytes).await; + let _ = sender.flush().await; + }, + Some(ChainMessage::UpdateNode(nid, payload)) => { + let msg = ShardMessage::UpdateNode { + nid, + payload, + }; + + println!("Serialize {:?}", msg); + + let bytes = bincode::options().serialize(&msg).unwrap(); + + println!("Sending update: {} bytes", bytes.len()); + + let _ = sender.send_binary_mut(bytes).await; + let _ = sender.flush().await; + }, + Some(ChainMessage::Backend(BackendMessage::Initialize { sid, nid })) => { + if let Some((addr, conn_id)) = self.nodes.get(sid as usize) { + addr.do_send(Initialize { + nid, + conn_id: *conn_id, + chain: tx.clone(), + }) + } + }, + Some(ChainMessage::Backend(BackendMessage::Mute { sid, reason })) => { + // TODO + }, + None => (), + } + } + // let mut client = Client::new(socket.compat(), host, &path); + + // let (mut sender, mut receiver) = match client.handshake().await? { + // ServerResponse::Accepted { .. 
} => client.into_builder().finish(), + // ServerResponse::Redirect { status_code, location } => unimplemented!("follow location URL"), + // ServerResponse::Rejected { status_code } => unimplemented!("handle failure") + // }; + }); + + tx_ret + } + + pub async fn connect(&self, tx: UnboundedSender) -> anyhow::Result { + let host = self.url.host().unwrap_or("127.0.0.1"); + let port = self.url.port_u16().unwrap_or(8000); + let path = format!("{}{}", self.url.path(), self.genesis_hash); + + let socket = TcpStream::connect((host, port)).await?; + + socket.set_nodelay(true).unwrap(); + + let mut client = Client::new(socket.compat(), host, &path); + + let (sender, receiver) = match client.handshake().await? { + ServerResponse::Accepted { .. } => client.into_builder().finish(), + ServerResponse::Redirect { status_code, .. } | + ServerResponse::Rejected { status_code } => { + return Err(anyhow::anyhow!("Failed to connect, status code: {}", status_code)); + } + }; + + async fn read(tx: UnboundedSender, mut receiver: WsReceiver) -> anyhow::Result<()> { + let mut data = Vec::with_capacity(128); + + loop { + data.clear(); + + receiver.receive_data(&mut data).await?; + + println!("Received {} bytes from Backend Core", data.len()); + + match bincode::options().deserialize(&data) { + Ok(msg) => tx.send(ChainMessage::Backend(msg))?, + Err(err) => { + log::error!("Failed to read message from Backend Core: {:?}", err); + } + } + + } + } + + tokio::task::spawn(read(tx, receiver)); + + Ok(sender) + } +} + +impl Actor for Chain { + type Context = Context; +} + +#[derive(Message)] +#[rtype(result = "()")] +pub struct AddNode { + pub ip: Option, + pub genesis_hash: Hash, + pub node: NodeDetails, + pub conn_id: ConnId, + pub node_connector: Addr, +} + +#[derive(Debug)] +pub enum ChainMessage { + AddNode(AddNode), + UpdateNode(NodeId, Payload), + Backend(BackendMessage), +} + +impl fmt::Debug for AddNode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("AddNode") + } 
+} + +impl Handler for Aggregator { + type Result = (); + + fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { + let AddNode { genesis_hash, .. } = msg; + + let url = &self.url; + let chain = self + .chains + .entry(genesis_hash) + .or_insert_with(move || Chain::new(url.clone(), genesis_hash).spawn()); + + if let Err(err) = chain.send(ChainMessage::AddNode(msg)) { + let msg = err.0; + log::error!("Failed to add node to chain, shutting down chain"); + self.chains.remove(&genesis_hash); + // TODO: Send a message back to clean up node connections + } + } +} + +impl Handler for Chain { + type Result = (); + + fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { + let AddNode { ip, node_connector, .. } = msg; + + println!("Node connected to {}: {:?}", self.genesis_hash, ip); + } +} diff --git a/backend/shard/src/main.rs b/backend/shard/src/main.rs new file mode 100644 index 0000000..421e024 --- /dev/null +++ b/backend/shard/src/main.rs @@ -0,0 +1,125 @@ +use std::net::Ipv4Addr; + +use actix::prelude::*; +use actix_http::ws::Codec; +use actix_http::http::Uri; +use actix_web::{get, middleware, web, App, Error, HttpRequest, HttpResponse, HttpServer}; +use actix_web_actors::ws; +use clap::Clap; +use simple_logger::SimpleLogger; + +mod aggregator; +mod node; + +use aggregator::Aggregator; +use node::NodeConnector; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); +const AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); +const NAME: &str = "Substrate Telemetry Backend Shard"; +const ABOUT: &str = "This is the Telemetry Backend Shard that forwards the data sent by Substrate/Polkadot nodes to the Backend Core"; + +#[derive(Clap, Debug)] +#[clap(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] +struct Opts { + #[clap( + short = 'l', + long = "listen", + default_value = "127.0.0.1:8001", + about = "This is the socket address Telemetry is listening to. This is restricted to localhost (127.0.0.1) by default and should be fine for most use cases. 
If you are using Telemetry in a container, you likely want to set this to '0.0.0.0:8000'" + )] + socket: std::net::SocketAddr, + #[clap( + arg_enum, + required = false, + long = "log", + default_value = "info", + about = "Log level." + )] + log_level: LogLevel, + #[clap( + short = 'c', + long = "core", + default_value = "ws://127.0.0.1:8000/shard_submit/", + about = "Url to the Backend Core endpoint accepting shard connections" + )] + core_url: Uri, +} + +#[derive(Clap, Debug, PartialEq)] +enum LogLevel { + Error, + Warn, + Info, + Debug, + Trace, +} + +impl From<&LogLevel> for log::LevelFilter { + fn from(log_level: &LogLevel) -> Self { + match log_level { + LogLevel::Error => log::LevelFilter::Error, + LogLevel::Warn => log::LevelFilter::Warn, + LogLevel::Info => log::LevelFilter::Info, + LogLevel::Debug => log::LevelFilter::Debug, + LogLevel::Trace => log::LevelFilter::Trace, + } + } +} + +/// Entry point for connecting nodes +#[get("/submit")] +async fn node_route( + req: HttpRequest, + stream: web::Payload, + aggregator: web::Data>, +) -> Result { + let ip = req + .connection_info() + .realip_remote_addr() + .and_then(|mut addr| { + if let Some(port_idx) = addr.find(':') { + addr = &addr[..port_idx]; + } + addr.parse::().ok() + }); + + let mut res = ws::handshake(&req)?; + let aggregator = aggregator.get_ref().clone(); + + Ok(res.streaming(ws::WebsocketContext::with_codec( + NodeConnector::new(aggregator, ip), + stream, + Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit + ))) +} + +/// Telemetry entry point. Listening by default on 127.0.0.1:8000. +/// This can be changed using the `PORT` and `BIND` ENV variables. +#[actix_web::main] +async fn main() -> std::io::Result<()> { + let opts = Opts::parse(); + let log_level = &opts.log_level; + SimpleLogger::new() + .with_level(log_level.into()) + .init() + .expect("Must be able to start a logger"); + + println!("URL? 
{:?} {:?}", opts.core_url.host(), opts.core_url.port_u16()); + + let aggregator = Aggregator::new(opts.core_url).start(); + + log::info!( + "Starting Telemetry Shard version: {}", + env!("CARGO_PKG_VERSION") + ); + HttpServer::new(move || { + App::new() + .wrap(middleware::NormalizePath::default()) + .data(aggregator.clone()) + .service(node_route) + }) + .bind(opts.socket)? + .run() + .await +} diff --git a/backend/shard/src/node.rs b/backend/shard/src/node.rs new file mode 100644 index 0000000..1e598b3 --- /dev/null +++ b/backend/shard/src/node.rs @@ -0,0 +1,201 @@ +use std::collections::BTreeMap; +use std::net::Ipv4Addr; +use std::time::{Duration, Instant}; + +use crate::aggregator::{AddNode, Aggregator, ChainMessage}; +// use crate::chain::{Chain, RemoveNode, UpdateNode}; +use actix::prelude::*; +use actix_web_actors::ws::{self, CloseReason}; +use shared::node::{NodeMessage, Payload}; +use shared::types::{ConnId, NodeId}; +use shared::ws::{MultipartHandler, WsMessage}; +use tokio::sync::mpsc::UnboundedSender; + +/// How often heartbeat pings are sent +const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); +/// How long before lack of client response causes a timeout +const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); + +pub struct NodeConnector { + /// Multiplexing connections by id + multiplex: BTreeMap, + /// Client must send ping at least once every 60 seconds (CLIENT_TIMEOUT), + hb: Instant, + /// Aggregator actor address + aggregator: Addr, + /// IP address of the node this connector is responsible for + ip: Option, + /// Helper for handling continuation messages + multipart: MultipartHandler, +} + +enum ConnMultiplex { + Connected { + /// Id of the node this multiplex connector is responsible for handling + nid: NodeId, + /// Chain address to which this multiplex connector is delegating messages + chain: UnboundedSender, + }, + Waiting { + /// Backlog of messages to be sent once we get a recipient handle to the chain + backlog: Vec, + }, +} 
+ +impl Default for ConnMultiplex { + fn default() -> Self { + ConnMultiplex::Waiting { + backlog: Vec::new(), + } + } +} + +impl Actor for NodeConnector { + type Context = ws::WebsocketContext; + + fn started(&mut self, ctx: &mut Self::Context) { + self.heartbeat(ctx); + } + + fn stopped(&mut self, _: &mut Self::Context) { + // for mx in self.multiplex.values() { + // if let ConnMultiplex::Connected { chain, nid } = mx { + // chain.do_send(RemoveNode(*nid)); + // } + // } + } +} + +impl NodeConnector { + pub fn new(aggregator: Addr, ip: Option) -> Self { + Self { + multiplex: BTreeMap::new(), + hb: Instant::now(), + aggregator, + ip, + multipart: MultipartHandler::default(), + } + } + + fn heartbeat(&self, ctx: &mut ::Context) { + ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { + // check client heartbeats + if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT { + // stop actor + ctx.close(Some(CloseReason { + code: ws::CloseCode::Abnormal, + description: Some("Missed heartbeat".into()), + })); + ctx.stop(); + } + }); + } + + fn handle_message( + &mut self, + msg: NodeMessage, + ctx: &mut ::Context, + ) { + let conn_id = msg.id(); + let payload = msg.into(); + + match self.multiplex.entry(conn_id).or_default() { + ConnMultiplex::Connected { nid, chain } => { + // TODO: error handle + let _ = chain.send(ChainMessage::UpdateNode(*nid, payload)); + } + ConnMultiplex::Waiting { backlog } => { + if let Payload::SystemConnected(connected) = payload { + println!("Node connected {:?}", connected.node); + self.aggregator.do_send(AddNode { + genesis_hash: connected.genesis_hash, + ip: self.ip, + node: connected.node, + conn_id, + node_connector: ctx.address(), + }); + } else { + if backlog.len() >= 10 { + backlog.remove(0); + } + + backlog.push(payload); + } + } + } + } +} + +#[derive(Message)] +#[rtype(result = "()")] +pub struct Initialize { + pub nid: NodeId, + pub conn_id: ConnId, + pub chain: UnboundedSender, +} + +impl Handler for NodeConnector { + type Result 
= (); + + fn handle(&mut self, msg: Initialize, _: &mut Self::Context) { + let Initialize { + nid, + conn_id, + chain, + } = msg; + log::trace!(target: "NodeConnector::Initialize", "Initializing a node, nid={}, on conn_id={}", nid, conn_id); + let mx = self.multiplex.entry(conn_id).or_default(); + + if let ConnMultiplex::Waiting { backlog } = mx { + for payload in backlog.drain(..) { + // TODO: error handle. + let _ = chain.send(ChainMessage::UpdateNode(nid, payload)); + } + + *mx = ConnMultiplex::Connected { + nid, + chain, + }; + }; + } +} + +impl StreamHandler> for NodeConnector { + fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { + self.hb = Instant::now(); + + let data = match msg.map(|msg| self.multipart.handle(msg)) { + Ok(WsMessage::Nop) => return, + Ok(WsMessage::Ping(msg)) => { + ctx.pong(&msg); + return; + } + Ok(WsMessage::Data(data)) => data, + Ok(WsMessage::Close(reason)) => { + ctx.close(reason); + ctx.stop(); + return; + } + Err(error) => { + log::error!("{:?}", error); + ctx.stop(); + return; + } + }; + + match serde_json::from_slice(&data) { + Ok(msg) => self.handle_message(msg, ctx), + #[cfg(debug)] + Err(err) => { + let data: &[u8] = data.get(..512).unwrap_or_else(|| &data); + log::warn!( + "Failed to parse node message: {} {}", + err, + std::str::from_utf8(data).unwrap_or_else(|_| "INVALID UTF8") + ) + } + #[cfg(not(debug))] + Err(_) => (), + } + } +} diff --git a/backend/shared/Cargo.toml b/backend/shared/Cargo.toml new file mode 100644 index 0000000..8690aae --- /dev/null +++ b/backend/shared/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "shared" +version = "0.1.0" +authors = ["Parity Technologies Ltd. 
"] +edition = "2018" + +[dependencies] +actix = "0.11.1" +actix-web = { version = "4.0.0-beta.4", default-features = false } +actix-web-actors = "4.0.0-beta.3" +actix-http = "3.0.0-beta.4" +bytes = "1.0.1" +fnv = "1.0.7" +hex = "0.4.3" +log = "0.4" +num-traits = "0.2" +primitive-types = { version = "0.9.0", features = ["serde"] } +rustc-hash = "1.1.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0", features = ["raw_value"] } +thiserror = "1.0.24" + +[dev-dependencies] +bincode = "1.3.3" diff --git a/backend/shared/src/lib.rs b/backend/shared/src/lib.rs new file mode 100644 index 0000000..dc147b4 --- /dev/null +++ b/backend/shared/src/lib.rs @@ -0,0 +1,5 @@ +pub mod node; +pub mod shard; +pub mod types; +pub mod util; +pub mod ws; diff --git a/backend/core/src/node/message.rs b/backend/shared/src/node.rs similarity index 61% rename from backend/core/src/node/message.rs rename to backend/shared/src/node.rs index 64cd8a5..420e568 100644 --- a/backend/core/src/node/message.rs +++ b/backend/shared/src/node.rs @@ -1,9 +1,8 @@ -use crate::node::NodeDetails; -use crate::types::{Block, BlockHash, BlockNumber, ConnId}; -use crate::util::Hash; +use crate::types::{Block, BlockHash, BlockNumber, ConnId, NodeDetails}; +use crate::util::{Hash, NullAny}; use actix::prelude::*; -use serde::de::IgnoredAny; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; +use serde::ser::Serializer; #[derive(Deserialize, Debug, Message)] #[rtype(result = "()")] @@ -49,33 +48,52 @@ pub enum Payload { #[serde(rename = "notify.finalized")] NotifyFinalized(Finalized), #[serde(rename = "txpool.import")] - TxPoolImport(IgnoredAny), - #[serde(rename = "afg.finalized")] - AfgFinalized(AfgFinalized), - #[serde(rename = "afg.received_precommit")] - AfgReceivedPrecommit(AfgReceivedPrecommit), - #[serde(rename = "afg.received_prevote")] - AfgReceivedPrevote(AfgReceivedPrevote), - #[serde(rename = "afg.received_commit")] - AfgReceivedCommit(AfgReceivedCommit), - 
#[serde(rename = "afg.authority_set")] - AfgAuthoritySet(AfgAuthoritySet), - #[serde(rename = "afg.finalized_blocks_up_to")] - AfgFinalizedBlocksUpTo(IgnoredAny), - #[serde(rename = "aura.pre_sealed_block")] - AuraPreSealedBlock(IgnoredAny), + TxPoolImport(NullAny), + // #[serde(rename = "afg.finalized")] + // AfgFinalized(AfgFinalized), + // #[serde(rename = "afg.received_precommit")] + // AfgReceivedPrecommit(AfgReceivedPrecommit), + // #[serde(rename = "afg.received_prevote")] + // AfgReceivedPrevote(AfgReceivedPrevote), + // #[serde(rename = "afg.received_commit")] + // AfgReceivedCommit(AfgReceivedCommit), + // #[serde(rename = "afg.authority_set")] + // AfgAuthoritySet(AfgAuthoritySet), + // #[serde(rename = "afg.finalized_blocks_up_to")] + // AfgFinalizedBlocksUpTo(NullAny), + // #[serde(rename = "aura.pre_sealed_block")] + // AuraPreSealedBlock(NullAny), #[serde(rename = "prepared_block_for_proposing")] - PreparedBlockForProposing(IgnoredAny), + PreparedBlockForProposing(NullAny), } -#[derive(Deserialize, Debug)] +impl Serialize for Payload { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use Payload::*; + + match self { + SystemConnected(val) => serializer.serialize_newtype_variant("Payload", 0, "system.connected", val), + SystemInterval(val) => serializer.serialize_newtype_variant("Payload", 1, "system.interval", val), + BlockImport(val) => serializer.serialize_newtype_variant("Payload", 3, "block.import", val), + NotifyFinalized(val) => serializer.serialize_newtype_variant("Payload", 4, "notify.finalized", val), + TxPoolImport(_) => serializer.serialize_unit_variant("Payload", 3, "txpool.import"), + PreparedBlockForProposing(_) => serializer.serialize_unit_variant("Payload", 4, "prepared_block_for_proposing"), + _ => unimplemented!() + } + } +} + +#[derive(Deserialize, Serialize, Debug)] pub struct SystemConnected { pub genesis_hash: Hash, #[serde(flatten)] pub node: NodeDetails, } -#[derive(Deserialize, Debug)] 
+#[derive(Deserialize, Serialize, Debug)] pub struct SystemInterval { pub peers: Option, pub txcount: Option, @@ -88,60 +106,51 @@ pub struct SystemInterval { pub used_state_cache_size: Option, } -#[derive(Deserialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct Finalized { #[serde(rename = "best")] pub hash: BlockHash, pub height: Box, } -#[derive(Deserialize, Debug)] +#[derive(Deserialize, Serialize, Debug)] pub struct AfgAuthoritySet { pub authority_id: Box, pub authorities: Box, pub authority_set_id: Box, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Deserialize, Serialize, Debug, Clone)] pub struct AfgFinalized { pub finalized_hash: BlockHash, pub finalized_number: Box, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Deserialize, Serialize, Debug, Clone)] pub struct AfgReceived { pub target_hash: BlockHash, pub target_number: Box, pub voter: Option>, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Deserialize, Serialize, Debug, Clone)] pub struct AfgReceivedPrecommit { #[serde(flatten)] pub received: AfgReceived, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Deserialize, Serialize, Debug, Clone)] pub struct AfgReceivedPrevote { #[serde(flatten)] pub received: AfgReceived, } -#[derive(Deserialize, Debug, Clone)] +#[derive(Deserialize, Serialize, Debug, Clone)] pub struct AfgReceivedCommit { #[serde(flatten)] pub received: AfgReceived, } -impl Block { - pub fn zero() -> Self { - Block { - hash: BlockHash::from([0; 32]), - height: 0, - } - } -} - impl Payload { pub fn best_block(&self) -> Option<&Block> { match self { @@ -169,6 +178,7 @@ impl Payload { #[cfg(test)] mod tests { use super::*; + use bincode::Options; #[test] fn message_v1() { @@ -193,4 +203,16 @@ mod tests { "message did not match variant V2", ); } + + #[test] + fn bincode_block_zero() { + let raw = Block::zero(); + + let bytes = bincode::options().serialize(&raw).unwrap(); + + let deserialized: Block = bincode::options().deserialize(&bytes).unwrap(); + + 
assert_eq!(raw.hash, deserialized.hash); + assert_eq!(raw.height, deserialized.height); + } } diff --git a/backend/shared/src/shard.rs b/backend/shared/src/shard.rs new file mode 100644 index 0000000..125a826 --- /dev/null +++ b/backend/shared/src/shard.rs @@ -0,0 +1,38 @@ +use std::net::Ipv4Addr; + +use crate::ws::MuteReason; +use crate::node::Payload; +use crate::types::{NodeId, NodeDetails}; +use serde::{Deserialize, Serialize}; + +/// Alias for the ID of the node connection +pub type ShardConnId = u32; + +/// Message sent from the shard to the backend core +#[derive(Deserialize, Serialize, Debug)] +pub enum ShardMessage { + /// Get a connection id for a new node, passing IPv4 + AddNode { + ip: Option, + node: NodeDetails, + sid: ShardConnId, + }, + /// Send a message payload for a given node + UpdateNode { + nid: NodeId, + payload: Payload, + }, +} + +/// Message sent form the backend core to the shard +#[derive(Deserialize, Serialize, Debug)] +pub enum BackendMessage { + Initialize { + sid: ShardConnId, + nid: NodeId, + }, + Mute { + sid: ShardConnId, + reason: MuteReason, + }, +} diff --git a/backend/core/src/types.rs b/backend/shared/src/types.rs similarity index 80% rename from backend/core/src/types.rs rename to backend/shared/src/types.rs index a93a709..a16ce5a 100644 --- a/backend/core/src/types.rs +++ b/backend/shared/src/types.rs @@ -1,5 +1,5 @@ -use serde::ser::{Serialize, SerializeTuple, Serializer}; -use serde::Deserialize; +use serde::ser::{SerializeTuple, Serializer}; +use serde::{Deserialize, Serialize}; use crate::util::{now, MeanList}; @@ -10,7 +10,7 @@ pub type Timestamp = u64; pub type Address = Box; pub use primitive_types::H256 as BlockHash; -#[derive(Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct NodeDetails { pub chain: Box, pub name: Box, @@ -32,13 +32,22 @@ pub struct NodeIO { pub used_state_cache_size: MeanList, } -#[derive(Deserialize, Debug, Clone, Copy)] +#[derive(Deserialize, Serialize, 
Debug, Clone, Copy)] pub struct Block { #[serde(rename = "best")] pub hash: BlockHash, pub height: BlockNumber, } +impl Block { + pub fn zero() -> Self { + Block { + hash: BlockHash::from([0; 32]), + height: 0, + } + } +} + #[derive(Debug, Clone, Copy)] pub struct BlockDetails { pub block: Block, @@ -75,20 +84,20 @@ pub struct NodeLocation { pub city: Box, } -impl Serialize for NodeDetails { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut tup = serializer.serialize_tuple(6)?; - tup.serialize_element(&self.name)?; - tup.serialize_element(&self.implementation)?; - tup.serialize_element(&self.version)?; - tup.serialize_element(&self.validator)?; - tup.serialize_element(&self.network_id)?; - tup.end() - } -} +// impl Serialize for NodeDetails { +// fn serialize(&self, serializer: S) -> Result +// where +// S: Serializer, +// { +// let mut tup = serializer.serialize_tuple(6)?; +// tup.serialize_element(&self.name)?; +// tup.serialize_element(&self.implementation)?; +// tup.serialize_element(&self.version)?; +// tup.serialize_element(&self.validator)?; +// tup.serialize_element(&self.network_id)?; +// tup.end() +// } +// } impl Serialize for NodeStats { fn serialize(&self, serializer: S) -> Result diff --git a/backend/core/src/util.rs b/backend/shared/src/util.rs similarity index 90% rename from backend/core/src/util.rs rename to backend/shared/src/util.rs index 374ec78..a78310e 100644 --- a/backend/core/src/util.rs +++ b/backend/shared/src/util.rs @@ -1,13 +1,13 @@ mod dense_map; mod hash; -mod location; mod mean_list; +mod null; mod num_stats; pub use dense_map::DenseMap; pub use hash::Hash; -pub use location::{LocateRequest, Locator, LocatorFactory}; pub use mean_list::MeanList; +pub use null::NullAny; pub use num_stats::NumStats; pub fn fnv>(data: D) -> u64 { diff --git a/backend/core/src/util/dense_map.rs b/backend/shared/src/util/dense_map.rs similarity index 100% rename from backend/core/src/util/dense_map.rs rename to 
backend/shared/src/util/dense_map.rs diff --git a/backend/shared/src/util/hash.rs b/backend/shared/src/util/hash.rs new file mode 100644 index 0000000..23b10be --- /dev/null +++ b/backend/shared/src/util/hash.rs @@ -0,0 +1,209 @@ +use std::fmt::{self, Debug, Display}; +use std::str::FromStr; + +use actix_web::error::ResponseError; +use serde::ser::{Serialize, Serializer}; +use serde::de::{self, Deserialize, Deserializer, Unexpected, Visitor, SeqAccess}; + +const HASH_BYTES: usize = 32; + +/// Newtype wrapper for 32-byte hash values, implementing readable `Debug` and `serde::Deserialize`. +// We could use primitive_types::H256 here, but opted for a custom type to avoid more dependencies. +#[derive(Hash, PartialEq, Eq, Clone, Copy)] +pub struct Hash([u8; HASH_BYTES]); + +struct HashVisitor; + +impl<'de> Visitor<'de> for HashVisitor { + type Value = Hash; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("byte array of length 32, or hexidecimal string of 32 bytes beginning with 0x") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + value + .parse() + .map_err(|_| de::Error::invalid_value(Unexpected::Str(value), &self)) + } + + fn visit_bytes(self, value: &[u8]) -> Result + where + E: de::Error, + { + if value.len() == HASH_BYTES { + let mut hash = [0; HASH_BYTES]; + + hash.copy_from_slice(value); + + return Ok(Hash(hash)); + } + + Hash::from_ascii(value) + .map_err(|_| de::Error::invalid_value(Unexpected::Bytes(value), &self)) + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let mut hash = [0u8; HASH_BYTES]; + + for (i, byte) in hash.iter_mut().enumerate() { + match seq.next_element()? 
{ + Some(b) => *byte = b, + None => return Err(de::Error::invalid_length(i, &"an array of 32 bytes")) + } + } + + if seq.next_element::()?.is_some() { + return Err(de::Error::invalid_length(33, &"an array of 32 bytes")); + } + + Ok(Hash(hash)) + } +} + +impl Hash { + pub fn from_ascii(value: &[u8]) -> Result { + if !value.starts_with(b"0x") { + return Err(HashParseError::InvalidPrefix); + } + + let mut hash = [0; HASH_BYTES]; + + hex::decode_to_slice(&value[2..], &mut hash).map_err(HashParseError::HexError)?; + + Ok(Hash(hash)) + } +} + +impl FromStr for Hash { + type Err = HashParseError; + + fn from_str(value: &str) -> Result { + Hash::from_ascii(value.as_bytes()) + } +} + +impl<'de> Deserialize<'de> for Hash { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_bytes(HashVisitor) + } +} + +impl Serialize for Hash { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_bytes(&self.0) + } +} + +impl Display for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("0x")?; + + let mut ascii = [0; HASH_BYTES * 2]; + + hex::encode_to_slice(self.0, &mut ascii) + .expect("Encoding 32 bytes into 64 bytes of ascii; qed"); + + f.write_str(std::str::from_utf8(&ascii).expect("ASCII hex encoded bytes canot fail; qed")) + } +} + +impl Debug for Hash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt(self, f) + } +} + +#[derive(thiserror::Error, Debug)] +pub enum HashParseError { + HexError(hex::FromHexError), + InvalidPrefix, +} + +impl Display for HashParseError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl ResponseError for HashParseError {} + +#[cfg(test)] +mod tests { + use super::Hash; + use bincode::Options; + + const DUMMY: Hash = { + let mut hash = [0; 32]; + hash[0] = 0xDE; + hash[1] = 0xAD; + hash[2] = 0xBE; + hash[3] = 0xEF; + Hash(hash) + }; + + #[test] + fn 
deserialize_json_hash_str() { + let json = r#""0xdeadBEEF00000000000000000000000000000000000000000000000000000000""#; + + let hash: Hash = serde_json::from_str(json).unwrap(); + + assert_eq!(hash, DUMMY); + } + + #[test] + fn deserialize_json_array() { + let json = r#"[222,173,190,239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"#; + + let hash: Hash = serde_json::from_str(json).unwrap(); + + assert_eq!(hash, DUMMY); + } + + + #[test] + fn deserialize_json_array_too_short() { + let json = r#"[222,173,190,239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"#; + + let res = serde_json::from_str::(json); + + assert!(res.is_err()); + } + + #[test] + fn deserialize_json_array_too_long() { + let json = r#"[222,173,190,239,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]"#; + + let res = serde_json::from_str::(json); + + assert!(res.is_err()); + } + + #[test] + fn bincode() { + let bytes = bincode::options().serialize(&DUMMY).unwrap(); + + let mut expected = [0; 33]; + + expected[0] = 32; // length + expected[1..].copy_from_slice(&DUMMY.0); + + assert_eq!(bytes, &expected); + + let deserialized: Hash = bincode::options().deserialize(&bytes).unwrap(); + + assert_eq!(DUMMY, deserialized); + } +} diff --git a/backend/core/src/util/mean_list.rs b/backend/shared/src/util/mean_list.rs similarity index 100% rename from backend/core/src/util/mean_list.rs rename to backend/shared/src/util/mean_list.rs diff --git a/backend/shared/src/util/null.rs b/backend/shared/src/util/null.rs new file mode 100644 index 0000000..baf6e81 --- /dev/null +++ b/backend/shared/src/util/null.rs @@ -0,0 +1,136 @@ +use serde::de::{Deserialize, Deserializer, IgnoredAny}; +use serde::ser::{Serialize, Serializer}; + +/// Alternative to `serde::de::IgnoreAny` that implements `Serialize`. +/// Will serialize to `null` in JSON, or empty data in bincode. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct NullAny; + +impl<'de> Deserialize<'de> for NullAny { + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // `bincode` is going to throw an error here as it does not support `IgnoredAny`. + // + // When using `bincode` `NullAny` will always serialize to unit (aka no data), so + // this safely becomes a no-op. + let _ = deserializer.deserialize_ignored_any(IgnoredAny); + + Ok(NullAny) + } +} + +impl Serialize for NullAny { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_unit() + } +} + +#[cfg(test)] +mod tests { + use super::NullAny; + use bincode::Options; + use serde::{Deserialize, Serialize}; + + #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] + struct Dummy { + ignore: NullAny, + } + + #[test] + fn deserialize_json_null() { + let dummy: Dummy = serde_json::from_str(r#"{"ignore":null}"#).unwrap(); + + assert_eq!(dummy, Dummy { ignore: NullAny }); + } + + #[test] + fn deserialize_json_struct() { + let dummy: Dummy = serde_json::from_str(r#"{"ignore":{"foo":"bar"}}"#).unwrap(); + + assert_eq!(dummy, Dummy { ignore: NullAny }); + } + + #[test] + fn deserialize_json_struct_invalid() { + let dummy = serde_json::from_str::(r#"{"ignore":{"foo":"bar"}"#); + + assert!(dummy.is_err()); + } + + #[test] + fn deserialize_json_vec_any() { + let raw = [NullAny; 10]; + let json = r#"[null,true,false,10,{},[],[null],{"foo":"bar"},[9,9,9],"ten"]"#; + + let deserialized: Vec = serde_json::from_str(json).unwrap(); + + assert_eq!(&raw[..], &deserialized); + } + + #[test] + fn serialize_json_null() { + let dummy = Dummy { ignore: NullAny }; + + let json = serde_json::to_string(&dummy).unwrap(); + + assert_eq!(json, r#"{"ignore":null}"#); + } + + #[test] + fn bincode_vec() { + let raw = vec![NullAny; 10]; + + let bytes = bincode::options().serialize(&raw).unwrap(); + + assert_eq!(bytes, &[10u8]); + + let deserialized: Vec = 
bincode::options().deserialize(&bytes).unwrap(); + + assert_eq!(raw, deserialized); + } + + #[test] + fn bincode_tuple() { + let raw = (NullAny, "Hello world".to_string()); + + let bytes = bincode::options().serialize(&raw).unwrap(); + + assert_eq!(bytes, b"\x0BHello world"); // 0B = 11 = length of string + + let deserialized: (NullAny, String) = bincode::options().deserialize(&bytes).unwrap(); + + assert_eq!(raw, deserialized); + } + + #[test] + fn json_vec() { + let raw = vec![NullAny; 10]; + + let json = serde_json::to_string(&raw).unwrap(); + + assert_eq!(json, "[null,null,null,null,null,null,null,null,null,null]"); + + let deserialized: Vec = serde_json::from_str(&json).unwrap(); + + assert_eq!(raw, deserialized); + } + + #[test] + fn json_tuple() { + let raw = (NullAny, "Hello world".to_string()); + + let json = serde_json::to_string(&raw).unwrap(); + + assert_eq!(json, r#"[null,"Hello world"]"#); + + let deserialized: (NullAny, String) = serde_json::from_str(&json).unwrap(); + + assert_eq!(raw, deserialized); + } +} diff --git a/backend/core/src/util/num_stats.rs b/backend/shared/src/util/num_stats.rs similarity index 100% rename from backend/core/src/util/num_stats.rs rename to backend/shared/src/util/num_stats.rs diff --git a/backend/shared/src/ws.rs b/backend/shared/src/ws.rs new file mode 100644 index 0000000..6b6cb85 --- /dev/null +++ b/backend/shared/src/ws.rs @@ -0,0 +1,98 @@ +use actix_http::ws::Item; +use actix_web_actors::ws::{self, CloseReason, CloseCode}; +use bytes::{Bytes, BytesMut}; +use serde::{Serialize, Deserialize}; +use actix::prelude::Message; + +/// Helper that will buffer continuation messages from actix +/// until completion, capping at 10mb. +#[derive(Default)] +pub struct MultipartHandler { + buf: BytesMut, +} + +/// Message to signal that a node should be muted for a reason that's +/// cheap to transfer between Actors or over the wire for shards. 
+#[derive(Serialize, Deserialize, Message, Clone, Copy, Debug)] +#[rtype("()")] +pub enum MuteReason { + /// Node was denied connection for any arbitrary reason, + /// and should not attempt to reconnect. + Denied, + /// Node was denied because the chain it belongs to is currently + /// at the limit of allowed nodes, and it may attempt to reconnect. + Overquota, +} + +impl From for CloseReason { + fn from(mute: MuteReason) -> CloseReason { + match mute { + MuteReason::Denied => CloseReason { + code: CloseCode::Abnormal, + description: Some("Denied".into()), + }, + MuteReason::Overquota => CloseReason { + code: CloseCode::Again, + description: Some("Overquota".into()), + }, + } + } +} + +/// Continuation buffer limit, 10mb +const CONT_BUF_LIMIT: usize = 10 * 1024 * 1024; + +pub enum WsMessage { + Nop, + Ping(Bytes), + Data(Bytes), + Close(Option), +} + +impl MultipartHandler { + pub fn handle(&mut self, msg: ws::Message) -> WsMessage { + match msg { + ws::Message::Ping(msg) => WsMessage::Ping(msg), + ws::Message::Pong(_) => WsMessage::Nop, + ws::Message::Text(text) => WsMessage::Data(text.into_bytes()), + ws::Message::Binary(data) => WsMessage::Data(data), + ws::Message::Close(reason) => WsMessage::Close(reason), + ws::Message::Nop => WsMessage::Nop, + ws::Message::Continuation(cont) => match cont { + Item::FirstText(bytes) | Item::FirstBinary(bytes) => { + self.start_frame(&bytes); + WsMessage::Nop + } + Item::Continue(bytes) => { + self.continue_frame(&bytes); + WsMessage::Nop + } + Item::Last(bytes) => { + self.continue_frame(&bytes); + WsMessage::Data(self.finish_frame()) + } + }, + } + } + + fn start_frame(&mut self, bytes: &[u8]) { + if !self.buf.is_empty() { + log::error!("Unused continuation buffer"); + self.buf.clear(); + } + self.continue_frame(bytes); + } + + fn continue_frame(&mut self, bytes: &[u8]) { + if self.buf.len() + bytes.len() <= CONT_BUF_LIMIT { + self.buf.extend_from_slice(&bytes); + } else { + log::error!("Continuation buffer overflow"); + 
self.buf = BytesMut::new(); + } + } + + fn finish_frame(&mut self) -> Bytes { + std::mem::replace(&mut self.buf, BytesMut::new()).freeze() + } +} From c276c2065a4a5587224a1ad1c89aaac1509b5a63 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 14 Jun 2021 10:57:06 +0100 Subject: [PATCH 002/134] rename shared to common to disambiguate from 'shard' --- backend/Cargo.lock | 46 +++++++++---------- backend/Cargo.toml | 4 +- backend/{shared => common}/Cargo.toml | 2 +- backend/{shared => common}/src/lib.rs | 0 backend/{shared => common}/src/node.rs | 0 backend/{shared => common}/src/shard.rs | 0 backend/{shared => common}/src/types.rs | 0 backend/{shared => common}/src/util.rs | 0 .../{shared => common}/src/util/dense_map.rs | 0 backend/{shared => common}/src/util/hash.rs | 0 .../{shared => common}/src/util/mean_list.rs | 0 backend/{shared => common}/src/util/null.rs | 0 .../{shared => common}/src/util/num_stats.rs | 0 backend/{shared => common}/src/ws.rs | 0 backend/core/Cargo.toml | 2 +- backend/core/src/aggregator.rs | 8 ++-- backend/core/src/chain.rs | 6 +-- backend/core/src/feed.rs | 2 +- backend/core/src/feed/connector.rs | 2 +- backend/core/src/location.rs | 2 +- backend/core/src/node.rs | 6 +-- backend/core/src/node/connector.rs | 6 +-- backend/core/src/shard/connector.rs | 8 ++-- backend/shard/Cargo.toml | 2 +- backend/shard/src/aggregator.rs | 8 ++-- backend/shard/src/node.rs | 6 +-- 26 files changed, 55 insertions(+), 55 deletions(-) rename backend/{shared => common}/Cargo.toml (97%) rename backend/{shared => common}/src/lib.rs (100%) rename backend/{shared => common}/src/node.rs (100%) rename backend/{shared => common}/src/shard.rs (100%) rename backend/{shared => common}/src/types.rs (100%) rename backend/{shared => common}/src/util.rs (100%) rename backend/{shared => common}/src/util/dense_map.rs (100%) rename backend/{shared => common}/src/util/hash.rs (100%) rename backend/{shared => common}/src/util/mean_list.rs (100%) rename backend/{shared => 
common}/src/util/null.rs (100%) rename backend/{shared => common}/src/util/num_stats.rs (100%) rename backend/{shared => common}/src/ws.rs (100%) diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 9c53118..db29d1c 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -511,6 +511,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "common" +version = "0.1.0" +dependencies = [ + "actix", + "actix-http", + "actix-web", + "actix-web-actors", + "bincode", + "bytes 1.0.1", + "fnv", + "hex", + "log", + "num-traits", + "primitive-types", + "rustc-hash", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "const_fn" version = "0.4.2" @@ -1635,11 +1656,11 @@ dependencies = [ "bincode", "bytes 1.0.1", "clap", + "common", "log", "rustc-hash", "serde", "serde_json", - "shared", "simple_logger", "soketto", "tokio", @@ -1647,27 +1668,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "shared" -version = "0.1.0" -dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", - "bincode", - "bytes 1.0.1", - "fnv", - "hex", - "log", - "num-traits", - "primitive-types", - "rustc-hash", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "signal-hook-registry" version = "1.2.1" @@ -1828,6 +1828,7 @@ dependencies = [ "bytes 1.0.1", "chrono", "clap", + "common", "ctor", "log", "parking_lot", @@ -1835,7 +1836,6 @@ dependencies = [ "rustc-hash", "serde", "serde_json", - "shared", "simple_logger", ] diff --git a/backend/Cargo.toml b/backend/Cargo.toml index e6a403c..569abc9 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -1,8 +1,8 @@ [workspace] members = [ + "common", "core", - "shared", - "shard", + "shard" ] [profile.dev] diff --git a/backend/shared/Cargo.toml b/backend/common/Cargo.toml similarity index 97% rename from backend/shared/Cargo.toml rename to backend/common/Cargo.toml index 8690aae..3bd9911 100644 --- a/backend/shared/Cargo.toml +++ b/backend/common/Cargo.toml @@ -1,5 +1,5 @@ [package] -name 
= "shared" +name = "common" version = "0.1.0" authors = ["Parity Technologies Ltd. "] edition = "2018" diff --git a/backend/shared/src/lib.rs b/backend/common/src/lib.rs similarity index 100% rename from backend/shared/src/lib.rs rename to backend/common/src/lib.rs diff --git a/backend/shared/src/node.rs b/backend/common/src/node.rs similarity index 100% rename from backend/shared/src/node.rs rename to backend/common/src/node.rs diff --git a/backend/shared/src/shard.rs b/backend/common/src/shard.rs similarity index 100% rename from backend/shared/src/shard.rs rename to backend/common/src/shard.rs diff --git a/backend/shared/src/types.rs b/backend/common/src/types.rs similarity index 100% rename from backend/shared/src/types.rs rename to backend/common/src/types.rs diff --git a/backend/shared/src/util.rs b/backend/common/src/util.rs similarity index 100% rename from backend/shared/src/util.rs rename to backend/common/src/util.rs diff --git a/backend/shared/src/util/dense_map.rs b/backend/common/src/util/dense_map.rs similarity index 100% rename from backend/shared/src/util/dense_map.rs rename to backend/common/src/util/dense_map.rs diff --git a/backend/shared/src/util/hash.rs b/backend/common/src/util/hash.rs similarity index 100% rename from backend/shared/src/util/hash.rs rename to backend/common/src/util/hash.rs diff --git a/backend/shared/src/util/mean_list.rs b/backend/common/src/util/mean_list.rs similarity index 100% rename from backend/shared/src/util/mean_list.rs rename to backend/common/src/util/mean_list.rs diff --git a/backend/shared/src/util/null.rs b/backend/common/src/util/null.rs similarity index 100% rename from backend/shared/src/util/null.rs rename to backend/common/src/util/null.rs diff --git a/backend/shared/src/util/num_stats.rs b/backend/common/src/util/num_stats.rs similarity index 100% rename from backend/shared/src/util/num_stats.rs rename to backend/common/src/util/num_stats.rs diff --git a/backend/shared/src/ws.rs 
b/backend/common/src/ws.rs similarity index 100% rename from backend/shared/src/ws.rs rename to backend/common/src/ws.rs diff --git a/backend/core/Cargo.toml b/backend/core/Cargo.toml index bac723c..1705048 100644 --- a/backend/core/Cargo.toml +++ b/backend/core/Cargo.toml @@ -15,7 +15,7 @@ bytes = "1.0.1" chrono = { version = "0.4", features = ["serde"] } serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0", features = ["raw_value"] } -shared = { path = "../shared" } +common = { path = "../common" } log = "0.4" simple_logger = "1.11.0" parking_lot = "0.11" diff --git a/backend/core/src/aggregator.rs b/backend/core/src/aggregator.rs index dd38321..4e1a289 100644 --- a/backend/core/src/aggregator.rs +++ b/backend/core/src/aggregator.rs @@ -7,10 +7,10 @@ use crate::chain::{self, Chain, ChainId, Label}; use crate::feed::connector::{Connected, FeedConnector, FeedId}; use crate::feed::{self, FeedMessageSerializer}; use crate::node::connector::NodeConnector; -use shared::ws::MuteReason; -use shared::shard::ShardConnId; -use shared::types::{ConnId, NodeDetails}; -use shared::util::{DenseMap, Hash}; +use common::ws::MuteReason; +use common::shard::ShardConnId; +use common::types::{ConnId, NodeDetails}; +use common::util::{DenseMap, Hash}; pub struct Aggregator { genesis_hashes: HashMap, diff --git a/backend/core/src/chain.rs b/backend/core/src/chain.rs index f975073..6a0c4d7 100644 --- a/backend/core/src/chain.rs +++ b/backend/core/src/chain.rs @@ -7,9 +7,9 @@ use crate::aggregator::{Aggregator, DropChain, NodeCount, NodeSource, RenameChai use crate::feed::connector::{FeedConnector, FeedId, Subscribed, Unsubscribed}; use crate::feed::{self, FeedMessageSerializer}; use crate::node::Node; -use shared::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; -use shared::util::{now, DenseMap, NumStats}; -use shared::node::Payload; +use common::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; +use common::util::{now, DenseMap, 
NumStats}; +use common::node::Payload; const STALE_TIMEOUT: u64 = 2 * 60 * 1000; // 2 minutes diff --git a/backend/core/src/feed.rs b/backend/core/src/feed.rs index 90603a2..c4aca8b 100644 --- a/backend/core/src/feed.rs +++ b/backend/core/src/feed.rs @@ -4,7 +4,7 @@ use std::mem; use crate::node::Node; use serde_json::to_writer; -use shared::types::{ +use common::types::{ Address, BlockDetails, BlockHash, BlockNumber, NodeHardware, NodeIO, NodeId, NodeStats, Timestamp, NodeDetails, }; diff --git a/backend/core/src/feed/connector.rs b/backend/core/src/feed/connector.rs index a75c3c5..16e3a8e 100644 --- a/backend/core/src/feed/connector.rs +++ b/backend/core/src/feed/connector.rs @@ -4,7 +4,7 @@ use crate::feed::{FeedMessageSerializer, Pong}; use actix::prelude::*; use actix_web_actors::ws; use bytes::Bytes; -use shared::util::fnv; +use common::util::fnv; use std::time::{Duration, Instant}; pub type FeedId = usize; diff --git a/backend/core/src/location.rs b/backend/core/src/location.rs index 3237f65..96c481b 100644 --- a/backend/core/src/location.rs +++ b/backend/core/src/location.rs @@ -7,7 +7,7 @@ use rustc_hash::FxHashMap; use serde::Deserialize; use crate::chain::{Chain, LocateNode}; -use shared::types::{NodeId, NodeLocation}; +use common::types::{NodeId, NodeLocation}; #[derive(Clone)] pub struct Locator { diff --git a/backend/core/src/node.rs b/backend/core/src/node.rs index 216753f..666049e 100644 --- a/backend/core/src/node.rs +++ b/backend/core/src/node.rs @@ -1,11 +1,11 @@ use std::sync::Arc; -use shared::types::{ +use common::types::{ Block, BlockDetails, NodeDetails, NodeHardware, NodeIO, NodeId, NodeLocation, NodeStats, Timestamp, }; -use shared::util::now; -use shared::node::SystemInterval; +use common::util::now; +use common::node::SystemInterval; pub mod connector; diff --git a/backend/core/src/node/connector.rs b/backend/core/src/node/connector.rs index 4256c1c..7015fcd 100644 --- a/backend/core/src/node/connector.rs +++ 
b/backend/core/src/node/connector.rs @@ -9,9 +9,9 @@ use crate::node::NodeId; use actix::prelude::*; use actix_web_actors::ws::{self, CloseReason}; use bytes::Bytes; -use shared::types::ConnId; -use shared::ws::{MultipartHandler, WsMessage, MuteReason}; -use shared::node::{NodeMessage, Payload}; +use common::types::ConnId; +use common::ws::{MultipartHandler, WsMessage, MuteReason}; +use common::node::{NodeMessage, Payload}; /// How often heartbeat pings are sent const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); diff --git a/backend/core/src/shard/connector.rs b/backend/core/src/shard/connector.rs index f3e9a0c..62414cd 100644 --- a/backend/core/src/shard/connector.rs +++ b/backend/core/src/shard/connector.rs @@ -8,10 +8,10 @@ use crate::location::LocateRequest; use actix::prelude::*; use actix_web_actors::ws::{self, CloseReason}; use bincode::Options; -use shared::types::NodeId; -use shared::util::Hash; -use shared::ws::{MultipartHandler, WsMessage}; -use shared::shard::{ShardMessage, ShardConnId, BackendMessage}; +use common::types::NodeId; +use common::util::Hash; +use common::ws::{MultipartHandler, WsMessage}; +use common::shard::{ShardMessage, ShardConnId, BackendMessage}; /// How often heartbeat pings are sent const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); diff --git a/backend/shard/Cargo.toml b/backend/shard/Cargo.toml index 81162c1..b327178 100644 --- a/backend/shard/Cargo.toml +++ b/backend/shard/Cargo.toml @@ -17,7 +17,7 @@ log = "0.4" rustc-hash = "1.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0", features = ["raw_value"] } -shared = { path = "../shared" } +common = { path = "../common" } simple_logger = "1.11.0" soketto = "0.4.2" tokio = { version = "1", features = ["full"] } diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index 97c5ff9..1557c32 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -6,10 +6,10 @@ use 
actix::prelude::*; use actix_http::http::Uri; use bincode::Options; use rustc_hash::FxHashMap; -use shared::util::{Hash, DenseMap}; -use shared::types::{ConnId, NodeDetails, NodeId}; -use shared::node::Payload; -use shared::shard::{ShardConnId, ShardMessage, BackendMessage}; +use common::util::{Hash, DenseMap}; +use common::types::{ConnId, NodeDetails, NodeId}; +use common::node::Payload; +use common::shard::{ShardConnId, ShardMessage, BackendMessage}; use soketto::handshake::{Client, ServerResponse}; use crate::node::{NodeConnector, Initialize}; use tokio::net::TcpStream; diff --git a/backend/shard/src/node.rs b/backend/shard/src/node.rs index 1e598b3..9a985a7 100644 --- a/backend/shard/src/node.rs +++ b/backend/shard/src/node.rs @@ -6,9 +6,9 @@ use crate::aggregator::{AddNode, Aggregator, ChainMessage}; // use crate::chain::{Chain, RemoveNode, UpdateNode}; use actix::prelude::*; use actix_web_actors::ws::{self, CloseReason}; -use shared::node::{NodeMessage, Payload}; -use shared::types::{ConnId, NodeId}; -use shared::ws::{MultipartHandler, WsMessage}; +use common::node::{NodeMessage, Payload}; +use common::types::{ConnId, NodeId}; +use common::ws::{MultipartHandler, WsMessage}; use tokio::sync::mpsc::UnboundedSender; /// How often heartbeat pings are sent From 8e25b4fbdf05e8eac5a476eeaa9e986c2b91d669 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 11:08:33 +0100 Subject: [PATCH 003/134] Split msg into JSON and internal variant, and other bits --- backend/Cargo.lock | 1 + backend/common/src/{util => json}/hash.rs | 8 +- backend/common/src/json/mod.rs | 7 + backend/common/src/json/node_message.rs | 192 ++++++++++++ backend/common/src/lib.rs | 1 + backend/common/src/node.rs | 347 ++++++++++++++++------ backend/common/src/types.rs | 25 +- backend/common/src/util.rs | 2 - backend/core/src/aggregator.rs | 12 +- backend/core/src/main.rs | 34 +-- backend/core/src/shard/connector.rs | 7 +- backend/shard/Cargo.toml | 1 + backend/shard/src/aggregator.rs | 
12 +- backend/shard/src/node.rs | 4 +- 14 files changed, 506 insertions(+), 147 deletions(-) rename backend/common/src/{util => json}/hash.rs (96%) create mode 100644 backend/common/src/json/mod.rs create mode 100644 backend/common/src/json/node_message.rs diff --git a/backend/Cargo.lock b/backend/Cargo.lock index db29d1c..4d1fffd 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -1658,6 +1658,7 @@ dependencies = [ "clap", "common", "log", + "primitive-types", "rustc-hash", "serde", "serde_json", diff --git a/backend/common/src/util/hash.rs b/backend/common/src/json/hash.rs similarity index 96% rename from backend/common/src/util/hash.rs rename to backend/common/src/json/hash.rs index 23b10be..01fbe1e 100644 --- a/backend/common/src/util/hash.rs +++ b/backend/common/src/json/hash.rs @@ -8,10 +8,16 @@ use serde::de::{self, Deserialize, Deserializer, Unexpected, Visitor, SeqAccess} const HASH_BYTES: usize = 32; /// Newtype wrapper for 32-byte hash values, implementing readable `Debug` and `serde::Deserialize`. -// We could use primitive_types::H256 here, but opted for a custom type to avoid more dependencies. +/// This can deserialize from a JSON string or array. #[derive(Hash, PartialEq, Eq, Clone, Copy)] pub struct Hash([u8; HASH_BYTES]); +impl From for crate::types::BlockHash { + fn from(hash: Hash) -> Self { + hash.0.into() + } +} + struct HashVisitor; impl<'de> Visitor<'de> for HashVisitor { diff --git a/backend/common/src/json/mod.rs b/backend/common/src/json/mod.rs new file mode 100644 index 0000000..22f233f --- /dev/null +++ b/backend/common/src/json/mod.rs @@ -0,0 +1,7 @@ +//! 
This module contains the types we need to deserialize JSON messages from nodes + +mod hash; +mod node_message; + +pub use node_message::*; +pub use hash::Hash; \ No newline at end of file diff --git a/backend/common/src/json/node_message.rs b/backend/common/src/json/node_message.rs new file mode 100644 index 0000000..b23ece5 --- /dev/null +++ b/backend/common/src/json/node_message.rs @@ -0,0 +1,192 @@ +//! The structs and enums defined in this module are largely identical to those +//! we'll use elsewhere internally, but are kept separate so that the JSON structure +//! is defined (almost) from just this file, and we don't have to worry about breaking +//! compatibility with the input data when we make changes to our internal data +//! structures (for example, to support bincode better). +use super::hash::Hash; +use serde::{Deserialize}; + +/// This struct represents a telemetry message sent from a node as +/// a JSON payload. Since JSON is self describing, we can use attributes +/// like serde(untagged) and serde(flatten) without issue. +/// +/// Internally, we want to minimise the amount of data sent from shards to +/// the core node. For that reason, we use a non-self-describing serialization +/// format like bincode, which doesn't support things like `[serde(flatten)]` (which +/// internally wants to serialize to a map of unknown length) or `[serde(tag/untagged)]` +/// (which relies on the data to know which variant to deserialize to.) +/// +/// So, this can be converted fairly cheaply into an enum we'll use internally +/// which is compatible with formats like bincode. 
+#[derive(Deserialize, Debug)] +#[serde(untagged)] +pub enum NodeMessage { + V1 { + #[serde(flatten)] + payload: Payload, + }, + V2 { + id: ConnId, + payload: Payload, + }, +} + +#[derive(Deserialize, Debug)] +#[serde(tag = "msg")] +pub enum Payload { + #[serde(rename = "system.connected")] + SystemConnected(SystemConnected), + #[serde(rename = "system.interval")] + SystemInterval(SystemInterval), + #[serde(rename = "block.import")] + BlockImport(Block), + #[serde(rename = "notify.finalized")] + NotifyFinalized(Finalized), + #[serde(rename = "txpool.import")] + TxPoolImport, + #[serde(rename = "afg.finalized")] + AfgFinalized(AfgFinalized), + #[serde(rename = "afg.received_precommit")] + AfgReceivedPrecommit(AfgReceivedPrecommit), + #[serde(rename = "afg.received_prevote")] + AfgReceivedPrevote(AfgReceivedPrevote), + #[serde(rename = "afg.received_commit")] + AfgReceivedCommit(AfgReceivedCommit), + #[serde(rename = "afg.authority_set")] + AfgAuthoritySet(AfgAuthoritySet), + #[serde(rename = "afg.finalized_blocks_up_to")] + AfgFinalizedBlocksUpTo, + #[serde(rename = "aura.pre_sealed_block")] + AuraPreSealedBlock, + #[serde(rename = "prepared_block_for_proposing")] + PreparedBlockForProposing, +} + +#[derive(Deserialize, Debug)] +pub struct SystemConnected { + pub genesis_hash: Hash, + #[serde(flatten)] + pub node: NodeDetails, +} + +#[derive(Deserialize, Debug)] +pub struct SystemInterval { + pub peers: Option, + pub txcount: Option, + pub bandwidth_upload: Option, + pub bandwidth_download: Option, + pub finalized_height: Option, + pub finalized_hash: Option, + #[serde(flatten)] + pub block: Option, + pub used_state_cache_size: Option, +} + +#[derive(Deserialize, Debug)] +pub struct Finalized { + #[serde(rename = "best")] + pub hash: Hash, + pub height: Box, +} + +#[derive(Deserialize, Debug)] +pub struct AfgAuthoritySet { + pub authority_id: Box, + pub authorities: Box, + pub authority_set_id: Box, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AfgFinalized 
{ + pub finalized_hash: Hash, + pub finalized_number: Box, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AfgReceived { + pub target_hash: Hash, + pub target_number: Box, + pub voter: Option>, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AfgReceivedPrecommit { + #[serde(flatten)] + pub received: AfgReceived, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AfgReceivedPrevote { + #[serde(flatten)] + pub received: AfgReceived, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AfgReceivedCommit { + #[serde(flatten)] + pub received: AfgReceived, +} + +#[derive(Deserialize, Debug, Clone, Copy)] +pub struct Block { + #[serde(rename = "best")] + pub hash: Hash, + pub height: BlockNumber, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct NodeDetails { + pub chain: Box, + pub name: Box, + pub implementation: Box, + pub version: Box, + pub validator: Option>, + pub network_id: Option>, + pub startup_time: Option>, +} + +type ConnId = u64; +type BlockNumber = u64; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn message_v1() { + let json = r#"{ + "msg":"notify.finalized", + "level":"INFO", + "ts":"2021-01-13T12:38:25.410794650+01:00", + "best":"0x031c3521ca2f9c673812d692fc330b9a18e18a2781e3f9976992f861fd3ea0cb", + "height":"50" + }"#; + assert!( + matches!( + serde_json::from_str::(json).unwrap(), + NodeMessage::V1 { .. }, + ), + "message did not match variant V1", + ); + } + + #[test] + fn message_v2() { + let json = r#"{ + "id":1, + "ts":"2021-01-13T12:22:20.053527101+01:00", + "payload":{ + "best":"0xcc41708573f2acaded9dd75e07dac2d4163d136ca35b3061c558d7a35a09dd8d", + "height":"209", + "msg":"notify.finalized" + } + }"#; + assert!( + matches!( + serde_json::from_str::(json).unwrap(), + NodeMessage::V2 { .. 
}, + ), + "message did not match variant V2", + ); + } +} diff --git a/backend/common/src/lib.rs b/backend/common/src/lib.rs index dc147b4..a18980d 100644 --- a/backend/common/src/lib.rs +++ b/backend/common/src/lib.rs @@ -3,3 +3,4 @@ pub mod shard; pub mod types; pub mod util; pub mod ws; +pub mod json; \ No newline at end of file diff --git a/backend/common/src/node.rs b/backend/common/src/node.rs index 420e568..ad66da2 100644 --- a/backend/common/src/node.rs +++ b/backend/common/src/node.rs @@ -1,15 +1,13 @@ use crate::types::{Block, BlockHash, BlockNumber, ConnId, NodeDetails}; -use crate::util::{Hash, NullAny}; +use crate::json; + use actix::prelude::*; use serde::{Deserialize, Serialize}; -use serde::ser::Serializer; -#[derive(Deserialize, Debug, Message)] +#[derive(Serialize, Deserialize, Debug, Message)] #[rtype(result = "()")] -#[serde(untagged)] pub enum NodeMessage { V1 { - #[serde(flatten)] payload: Payload, }, V2 { @@ -36,64 +34,98 @@ impl From for Payload { } } -#[derive(Deserialize, Debug)] -#[serde(tag = "msg")] -pub enum Payload { - #[serde(rename = "system.connected")] - SystemConnected(SystemConnected), - #[serde(rename = "system.interval")] - SystemInterval(SystemInterval), - #[serde(rename = "block.import")] - BlockImport(Block), - #[serde(rename = "notify.finalized")] - NotifyFinalized(Finalized), - #[serde(rename = "txpool.import")] - TxPoolImport(NullAny), - // #[serde(rename = "afg.finalized")] - // AfgFinalized(AfgFinalized), - // #[serde(rename = "afg.received_precommit")] - // AfgReceivedPrecommit(AfgReceivedPrecommit), - // #[serde(rename = "afg.received_prevote")] - // AfgReceivedPrevote(AfgReceivedPrevote), - // #[serde(rename = "afg.received_commit")] - // AfgReceivedCommit(AfgReceivedCommit), - // #[serde(rename = "afg.authority_set")] - // AfgAuthoritySet(AfgAuthoritySet), - // #[serde(rename = "afg.finalized_blocks_up_to")] - // AfgFinalizedBlocksUpTo(NullAny), - // #[serde(rename = "aura.pre_sealed_block")] - // 
AuraPreSealedBlock(NullAny), - #[serde(rename = "prepared_block_for_proposing")] - PreparedBlockForProposing(NullAny), -} - -impl Serialize for Payload { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - use Payload::*; - - match self { - SystemConnected(val) => serializer.serialize_newtype_variant("Payload", 0, "system.connected", val), - SystemInterval(val) => serializer.serialize_newtype_variant("Payload", 1, "system.interval", val), - BlockImport(val) => serializer.serialize_newtype_variant("Payload", 3, "block.import", val), - NotifyFinalized(val) => serializer.serialize_newtype_variant("Payload", 4, "notify.finalized", val), - TxPoolImport(_) => serializer.serialize_unit_variant("Payload", 3, "txpool.import"), - PreparedBlockForProposing(_) => serializer.serialize_unit_variant("Payload", 4, "prepared_block_for_proposing"), - _ => unimplemented!() +impl From for NodeMessage { + fn from(msg: json::NodeMessage) -> Self { + match msg { + json::NodeMessage::V1 { payload } => { + NodeMessage::V1 { payload: payload.into() } + }, + json::NodeMessage::V2 { id, payload } => { + NodeMessage::V2 { id, payload: payload.into() } + }, } } } -#[derive(Deserialize, Serialize, Debug)] +#[derive(Serialize, Deserialize, Debug)] +pub enum Payload { + SystemConnected(SystemConnected), + SystemInterval(SystemInterval), + BlockImport(Block), + NotifyFinalized(Finalized), + TxPoolImport, + AfgFinalized(AfgFinalized), + AfgReceivedPrecommit(AfgReceived), + AfgReceivedPrevote(AfgReceived), + AfgReceivedCommit(AfgReceived), + AfgAuthoritySet(AfgAuthoritySet), + AfgFinalizedBlocksUpTo, + AuraPreSealedBlock, + PreparedBlockForProposing, +} + +impl From for Payload { + fn from(msg: json::Payload) -> Self { + match msg { + json::Payload::SystemConnected(m) => { + Payload::SystemConnected(m.into()) + }, + json::Payload::SystemInterval(m) => { + Payload::SystemInterval(m.into()) + }, + json::Payload::BlockImport(m) => { + Payload::BlockImport(m.into()) + }, + 
json::Payload::NotifyFinalized(m) => { + Payload::NotifyFinalized(m.into()) + }, + json::Payload::TxPoolImport => { + Payload::TxPoolImport + }, + json::Payload::AfgFinalized(m) => { + Payload::AfgFinalized(m.into()) + }, + json::Payload::AfgReceivedPrecommit(m) => { + Payload::AfgReceivedPrecommit(m.received.into()) + }, + json::Payload::AfgReceivedPrevote(m) => { + Payload::AfgReceivedPrevote(m.received.into()) + }, + json::Payload::AfgReceivedCommit(m) => { + Payload::AfgReceivedCommit(m.received.into()) + }, + json::Payload::AfgAuthoritySet(m) => { + Payload::AfgAuthoritySet(m.into()) + }, + json::Payload::AfgFinalizedBlocksUpTo => { + Payload::AfgFinalizedBlocksUpTo + }, + json::Payload::AuraPreSealedBlock => { + Payload::AuraPreSealedBlock + }, + json::Payload::PreparedBlockForProposing => { + Payload::PreparedBlockForProposing + }, + } + } +} + +#[derive(Serialize, Deserialize, Debug)] pub struct SystemConnected { - pub genesis_hash: Hash, - #[serde(flatten)] + pub genesis_hash: BlockHash, pub node: NodeDetails, } -#[derive(Deserialize, Serialize, Debug)] +impl From for SystemConnected { + fn from(msg: json::SystemConnected) -> Self { + SystemConnected { + genesis_hash: msg.genesis_hash.into(), + node: msg.node.into() + } + } +} + +#[derive(Serialize, Deserialize, Debug)] pub struct SystemInterval { pub peers: Option, pub txcount: Option, @@ -101,54 +133,87 @@ pub struct SystemInterval { pub bandwidth_download: Option, pub finalized_height: Option, pub finalized_hash: Option, - #[serde(flatten)] pub block: Option, pub used_state_cache_size: Option, } -#[derive(Deserialize, Serialize, Debug)] +impl From for SystemInterval { + fn from(msg: json::SystemInterval) -> Self { + SystemInterval { + peers: msg.peers, + txcount: msg.txcount, + bandwidth_upload: msg.bandwidth_upload, + bandwidth_download: msg.bandwidth_download, + finalized_height: msg.finalized_height, + finalized_hash: msg.finalized_hash.map(|h| h.into()), + block: msg.block.map(|b| b.into()), + 
used_state_cache_size: msg.used_state_cache_size, + } + } +} + +#[derive(Serialize, Deserialize, Debug)] pub struct Finalized { - #[serde(rename = "best")] pub hash: BlockHash, pub height: Box, } -#[derive(Deserialize, Serialize, Debug)] -pub struct AfgAuthoritySet { - pub authority_id: Box, - pub authorities: Box, - pub authority_set_id: Box, +impl From for Finalized { + fn from(msg: json::Finalized) -> Self { + Finalized { + hash: msg.hash.into(), + height: msg.height, + } + } } -#[derive(Deserialize, Serialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct AfgFinalized { pub finalized_hash: BlockHash, pub finalized_number: Box, } -#[derive(Deserialize, Serialize, Debug, Clone)] +impl From for AfgFinalized { + fn from(msg: json::AfgFinalized) -> Self { + AfgFinalized { + finalized_hash: msg.finalized_hash.into(), + finalized_number: msg.finalized_number, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct AfgReceived { pub target_hash: BlockHash, pub target_number: Box, pub voter: Option>, } -#[derive(Deserialize, Serialize, Debug, Clone)] -pub struct AfgReceivedPrecommit { - #[serde(flatten)] - pub received: AfgReceived, +impl From for AfgReceived { + fn from(msg: json::AfgReceived) -> Self { + AfgReceived { + target_hash: msg.target_hash.into(), + target_number: msg.target_number, + voter: msg.voter, + } + } } -#[derive(Deserialize, Serialize, Debug, Clone)] -pub struct AfgReceivedPrevote { - #[serde(flatten)] - pub received: AfgReceived, +#[derive(Serialize, Deserialize, Debug)] +pub struct AfgAuthoritySet { + pub authority_id: Box, + pub authorities: Box, + pub authority_set_id: Box, } -#[derive(Deserialize, Serialize, Debug, Clone)] -pub struct AfgReceivedCommit { - #[serde(flatten)] - pub received: AfgReceived, +impl From for AfgAuthoritySet { + fn from(msg: json::AfgAuthoritySet) -> Self { + AfgAuthoritySet { + authority_id: msg.authority_id, + authorities: msg.authorities, + authority_set_id: 
msg.authority_set_id, + } + } } impl Payload { @@ -180,27 +245,123 @@ mod tests { use super::*; use bincode::Options; + // Without adding a derive macro and marker trait (and enforcing their use), we don't really + // know whether things can (de)serialize to bincode or not at runtime without failing unless + // we test the different types we want to (de)serialize ourselves. We just need to test each + // type, not each variant. + fn bincode_can_serialize_and_deserialize<'de, T>(item: T) + where T: Serialize + serde::de::DeserializeOwned + { + let bytes = bincode::serialize(&item).expect("Serialization should work"); + let _: T = bincode::deserialize(&bytes).expect("Deserialization should work"); + } + #[test] - fn message_v1() { - let json = r#"{"msg":"notify.finalized","level":"INFO","ts":"2021-01-13T12:38:25.410794650+01:00","best":"0x031c3521ca2f9c673812d692fc330b9a18e18a2781e3f9976992f861fd3ea0cb","height":"50"}"#; - assert!( - matches!( - serde_json::from_str::(json).unwrap(), - NodeMessage::V1 { .. }, - ), - "message did not match variant V1", + fn bincode_can_serialize_and_deserialize_node_message_system_connected() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::SystemConnected(SystemConnected { + genesis_hash: BlockHash::zero(), + node: NodeDetails { + chain: "foo".into(), + name: "foo".into(), + implementation: "foo".into(), + version: "foo".into(), + validator: None, + network_id: None, + startup_time: None, + }, + }) + } ); } #[test] - fn message_v2() { - let json = r#"{"id":1,"ts":"2021-01-13T12:22:20.053527101+01:00","payload":{"best":"0xcc41708573f2acaded9dd75e07dac2d4163d136ca35b3061c558d7a35a09dd8d","height":"209","msg":"notify.finalized"}}"#; - assert!( - matches!( - serde_json::from_str::(json).unwrap(), - NodeMessage::V2 { .. 
}, - ), - "message did not match variant V2", + fn bincode_can_serialize_and_deserialize_node_message_system_interval() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::SystemInterval(SystemInterval { + peers: None, + txcount: None, + bandwidth_upload: None, + bandwidth_download: None, + finalized_height: None, + finalized_hash: None, + block: None, + used_state_cache_size: None, + }) + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_block_import() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::BlockImport(Block { + hash: BlockHash([0; 32]), + height: 0, + }) + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_notify_finalized() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::NotifyFinalized(Finalized { + hash: BlockHash::zero(), + height: "foo".into(), + }) + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_tx_pool_import() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::TxPoolImport + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_afg_finalized() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::AfgFinalized(AfgFinalized { + finalized_hash: BlockHash::zero(), + finalized_number: "foo".into(), + }) + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_afg_received() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::AfgReceivedPrecommit(AfgReceived { + target_hash: BlockHash::zero(), + target_number: "foo".into(), + voter: None, + }) + } + ); + } + + #[test] + fn bincode_can_serialize_and_deserialize_node_message_afg_authority_set() { + bincode_can_serialize_and_deserialize( + NodeMessage::V1 { + payload: Payload::AfgAuthoritySet(AfgAuthoritySet { + authority_id: "foo".into(), + authorities: "foo".into(), + 
authority_set_id: "foo".into(), + }) + } ); } diff --git a/backend/common/src/types.rs b/backend/common/src/types.rs index a16ce5a..aa85996 100644 --- a/backend/common/src/types.rs +++ b/backend/common/src/types.rs @@ -2,6 +2,7 @@ use serde::ser::{SerializeTuple, Serializer}; use serde::{Deserialize, Serialize}; use crate::util::{now, MeanList}; +use crate::json; pub type NodeId = usize; pub type ConnId = u64; @@ -21,6 +22,20 @@ pub struct NodeDetails { pub startup_time: Option>, } +impl From for NodeDetails { + fn from(details: json::NodeDetails) -> Self { + NodeDetails { + chain: details.chain, + name: details.name, + implementation: details.implementation, + version: details.version, + validator: details.validator, + network_id: details.network_id, + startup_time: details.startup_time, + } + } +} + #[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct NodeStats { pub peers: u64, @@ -34,11 +49,19 @@ pub struct NodeIO { #[derive(Deserialize, Serialize, Debug, Clone, Copy)] pub struct Block { - #[serde(rename = "best")] pub hash: BlockHash, pub height: BlockNumber, } +impl From for Block { + fn from(block: json::Block) -> Self { + Block { + hash: block.hash.into(), + height: block.height + } + } +} + impl Block { pub fn zero() -> Self { Block { diff --git a/backend/common/src/util.rs b/backend/common/src/util.rs index a78310e..66dddae 100644 --- a/backend/common/src/util.rs +++ b/backend/common/src/util.rs @@ -1,11 +1,9 @@ mod dense_map; -mod hash; mod mean_list; mod null; mod num_stats; pub use dense_map::DenseMap; -pub use hash::Hash; pub use mean_list::MeanList; pub use null::NullAny; pub use num_stats::NumStats; diff --git a/backend/core/src/aggregator.rs b/backend/core/src/aggregator.rs index 4e1a289..8dd3d98 100644 --- a/backend/core/src/aggregator.rs +++ b/backend/core/src/aggregator.rs @@ -9,11 +9,11 @@ use crate::feed::{self, FeedMessageSerializer}; use crate::node::connector::NodeConnector; use common::ws::MuteReason; use 
common::shard::ShardConnId; -use common::types::{ConnId, NodeDetails}; -use common::util::{DenseMap, Hash}; +use common::types::{ConnId, NodeDetails, BlockHash}; +use common::util::{DenseMap}; pub struct Aggregator { - genesis_hashes: HashMap, + genesis_hashes: HashMap, labels: HashMap, chains: DenseMap, feeds: DenseMap>, @@ -26,7 +26,7 @@ pub struct ChainEntry { /// Address to the `Chain` agent addr: Addr, /// Genesis [`Hash`] of the chain - genesis_hash: Hash, + genesis_hash: BlockHash, /// String name of the chain label: Label, /// Node count @@ -66,7 +66,7 @@ impl Aggregator { /// or the address is disconnected (actor dropped), create a new one. pub fn lazy_chain( &mut self, - genesis_hash: Hash, + genesis_hash: BlockHash, label: &str, ctx: &mut ::Context, ) -> ChainId { @@ -125,7 +125,7 @@ pub struct AddNode { /// Details of the node being added to the aggregator pub node: NodeDetails, /// Genesis [`Hash`] of the chain the node is being added to. - pub genesis_hash: Hash, + pub genesis_hash: BlockHash, /// Source from which this node is being added (Direct | Shard) pub source: NodeSource, } diff --git a/backend/core/src/main.rs b/backend/core/src/main.rs index 241a577..958af9d 100644 --- a/backend/core/src/main.rs +++ b/backend/core/src/main.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; use std::iter::FromIterator; -use std::net::Ipv4Addr; use actix::prelude::*; use actix_http::ws::Codec; @@ -19,7 +18,6 @@ mod shard; use aggregator::{Aggregator, GetHealth}; use feed::connector::FeedConnector; use location::{Locator, LocatorFactory}; -use node::connector::NodeConnector; use shard::connector::ShardConnector; const VERSION: &str = env!("CARGO_PKG_VERSION"); @@ -74,35 +72,6 @@ impl From<&LogLevel> for log::LevelFilter { } } -/// Entry point for connecting nodes -#[get("/submit")] -async fn node_route( - req: HttpRequest, - stream: web::Payload, - aggregator: web::Data>, - locator: web::Data>, -) -> Result { - let ip = req - .connection_info() - 
.realip_remote_addr() - .and_then(|mut addr| { - if let Some(port_idx) = addr.find(':') { - addr = &addr[..port_idx]; - } - addr.parse::().ok() - }); - - let mut res = ws::handshake(&req)?; - let aggregator = aggregator.get_ref().clone(); - let locator = locator.get_ref().clone().recipient(); - - Ok(res.streaming(ws::WebsocketContext::with_codec( - NodeConnector::new(aggregator, locator, ip), - stream, - Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit - ))) -} - #[get("/shard_submit/{chain_hash}")] async fn shard_route( req: HttpRequest, @@ -112,7 +81,7 @@ async fn shard_route( path: web::Path>, ) -> Result { let hash_str = path.into_inner(); - let genesis_hash = hash_str.parse()?; + let genesis_hash = hash_str.parse::()?.into(); println!("Genesis hash {}", genesis_hash); @@ -180,7 +149,6 @@ async fn main() -> std::io::Result<()> { .wrap(middleware::NormalizePath::default()) .data(aggregator.clone()) .data(locator.clone()) - .service(node_route) .service(feed_route) .service(shard_route) .service(health) diff --git a/backend/core/src/shard/connector.rs b/backend/core/src/shard/connector.rs index 62414cd..e83912a 100644 --- a/backend/core/src/shard/connector.rs +++ b/backend/core/src/shard/connector.rs @@ -8,8 +8,7 @@ use crate::location::LocateRequest; use actix::prelude::*; use actix_web_actors::ws::{self, CloseReason}; use bincode::Options; -use common::types::NodeId; -use common::util::Hash; +use common::types::{BlockHash, NodeId}; use common::ws::{MultipartHandler, WsMessage}; use common::shard::{ShardMessage, ShardConnId, BackendMessage}; @@ -24,7 +23,7 @@ pub struct ShardConnector { /// Aggregator actor address aggregator: Addr, /// Genesis hash of the chain this connection will be submitting data for - genesis_hash: Hash, + genesis_hash: BlockHash, /// Chain address to which this shard connector is delegating messages chain: Option>, /// Transient mapping of `ShardConnId` to external IP address. 
@@ -57,7 +56,7 @@ impl ShardConnector { pub fn new( aggregator: Addr, locator: Recipient, - genesis_hash: Hash, + genesis_hash: BlockHash, ) -> Self { Self { hb: Instant::now(), diff --git a/backend/shard/Cargo.toml b/backend/shard/Cargo.toml index b327178..ccfc2bf 100644 --- a/backend/shard/Cargo.toml +++ b/backend/shard/Cargo.toml @@ -17,6 +17,7 @@ log = "0.4" rustc-hash = "1.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = { version = "1.0", features = ["raw_value"] } +primitive-types = { version = "0.9.0", features = ["serde"] } common = { path = "../common" } simple_logger = "1.11.0" soketto = "0.4.2" diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index 1557c32..031a44d 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -6,8 +6,8 @@ use actix::prelude::*; use actix_http::http::Uri; use bincode::Options; use rustc_hash::FxHashMap; -use common::util::{Hash, DenseMap}; -use common::types::{ConnId, NodeDetails, NodeId}; +use common::util::{DenseMap}; +use common::types::{ConnId, NodeDetails, NodeId, BlockHash}; use common::node::Payload; use common::shard::{ShardConnId, ShardMessage, BackendMessage}; use soketto::handshake::{Client, ServerResponse}; @@ -22,7 +22,7 @@ type WsReceiver = soketto::Receiver>; #[derive(Default)] pub struct Aggregator { url: Uri, - chains: FxHashMap>, + chains: FxHashMap>, } impl Actor for Aggregator { @@ -42,13 +42,13 @@ pub struct Chain { /// Base URL of Backend Core url: Uri, /// Genesis hash of the chain, required to construct the URL to connect to the Backend Core - genesis_hash: Hash, + genesis_hash: BlockHash, /// Dense mapping of SharedConnId -> Addr + multiplexing ConnId sent from the node. 
nodes: DenseMap<(Addr, ConnId)>, } impl Chain { - pub fn new(url: Uri, genesis_hash: Hash) -> Self { + pub fn new(url: Uri, genesis_hash: BlockHash) -> Self { Chain { url, genesis_hash, @@ -188,7 +188,7 @@ impl Actor for Chain { #[rtype(result = "()")] pub struct AddNode { pub ip: Option, - pub genesis_hash: Hash, + pub genesis_hash: BlockHash, pub node: NodeDetails, pub conn_id: ConnId, pub node_connector: Addr, diff --git a/backend/shard/src/node.rs b/backend/shard/src/node.rs index 9a985a7..f6f3474 100644 --- a/backend/shard/src/node.rs +++ b/backend/shard/src/node.rs @@ -8,6 +8,7 @@ use actix::prelude::*; use actix_web_actors::ws::{self, CloseReason}; use common::node::{NodeMessage, Payload}; use common::types::{ConnId, NodeId}; +use common::json; use common::ws::{MultipartHandler, WsMessage}; use tokio::sync::mpsc::UnboundedSender; @@ -93,9 +94,10 @@ impl NodeConnector { fn handle_message( &mut self, - msg: NodeMessage, + msg: json::NodeMessage, ctx: &mut ::Context, ) { + let msg: NodeMessage = msg.into(); let conn_id = msg.id(); let payload = msg.into(); From 588f1ea027b9b9ab53e2376a5d19be4e8971e5b4 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 11:24:00 +0100 Subject: [PATCH 004/134] remove a few unnecessary structs --- backend/common/src/json/node_message.rs | 73 ++++++++++++++++++------- backend/common/src/node.rs | 6 +- 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/backend/common/src/json/node_message.rs b/backend/common/src/json/node_message.rs index b23ece5..a8fce2c 100644 --- a/backend/common/src/json/node_message.rs +++ b/backend/common/src/json/node_message.rs @@ -47,11 +47,11 @@ pub enum Payload { #[serde(rename = "afg.finalized")] AfgFinalized(AfgFinalized), #[serde(rename = "afg.received_precommit")] - AfgReceivedPrecommit(AfgReceivedPrecommit), + AfgReceivedPrecommit(AfgReceived), #[serde(rename = "afg.received_prevote")] - AfgReceivedPrevote(AfgReceivedPrevote), + AfgReceivedPrevote(AfgReceived), 
#[serde(rename = "afg.received_commit")] - AfgReceivedCommit(AfgReceivedCommit), + AfgReceivedCommit(AfgReceived), #[serde(rename = "afg.authority_set")] AfgAuthoritySet(AfgAuthoritySet), #[serde(rename = "afg.finalized_blocks_up_to")] @@ -109,24 +109,6 @@ pub struct AfgReceived { pub voter: Option>, } -#[derive(Deserialize, Debug, Clone)] -pub struct AfgReceivedPrecommit { - #[serde(flatten)] - pub received: AfgReceived, -} - -#[derive(Deserialize, Debug, Clone)] -pub struct AfgReceivedPrevote { - #[serde(flatten)] - pub received: AfgReceived, -} - -#[derive(Deserialize, Debug, Clone)] -pub struct AfgReceivedCommit { - #[serde(flatten)] - pub received: AfgReceived, -} - #[derive(Deserialize, Debug, Clone, Copy)] pub struct Block { #[serde(rename = "best")] @@ -189,4 +171,53 @@ mod tests { "message did not match variant V2", ); } + + #[test] + fn message_v2_received_precommit() { + let json = r#"{ + "id":1, + "ts":"2021-01-13T12:22:20.053527101+01:00", + "payload":{ + "target_hash":"0xcc41708573f2acaded9dd75e07dac2d4163d136ca35b3061c558d7a35a09dd8d", + "target_number":"209", + "voter":"foo", + "msg":"afg.received_precommit" + } + }"#; + assert!( + matches!( + serde_json::from_str::(json).unwrap(), + NodeMessage::V2 { + payload: Payload::AfgReceivedPrecommit(..), + .. + }, + ), + "message did not match the expected output", + ); + } + + #[test] + fn message_v2_tx_pool_import() { + // We should happily ignore any fields we don't care about. + let json = r#"{ + "id":1, + "ts":"2021-01-13T12:22:20.053527101+01:00", + "payload":{ + "foo":"Something", + "bar":123, + "wibble":"wobble", + "msg":"txpool.import" + } + }"#; + assert!( + matches!( + serde_json::from_str::(json).unwrap(), + NodeMessage::V2 { + payload: Payload::TxPoolImport, + .. 
+ }, + ), + "message did not match the expected output", + ); + } } diff --git a/backend/common/src/node.rs b/backend/common/src/node.rs index ad66da2..bbc0214 100644 --- a/backend/common/src/node.rs +++ b/backend/common/src/node.rs @@ -86,13 +86,13 @@ impl From for Payload { Payload::AfgFinalized(m.into()) }, json::Payload::AfgReceivedPrecommit(m) => { - Payload::AfgReceivedPrecommit(m.received.into()) + Payload::AfgReceivedPrecommit(m.into()) }, json::Payload::AfgReceivedPrevote(m) => { - Payload::AfgReceivedPrevote(m.received.into()) + Payload::AfgReceivedPrevote(m.into()) }, json::Payload::AfgReceivedCommit(m) => { - Payload::AfgReceivedCommit(m.received.into()) + Payload::AfgReceivedCommit(m.into()) }, json::Payload::AfgAuthoritySet(m) => { Payload::AfgAuthoritySet(m.into()) From 5b01179ca2a4119c60bde6f78d3c4dcdc89a95a7 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 11:31:19 +0100 Subject: [PATCH 005/134] tweak CI to work with shards+core split --- .github/workflows/backend.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 796dc6f..56d1381 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -14,15 +14,18 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Build - working-directory: ./backend + - name: Build Telemetry Core + working-directory: ./backend/core run: cargo build --verbose - name: Run tests working-directory: ./backend run: cargo test --verbose - - name: Build release and call executable + - name: Build release and call telemetry executable working-directory: ./backend - run: cargo run --release -- --help + run: cargo run --bin telemetry --release -- --help + - name: Build release and call telemetry executable + working-directory: ./backend + run: cargo run --bin shard --release -- --help - name: Login to Dockerhub uses: docker/login-action@v1 with: From 2b0accbbb610d52f304c4a41d71b692b4c35d46e Mon 
Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 11:34:33 +0100 Subject: [PATCH 006/134] Ci tidy --- .github/workflows/backend.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 56d1381..f63e5e6 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -14,24 +14,30 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Build Telemetry Core + + - name: Build telemetry executables (in debug mode) working-directory: ./backend/core run: cargo build --verbose + - name: Run tests working-directory: ./backend run: cargo test --verbose - - name: Build release and call telemetry executable + + - name: Build, release and call telemetry executable working-directory: ./backend run: cargo run --bin telemetry --release -- --help - - name: Build release and call telemetry executable + + - name: Build, release and call shard executable working-directory: ./backend run: cargo run --bin shard --release -- --help + - name: Login to Dockerhub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - - name: Build and Push template image + + - name: Build and push template image for tagged commit uses: docker/build-push-action@v2 # https://github.com/docker/build-push-action with: context: './backend' From 3a527e69c840277411d5f24a3ecc3654b4cc72c7 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 11:57:58 +0100 Subject: [PATCH 007/134] Get chatter between shard and core working --- backend/common/src/json/hash.rs | 6 ++++++ backend/shard/src/aggregator.rs | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/backend/common/src/json/hash.rs b/backend/common/src/json/hash.rs index 01fbe1e..ed7ba4a 100644 --- a/backend/common/src/json/hash.rs +++ b/backend/common/src/json/hash.rs @@ -18,6 +18,12 @@ impl From for crate::types::BlockHash { } } +impl From for 
Hash { + fn from(hash: crate::types::BlockHash) -> Self { + Hash(hash.0) + } +} + struct HashVisitor; impl<'de> Visitor<'de> for HashVisitor { diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index 031a44d..ba19559 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -10,6 +10,7 @@ use common::util::{DenseMap}; use common::types::{ConnId, NodeDetails, NodeId, BlockHash}; use common::node::Payload; use common::shard::{ShardConnId, ShardMessage, BackendMessage}; +use common::json; use soketto::handshake::{Client, ServerResponse}; use crate::node::{NodeConnector, Initialize}; use tokio::net::TcpStream; @@ -138,7 +139,8 @@ impl Chain { pub async fn connect(&self, tx: UnboundedSender) -> anyhow::Result { let host = self.url.host().unwrap_or("127.0.0.1"); let port = self.url.port_u16().unwrap_or(8000); - let path = format!("{}{}", self.url.path(), self.genesis_hash); + let json_hash: json::Hash = self.genesis_hash.into(); + let path = format!("{}{}", self.url.path(), json_hash); let socket = TcpStream::connect((host, port)).await?; From 9741b0f910ff85a49b926d2d9c710768f79648a7 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 16 Jun 2021 14:58:17 +0100 Subject: [PATCH 008/134] Remove NodeConnector from core for now; only messages from shards until refactor --- backend/core/src/aggregator.rs | 11 -- backend/core/src/chain.rs | 9 - backend/core/src/feed/connector.rs | 2 + backend/core/src/{feed.rs => feed/mod.rs} | 0 backend/core/src/main.rs | 4 +- backend/core/src/node.rs | 2 - backend/core/src/node/connector.rs | 229 ---------------------- backend/core/src/shard.rs | 1 - backend/core/src/shard/mod.rs | 1 + backend/shard/src/aggregator.rs | 2 - 10 files changed, 6 insertions(+), 255 deletions(-) rename backend/core/src/{feed.rs => feed/mod.rs} (100%) delete mode 100644 backend/core/src/node/connector.rs delete mode 100644 backend/core/src/shard.rs create mode 100644 backend/core/src/shard/mod.rs diff 
--git a/backend/core/src/aggregator.rs b/backend/core/src/aggregator.rs index 8dd3d98..0c0b693 100644 --- a/backend/core/src/aggregator.rs +++ b/backend/core/src/aggregator.rs @@ -6,7 +6,6 @@ use crate::shard::connector::ShardConnector; use crate::chain::{self, Chain, ChainId, Label}; use crate::feed::connector::{Connected, FeedConnector, FeedId}; use crate::feed::{self, FeedMessageSerializer}; -use crate::node::connector::NodeConnector; use common::ws::MuteReason; use common::shard::ShardConnId; use common::types::{ConnId, NodeDetails, BlockHash}; @@ -131,13 +130,6 @@ pub struct AddNode { } pub enum NodeSource { - Direct { - /// Connection id used by the node connector for multiplexing parachains - conn_id: ConnId, - /// Address of the NodeConnector actor - node_connector: Addr, - }, - // TODO Shard { /// `ShardConnId` that identifies the node connection within a shard. sid: ShardConnId, @@ -202,9 +194,6 @@ pub struct GetHealth; impl NodeSource { pub fn mute(&self, reason: MuteReason) { match self { - NodeSource::Direct { node_connector, .. } => { - node_connector.do_send(reason); - }, // TODO NodeSource::Shard { shard_connector, .. 
} => { // shard_connector.do_send(Mute { reason }); diff --git a/backend/core/src/chain.rs b/backend/core/src/chain.rs index 6a0c4d7..c27393a 100644 --- a/backend/core/src/chain.rs +++ b/backend/core/src/chain.rs @@ -247,15 +247,6 @@ pub struct LocateNode { impl NodeSource { pub fn init(self, nid: NodeId, chain: Addr) -> bool { match self { - NodeSource::Direct { conn_id, node_connector } => { - node_connector - .try_send(crate::node::connector::Initialize { - nid, - conn_id, - chain, - }) - .is_ok() - }, NodeSource::Shard { sid, shard_connector } => { shard_connector .try_send(crate::shard::connector::Initialize { diff --git a/backend/core/src/feed/connector.rs b/backend/core/src/feed/connector.rs index 16e3a8e..8d7f984 100644 --- a/backend/core/src/feed/connector.rs +++ b/backend/core/src/feed/connector.rs @@ -78,6 +78,8 @@ impl FeedConnector { fn handle_cmd(&mut self, cmd: &str, payload: &str, ctx: &mut ::Context) { match cmd { "subscribe" => { + // Hash the chain label the frontend wants to subscribe to. + // If it's already subscribed to the same chain, nothing to do. 
match fnv(payload) { hash if hash == self.chain_label_hash => return, hash => self.chain_label_hash = hash, diff --git a/backend/core/src/feed.rs b/backend/core/src/feed/mod.rs similarity index 100% rename from backend/core/src/feed.rs rename to backend/core/src/feed/mod.rs diff --git a/backend/core/src/main.rs b/backend/core/src/main.rs index 958af9d..a0fb567 100644 --- a/backend/core/src/main.rs +++ b/backend/core/src/main.rs @@ -104,8 +104,10 @@ async fn feed_route( stream: web::Payload, aggregator: web::Data>, ) -> Result { + let aggregator = aggregator.get_ref().clone(); + ws::start( - FeedConnector::new(aggregator.get_ref().clone()), + FeedConnector::new(aggregator), &req, stream, ) diff --git a/backend/core/src/node.rs b/backend/core/src/node.rs index 666049e..613b5ab 100644 --- a/backend/core/src/node.rs +++ b/backend/core/src/node.rs @@ -7,8 +7,6 @@ use common::types::{ use common::util::now; use common::node::SystemInterval; -pub mod connector; - /// Minimum time between block below broadcasting updates to the browser gets throttled, in ms. const THROTTLE_THRESHOLD: u64 = 100; /// Minimum time of intervals for block updates sent to the browser when throttled, in ms. 
diff --git a/backend/core/src/node/connector.rs b/backend/core/src/node/connector.rs deleted file mode 100644 index 7015fcd..0000000 --- a/backend/core/src/node/connector.rs +++ /dev/null @@ -1,229 +0,0 @@ -use std::collections::BTreeMap; -use std::net::Ipv4Addr; -use std::time::{Duration, Instant}; - -use crate::aggregator::{AddNode, Aggregator, NodeSource}; -use crate::chain::{Chain, RemoveNode, UpdateNode}; -use crate::location::LocateRequest; -use crate::node::NodeId; -use actix::prelude::*; -use actix_web_actors::ws::{self, CloseReason}; -use bytes::Bytes; -use common::types::ConnId; -use common::ws::{MultipartHandler, WsMessage, MuteReason}; -use common::node::{NodeMessage, Payload}; - -/// How often heartbeat pings are sent -const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); -/// How long before lack of client response causes a timeout -const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); - -pub struct NodeConnector { - /// Multiplexing connections by id - multiplex: BTreeMap, - /// Client must send ping at least once every 60 seconds (CLIENT_TIMEOUT), - hb: Instant, - /// Aggregator actor address - aggregator: Addr, - /// IP address of the node this connector is responsible for - ip: Option, - /// Actix address of location services - locator: Recipient, - /// Helper for handling continuation messages - multipart: MultipartHandler, -} - -enum ConnMultiplex { - Connected { - /// Id of the node this multiplex connector is responsible for handling - nid: NodeId, - /// Chain address to which this multiplex connector is delegating messages - chain: Addr, - }, - Waiting { - /// Backlog of messages to be sent once we get a recipient handle to the chain - backlog: Vec, - }, -} - -impl Default for ConnMultiplex { - fn default() -> Self { - ConnMultiplex::Waiting { - backlog: Vec::new(), - } - } -} - -impl Actor for NodeConnector { - type Context = ws::WebsocketContext; - - fn started(&mut self, ctx: &mut Self::Context) { - self.heartbeat(ctx); - } - - 
fn stopped(&mut self, _: &mut Self::Context) { - for mx in self.multiplex.values() { - if let ConnMultiplex::Connected { chain, nid } = mx { - chain.do_send(RemoveNode(*nid)); - } - } - } -} - -impl NodeConnector { - pub fn new( - aggregator: Addr, - locator: Recipient, - ip: Option, - ) -> Self { - Self { - multiplex: BTreeMap::new(), - hb: Instant::now(), - aggregator, - ip, - locator, - multipart: MultipartHandler::default(), - } - } - - fn heartbeat(&self, ctx: &mut ::Context) { - ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { - // check client heartbeats - if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT { - // stop actor - ctx.close(Some(CloseReason { - code: ws::CloseCode::Abnormal, - description: Some("Missed heartbeat".into()), - })); - ctx.stop(); - } - }); - } - - fn handle_message( - &mut self, - msg: NodeMessage, - ctx: &mut ::Context, - ) { - let conn_id = msg.id(); - let payload = msg.into(); - - match self.multiplex.entry(conn_id).or_default() { - ConnMultiplex::Connected { nid, chain } => { - chain.do_send(UpdateNode { - nid: *nid, - payload, - }); - } - ConnMultiplex::Waiting { backlog } => { - if let Payload::SystemConnected(connected) = payload { - self.aggregator.do_send(AddNode { - node: connected.node, - genesis_hash: connected.genesis_hash, - source: NodeSource::Direct { - conn_id, - node_connector: ctx.address(), - }, - }); - } else { - if backlog.len() >= 10 { - backlog.remove(0); - } - - backlog.push(payload); - } - } - } - } -} - -impl Handler for NodeConnector { - type Result = (); - fn handle(&mut self, msg: MuteReason, ctx: &mut Self::Context) { - log::debug!(target: "NodeConnector::Mute", "Muting a node. 
Reason: {:?}", msg); - - ctx.close(Some(msg.into())); - ctx.stop(); - } -} - -#[derive(Message)] -#[rtype(result = "()")] -pub struct Initialize { - pub nid: NodeId, - pub conn_id: ConnId, - pub chain: Addr, -} - -impl Handler for NodeConnector { - type Result = (); - - fn handle(&mut self, msg: Initialize, _: &mut Self::Context) { - let Initialize { - nid, - conn_id, - chain, - } = msg; - log::trace!(target: "NodeConnector::Initialize", "Initializing a node, nid={}, on conn_id={}", nid, conn_id); - let mx = self.multiplex.entry(conn_id).or_default(); - - if let ConnMultiplex::Waiting { backlog } = mx { - for payload in backlog.drain(..) { - chain.do_send(UpdateNode { - nid, - payload, - }); - } - - *mx = ConnMultiplex::Connected { - nid, - chain: chain.clone(), - }; - }; - - // Acquire the node's physical location - if let Some(ip) = self.ip { - let _ = self.locator.do_send(LocateRequest { ip, nid, chain }); - } - } -} - -impl StreamHandler> for NodeConnector { - fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { - self.hb = Instant::now(); - - let data = match msg.map(|msg| self.multipart.handle(msg)) { - Ok(WsMessage::Nop) => return, - Ok(WsMessage::Ping(msg)) => { - ctx.pong(&msg); - return; - } - Ok(WsMessage::Data(data)) => data, - Ok(WsMessage::Close(reason)) => { - ctx.close(reason); - ctx.stop(); - return; - } - Err(error) => { - log::error!("{:?}", error); - ctx.stop(); - return; - } - }; - - match serde_json::from_slice(&data) { - Ok(msg) => self.handle_message(msg, ctx), - #[cfg(debug)] - Err(err) => { - let data: &[u8] = data.get(..512).unwrap_or_else(|| &data); - log::warn!( - "Failed to parse node message: {} {}", - err, - std::str::from_utf8(data).unwrap_or_else(|_| "INVALID UTF8") - ) - } - #[cfg(not(debug))] - Err(_) => (), - } - } -} diff --git a/backend/core/src/shard.rs b/backend/core/src/shard.rs deleted file mode 100644 index 77bba25..0000000 --- a/backend/core/src/shard.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod connector; diff --git 
a/backend/core/src/shard/mod.rs b/backend/core/src/shard/mod.rs new file mode 100644 index 0000000..563893c --- /dev/null +++ b/backend/core/src/shard/mod.rs @@ -0,0 +1 @@ +pub mod connector; \ No newline at end of file diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index ba19559..8a7cd42 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -100,8 +100,6 @@ impl Chain { payload, }; - println!("Serialize {:?}", msg); - let bytes = bincode::options().serialize(&msg).unwrap(); println!("Sending update: {} bytes", bytes.len()); From dfe016597ed1e42747555dc4441542421357004c Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 10:45:31 +0100 Subject: [PATCH 009/134] Rework: Shard working, Telemetry Core needs logic filling in --- backend/Cargo.lock | 1543 +++++------------ backend/Cargo.toml | 2 +- backend/common/Cargo.toml | 4 - backend/common/src/internal_messages.rs | 36 + backend/common/src/json/hash.rs | 12 +- backend/common/src/json/node_message.rs | 4 +- backend/common/src/lib.rs | 6 +- backend/common/src/log_level.rs | 36 + backend/common/src/node.rs | 37 +- backend/common/src/shard.rs | 38 - backend/common/src/types.rs | 1 - backend/common/src/ws.rs | 98 -- backend/core/Cargo.toml | 25 - backend/core/src/aggregator.rs | 395 ----- backend/core/src/chain.rs | 567 ------ backend/core/src/feed/connector.rs | 219 --- backend/core/src/location.rs | 191 -- backend/core/src/main.rs | 161 -- backend/core/src/shard/connector.rs | 192 -- backend/core/src/shard/mod.rs | 1 - backend/shard/Cargo.toml | 28 +- backend/shard/src/aggregator.rs | 398 ++--- backend/shard/src/connection.rs | 166 ++ backend/shard/src/main.rs | 264 ++- backend/shard/src/node.rs | 203 --- backend/telemetry/Cargo.toml | 24 + backend/telemetry/src/aggregator.rs | 202 +++ .../mod.rs => telemetry/src/feed_message.rs} | 57 +- backend/telemetry/src/main.rs | 285 +++ backend/{core => telemetry}/src/node.rs | 0 30 files changed, 
1595 insertions(+), 3600 deletions(-) create mode 100644 backend/common/src/internal_messages.rs create mode 100644 backend/common/src/log_level.rs delete mode 100644 backend/common/src/shard.rs delete mode 100644 backend/common/src/ws.rs delete mode 100644 backend/core/Cargo.toml delete mode 100644 backend/core/src/aggregator.rs delete mode 100644 backend/core/src/chain.rs delete mode 100644 backend/core/src/feed/connector.rs delete mode 100644 backend/core/src/location.rs delete mode 100644 backend/core/src/main.rs delete mode 100644 backend/core/src/shard/connector.rs delete mode 100644 backend/core/src/shard/mod.rs create mode 100644 backend/shard/src/connection.rs delete mode 100644 backend/shard/src/node.rs create mode 100644 backend/telemetry/Cargo.toml create mode 100644 backend/telemetry/src/aggregator.rs rename backend/{core/src/feed/mod.rs => telemetry/src/feed_message.rs} (85%) create mode 100644 backend/telemetry/src/main.rs rename backend/{core => telemetry}/src/node.rs (100%) diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 4d1fffd..4329710 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -1,295 +1,25 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 - [[package]] -name = "actix" -version = "0.11.1" +name = "ansi_term" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "543c47e7827f8fcc9d1445bd98ba402137bfce80ee2187429de49c52b5131bd3" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "actix-rt", - "actix_derive", - "bitflags", - "bytes 1.0.1", - "crossbeam-channel", - "futures-core", - "futures-sink", - "futures-task", - "futures-util", - "log", - "once_cell", - "parking_lot", - "pin-project-lite 0.2.6", - "smallvec", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-codec" -version = "0.4.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90673465c6187bd0829116b02be465dc0195a74d7719f76ffff0effef934a92e" -dependencies = [ - "bitflags", - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.6", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-http" -version = "3.0.0-beta.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a01f9e0681608afa887d4269a0857ac4226f09ba5ceda25939e8391c9da610a" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-tls", - "actix-utils", - "ahash", - "base64 0.13.0", - "bitflags", - "bytes 1.0.1", - "bytestring", - "cfg-if 1.0.0", - "derive_more", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "httparse", - "itoa", - "language-tags", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project", - "rand 0.8.3", - "regex", - "serde", - "serde_json", - "serde_urlencoded", - "sha-1", - "smallvec", - "time 0.2.26", - "tokio", -] - -[[package]] -name = "actix-macros" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbcb2b608f0accc2f5bcf3dd872194ce13d94ee45b571487035864cf966b04ef" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "actix-router" 
-version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad299af73649e1fc893e333ccf86f377751eb95ff875d095131574c6f43452c" -dependencies = [ - "bytestring", - "http", - "log", - "regex", - "serde", -] - -[[package]] -name = "actix-rt" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b4e57bc1a3915e71526d128baf4323700bd1580bc676239e2298a4c5b001f18" -dependencies = [ - "actix-macros", - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = "2.0.0-beta.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a99198727204a48f82559c18e4b0ba3197b97d5f4576a32bdbef371f3b4599c1" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "log", - "mio", - "num_cpus", - "slab", - "tokio", -] - -[[package]] -name = "actix-service" -version = "2.0.0-beta.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf82340ad9f4e4caf43737fd3bbc999778a268015cdc54675f60af6240bd2b05" -dependencies = [ - "futures-core", - "pin-project-lite 0.2.6", -] - -[[package]] -name = "actix-tls" -version = "3.0.0-beta.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b1455e3f7a26d40cfc1080b571f41e8165e5a88e937ed579f7a4b3d55b0370" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "derive_more", - "futures-core", - "http", - "log", - "tokio-util", -] - -[[package]] -name = "actix-utils" -version = "3.0.0-beta.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458795e09a29bc5557604f9ff6f32236fd0ee457d631672e4ec8f6a0103bb292" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.6", -] - -[[package]] -name = "actix-web" -version = "4.0.0-beta.4" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1d95e50c9e32e8456220b5804867de76e97a86ab8c38b51c9edcccc0f0fddca7" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "awc", - "bytes 1.0.1", - "derive_more", - "either", - "encoding_rs", - "futures-core", - "futures-util", - "log", - "mime", - "pin-project", - "regex", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2", - "time 0.2.26", - "url", -] - -[[package]] -name = "actix-web-actors" -version = "4.0.0-beta.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd978e384657c95bc7391b68de75d970b2789350ca384dc1fdb30f2473c74da" -dependencies = [ - "actix", - "actix-codec", - "actix-http", - "actix-web", - "bytes 1.0.1", - "bytestring", - "futures-core", - "pin-project", - "tokio", -] - -[[package]] -name = "actix-web-codegen" -version = "0.5.0-beta.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f138ac357a674c3b480ddb7bbd894b13c1b6e8927d728bc9ea5e17eee2f8fc9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "actix_derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d44b8fee1ced9671ba043476deddef739dd0959bf77030b26b738cc591737a7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ahash" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f200cbb1e856866d9eade941cf3aa0c5d7dd36f74311c4273b494f4ef036957" -dependencies = [ - "getrandom 0.2.2", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "0.7.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b476ce7103678b0c6d3d395dbbae31d48ff910bd28be979ba5d48c6351131d0d" -dependencies = [ - "memchr", + "winapi", ] [[package]] name = "anyhow" -version = "1.0.40" 
+version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" - -[[package]] -name = "arc-swap" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" [[package]] name = "arrayvec" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7" +checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd" [[package]] name = "atty" @@ -308,44 +38,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" -[[package]] -name = "awc" -version = "3.0.0-beta.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09aecd8728f6491a62b27454ea4b36fb7e50faf32928b0369b644e402c651f4e" -dependencies = [ - "actix-codec", - "actix-http", - "actix-rt", - "actix-service", - "base64 0.13.0", - "bytes 1.0.1", - "cfg-if 1.0.0", - "derive_more", - "futures-core", - "itoa", - "log", - "mime", - "percent-encoding", - "pin-project-lite 0.2.6", - "rand 0.8.3", - "serde", - "serde_json", - "serde_urlencoded", -] - -[[package]] -name = "base-x" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b20b618342cf9891c292c4f5ac2cde7287cc5c87e87e9c769d617793607dec1" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -369,9 +61,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" 
[[package]] name = "bitvec" -version = "0.20.2" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f682656975d3a682daff957be4ddeb65d6ad656737cd821f2d00685ae466af1" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" dependencies = [ "funty", "radium", @@ -389,10 +81,14 @@ dependencies = [ ] [[package]] -name = "bumpalo" -version = "3.4.0" +name = "buf_redux" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", +] [[package]] name = "byte-slice-cast" @@ -402,15 +98,9 @@ checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -418,27 +108,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" -[[package]] -name = "bytestring" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" -dependencies = [ - "bytes 1.0.1", -] - -[[package]] -name = "cc" -version = "1.0.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d" - -[[package]] 
-name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -454,52 +123,25 @@ dependencies = [ "libc", "num-integer", "num-traits", - "serde", - "time 0.1.44", + "time", "winapi", ] [[package]] name = "clap" -version = "3.0.0-beta.2" +version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd1061998a501ee7d4b6d449020df3266ca3124b941ec56cf2005c3779ca142" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ + "ansi_term", "atty", "bitflags", - "clap_derive", - "indexmap", - "lazy_static", - "os_str_bytes", "strsim", - "termcolor", "textwrap", "unicode-width", "vec_map", ] -[[package]] -name = "clap_derive" -version = "3.0.0-beta.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370f715b81112975b1b69db93e0b56ea4cd4e5002ac43b2da8474106a54096a1" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "cloudabi" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" -dependencies = [ - "bitflags", -] - [[package]] name = "colored" version = "1.9.3" @@ -515,12 +157,8 @@ dependencies = [ name = "common" version = "0.1.0" dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", "bincode", - "bytes 1.0.1", + "bytes", "fnv", "hex", "log", @@ -533,81 +171,20 @@ dependencies = [ ] [[package]] -name = "const_fn" -version = "0.4.2" +name = "cpufeatures" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce90df4c658c62f12d78f7508cf92f9173e5184a539c10bfe54a3107b3ffd0f2" - -[[package]] -name = "core-foundation" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8" dependencies = [ - "core-foundation-sys", "libc", ] -[[package]] -name = "core-foundation-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" - -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - -[[package]] -name = "crossbeam-channel" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" -dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "lazy_static", -] - [[package]] name = "crunchy" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "ctor" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "derive_more" -version = "0.99.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "digest" version = "0.9.0" @@ -617,27 +194,6 @@ dependencies = [ 
"generic-array", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "encoding_rs" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a51b8cf747471cb9499b6d59e59b0444f4c90eba8968c4e44874e92b5b64ace2" -dependencies = [ - "cfg-if 0.1.10", -] - [[package]] name = "fixed-hash" version = "0.7.0" @@ -645,7 +201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.3", + "rand 0.8.4", "rustc-hex", "static_assertions", ] @@ -656,21 +212,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.0.1" @@ -689,9 +230,9 @@ checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" [[package]] name = "futures" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95314d38584ffbfda215621d723e0a3906f032e03ae5551e650058dac83d4797" +checksum = 
"0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" dependencies = [ "futures-channel", "futures-core", @@ -704,9 +245,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0448174b01148032eed37ac4aed28963aaaa8cfa93569a08e5b479bbc6c2c151" +checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" dependencies = [ "futures-core", "futures-sink", @@ -714,15 +255,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18eaa56102984bed2c88ea39026cff3ce3b4c7f508ca970cedf2450ea10d4e46" +checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" [[package]] name = "futures-executor" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f8e0c9258abaea85e78ebdda17ef9666d390e987f006be6080dfe354b708cb" +checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" dependencies = [ "futures-core", "futures-task", @@ -731,16 +272,17 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" [[package]] name = "futures-macro" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36fccf3fc58563b4a14d265027c627c3b665d7fed489427e88e7cc929559efe" +checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" dependencies = [ + "autocfg", "proc-macro-hack", "proc-macro2", "quote", @@ -749,25 +291,23 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.7" +version = 
"0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e3ca3f17d6e8804ae5d3df7a7d35b2b3a6fe89dac84b31872720fc3060a0b11" +checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" [[package]] name = "futures-task" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d502af37186c4fef99453df03e374683f8a1eec9dcc1e66b3b82dc8278ce3c" -dependencies = [ - "once_cell", -] +checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" [[package]] name = "futures-util" -version = "0.3.7" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abcb44342f62e6f3e8ac427b8aa815f724fd705dfad060b18ac7866c15bb8e34" +checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" dependencies = [ + "autocfg", "futures-channel", "futures-core", "futures-io", @@ -775,7 +315,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project-lite", "pin-utils", "proc-macro-hack", "proc-macro-nested", @@ -794,33 +334,33 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.10.2+wasi-snapshot-preview1", ] [[package]] name = "h2" -version 
= "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +checksum = "825343c4eef0b63f541f8903f395dc5beb362a979b5799a84062527ef1e37726" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "futures-core", "futures-sink", @@ -840,19 +380,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" [[package]] -name = "heck" -version = "0.3.1" +name = "headers" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "f0b7591fb62902706ae8e7aaff416b1b0fa2c0fd0878b46dc13baa3712d8a855" +dependencies = [ + "base64", + "bitflags", + "bytes", + "headers-core", + "http", + "mime", + "sha-1", + "time", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" dependencies = [ "libc", ] @@ -865,45 +430,45 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] name = "http-body" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ - "bytes 1.0.1", + "bytes", "http", - "pin-project-lite 0.2.6", + "pin-project-lite", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" [[package]] name = "httpdate" -version = "0.3.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" [[package]] name = "hyper" -version = "0.14.4" +version = "0.14.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e946c2b1349055e0b72ae281b238baf1a3ea7307c7e9f9d64673bdd9c26ac7" +checksum = "07d6baa1b441335f3ce5098ac421fb6547c46dda735ca1bc6d0153c838f9dd83" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -913,7 +478,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project", + "pin-project-lite", "socket2", "tokio", "tower-service", @@ -921,24 +486,11 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes 1.0.1", 
- "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "idna" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" dependencies = [ "matches", "unicode-bidi", @@ -965,49 +517,37 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.6.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" +checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", "hashbrown", ] [[package]] -name = "instant" -version = "0.1.8" +name = "input_buffer" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb1fc4429a33e1f80d41dc9fea4d108a88bec1de8053878898ae448a0b52f613" +checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" dependencies = [ - "cfg-if 1.0.0", + "bytes", ] [[package]] -name = "ipnet" -version = "2.3.0" +name = "instant" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if", +] [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" - -[[package]] -name = "js-sys" -version = "0.3.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "language-tags" -version = "0.2.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "lazy_static" @@ -1017,26 +557,26 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.91" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8916b1f6ca17130ec6568feccee27c156ad12037880833a3b842a823236502e7" +checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" [[package]] name = "lock_api" -version = "0.4.1" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" dependencies = [ "scopeguard", ] [[package]] name = "log" -version = "0.4.11" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1047,9 +587,9 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "memchr" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "mime" @@ -1058,10 +598,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" [[package]] -name = "mio" -version = "0.7.11" +name = "mime_guess" +version = "2.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "mio" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" dependencies = [ "libc", "log", @@ -1080,21 +630,21 @@ dependencies = [ ] [[package]] -name = "native-tls" -version = "0.2.7" +name = "multipart" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" +checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" dependencies = [ - "lazy_static", - "libc", + "buf_redux", + "httparse", "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", + "mime", + "mime_guess", + "quick-error", + "rand 0.7.3", + "safemem", "tempfile", + "twoway", ] [[package]] @@ -1108,9 +658,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", "num-traits", @@ -1118,9 +668,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] @@ -1137,9 +687,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.7.2" +version = "1.8.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "opaque-debug" @@ -1147,50 +697,11 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" -dependencies = [ - "bitflags", - "cfg-if 0.1.10", - "foreign-types", - "lazy_static", - "libc", - "openssl-sys", -] - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" - -[[package]] -name = "openssl-sys" -version = "0.9.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "os_str_bytes" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac6fe3538f701e339953a3ebbe4f39941aababa8a3f6964635b24ab526daeac" - [[package]] name = "parity-scale-codec" -version = "2.1.1" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f518afaa5a47d0d6386229b0a6e01e86427291d643aa4cabb4992219f504f8" +checksum = "b310f220c335f9df1b3d2e9fbe3890bbfeef5030dad771620f48c5c229877cd3" dependencies = [ "arrayvec", "bitvec", @@ -1200,9 +711,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api", @@ -1211,12 +722,11 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ - "cfg-if 0.1.10", - "cloudabi", + "cfg-if", "instant", "libc", "redox_syscall", @@ -1232,30 +742,24 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.1" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee41d838744f60d959d7074e3afb6b35c7456d0f61cad38a24e35e6553f73841" +checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.1" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a4ffa594b66bff340084d4081df649a7dc049ac8d7fc458d8e628bfbbb2f86" +checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - [[package]] name = "pin-project-lite" version = "0.2.6" @@ -1268,17 +772,11 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkg-config" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - [[package]] name = "ppv-lite86" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "primitive-types" @@ -1318,9 +816,9 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99c605b9a0adc77b7211c6b1f722dcb613d68d66859a44f3d485a6da332b0598" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" @@ -1330,13 +828,19 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" dependencies = [ "unicode-xid", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.9" @@ -1358,7 +862,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", @@ -1367,14 +871,14 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" 
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_chacha 0.3.1", + "rand_core 0.6.3", + "rand_hc 0.3.1", ] [[package]] @@ -1389,12 +893,12 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] @@ -1403,16 +907,16 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.15", + "getrandom 0.1.16", ] [[package]] name = "rand_core" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.2", + "getrandom 0.2.3", ] [[package]] @@ -1426,37 +930,22 @@ dependencies = [ [[package]] name = "rand_hc" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" dependencies = [ - "rand_core 0.6.2", + "rand_core 0.6.3", ] [[package]] name = "redox_syscall" -version = "0.1.57" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "regex" -version = "1.4.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8963b85b8ce3074fecffde43b4b0dded83ce2f367dc8d363afc56679f3ee820b" +checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", + "bitflags", ] -[[package]] -name = "regex-syntax" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cab7a364d15cde1e505267766a2d3c4e22a843e1a601f0fa7564c0f82ced11c" - [[package]] name = "remove_dir_all" version = "0.5.3" @@ -1466,41 +955,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "reqwest" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" -dependencies = [ - "base64 0.13.0", - "bytes 1.0.1", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "lazy_static", - "log", - "mime", - "native-tls", - "percent-encoding", - "pin-project-lite 0.2.6", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-native-tls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "rustc-hash" version = "1.1.0" @@ -1513,15 +967,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - [[package]] name = "ryu" version = "1.0.5" @@ -1529,14 +974,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] -name = "schannel" -version = "0.1.19" +name 
= "safemem" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", -] +checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" [[package]] name = "scopeguard" @@ -1544,58 +991,20 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "security-framework" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d493c5f39e02dfb062cd8f33301f90f9b13b650e8c1b1d0fd75c19dd64bff69d" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee48cdde5ed250b0d3252818f646e174ab414036edb884dde62d80a3ac6082d" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.117" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +checksum = 
"ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.117" +version = "1.0.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" dependencies = [ "proc-macro2", "quote", @@ -1604,9 +1013,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.59" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -1627,55 +1036,46 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.9.1" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "170a36ea86c864a3f16dd2687712dd6646f7019f301e57537c7f4dc9f5916770" +checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16" dependencies = [ "block-buffer", - "cfg-if 0.1.10", - "cpuid-bool", + "cfg-if", + "cpufeatures", "digest", "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "shard" version = "0.1.0" dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", "anyhow", "bincode", - "bytes 1.0.1", - "clap", "common", + "futures", + "hex", + "http", "log", "primitive-types", - "rustc-hash", "serde", "serde_json", "simple_logger", "soketto", + "structopt", + "thiserror", "tokio", - "tokio-stream", "tokio-util", + "warp", ] [[package]] name = "signal-hook-registry" -version = "1.2.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a3e12110bc539e657a646068aaf5eb5b63af9d0c1f7b29c97113fad80e15f035" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" dependencies = [ - "arc-swap", "libc", ] @@ -1694,9 +1094,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" [[package]] name = "smallvec" @@ -1706,105 +1106,70 @@ checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" [[package]] name = "socket2" -version = "0.3.19" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" dependencies = [ - "cfg-if 1.0.0", "libc", "winapi", ] [[package]] name = "soketto" -version = "0.4.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" +checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f" dependencies = [ - "base64 0.12.3", - "bytes 0.5.6", + "base64", + "bytes", "futures", "httparse", "log", - "rand 0.7.3", + "rand 0.8.4", "sha-1", ] -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - [[package]] name = "strsim" -version = "0.10.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "structopt" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] [[package]] name = "syn" -version = "1.0.65" +version = "1.0.73" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1d708c221c5a612956ef9f75b37e454e88d1f7b899fbd3a18d4252012d663" +checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" dependencies = [ "proc-macro2", "quote", @@ -1819,158 +1184,110 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "telemetry" -version = "0.3.0" +version = "0.1.0" dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", + "anyhow", "bincode", - "bytes 1.0.1", - "chrono", - "clap", "common", - "ctor", + "futures", + "hex", + "http", "log", - "parking_lot", - "reqwest", - "rustc-hash", + "primitive-types", "serde", "serde_json", "simple_logger", + "soketto", + "structopt", + "thiserror", + "tokio", + "tokio-util", + "warp", ] [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "libc", - "rand 0.7.3", + "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", ] -[[package]] -name = "termcolor" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" -dependencies = [ - "winapi-util", -] - [[package]] name = "textwrap" -version = "0.12.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "203008d98caf094106cfaba70acfed15e18ed3ddb7d94e49baec153a2b462789" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ "unicode-width", ] [[package]] name = "thiserror" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" +checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - [[package]] name = "time" -version = "0.1.44" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" dependencies = [ "libc", - "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] -[[package]] -name = "time" -version = "0.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - 
"standback", - "syn", -] - [[package]] name = "tinyvec" -version = "0.3.4" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.4.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "c79ba603c337335df6ba6dd6afc38c38a7d5e1b0c871678439ea973cd62a118e" dependencies = [ "autocfg", - "bytes 1.0.1", + "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.6", + "pin-project-lite", "signal-hook-registry", "tokio-macros", "winapi", @@ -1987,16 +1304,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - [[package]] name = "tokio-stream" version = "0.1.6" @@ -2004,22 +1311,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8864d706fdb3cc0843a49647ac892720dac98a6eeb818b77190592cf4994066" dependencies = [ "futures-core", - "pin-project-lite 0.2.6", + "pin-project-lite", "tokio", ] [[package]] -name = "tokio-util" -version = "0.6.5" +name = "tokio-tungstenite" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +checksum = 
"e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" dependencies = [ - "bytes 1.0.1", + "futures-util", + "log", + "pin-project", + "tokio", + "tungstenite", +] + +[[package]] +name = "tokio-util" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +dependencies = [ + "bytes", "futures-core", "futures-io", "futures-sink", "log", - "pin-project-lite 0.2.6", + "pin-project-lite", "tokio", ] @@ -2031,20 +1351,21 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" dependencies = [ - "cfg-if 0.1.10", - "pin-project-lite 0.1.11", + "cfg-if", + "log", + "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" dependencies = [ "lazy_static", ] @@ -2056,10 +1377,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] -name = "typenum" -version = "1.12.0" +name = "tungstenite" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +dependencies = [ + "base64", + "byteorder", + "bytes", + "http", + "httparse", + "input_buffer", + "log", + "rand 0.8.4", + "sha-1", + "url", + 
"utf-8", +] + +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + +[[package]] +name = "typenum" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" [[package]] name = "uint" @@ -2074,28 +1423,37 @@ dependencies = [ ] [[package]] -name = "unicode-bidi" -version = "0.3.4" +name = "unicase" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" dependencies = [ "matches", ] [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" [[package]] name = "unicode-width" @@ -2105,15 +1463,15 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.1" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "url" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" dependencies = [ "form_urlencoded", "idna", @@ -2122,10 +1480,10 @@ dependencies = [ ] [[package]] -name = "vcpkg" -version = "0.2.10" +name = "utf-8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "vec_map" @@ -2135,9 +1493,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "want" @@ -2149,6 +1507,35 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" +dependencies = [ + "bytes", + "futures", + "headers", + "http", + "hyper", + "log", + "mime", + "mime_guess", + "multipart", + "percent-encoding", + "pin-project", + "scoped-tls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tokio-util", + "tower-service", + "tracing", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ 
-2157,87 +1544,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - -[[package]] -name = "wasm-bindgen" -version = "0.2.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" -dependencies = [ - "cfg-if 1.0.0", - "serde", - "serde_json", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73157efb9af26fb564bb59a009afd1c7c334a44db171d280690d0c3faaec3468" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.72" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" - -[[package]] -name = "web-sys" -version = "0.3.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" -dependencies = [ - "js-sys", - "wasm-bindgen", -] +checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "winapi" @@ -2255,30 +1564,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -[[package]] -name = "winreg" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi", -] - [[package]] name = "wyz" version = "0.2.0" diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 569abc9..167ab3d 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -1,7 +1,7 @@ [workspace] members = [ "common", - "core", + "telemetry", "shard" ] diff --git a/backend/common/Cargo.toml b/backend/common/Cargo.toml index 3bd9911..e873306 100644 --- a/backend/common/Cargo.toml +++ b/backend/common/Cargo.toml @@ -5,10 +5,6 @@ authors = ["Parity Technologies Ltd. 
"] edition = "2018" [dependencies] -actix = "0.11.1" -actix-web = { version = "4.0.0-beta.4", default-features = false } -actix-web-actors = "4.0.0-beta.3" -actix-http = "3.0.0-beta.4" bytes = "1.0.1" fnv = "1.0.7" hex = "0.4.3" diff --git a/backend/common/src/internal_messages.rs b/backend/common/src/internal_messages.rs new file mode 100644 index 0000000..593ea9b --- /dev/null +++ b/backend/common/src/internal_messages.rs @@ -0,0 +1,36 @@ +use std::net::IpAddr; + +use crate::node::Payload; +use crate::types::{NodeDetails}; +use serde::{Deserialize, Serialize}; + +/// The shard-local ID of a given node, where a single connection +/// might send data on behalf of more than one chain. +pub type LocalId = u64; + +/// A global ID assigned to messages from each different pair of ConnId+LocalId. +pub type GlobalId = u64; + +/// Message sent from the shard to the backend core +#[derive(Deserialize, Serialize, Debug, Clone)] +pub enum FromShardAggregator { + /// Get information about a new node, passing IPv4 + AddNode { + ip: Option, + node: NodeDetails, + local_id: LocalId, + }, + /// Send a message payload to update details for a node + UpdateNode { + local_id: LocalId, + payload: Payload, + }, +} + +/// Message sent form the backend core to the shard +#[derive(Deserialize, Serialize, Debug, Clone)] +pub enum FromTelemetryCore { + Mute { + local_id: LocalId + } +} diff --git a/backend/common/src/json/hash.rs b/backend/common/src/json/hash.rs index ed7ba4a..76aff5d 100644 --- a/backend/common/src/json/hash.rs +++ b/backend/common/src/json/hash.rs @@ -1,7 +1,5 @@ use std::fmt::{self, Debug, Display}; use std::str::FromStr; - -use actix_web::error::ResponseError; use serde::ser::{Serialize, Serializer}; use serde::de::{self, Deserialize, Deserializer, Unexpected, Visitor, SeqAccess}; @@ -140,18 +138,12 @@ impl Debug for Hash { #[derive(thiserror::Error, Debug)] pub enum HashParseError { + #[error("Error parsing string into hex: {0}")] HexError(hex::FromHexError), + 
#[error("Invalid hex prefix: expected '0x'")] InvalidPrefix, } -impl Display for HashParseError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(self, f) - } -} - -impl ResponseError for HashParseError {} - #[cfg(test)] mod tests { use super::Hash; diff --git a/backend/common/src/json/node_message.rs b/backend/common/src/json/node_message.rs index a8fce2c..af93eda 100644 --- a/backend/common/src/json/node_message.rs +++ b/backend/common/src/json/node_message.rs @@ -26,7 +26,7 @@ pub enum NodeMessage { payload: Payload, }, V2 { - id: ConnId, + id: NodeMessageId, payload: Payload, }, } @@ -127,7 +127,7 @@ pub struct NodeDetails { pub startup_time: Option>, } -type ConnId = u64; +type NodeMessageId = u64; type BlockNumber = u64; #[cfg(test)] diff --git a/backend/common/src/lib.rs b/backend/common/src/lib.rs index a18980d..78c887a 100644 --- a/backend/common/src/lib.rs +++ b/backend/common/src/lib.rs @@ -1,6 +1,6 @@ pub mod node; -pub mod shard; +pub mod internal_messages; pub mod types; pub mod util; -pub mod ws; -pub mod json; \ No newline at end of file +pub mod json; +pub mod log_level; \ No newline at end of file diff --git a/backend/common/src/log_level.rs b/backend/common/src/log_level.rs new file mode 100644 index 0000000..1de2f05 --- /dev/null +++ b/backend/common/src/log_level.rs @@ -0,0 +1,36 @@ +/// A log level that can be used as an expected argument, +/// decoded from a string, and converted into a [`log::LevelFilter`] +#[derive(Debug, PartialEq)] +pub enum LogLevel { + Error, + Warn, + Info, + Debug, + Trace, +} + +impl std::str::FromStr for LogLevel { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + "error" => Ok(LogLevel::Error), + "warn" => Ok(LogLevel::Warn), + "info" => Ok(LogLevel::Info), + "debug" => Ok(LogLevel::Debug), + "trace" => Ok(LogLevel::Trace), + _ => Err("expected 'error', 'warn', 'info', 'debug' or 'trace'") + } + } +} + +impl From<&LogLevel> for log::LevelFilter { + fn from(log_level: 
&LogLevel) -> Self { + match log_level { + LogLevel::Error => log::LevelFilter::Error, + LogLevel::Warn => log::LevelFilter::Warn, + LogLevel::Info => log::LevelFilter::Info, + LogLevel::Debug => log::LevelFilter::Debug, + LogLevel::Trace => log::LevelFilter::Trace, + } + } +} \ No newline at end of file diff --git a/backend/common/src/node.rs b/backend/common/src/node.rs index bbc0214..1eff573 100644 --- a/backend/common/src/node.rs +++ b/backend/common/src/node.rs @@ -1,36 +1,41 @@ -use crate::types::{Block, BlockHash, BlockNumber, ConnId, NodeDetails}; +use crate::types::{Block, BlockHash, BlockNumber, NodeDetails}; use crate::json; - -use actix::prelude::*; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, Message)] -#[rtype(result = "()")] +pub type NodeMessageId = u64; + +#[derive(Serialize, Deserialize, Debug)] pub enum NodeMessage { V1 { payload: Payload, }, V2 { - id: ConnId, + id: NodeMessageId, payload: Payload, }, } impl NodeMessage { - /// Returns the connection ID or 0 if there is no ID. - pub fn id(&self) -> ConnId { + /// Returns the ID associated with the node message, or 0 + /// if the message has no ID. + pub fn id(&self) -> NodeMessageId { match self { NodeMessage::V1 { .. } => 0, NodeMessage::V2 { id, .. } => *id, } } + /// Return the payload associated with the message. + pub fn into_payload(self) -> Payload { + match self { + NodeMessage::V1 { payload, .. } | + NodeMessage::V2 { payload, .. } => payload, + } + } } impl From for Payload { fn from(msg: NodeMessage) -> Payload { - match msg { - NodeMessage::V1 { payload, .. } | NodeMessage::V2 { payload, .. 
} => payload, - } + msg.into_payload() } } @@ -47,7 +52,7 @@ impl From for NodeMessage { } } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub enum Payload { SystemConnected(SystemConnected), SystemInterval(SystemInterval), @@ -110,7 +115,7 @@ impl From for Payload { } } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct SystemConnected { pub genesis_hash: BlockHash, pub node: NodeDetails, @@ -125,7 +130,7 @@ impl From for SystemConnected { } } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct SystemInterval { pub peers: Option, pub txcount: Option, @@ -152,7 +157,7 @@ impl From for SystemInterval { } } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct Finalized { pub hash: BlockHash, pub height: Box, @@ -199,7 +204,7 @@ impl From for AfgReceived { } } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct AfgAuthoritySet { pub authority_id: Box, pub authorities: Box, diff --git a/backend/common/src/shard.rs b/backend/common/src/shard.rs deleted file mode 100644 index 125a826..0000000 --- a/backend/common/src/shard.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::net::Ipv4Addr; - -use crate::ws::MuteReason; -use crate::node::Payload; -use crate::types::{NodeId, NodeDetails}; -use serde::{Deserialize, Serialize}; - -/// Alias for the ID of the node connection -pub type ShardConnId = u32; - -/// Message sent from the shard to the backend core -#[derive(Deserialize, Serialize, Debug)] -pub enum ShardMessage { - /// Get a connection id for a new node, passing IPv4 - AddNode { - ip: Option, - node: NodeDetails, - sid: ShardConnId, - }, - /// Send a message payload for a given node - UpdateNode { - nid: NodeId, - payload: Payload, - }, -} - -/// Message sent form the backend core to the shard -#[derive(Deserialize, Serialize, Debug)] 
-pub enum BackendMessage { - Initialize { - sid: ShardConnId, - nid: NodeId, - }, - Mute { - sid: ShardConnId, - reason: MuteReason, - }, -} diff --git a/backend/common/src/types.rs b/backend/common/src/types.rs index aa85996..bdd25ae 100644 --- a/backend/common/src/types.rs +++ b/backend/common/src/types.rs @@ -5,7 +5,6 @@ use crate::util::{now, MeanList}; use crate::json; pub type NodeId = usize; -pub type ConnId = u64; pub type BlockNumber = u64; pub type Timestamp = u64; pub type Address = Box; diff --git a/backend/common/src/ws.rs b/backend/common/src/ws.rs deleted file mode 100644 index 6b6cb85..0000000 --- a/backend/common/src/ws.rs +++ /dev/null @@ -1,98 +0,0 @@ -use actix_http::ws::Item; -use actix_web_actors::ws::{self, CloseReason, CloseCode}; -use bytes::{Bytes, BytesMut}; -use serde::{Serialize, Deserialize}; -use actix::prelude::Message; - -/// Helper that will buffer continuation messages from actix -/// until completion, capping at 10mb. -#[derive(Default)] -pub struct MultipartHandler { - buf: BytesMut, -} - -/// Message to signal that a node should be muted for a reason that's -/// cheap to transfer between Actors or over the wire for shards. -#[derive(Serialize, Deserialize, Message, Clone, Copy, Debug)] -#[rtype("()")] -pub enum MuteReason { - /// Node was denied connection for any arbitrary reason, - /// and should not attempt to reconnect. - Denied, - /// Node was denied because the chain it belongs to is currently - /// at the limit of allowed nodes, and it may attempt to reconnect. 
- Overquota, -} - -impl From for CloseReason { - fn from(mute: MuteReason) -> CloseReason { - match mute { - MuteReason::Denied => CloseReason { - code: CloseCode::Abnormal, - description: Some("Denied".into()), - }, - MuteReason::Overquota => CloseReason { - code: CloseCode::Again, - description: Some("Overquota".into()), - }, - } - } -} - -/// Continuation buffer limit, 10mb -const CONT_BUF_LIMIT: usize = 10 * 1024 * 1024; - -pub enum WsMessage { - Nop, - Ping(Bytes), - Data(Bytes), - Close(Option), -} - -impl MultipartHandler { - pub fn handle(&mut self, msg: ws::Message) -> WsMessage { - match msg { - ws::Message::Ping(msg) => WsMessage::Ping(msg), - ws::Message::Pong(_) => WsMessage::Nop, - ws::Message::Text(text) => WsMessage::Data(text.into_bytes()), - ws::Message::Binary(data) => WsMessage::Data(data), - ws::Message::Close(reason) => WsMessage::Close(reason), - ws::Message::Nop => WsMessage::Nop, - ws::Message::Continuation(cont) => match cont { - Item::FirstText(bytes) | Item::FirstBinary(bytes) => { - self.start_frame(&bytes); - WsMessage::Nop - } - Item::Continue(bytes) => { - self.continue_frame(&bytes); - WsMessage::Nop - } - Item::Last(bytes) => { - self.continue_frame(&bytes); - WsMessage::Data(self.finish_frame()) - } - }, - } - } - - fn start_frame(&mut self, bytes: &[u8]) { - if !self.buf.is_empty() { - log::error!("Unused continuation buffer"); - self.buf.clear(); - } - self.continue_frame(bytes); - } - - fn continue_frame(&mut self, bytes: &[u8]) { - if self.buf.len() + bytes.len() <= CONT_BUF_LIMIT { - self.buf.extend_from_slice(&bytes); - } else { - log::error!("Continuation buffer overflow"); - self.buf = BytesMut::new(); - } - } - - fn finish_frame(&mut self) -> Bytes { - std::mem::replace(&mut self.buf, BytesMut::new()).freeze() - } -} diff --git a/backend/core/Cargo.toml b/backend/core/Cargo.toml deleted file mode 100644 index 1705048..0000000 --- a/backend/core/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "telemetry" 
-version = "0.3.0" -authors = ["Parity Technologies Ltd. "] -edition = "2018" -license = "GPL-3.0" - -[dependencies] -actix = "0.11.1" -actix-web = { version = "4.0.0-beta.4", default-features = false } -actix-web-actors = "4.0.0-beta.3" -actix-http = "3.0.0-beta.4" -bincode = "1.3.3" -bytes = "1.0.1" -chrono = { version = "0.4", features = ["serde"] } -serde = { version = "1.0", features = ["derive"] } -serde_json = { version = "1.0", features = ["raw_value"] } -common = { path = "../common" } -log = "0.4" -simple_logger = "1.11.0" -parking_lot = "0.11" -reqwest = { version = "0.11.1", features = ["blocking", "json"] } -rustc-hash = "1.1.0" -clap = "3.0.0-beta.2" -ctor = "0.1.20" diff --git a/backend/core/src/aggregator.rs b/backend/core/src/aggregator.rs deleted file mode 100644 index 0c0b693..0000000 --- a/backend/core/src/aggregator.rs +++ /dev/null @@ -1,395 +0,0 @@ -use actix::prelude::*; -use ctor::ctor; -use std::collections::{HashMap, HashSet}; - -use crate::shard::connector::ShardConnector; -use crate::chain::{self, Chain, ChainId, Label}; -use crate::feed::connector::{Connected, FeedConnector, FeedId}; -use crate::feed::{self, FeedMessageSerializer}; -use common::ws::MuteReason; -use common::shard::ShardConnId; -use common::types::{ConnId, NodeDetails, BlockHash}; -use common::util::{DenseMap}; - -pub struct Aggregator { - genesis_hashes: HashMap, - labels: HashMap, - chains: DenseMap, - feeds: DenseMap>, - serializer: FeedMessageSerializer, - /// Denylist for networks we do not want to allow connecting. - denylist: HashSet, -} - -pub struct ChainEntry { - /// Address to the `Chain` agent - addr: Addr, - /// Genesis [`Hash`] of the chain - genesis_hash: BlockHash, - /// String name of the chain - label: Label, - /// Node count - nodes: usize, - /// Maximum allowed nodes - max_nodes: usize, -} - -#[ctor] -/// Labels of chains we consider "first party". These chains allow any -/// number of nodes to connect. 
-static FIRST_PARTY_NETWORKS: HashSet<&'static str> = { - let mut set = HashSet::new(); - set.insert("Polkadot"); - set.insert("Kusama"); - set.insert("Westend"); - set.insert("Rococo"); - set -}; - -/// Max number of nodes allowed to connect to the telemetry server. -const THIRD_PARTY_NETWORKS_MAX_NODES: usize = 500; - -impl Aggregator { - pub fn new(denylist: HashSet) -> Self { - Aggregator { - genesis_hashes: HashMap::new(), - labels: HashMap::new(), - chains: DenseMap::new(), - feeds: DenseMap::new(), - serializer: FeedMessageSerializer::new(), - denylist, - } - } - - /// Get an address to the chain actor by name. If the address is not found, - /// or the address is disconnected (actor dropped), create a new one. - pub fn lazy_chain( - &mut self, - genesis_hash: BlockHash, - label: &str, - ctx: &mut ::Context, - ) -> ChainId { - let cid = match self.genesis_hashes.get(&genesis_hash).copied() { - Some(cid) => cid, - None => { - self.serializer.push(feed::AddedChain(&label, 1)); - - let addr = ctx.address(); - let max_nodes = max_nodes(label); - let label: Label = label.into(); - let cid = self.chains.add_with(|cid| ChainEntry { - addr: Chain::new(cid, addr, label.clone()).start(), - genesis_hash, - label: label.clone(), - nodes: 1, - max_nodes, - }); - - self.labels.insert(label, cid); - self.genesis_hashes.insert(genesis_hash, cid); - - self.broadcast(); - - cid - } - }; - - cid - } - - fn get_chain(&mut self, label: &str) -> Option<&mut ChainEntry> { - let chains = &mut self.chains; - self.labels - .get(label) - .and_then(move |&cid| chains.get_mut(cid)) - } - - fn broadcast(&mut self) { - if let Some(msg) = self.serializer.finalize() { - for (_, feed) in self.feeds.iter() { - feed.do_send(msg.clone()); - } - } - } -} - -impl Actor for Aggregator { - type Context = Context; -} - -/// Message sent from the NodeConnector to the Aggregator upon getting all node details -#[derive(Message)] -#[rtype(result = "()")] -pub struct AddNode { - /// Details of the node 
being added to the aggregator - pub node: NodeDetails, - /// Genesis [`Hash`] of the chain the node is being added to. - pub genesis_hash: BlockHash, - /// Source from which this node is being added (Direct | Shard) - pub source: NodeSource, -} - -pub enum NodeSource { - Shard { - /// `ShardConnId` that identifies the node connection within a shard. - sid: ShardConnId, - /// Address to the ShardConnector actor - shard_connector: Addr, - } -} - -/// Message sent from the Chain to the Aggregator when the Chain loses all nodes -#[derive(Message)] -#[rtype(result = "()")] -pub struct DropChain(pub ChainId); - -#[derive(Message)] -#[rtype(result = "()")] -pub struct RenameChain(pub ChainId, pub Label); - -/// Message sent from the FeedConnector to the Aggregator when subscribing to a new chain -#[derive(Message)] -#[rtype(result = "bool")] -pub struct Subscribe { - pub chain: Label, - pub feed: Addr, -} - -/// Message sent from the FeedConnector to the Aggregator consensus requested -#[derive(Message)] -#[rtype(result = "()")] -pub struct SendFinality { - pub chain: Label, - pub fid: FeedId, -} - -/// Message sent from the FeedConnector to the Aggregator no more consensus required -#[derive(Message)] -#[rtype(result = "()")] -pub struct NoMoreFinality { - pub chain: Label, - pub fid: FeedId, -} - -/// Message sent from the FeedConnector to the Aggregator when first connected -#[derive(Message)] -#[rtype(result = "()")] -pub struct Connect(pub Addr); - -/// Message sent from the FeedConnector to the Aggregator when disconnecting -#[derive(Message)] -#[rtype(result = "()")] -pub struct Disconnect(pub FeedId); - -/// Message sent from the Chain to the Aggergator when the node count on the chain changes -#[derive(Message)] -#[rtype(result = "()")] -pub struct NodeCount(pub ChainId, pub usize); - -/// Message sent to the Aggregator to get a health check -#[derive(Message)] -#[rtype(result = "usize")] -pub struct GetHealth; - -impl NodeSource { - pub fn mute(&self, reason: 
MuteReason) { - match self { - // TODO - NodeSource::Shard { shard_connector, .. } => { - // shard_connector.do_send(Mute { reason }); - }, - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { - if self.denylist.contains(&*msg.node.chain) { - log::warn!(target: "Aggregator::AddNode", "'{}' is on the denylist.", msg.node.chain); - - msg.source.mute(MuteReason::Denied); - return; - } - let AddNode { - node, - genesis_hash, - source, - // conn_id, - // node_connector, - } = msg; - log::trace!(target: "Aggregator::AddNode", "New node connected. Chain '{}'", node.chain); - - let cid = self.lazy_chain(genesis_hash, &node.chain, ctx); - let chain = self - .chains - .get_mut(cid) - .expect("Entry just created above; qed"); - if chain.nodes < chain.max_nodes { - chain.addr.do_send(chain::AddNode { - node, - source, - }); - } else { - log::warn!(target: "Aggregator::AddNode", "Chain {} is over quota ({})", chain.label, chain.max_nodes); - - source.mute(MuteReason::Overquota); - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: DropChain, _: &mut Self::Context) { - let DropChain(cid) = msg; - - if let Some(entry) = self.chains.remove(cid) { - let label = &entry.label; - self.genesis_hashes.remove(&entry.genesis_hash); - self.labels.remove(label); - self.serializer.push(feed::RemovedChain(label)); - log::info!("Dropped chain [{}] from the aggregator", label); - self.broadcast(); - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: RenameChain, _: &mut Self::Context) { - let RenameChain(cid, new) = msg; - - if let Some(entry) = self.chains.get_mut(cid) { - if entry.label == new { - return; - } - - // Update UI - self.serializer.push(feed::RemovedChain(&entry.label)); - self.serializer.push(feed::AddedChain(&new, entry.nodes)); - - // Update labels -> cid map - self.labels.remove(&entry.label); - 
self.labels.insert(new.clone(), cid); - - // Update entry - entry.label = new; - - self.broadcast(); - } - } -} - -impl Handler for Aggregator { - type Result = bool; - - fn handle(&mut self, msg: Subscribe, _: &mut Self::Context) -> bool { - let Subscribe { chain, feed } = msg; - - if let Some(chain) = self.get_chain(&chain) { - chain.addr.do_send(chain::Subscribe(feed)); - true - } else { - false - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: SendFinality, _: &mut Self::Context) { - let SendFinality { chain, fid } = msg; - if let Some(chain) = self.get_chain(&chain) { - chain.addr.do_send(chain::SendFinality(fid)); - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: NoMoreFinality, _: &mut Self::Context) { - let NoMoreFinality { chain, fid } = msg; - if let Some(chain) = self.get_chain(&chain) { - chain.addr.do_send(chain::NoMoreFinality(fid)); - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: Connect, _: &mut Self::Context) { - let Connect(connector) = msg; - - let fid = self.feeds.add(connector.clone()); - - log::info!("Feed #{} connected", fid); - - connector.do_send(Connected(fid)); - - self.serializer.push(feed::Version(31)); - - // TODO: keep track on number of nodes connected to each chain - for (_, entry) in self.chains.iter() { - self.serializer - .push(feed::AddedChain(&entry.label, entry.nodes)); - } - - if let Some(msg) = self.serializer.finalize() { - connector.do_send(msg); - } - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: Disconnect, _: &mut Self::Context) { - let Disconnect(fid) = msg; - - log::info!("Feed #{} disconnected", fid); - - self.feeds.remove(fid); - } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: NodeCount, _: &mut Self::Context) { - let NodeCount(cid, count) = msg; - - if let Some(entry) = self.chains.get_mut(cid) { - 
entry.nodes = count; - - if count != 0 { - self.serializer.push(feed::AddedChain(&entry.label, count)); - self.broadcast(); - } - } - } -} - -impl Handler for Aggregator { - type Result = usize; - - fn handle(&mut self, _: GetHealth, _: &mut Self::Context) -> Self::Result { - self.chains.len() - } -} - -/// First party networks (Polkadot, Kusama etc) are allowed any number of nodes. -/// Third party networks are allowed `THIRD_PARTY_NETWORKS_MAX_NODES` nodes and -/// no more. -fn max_nodes(label: &str) -> usize { - if FIRST_PARTY_NETWORKS.contains(label) { - usize::MAX - } else { - THIRD_PARTY_NETWORKS_MAX_NODES - } -} diff --git a/backend/core/src/chain.rs b/backend/core/src/chain.rs deleted file mode 100644 index c27393a..0000000 --- a/backend/core/src/chain.rs +++ /dev/null @@ -1,567 +0,0 @@ -use actix::prelude::*; -use rustc_hash::FxHashMap; -use std::collections::HashMap; -use std::sync::Arc; - -use crate::aggregator::{Aggregator, DropChain, NodeCount, NodeSource, RenameChain}; -use crate::feed::connector::{FeedConnector, FeedId, Subscribed, Unsubscribed}; -use crate::feed::{self, FeedMessageSerializer}; -use crate::node::Node; -use common::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; -use common::util::{now, DenseMap, NumStats}; -use common::node::Payload; - -const STALE_TIMEOUT: u64 = 2 * 60 * 1000; // 2 minutes - -pub type ChainId = usize; -pub type Label = Arc; - -pub struct Chain { - cid: ChainId, - /// Who to inform if the Chain drops itself - aggregator: Addr, - /// Label of this chain, along with count of nodes that use this label - label: (Label, usize), - /// Dense mapping of NodeId -> Node - nodes: DenseMap, - /// Dense mapping of FeedId -> Addr, - feeds: DenseMap>, - /// Mapping of FeedId -> Addr for feeds requiring finality info, - finality_feeds: FxHashMap>, - /// Best block - best: Block, - /// Finalized block - finalized: Block, - /// Block times history, stored so we can calculate averages - block_times: NumStats, - /// 
Calculated average block time - average_block_time: Option, - /// Message serializer - serializer: FeedMessageSerializer, - /// When the best block first arrived - timestamp: Option, - /// Some nodes might manifest a different label, note them here - labels: HashMap, -} - -impl Chain { - pub fn new(cid: ChainId, aggregator: Addr, label: Label) -> Self { - log::info!("[{}] Created", label); - - Chain { - cid, - aggregator, - label: (label, 0), - nodes: DenseMap::new(), - feeds: DenseMap::new(), - finality_feeds: FxHashMap::default(), - best: Block::zero(), - finalized: Block::zero(), - block_times: NumStats::new(50), - average_block_time: None, - serializer: FeedMessageSerializer::new(), - timestamp: None, - labels: HashMap::default(), - } - } - - fn increment_label_count(&mut self, label: &str) { - let count = match self.labels.get_mut(label) { - Some(count) => { - *count += 1; - *count - } - None => { - self.labels.insert(label.into(), 1); - 1 - } - }; - - if &*self.label.0 == label { - self.label.1 += 1; - } else if count > self.label.1 { - self.rename(label.into(), count); - } - } - - fn decrement_label_count(&mut self, label: &str) { - match self.labels.get_mut(label) { - Some(count) => *count -= 1, - None => return, - }; - - if &*self.label.0 == label { - self.label.1 -= 1; - - for (label, &count) in self.labels.iter() { - if count > self.label.1 { - let label: Arc<_> = label.clone(); - self.rename(label, count); - break; - } - } - } - } - - fn rename(&mut self, label: Label, count: usize) { - self.label = (label, count); - - self.aggregator - .do_send(RenameChain(self.cid, self.label.0.clone())); - } - - fn broadcast(&mut self) { - if let Some(msg) = self.serializer.finalize() { - for (_, feed) in self.feeds.iter() { - feed.do_send(msg.clone()); - } - } - } - - fn broadcast_finality(&mut self) { - if let Some(msg) = self.serializer.finalize() { - for feed in self.finality_feeds.values() { - feed.do_send(msg.clone()); - } - } - } - - /// Triggered when the 
number of nodes in this chain has changed, Aggregator will - /// propagate new counts to all connected feeds - fn update_count(&self) { - self.aggregator - .do_send(NodeCount(self.cid, self.nodes.len())); - } - - /// Check if the chain is stale (has not received a new best block in a while). - /// If so, find a new best block, ignoring any stale nodes and marking them as such. - fn update_stale_nodes(&mut self, now: u64) { - let threshold = now - STALE_TIMEOUT; - let timestamp = match self.timestamp { - Some(ts) => ts, - None => return, - }; - - if timestamp > threshold { - // Timestamp is in range, nothing to do - return; - } - - let mut best = Block::zero(); - let mut finalized = Block::zero(); - let mut timestamp = None; - - for (nid, node) in self.nodes.iter_mut() { - if !node.update_stale(threshold) { - if node.best().height > best.height { - best = *node.best(); - timestamp = Some(node.best_timestamp()); - } - - if node.finalized().height > finalized.height { - finalized = *node.finalized(); - } - } else { - self.serializer.push(feed::StaleNode(nid)); - } - } - - if self.best.height != 0 || self.finalized.height != 0 { - self.best = best; - self.finalized = finalized; - self.block_times.reset(); - self.timestamp = timestamp; - - self.serializer.push(feed::BestBlock( - self.best.height, - timestamp.unwrap_or(now), - None, - )); - self.serializer - .push(feed::BestFinalized(finalized.height, finalized.hash)); - } - } -} - -impl Actor for Chain { - type Context = Context; - - fn stopped(&mut self, _: &mut Self::Context) { - self.aggregator.do_send(DropChain(self.cid)); - - for (_, feed) in self.feeds.iter() { - feed.do_send(Unsubscribed) - } - } -} - -/// Message sent from the Aggregator to the Chain when new Node is connected -#[derive(Message)] -#[rtype(result = "()")] -pub struct AddNode { - /// Details of the node being added to the aggregator - pub node: NodeDetails, - /// Source from which this node is being added (Direct | Shard) - pub source: NodeSource, 
-} - -/// Message sent from the NodeConnector to the Chain when it receives new telemetry data -#[derive(Message)] -#[rtype(result = "()")] -pub struct UpdateNode { - pub nid: NodeId, - pub payload: Payload, -} - -/// Message sent from the NodeConnector to the Chain when the connector disconnects -#[derive(Message)] -#[rtype(result = "()")] -pub struct RemoveNode(pub NodeId); - -/// Message sent from the Aggregator to the Chain when the connector wants to subscribe to that chain -#[derive(Message)] -#[rtype(result = "()")] -pub struct Subscribe(pub Addr); - -/// Message sent from the FeedConnector before it subscribes to a new chain, or if it disconnects -#[derive(Message)] -#[rtype(result = "()")] -pub struct Unsubscribe(pub FeedId); - -#[derive(Message)] -#[rtype(result = "()")] -pub struct SendFinality(pub FeedId); - -#[derive(Message)] -#[rtype(result = "()")] -pub struct NoMoreFinality(pub FeedId); - -/// Message sent from the NodeConnector to the Chain when it receives location data -#[derive(Message)] -#[rtype(result = "()")] -pub struct LocateNode { - pub nid: NodeId, - pub location: Arc, -} - -impl NodeSource { - pub fn init(self, nid: NodeId, chain: Addr) -> bool { - match self { - NodeSource::Shard { sid, shard_connector } => { - shard_connector - .try_send(crate::shard::connector::Initialize { - nid, - sid, - chain, - }) - .is_ok() - } - } - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { - let AddNode { - node, - source, - } = msg; - log::trace!(target: "Chain::AddNode", "New node connected. 
Chain '{}', node count goes from {} to {}", node.chain, self.nodes.len(), self.nodes.len() + 1); - self.increment_label_count(&node.chain); - - let nid = self.nodes.add(Node::new(node)); - let chain = ctx.address(); - - if source.init(nid, chain) { - self.nodes.remove(nid); - } else if let Some(node) = self.nodes.get(nid) { - self.serializer.push(feed::AddedNode(nid, node)); - self.broadcast(); - } - - self.update_count(); - } -} - -impl Chain { - fn handle_block(&mut self, block: &Block, nid: NodeId) { - let mut propagation_time = None; - let now = now(); - let nodes_len = self.nodes.len(); - - self.update_stale_nodes(now); - - let node = match self.nodes.get_mut(nid) { - Some(node) => node, - None => return, - }; - - if node.update_block(*block) { - if block.height > self.best.height { - self.best = *block; - log::debug!( - "[{}] [nodes={}/feeds={}] new best block={}/{:?}", - self.label.0, - nodes_len, - self.feeds.len(), - self.best.height, - self.best.hash, - ); - if let Some(timestamp) = self.timestamp { - self.block_times.push(now - timestamp); - self.average_block_time = Some(self.block_times.average()); - } - self.timestamp = Some(now); - self.serializer.push(feed::BestBlock( - self.best.height, - now, - self.average_block_time, - )); - propagation_time = Some(0); - } else if block.height == self.best.height { - if let Some(timestamp) = self.timestamp { - propagation_time = Some(now - timestamp); - } - } - - if let Some(details) = node.update_details(now, propagation_time) { - self.serializer.push(feed::ImportedBlock(nid, details)); - } - } - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: UpdateNode, _: &mut Self::Context) { - let UpdateNode { nid, payload } = msg; - - if let Some(block) = payload.best_block() { - self.handle_block(block, nid); - } - - if let Some(node) = self.nodes.get_mut(nid) { - match payload { - Payload::SystemInterval(ref interval) => { - if node.update_hardware(interval) { - 
self.serializer.push(feed::Hardware(nid, node.hardware())); - } - - if let Some(stats) = node.update_stats(interval) { - self.serializer.push(feed::NodeStatsUpdate(nid, stats)); - } - - if let Some(io) = node.update_io(interval) { - self.serializer.push(feed::NodeIOUpdate(nid, io)); - } - } - // Payload::AfgAuthoritySet(authority) => { - // node.set_validator_address(authority.authority_id.clone()); - // self.broadcast(); - // return; - // } - // Payload::AfgFinalized(finalized) => { - // if let Ok(finalized_number) = finalized.finalized_number.parse::() - // { - // if let Some(addr) = node.details().validator.clone() { - // self.serializer.push(feed::AfgFinalized( - // addr, - // finalized_number, - // finalized.finalized_hash, - // )); - // self.broadcast_finality(); - // } - // } - // return; - // } - // Payload::AfgReceivedPrecommit(precommit) => { - // if let Ok(finalized_number) = - // precommit.received.target_number.parse::() - // { - // if let Some(addr) = node.details().validator.clone() { - // let voter = precommit.received.voter.clone(); - // self.serializer.push(feed::AfgReceivedPrecommit( - // addr, - // finalized_number, - // precommit.received.target_hash, - // voter, - // )); - // self.broadcast_finality(); - // } - // } - // return; - // } - // Payload::AfgReceivedPrevote(prevote) => { - // if let Ok(finalized_number) = - // prevote.received.target_number.parse::() - // { - // if let Some(addr) = node.details().validator.clone() { - // let voter = prevote.received.voter.clone(); - // self.serializer.push(feed::AfgReceivedPrevote( - // addr, - // finalized_number, - // prevote.received.target_hash, - // voter, - // )); - // self.broadcast_finality(); - // } - // } - // return; - // } - // Payload::AfgReceivedCommit(_) => {} - _ => (), - } - - if let Some(block) = payload.finalized_block() { - if let Some(finalized) = node.update_finalized(block) { - self.serializer.push(feed::FinalizedBlock( - nid, - finalized.height, - finalized.hash, - )); - - if 
finalized.height > self.finalized.height { - self.finalized = *finalized; - self.serializer - .push(feed::BestFinalized(finalized.height, finalized.hash)); - } - } - } - } - - self.broadcast(); - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: LocateNode, _: &mut Self::Context) { - let LocateNode { nid, location } = msg; - - if let Some(node) = self.nodes.get_mut(nid) { - self.serializer.push(feed::LocatedNode( - nid, - location.latitude, - location.longitude, - &location.city, - )); - - node.update_location(location); - } - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: RemoveNode, ctx: &mut Self::Context) { - let RemoveNode(nid) = msg; - - if let Some(node) = self.nodes.remove(nid) { - self.decrement_label_count(&node.details().chain); - } - - if self.nodes.is_empty() { - log::info!("[{}] Lost all nodes, dropping...", self.label.0); - ctx.stop(); - } - - self.serializer.push(feed::RemovedNode(nid)); - self.broadcast(); - self.update_count(); - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: Subscribe, ctx: &mut Self::Context) { - let Subscribe(feed) = msg; - let fid = self.feeds.add(feed.clone()); - - feed.do_send(Subscribed(fid, ctx.address().recipient())); - - self.serializer.push(feed::SubscribedTo(&self.label.0)); - self.serializer.push(feed::TimeSync(now())); - self.serializer.push(feed::BestBlock( - self.best.height, - self.timestamp.unwrap_or(0), - self.average_block_time, - )); - self.serializer.push(feed::BestFinalized( - self.finalized.height, - self.finalized.hash, - )); - - for (idx, (nid, node)) in self.nodes.iter().enumerate() { - // Send subscription confirmation and chain head before doing all the nodes, - // and continue sending batches of 32 nodes a time over the wire subsequently - if idx % 32 == 0 { - if let Some(serialized) = self.serializer.finalize() { - feed.do_send(serialized); - } - } - - self.serializer.push(feed::AddedNode(nid, 
node)); - self.serializer.push(feed::FinalizedBlock( - nid, - node.finalized().height, - node.finalized().hash, - )); - if node.stale() { - self.serializer.push(feed::StaleNode(nid)); - } - } - - if let Some(serialized) = self.serializer.finalize() { - feed.do_send(serialized); - } - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: SendFinality, _ctx: &mut Self::Context) { - let SendFinality(fid) = msg; - if let Some(feed) = self.feeds.get(fid) { - self.finality_feeds.insert(fid, feed.clone()); - } - - // info!("Added new finality feed {}", fid); - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: NoMoreFinality, _: &mut Self::Context) { - let NoMoreFinality(fid) = msg; - - // info!("Removed finality feed {}", fid); - self.finality_feeds.remove(&fid); - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: Unsubscribe, _: &mut Self::Context) { - let Unsubscribe(fid) = msg; - - if let Some(feed) = self.feeds.get(fid) { - self.serializer.push(feed::UnsubscribedFrom(&self.label.0)); - - if let Some(serialized) = self.serializer.finalize() { - feed.do_send(serialized); - } - } - - self.feeds.remove(fid); - self.finality_feeds.remove(&fid); - } -} diff --git a/backend/core/src/feed/connector.rs b/backend/core/src/feed/connector.rs deleted file mode 100644 index 8d7f984..0000000 --- a/backend/core/src/feed/connector.rs +++ /dev/null @@ -1,219 +0,0 @@ -use crate::aggregator::{Aggregator, Connect, Disconnect, NoMoreFinality, SendFinality, Subscribe}; -use crate::chain::Unsubscribe; -use crate::feed::{FeedMessageSerializer, Pong}; -use actix::prelude::*; -use actix_web_actors::ws; -use bytes::Bytes; -use common::util::fnv; -use std::time::{Duration, Instant}; - -pub type FeedId = usize; - -/// How often heartbeat pings are sent -const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); -/// How long before lack of client response causes a timeout -const CLIENT_TIMEOUT: 
Duration = Duration::from_secs(60); - -pub struct FeedConnector { - /// FeedId that Aggregator holds of this actor - fid_aggregator: FeedId, - /// FeedId that Chain holds of this actor - fid_chain: FeedId, - /// Client must send ping at least once per 10 seconds (CLIENT_TIMEOUT), - hb: Instant, - /// Aggregator actor address - aggregator: Addr, - /// Chain actor address - chain: Option>, - /// FNV hash of the chain label, optimization to avoid double-subscribing - chain_label_hash: u64, - /// Message serializer - serializer: FeedMessageSerializer, -} - -impl Actor for FeedConnector { - type Context = ws::WebsocketContext; - - fn started(&mut self, ctx: &mut Self::Context) { - self.heartbeat(ctx); - self.aggregator.do_send(Connect(ctx.address())); - } - - fn stopped(&mut self, _: &mut Self::Context) { - if let Some(chain) = self.chain.take() { - let _ = chain.do_send(Unsubscribe(self.fid_chain)); - } - - self.aggregator.do_send(Disconnect(self.fid_aggregator)); - } -} - -impl FeedConnector { - pub fn new(aggregator: Addr) -> Self { - Self { - // Garbage id, will be replaced by the Connected message - fid_aggregator: !0, - // Garbage id, will be replaced by the Subscribed message - fid_chain: !0, - hb: Instant::now(), - aggregator, - chain: None, - chain_label_hash: 0, - serializer: FeedMessageSerializer::new(), - } - } - - fn heartbeat(&self, ctx: &mut ::Context) { - ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { - // check client heartbeats - if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT { - // stop actor - ctx.stop(); - } else { - ctx.ping(b"") - } - }); - } - - fn handle_cmd(&mut self, cmd: &str, payload: &str, ctx: &mut ::Context) { - match cmd { - "subscribe" => { - // Hash the chain label the frontend wants to subscribe to. - // If it's already subscribed to the same chain, nothing to do. 
- match fnv(payload) { - hash if hash == self.chain_label_hash => return, - hash => self.chain_label_hash = hash, - } - - self.aggregator - .send(Subscribe { - chain: payload.into(), - feed: ctx.address(), - }) - .into_actor(self) - .then(|res, actor, _| { - match res { - Ok(true) => (), - // Chain not found, reset hash - _ => actor.chain_label_hash = 0, - } - async {}.into_actor(actor) - }) - .wait(ctx); - } - "send-finality" => { - self.aggregator.do_send(SendFinality { - chain: payload.into(), - fid: self.fid_chain, - }); - } - "no-more-finality" => { - self.aggregator.do_send(NoMoreFinality { - chain: payload.into(), - fid: self.fid_chain, - }); - } - "ping" => { - self.serializer.push(Pong(payload)); - if let Some(serialized) = self.serializer.finalize() { - ctx.binary(serialized.0); - } - } - _ => (), - } - } -} - -/// Message sent form Chain to the FeedConnector upon successful subscription -#[derive(Message)] -#[rtype(result = "()")] -pub struct Subscribed(pub FeedId, pub Recipient); - -#[derive(Message)] -#[rtype(result = "()")] -pub struct Unsubscribed; - -/// Message sent from Aggregator to FeedConnector upon successful connection -#[derive(Message)] -#[rtype(result = "()")] -pub struct Connected(pub FeedId); - -/// Message sent from either Aggregator or Chain to FeedConnector containing -/// serialized message(s) for the frontend -/// -/// Since Bytes is ARC'ed, this is cheap to clone -#[derive(Message, Clone)] -#[rtype(result = "()")] -pub struct Serialized(pub Bytes); - -impl StreamHandler> for FeedConnector { - fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { - match msg { - Ok(ws::Message::Ping(msg)) => { - self.hb = Instant::now(); - ctx.pong(&msg); - } - Ok(ws::Message::Pong(_)) => self.hb = Instant::now(), - Ok(ws::Message::Text(text)) => { - if let Some(idx) = text.find(':') { - let cmd = &text[..idx]; - let payload = &text[idx + 1..]; - - log::info!("New FEED message: {}", cmd); - - self.handle_cmd(cmd, payload, ctx); - } - } - 
Ok(ws::Message::Close(_)) => ctx.stop(), - Ok(_) => (), - Err(error) => { - log::error!("{:?}", error); - ctx.stop(); - } - } - } -} - -impl Handler for FeedConnector { - type Result = (); - - fn handle(&mut self, msg: Subscribed, _: &mut Self::Context) { - let Subscribed(fid_chain, chain) = msg; - - if let Some(current) = self.chain.take() { - let _ = current.do_send(Unsubscribe(self.fid_chain)); - } - - self.fid_chain = fid_chain; - self.chain = Some(chain); - } -} - -impl Handler for FeedConnector { - type Result = (); - - fn handle(&mut self, _: Unsubscribed, _: &mut Self::Context) { - self.chain = None; - self.chain_label_hash = 0; - } -} - -impl Handler for FeedConnector { - type Result = (); - - fn handle(&mut self, msg: Connected, _: &mut Self::Context) { - let Connected(fid_aggregator) = msg; - - self.fid_aggregator = fid_aggregator; - } -} - -impl Handler for FeedConnector { - type Result = (); - - fn handle(&mut self, msg: Serialized, ctx: &mut Self::Context) { - let Serialized(bytes) = msg; - - ctx.binary(bytes); - } -} diff --git a/backend/core/src/location.rs b/backend/core/src/location.rs deleted file mode 100644 index 96c481b..0000000 --- a/backend/core/src/location.rs +++ /dev/null @@ -1,191 +0,0 @@ -use std::net::Ipv4Addr; -use std::sync::Arc; - -use actix::prelude::*; -use parking_lot::RwLock; -use rustc_hash::FxHashMap; -use serde::Deserialize; - -use crate::chain::{Chain, LocateNode}; -use common::types::{NodeId, NodeLocation}; - -#[derive(Clone)] -pub struct Locator { - client: reqwest::blocking::Client, - cache: Arc>>>>, -} - -pub struct LocatorFactory { - cache: Arc>>>>, -} - -impl LocatorFactory { - pub fn new() -> Self { - let mut cache = FxHashMap::default(); - - // Default entry for localhost - cache.insert( - Ipv4Addr::new(127, 0, 0, 1), - Some(Arc::new(NodeLocation { - latitude: 52.516_6667, - longitude: 13.4, - city: "Berlin".into(), - })), - ); - - LocatorFactory { - cache: Arc::new(RwLock::new(cache)), - } - } - - pub fn 
create(&self) -> Locator { - Locator { - client: reqwest::blocking::Client::new(), - cache: self.cache.clone(), - } - } -} - -impl Actor for Locator { - type Context = SyncContext; -} - -#[derive(Message)] -#[rtype(result = "()")] -pub struct LocateRequest { - pub ip: Ipv4Addr, - pub nid: NodeId, - pub chain: Addr, -} - -#[derive(Deserialize)] -pub struct IPApiLocate { - city: Box, - loc: Box, -} - -impl IPApiLocate { - fn into_node_location(self) -> Option { - let IPApiLocate { city, loc } = self; - - let mut loc = loc.split(',').map(|n| n.parse()); - - let latitude = loc.next()?.ok()?; - let longitude = loc.next()?.ok()?; - - // Guarantee that the iterator has been exhausted - if loc.next().is_some() { - return None; - } - - Some(NodeLocation { - latitude, - longitude, - city, - }) - } -} - -impl Handler for Locator { - type Result = (); - - fn handle(&mut self, msg: LocateRequest, _: &mut Self::Context) { - let LocateRequest { ip, nid, chain } = msg; - - if let Some(item) = self.cache.read().get(&ip) { - if let Some(location) = item { - return chain.do_send(LocateNode { - nid, - location: location.clone(), - }); - } - - return; - } - - let location = match self.iplocate(ip) { - Ok(location) => location, - Err(err) => return log::debug!("GET error for ip location: {:?}", err), - }; - - self.cache.write().insert(ip, location.clone()); - - if let Some(location) = location { - chain.do_send(LocateNode { nid, location }); - } - } -} - -impl Locator { - fn iplocate(&self, ip: Ipv4Addr) -> Result>, reqwest::Error> { - let location = self.iplocate_ipapi_co(ip)?; - - match location { - Some(location) => Ok(Some(location)), - None => self.iplocate_ipinfo_io(ip), - } - } - - fn iplocate_ipapi_co(&self, ip: Ipv4Addr) -> Result>, reqwest::Error> { - let location = self - .query(&format!("https://ipapi.co/{}/json", ip))? 
- .map(Arc::new); - - Ok(location) - } - - fn iplocate_ipinfo_io( - &self, - ip: Ipv4Addr, - ) -> Result>, reqwest::Error> { - let location = self - .query(&format!("https://ipinfo.io/{}/json", ip))? - .and_then(|loc: IPApiLocate| loc.into_node_location().map(Arc::new)); - - Ok(location) - } - - fn query(&self, url: &str) -> Result, reqwest::Error> - where - for<'de> T: Deserialize<'de>, - { - match self.client.get(url).send()?.json::() { - Ok(result) => Ok(Some(result)), - Err(err) => { - log::debug!("JSON error for ip location: {:?}", err); - Ok(None) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn ipapi_locate_to_node_location() { - let ipapi = IPApiLocate { - loc: "12.5,56.25".into(), - city: "Foobar".into(), - }; - - let location = ipapi.into_node_location().unwrap(); - - assert_eq!(location.latitude, 12.5); - assert_eq!(location.longitude, 56.25); - assert_eq!(&*location.city, "Foobar"); - } - - #[test] - fn ipapi_locate_to_node_location_too_many() { - let ipapi = IPApiLocate { - loc: "12.5,56.25,1.0".into(), - city: "Foobar".into(), - }; - - let location = ipapi.into_node_location(); - - assert!(location.is_none()); - } -} diff --git a/backend/core/src/main.rs b/backend/core/src/main.rs deleted file mode 100644 index a0fb567..0000000 --- a/backend/core/src/main.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::collections::HashSet; -use std::iter::FromIterator; - -use actix::prelude::*; -use actix_http::ws::Codec; -use actix_web::{get, middleware, web, App, Error, HttpRequest, HttpResponse, HttpServer}; -use actix_web_actors::ws; -use clap::Clap; -use simple_logger::SimpleLogger; - -mod aggregator; -mod chain; -mod feed; -mod location; -mod node; -mod shard; - -use aggregator::{Aggregator, GetHealth}; -use feed::connector::FeedConnector; -use location::{Locator, LocatorFactory}; -use shard::connector::ShardConnector; - -const VERSION: &str = env!("CARGO_PKG_VERSION"); -const AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); -const NAME: &str 
= "Substrate Telemetry Backend Core"; -const ABOUT: &str = "This is the Telemetry Backend Core that injects and provide the data sent by Substrate/Polkadot nodes"; - -#[derive(Clap, Debug)] -#[clap(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] -struct Opts { - #[clap( - short = 'l', - long = "listen", - default_value = "127.0.0.1:8000", - about = "This is the socket address Telemetry is listening to. This is restricted to localhost (127.0.0.1) by default and should be fine for most use cases. If you are using Telemetry in a container, you likely want to set this to '0.0.0.0:8000'" - )] - socket: std::net::SocketAddr, - #[clap( - required = false, - long = "denylist", - about = "Space delimited list of chains that are not allowed to connect to telemetry. Case sensitive." - )] - denylist: Vec, - #[clap( - arg_enum, - required = false, - long = "log", - default_value = "info", - about = "Log level." - )] - log_level: LogLevel, -} - -#[derive(Clap, Debug, PartialEq)] -enum LogLevel { - Error, - Warn, - Info, - Debug, - Trace, -} - -impl From<&LogLevel> for log::LevelFilter { - fn from(log_level: &LogLevel) -> Self { - match log_level { - LogLevel::Error => log::LevelFilter::Error, - LogLevel::Warn => log::LevelFilter::Warn, - LogLevel::Info => log::LevelFilter::Info, - LogLevel::Debug => log::LevelFilter::Debug, - LogLevel::Trace => log::LevelFilter::Trace, - } - } -} - -#[get("/shard_submit/{chain_hash}")] -async fn shard_route( - req: HttpRequest, - stream: web::Payload, - aggregator: web::Data>, - locator: web::Data>, - path: web::Path>, -) -> Result { - let hash_str = path.into_inner(); - let genesis_hash = hash_str.parse::()?.into(); - - println!("Genesis hash {}", genesis_hash); - - let mut res = ws::handshake(&req)?; - - let aggregator = aggregator.get_ref().clone(); - let locator = locator.get_ref().clone().recipient(); - - Ok(res.streaming(ws::WebsocketContext::with_codec( - ShardConnector::new(aggregator, locator, genesis_hash), - stream, 
- Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit - ))) -} - -/// Entry point for connecting feeds -#[get("/feed")] -async fn feed_route( - req: HttpRequest, - stream: web::Payload, - aggregator: web::Data>, -) -> Result { - let aggregator = aggregator.get_ref().clone(); - - ws::start( - FeedConnector::new(aggregator), - &req, - stream, - ) -} - -/// Entry point for health check monitoring bots -#[get("/health")] -async fn health(aggregator: web::Data>) -> Result { - match aggregator.send(GetHealth).await { - Ok(count) => { - let body = format!("Connected chains: {}", count); - - HttpResponse::Ok().body(body).await - } - Err(error) => { - log::error!("Health check mailbox error: {:?}", error); - - HttpResponse::InternalServerError().await - } - } -} - -/// Telemetry entry point. Listening by default on 127.0.0.1:8000. -/// This can be changed using the `PORT` and `BIND` ENV variables. -#[actix_web::main] -async fn main() -> std::io::Result<()> { - let opts = Opts::parse(); - let log_level = &opts.log_level; - SimpleLogger::new() - .with_level(log_level.into()) - .init() - .expect("Must be able to start a logger"); - - let denylist = HashSet::from_iter(opts.denylist); - let aggregator = Aggregator::new(denylist).start(); - let factory = LocatorFactory::new(); - let locator = SyncArbiter::start(4, move || factory.create()); - log::info!("Starting Telemetry Core version: {}", env!("CARGO_PKG_VERSION")); - HttpServer::new(move || { - App::new() - .wrap(middleware::NormalizePath::default()) - .data(aggregator.clone()) - .data(locator.clone()) - .service(feed_route) - .service(shard_route) - .service(health) - }) - .bind(opts.socket)? 
- .run() - .await -} diff --git a/backend/core/src/shard/connector.rs b/backend/core/src/shard/connector.rs deleted file mode 100644 index e83912a..0000000 --- a/backend/core/src/shard/connector.rs +++ /dev/null @@ -1,192 +0,0 @@ -use std::time::{Duration, Instant}; -use std::collections::BTreeMap; -use std::net::Ipv4Addr; - -use crate::aggregator::{AddNode, Aggregator, NodeSource}; -use crate::chain::{Chain, RemoveNode, UpdateNode}; -use crate::location::LocateRequest; -use actix::prelude::*; -use actix_web_actors::ws::{self, CloseReason}; -use bincode::Options; -use common::types::{BlockHash, NodeId}; -use common::ws::{MultipartHandler, WsMessage}; -use common::shard::{ShardMessage, ShardConnId, BackendMessage}; - -/// How often heartbeat pings are sent -const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); -/// How long before lack of client response causes a timeout -const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); - -pub struct ShardConnector { - /// Client must send ping at least once every 60 seconds (CLIENT_TIMEOUT), - hb: Instant, - /// Aggregator actor address - aggregator: Addr, - /// Genesis hash of the chain this connection will be submitting data for - genesis_hash: BlockHash, - /// Chain address to which this shard connector is delegating messages - chain: Option>, - /// Transient mapping of `ShardConnId` to external IP address. - ips: BTreeMap, - /// Mapping of `ShardConnId` to initialized `NodeId`s. 
- nodes: BTreeMap, - /// Actix address of location services - locator: Recipient, - /// Container for handling continuation messages - multipart: MultipartHandler, -} - -impl Actor for ShardConnector { - type Context = ws::WebsocketContext; - - fn started(&mut self, ctx: &mut Self::Context) { - self.heartbeat(ctx); - } - - fn stopped(&mut self, _: &mut Self::Context) { - if let Some(ref chain) = self.chain { - for nid in self.nodes.values() { - chain.do_send(RemoveNode(*nid)) - } - } - } -} - -impl ShardConnector { - pub fn new( - aggregator: Addr, - locator: Recipient, - genesis_hash: BlockHash, - ) -> Self { - Self { - hb: Instant::now(), - aggregator, - genesis_hash, - chain: None, - ips: BTreeMap::new(), - nodes: BTreeMap::new(), - locator, - multipart: MultipartHandler::default(), - } - } - - fn shard_send(msg: BackendMessage, ctx: &mut ::Context) { - let bytes = bincode::options().serialize(&msg).expect("Must be able to serialize to vec; qed"); - - println!("Sending back {} bytes", bytes.len()); - - ctx.binary(bytes); - } - - fn heartbeat(&self, ctx: &mut ::Context) { - ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { - // check client heartbeats - if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT { - // stop actor - ctx.close(Some(CloseReason { - code: ws::CloseCode::Abnormal, - description: Some("Missed heartbeat".into()), - })); - ctx.stop(); - } - }); - } - - fn handle_message(&mut self, msg: ShardMessage, ctx: &mut ::Context) { - println!("{:?}", msg); - - match msg { - ShardMessage::AddNode { ip, node, sid } => { - if let Some(ip) = ip { - self.ips.insert(sid, ip); - } - - self.aggregator.do_send(AddNode { - node, - genesis_hash: self.genesis_hash, - source: NodeSource::Shard { - sid, - shard_connector: ctx.address(), - } - }); - }, - ShardMessage::UpdateNode { nid, payload } => { - if let Some(chain) = self.chain.as_ref() { - chain.do_send(UpdateNode { - nid, - payload, - }); - } - }, - } - } -} - -#[derive(Message)] -#[rtype(result = "()")] 
-pub struct Initialize { - pub nid: NodeId, - pub sid: ShardConnId, - pub chain: Addr, -} - -impl Handler for ShardConnector { - type Result = (); - - fn handle(&mut self, msg: Initialize, ctx: &mut Self::Context) { - let Initialize { - nid, - sid, - chain, - } = msg; - log::trace!(target: "ShardConnector::Initialize", "Initializing a node, nid={}, on conn_id={}", nid, 0); - - if self.chain.is_none() { - self.chain = Some(chain.clone()); - } - - let be_msg = BackendMessage::Initialize { sid, nid }; - - Self::shard_send(be_msg, ctx); - - // Acquire the node's physical location - if let Some(ip) = self.ips.remove(&sid) { - let _ = self.locator.do_send(LocateRequest { ip, nid, chain }); - } - } -} - -impl StreamHandler> for ShardConnector { - fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { - self.hb = Instant::now(); - - let data = match msg.map(|msg| self.multipart.handle(msg)) { - Ok(WsMessage::Nop) => return, - Ok(WsMessage::Ping(msg)) => { - ctx.pong(&msg); - return; - } - Ok(WsMessage::Data(data)) => data, - Ok(WsMessage::Close(reason)) => { - ctx.close(reason); - ctx.stop(); - return; - } - Err(error) => { - log::error!("{:?}", error); - ctx.stop(); - return; - } - }; - - match bincode::options().deserialize(&data) { - Ok(msg) => self.handle_message(msg, ctx), - // #[cfg(debug)] - Err(err) => { - log::warn!("Failed to parse shard message: {}", err,) - } - // #[cfg(not(debug))] - // Err(_) => (), - } - } -} diff --git a/backend/core/src/shard/mod.rs b/backend/core/src/shard/mod.rs deleted file mode 100644 index 563893c..0000000 --- a/backend/core/src/shard/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod connector; \ No newline at end of file diff --git a/backend/shard/Cargo.toml b/backend/shard/Cargo.toml index ccfc2bf..8335e93 100644 --- a/backend/shard/Cargo.toml +++ b/backend/shard/Cargo.toml @@ -5,22 +5,20 @@ authors = ["Parity Technologies Ltd. 
"] edition = "2018" [dependencies] -actix = "0.11.1" -actix-web = { version = "4.0.0-beta.4", default-features = false } -actix-web-actors = "4.0.0-beta.3" -actix-http = "3.0.0-beta.4" -anyhow = "1.0.40" +anyhow = "1.0.41" bincode = "1.3.3" -bytes = "1.0.1" -clap = "3.0.0-beta.2" -log = "0.4" -rustc-hash = "1.1.0" -serde = { version = "1.0", features = ["derive"] } -serde_json = { version = "1.0", features = ["raw_value"] } -primitive-types = { version = "0.9.0", features = ["serde"] } common = { path = "../common" } +futures = "0.3.15" +hex = "0.4.3" +http = "0.2.4" +log = "0.4.14" +primitive-types = { version = "0.9.0", features = ["serde"] } +serde = { version = "1.0.126", features = ["derive"] } +serde_json = "1.0.64" simple_logger = "1.11.0" -soketto = "0.4.2" -tokio = { version = "1", features = ["full"] } +soketto = "0.6.0" +structopt = "0.3.21" +thiserror = "1.0.25" +tokio = { version = "1.7.0", features = ["full"] } tokio-util = { version = "0.6", features = ["compat"] } -tokio-stream = { version = "0.1", features = ["net"] } +warp = "0.3.1" diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index 8a7cd42..a2461a9 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -1,239 +1,213 @@ -use std::net::Ipv4Addr; -use std::fmt; -// use std::sync::mpsc::{self, Sender}; +use common::{internal_messages::{self, LocalId}, node}; +use std::sync::Arc; +use std::sync::atomic::AtomicU64; +use futures::{channel::mpsc, future}; +use futures::{ Sink, SinkExt, StreamExt }; +use std::collections::{ HashMap, HashSet }; +use crate::connection::{ create_ws_connection, Message }; -use actix::prelude::*; -use actix_http::http::Uri; -use bincode::Options; -use rustc_hash::FxHashMap; -use common::util::{DenseMap}; -use common::types::{ConnId, NodeDetails, NodeId, BlockHash}; -use common::node::Payload; -use common::shard::{ShardConnId, ShardMessage, BackendMessage}; -use common::json; -use soketto::handshake::{Client, 
ServerResponse}; -use crate::node::{NodeConnector, Initialize}; -use tokio::net::TcpStream; -use tokio::sync::mpsc::{self, UnboundedSender}; -use tokio_util::compat::{Compat, TokioAsyncReadCompatExt}; +/// A unique Id is assigned per websocket connection (or more accurately, +/// per thing-that-subscribes-to-the-aggregator). That connection might send +/// data on behalf of multiple chains, so this ID is local to the aggregator, +/// and a unique ID is assigned per batch of data too ([`internal_messages::LocalId`]). +type ConnId = u64; -type WsSender = soketto::Sender>; -type WsReceiver = soketto::Receiver>; - -#[derive(Default)] -pub struct Aggregator { - url: Uri, - chains: FxHashMap>, +/// Incoming messages are either from websocket connections or +/// from the telemetry core. This can be private since the only +/// external messages are via subscriptions that take +/// [`FromWebsocket`] instances. +#[derive(Clone,Debug)] +enum ToAggregator { + DisconnectedFromTelemetryCore, + ConnectedToTelemetryCore, + FromWebsocket(ConnId, FromWebsocket), + FromTelemetryCore(internal_messages::FromTelemetryCore) } -impl Actor for Aggregator { - type Context = Context; +/// An incoming socket connection can provide these messages. +/// Until a node has been Added via [`FromWebsocket::Add`], +/// messages from it will be ignored. +#[derive(Clone,Debug)] +pub enum FromWebsocket { + /// Tell the aggregator about a new node. + Add { + message_id: node::NodeMessageId, + ip: Option, + node: common::types::NodeDetails, + /// When a message is sent back up this channel, we terminate + /// the websocket connection and force the node to reconnect + /// so that it sends its system info again incase the telemetry + /// core has restarted. + close_connection: mpsc::Sender<()> + }, + /// Update/pass through details about a node. 
+ Update { + message_id: node::NodeMessageId, + payload: node::Payload + } +} + +pub type FromAggregator = internal_messages::FromShardAggregator; + +#[derive(Clone)] +pub struct Aggregator(Arc); + +struct AggregatorInternal { + /// Nodes that connect are each assigned a unique connection ID. Nodes + /// can send messages on behalf of more than one chain, and so this ID is + /// only really used inside the Aggregator in conjunction with a per-message + /// ID. + conn_id: AtomicU64, + /// Send messages to the aggregator from websockets via this. This is + /// stored here so that anybody holding an `Aggregator` handle can + /// make use of it. + tx_to_aggregator: mpsc::Sender } impl Aggregator { - pub fn new(url: Uri) -> Self { - Aggregator { - url, - chains: Default::default(), - } - } -} + /// Spawn a new Aggregator. This connects to the telemetry backend + pub async fn spawn(telemetry_uri: http::Uri) -> anyhow::Result { + let (tx_to_aggregator, rx_from_external) = mpsc::channel(10); -pub struct Chain { - /// Base URL of Backend Core - url: Uri, - /// Genesis hash of the chain, required to construct the URL to connect to the Backend Core - genesis_hash: BlockHash, - /// Dense mapping of SharedConnId -> Addr + multiplexing ConnId sent from the node. 
- nodes: DenseMap<(Addr, ConnId)>, -} - -impl Chain { - pub fn new(url: Uri, genesis_hash: BlockHash) -> Self { - Chain { - url, - genesis_hash, - nodes: DenseMap::new(), - } - } - - pub fn spawn(mut self) -> UnboundedSender { - let (tx_ret, mut rx) = mpsc::unbounded_channel(); - - let tx = tx_ret.clone(); - - tokio::task::spawn(async move { - let mut sender = match self.connect(tx.clone()).await { - Ok(pair) => pair, - Err(err) => { - log::error!("Failed to connect to Backend Core: {:?}", err); - return; - } - }; - - // tokio::task::spawn(async move { - - // }); - - loop { - match rx.recv().await { - Some(ChainMessage::AddNode(msg)) => { - println!("Add node {:?}", msg); - - let AddNode { node, ip, conn_id, node_connector, .. } = msg; - let sid = self.nodes.add((node_connector, conn_id)) as ShardConnId; - - let bytes = bincode::options().serialize(&ShardMessage::AddNode { - ip, - node, - sid, - }).unwrap(); - - println!("Sending {} bytes", bytes.len()); - - let _ = sender.send_binary_mut(bytes).await; - let _ = sender.flush().await; - }, - Some(ChainMessage::UpdateNode(nid, payload)) => { - let msg = ShardMessage::UpdateNode { - nid, - payload, - }; - - let bytes = bincode::options().serialize(&msg).unwrap(); - - println!("Sending update: {} bytes", bytes.len()); - - let _ = sender.send_binary_mut(bytes).await; - let _ = sender.flush().await; - }, - Some(ChainMessage::Backend(BackendMessage::Initialize { sid, nid })) => { - if let Some((addr, conn_id)) = self.nodes.get(sid as usize) { - addr.do_send(Initialize { - nid, - conn_id: *conn_id, - chain: tx.clone(), - }) - } - }, - Some(ChainMessage::Backend(BackendMessage::Mute { sid, reason })) => { - // TODO - }, - None => (), - } - } - // let mut client = Client::new(socket.compat(), host, &path); - - // let (mut sender, mut receiver) = match client.handshake().await? { - // ServerResponse::Accepted { .. 
} => client.into_builder().finish(), - // ServerResponse::Redirect { status_code, location } => unimplemented!("follow location URL"), - // ServerResponse::Rejected { status_code } => unimplemented!("handle failure") - // }; + // Map responses from our connection into messages that will be sent to the aggregator: + let tx_from_connection = tx_to_aggregator.clone().with(|msg| { + future::ok::<_,mpsc::SendError>(match msg { + Message::Connected => ToAggregator::ConnectedToTelemetryCore, + Message::Disconnected => ToAggregator::DisconnectedFromTelemetryCore, + Message::Data(data) => ToAggregator::FromTelemetryCore(data) + }) }); - tx_ret + // Establish a resiliant connection to the core (this retries as needed): + let tx_to_telemetry_core = create_ws_connection( + tx_from_connection, + telemetry_uri + ).await; + + // Handle any incoming messages in our handler loop: + tokio::spawn(Aggregator::handle_messages(rx_from_external, tx_to_telemetry_core)); + + // Return a handle to our aggregator: + Ok(Aggregator(Arc::new(AggregatorInternal { + conn_id: AtomicU64::new(1), + tx_to_aggregator, + }))) } - pub async fn connect(&self, tx: UnboundedSender) -> anyhow::Result { - let host = self.url.host().unwrap_or("127.0.0.1"); - let port = self.url.port_u16().unwrap_or(8000); - let json_hash: json::Hash = self.genesis_hash.into(); - let path = format!("{}{}", self.url.path(), json_hash); + // This is spawned into a separate task and handles any messages coming + // in to the aggregator. If nobody is tolding the tx side of the channel + // any more, this task will gracefully end. 
+ async fn handle_messages(mut rx_from_external: mpsc::Receiver, mut tx_to_telemetry_core: mpsc::Sender) { + use internal_messages::{ FromShardAggregator, FromTelemetryCore }; - let socket = TcpStream::connect((host, port)).await?; + let mut next_local_id: LocalId = 1; - socket.set_nodelay(true).unwrap(); + // Just as an optimisation, we can keep track of whether we're connected to the backend + // or not, and ignore incoming messages while we aren't. + let mut connected_to_telemetry_core = false; - let mut client = Client::new(socket.compat(), host, &path); + // A list of close channels for the current connections. Send an empty tuple to + // these to ask the connections to be closed. + let mut close_connections: Vec> = vec![]; - let (sender, receiver) = match client.handshake().await? { - ServerResponse::Accepted { .. } => client.into_builder().finish(), - ServerResponse::Redirect { status_code, .. } | - ServerResponse::Rejected { status_code } => { - return Err(anyhow::anyhow!("Failed to connect, status code: {}", status_code)); - } - }; + // Maintain mappings from the connection ID and node message ID to the "local ID" which we + // broadcast to the telemetry core. + let mut to_local_id: HashMap<(ConnId, node::NodeMessageId), LocalId> = HashMap::new(); + let mut from_local_id: HashMap = HashMap::new(); - async fn read(tx: UnboundedSender, mut receiver: WsReceiver) -> anyhow::Result<()> { - let mut data = Vec::with_capacity(128); + // Any messages coming from nodes that have been muted are ignored: + let mut muted: HashSet = HashSet::new(); - loop { - data.clear(); + // Now, loop and receive messages to handle. + while let Some(msg) = rx_from_external.next().await { + match msg { + ToAggregator::ConnectedToTelemetryCore => { + // Take hold of the connection closers and run them all. 
+ let closers = close_connections; - receiver.receive_data(&mut data).await?; - - println!("Received {} bytes from Backend Core", data.len()); - - match bincode::options().deserialize(&data) { - Ok(msg) => tx.send(ChainMessage::Backend(msg))?, - Err(err) => { - log::error!("Failed to read message from Backend Core: {:?}", err); + for mut closer in closers { + // if this fails, it probably means the connection has died already anyway. + let _ = closer.send(()); } - } + // We've told everything to disconnect. Now, reset our state: + close_connections = vec![]; + to_local_id = HashMap::new(); + from_local_id = HashMap::new(); + muted = HashSet::new(); + connected_to_telemetry_core = true; + log::info!("Connected to telemetry core"); + }, + ToAggregator::DisconnectedFromTelemetryCore => { + connected_to_telemetry_core = false; + log::info!("Disconnected from telemetry core"); + }, + ToAggregator::FromWebsocket(conn_id, FromWebsocket::Add { message_id, ip, node, close_connection }) => { + // Keep the close_connection channel incase we need it: + close_connections.push(close_connection); + + // Don't bother doing anything else if we're disconnected, since we'll force the + // ndoe to reconnect anyway when the backend does: + if !connected_to_telemetry_core { continue } + + // Generate a new "local ID" for messages from this connection: + let local_id = next_local_id; + next_local_id += 1; + + // Store mapping to/from local_id to conn/message ID paid: + to_local_id.insert((conn_id, message_id), local_id); + from_local_id.insert(local_id, (conn_id, message_id)); + + // Send the message to the telemetry core with this local ID: + let _ = tx_to_telemetry_core.send(FromShardAggregator::AddNode { + ip, + node, + local_id + }).await; + }, + ToAggregator::FromWebsocket(conn_id, FromWebsocket::Update { message_id, payload }) => { + // Ignore incoming messages if we're not connected to the backend: + if !connected_to_telemetry_core { continue } + + // Get the local ID, ignoring 
the message if none match: + let local_id = match to_local_id.get(&(conn_id, message_id)) { + Some(id) => *id, + None => continue + }; + + // ignore the message if this node has been muted: + if muted.contains(&local_id) { + continue; + } + + // Send the message to the telemetry core with this local ID: + let _ = tx_to_telemetry_core.send(FromShardAggregator::UpdateNode { + local_id, + payload + }).await; + }, + ToAggregator::FromTelemetryCore(FromTelemetryCore::Mute { local_id }) => { + // Ignore incoming messages if we're not connected to the backend: + if !connected_to_telemetry_core { continue } + + // Mute the local ID we've been told to: + muted.insert(local_id); + } } } - - tokio::task::spawn(read(tx, receiver)); - - Ok(sender) } -} -impl Actor for Chain { - type Context = Context; -} + /// Return a sink that a node can send messages into to be handled by the aggregator. + pub fn subscribe_node(&self) -> impl Sink + Unpin { + // Assign a unique aggregator-local ID to each connection that subscribes, and pass + // that along with every message to the aggregator loop: + let conn_id: ConnId = self.0.conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let tx_to_aggregator = self.0.tx_to_aggregator.clone(); -#[derive(Message)] -#[rtype(result = "()")] -pub struct AddNode { - pub ip: Option, - pub genesis_hash: BlockHash, - pub node: NodeDetails, - pub conn_id: ConnId, - pub node_connector: Addr, -} - -#[derive(Debug)] -pub enum ChainMessage { - AddNode(AddNode), - UpdateNode(NodeId, Payload), - Backend(BackendMessage), -} - -impl fmt::Debug for AddNode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("AddNode") + // Calling `send` on this Sink requires Unpin. 
There may be a nicer way than this, + // but pinning by boxing is the easy solution for now: + Box::pin(tx_to_aggregator.with(move |msg| async move { + Ok(ToAggregator::FromWebsocket(conn_id, msg)) + })) } -} - -impl Handler for Aggregator { - type Result = (); - - fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { - let AddNode { genesis_hash, .. } = msg; - - let url = &self.url; - let chain = self - .chains - .entry(genesis_hash) - .or_insert_with(move || Chain::new(url.clone(), genesis_hash).spawn()); - - if let Err(err) = chain.send(ChainMessage::AddNode(msg)) { - let msg = err.0; - log::error!("Failed to add node to chain, shutting down chain"); - self.chains.remove(&genesis_hash); - // TODO: Send a message back to clean up node connections - } - } -} - -impl Handler for Chain { - type Result = (); - - fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) { - let AddNode { ip, node_connector, .. } = msg; - - println!("Node connected to {}: {:?}", self.genesis_hash, ip); - } -} +} \ No newline at end of file diff --git a/backend/shard/src/connection.rs b/backend/shard/src/connection.rs new file mode 100644 index 0000000..89df934 --- /dev/null +++ b/backend/shard/src/connection.rs @@ -0,0 +1,166 @@ +use futures::channel::{ mpsc }; +use futures::{ Sink, SinkExt, StreamExt }; +use tokio::net::TcpStream; +use tokio_util::compat::{ TokioAsyncReadCompatExt }; + +#[derive(Clone,Debug)] +pub enum Message { + Connected, + Disconnected, + Data(Out) +} + +/// Connect to a websocket server, retrying the connection if we're disconnected. +/// - Sends messages when disconnected, reconnected or data received from the connection. +/// - Returns a channel that allows you to send messages to the connection. +/// - Messages all encoded/decoded from bincode. 
+pub async fn create_ws_connection(mut tx_to_external: S, telemetry_uri: http::Uri) -> mpsc::Sender +where + S: Sink, Error = E> + Unpin + Send + Clone + 'static, + E: std::fmt::Debug + std::fmt::Display + Send + 'static, + In: serde::Serialize + Send + 'static, + Out: serde::de::DeserializeOwned + Send + 'static +{ + // Set up a proxy channel to relay messages to the telemetry core, and return one end of it. + // Once a connection to the backend is established, we pass messages along to it. If the connection + // fails, we + let (tx_to_connection_proxy, mut rx_from_external_proxy) = mpsc::channel(10); + tokio::spawn(async move { + let mut connected = false; + + loop { + // Throw away any pending messages from the incoming channel so that it + // doesn't get blocked up while we're looping and waiting for a reconnection. + while let Ok(Some(_)) = rx_from_external_proxy.try_next() {} + + // The connection will pass messages back to this. + let tx_from_connection = tx_to_external.clone(); + + // Attempt to reconnect. + match create_ws_connection_no_retry(tx_from_connection, telemetry_uri.clone()).await { + Ok(mut tx_to_connection) => { + connected = true; + + // Inform the handler loop that we've reconnected. + tx_to_external.send(Message::Connected) + .await + .expect("must be able to send reconnect msg"); + + // Start forwarding messages on to the backend. + while let Some(msg) = rx_from_external_proxy.next().await { + if let Err(e) = tx_to_connection.send(msg).await { + // Issue forwarding a message to the telemetry core? + // Give up and try to reconnect on the next loop iteration. + log::error!("Error sending message to websocker server (will reconnect): {}", e); + break; + } + } + }, + Err(e) => { + // Issue connecting? Wait and try again on the next loop iteration. 
+ log::error!("Error connecting to websocker server (will reconnect): {}", e); + } + }; + + // Tell the aggregator that we're disconnected so that, if we like, we can discard + // messages without doing any futher processing on them. + if connected { + connected = false; + let _ = tx_to_external.send(Message::Disconnected).await; + } + + // Wait a little before trying to reconnect. + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + }); + + tx_to_connection_proxy +} + +/// This spawns a connection to a websocket server, serializing/deserialziing +/// from bincode as messages are sent or received. +async fn create_ws_connection_no_retry(mut tx_to_external: S, telemetry_uri: http::Uri) -> anyhow::Result> +where + S: Sink, Error = E> + Unpin + Send + 'static, + E: std::fmt::Debug + std::fmt::Display, + In: serde::Serialize + Send + 'static, + Out: serde::de::DeserializeOwned + Send + 'static +{ + use soketto::handshake::{Client, ServerResponse}; + use bincode::Options; + + let host = telemetry_uri.host().unwrap_or("127.0.0.1"); + let port = telemetry_uri.port_u16().unwrap_or(8000); + let path = telemetry_uri.path(); + + let socket = TcpStream::connect((host, port)).await?; + socket.set_nodelay(true).unwrap(); + + // Open a websocket connection with the relemetry core: + let mut client = Client::new(socket.compat(), host, &path); + let (mut ws_to_connection, mut ws_from_connection) = match client.handshake().await? { + ServerResponse::Accepted { .. } => client.into_builder().finish(), + ServerResponse::Redirect { status_code, .. 
} | + ServerResponse::Rejected { status_code } => { + return Err(anyhow::anyhow!("Failed to connect to {}{}, status code: {}", host, path, status_code)); + } + }; + + // This task reads data sent from the telemetry core and + // forwards it on to our aggregator loop: + tokio::spawn(async move { + let mut data = Vec::with_capacity(128); + loop { + // Clear the buffer and wait for the next message to arrive: + data.clear(); + if let Err(e) = ws_from_connection.receive_data(&mut data).await { + // Couldn't receive data may mean all senders are gone, so log + // the error and shut this down: + log::error!("Shutting down websocket connection: Failed to receive data: {}", e); + return; + } + + // Attempt to deserialize, and send to our handler loop: + match bincode::options().deserialize(&data) { + Ok(msg) => { + if let Err(e) = tx_to_external.send(Message::Data(msg)).await { + // Failure to send to our loop likely means it's hit an + // issue and shut down, so bail on this loop as well: + log::error!("Shutting down websocket connection: Failed to send data out: {}", e); + return; + } + }, + Err(err) => { + // Log the error but otherwise ignore it and keep running: + log::warn!("Failed to decode message from Backend Core: {:?}", err); + } + } + } + }); + + // This task receives messages from the aggregator, + // encodes them and sends them to the telemetry core: + let (tx_to_connection, mut rx_from_aggregator) = mpsc::channel(10); + tokio::spawn(async move { + while let Some(msg) = rx_from_aggregator.next().await { + let bytes = bincode::options() + .serialize(&msg) + .expect("must be able to serialize msg"); + + // Any errors sending the message leads to this task ending, which should cascade to + // the entire connection being ended. 
+ if let Err(e) = ws_to_connection.send_binary_mut(bytes).await { + log::error!("Shutting down websocket connection: Failed to send data in: {}", e); + return; + } + if let Err(e) = ws_to_connection.flush().await { + log::error!("Shutting down websocket connection: Failed to flush data: {}", e); + return; + } + } + }); + + // We return a channel that you can send messages down in order to have + // them sent to the telemetry core: + Ok(tx_to_connection) +} \ No newline at end of file diff --git a/backend/shard/src/main.rs b/backend/shard/src/main.rs index 421e024..e4e8d8a 100644 --- a/backend/shard/src/main.rs +++ b/backend/shard/src/main.rs @@ -1,125 +1,207 @@ -use std::net::Ipv4Addr; - -use actix::prelude::*; -use actix_http::ws::Codec; -use actix_http::http::Uri; -use actix_web::{get, middleware, web, App, Error, HttpRequest, HttpResponse, HttpServer}; -use actix_web_actors::ws; -use clap::Clap; -use simple_logger::SimpleLogger; - mod aggregator; -mod node; +mod connection; -use aggregator::Aggregator; -use node::NodeConnector; +use std::net::SocketAddr; + +use structopt::StructOpt; +use http::Uri; +use simple_logger::SimpleLogger; +use futures::{StreamExt, SinkExt, channel::mpsc}; +use warp::Filter; +use warp::filters::ws; +use common::{json, node, log_level::LogLevel}; +use aggregator::{ Aggregator, FromWebsocket }; const VERSION: &str = env!("CARGO_PKG_VERSION"); const AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); const NAME: &str = "Substrate Telemetry Backend Shard"; -const ABOUT: &str = "This is the Telemetry Backend Shard that forwards the data sent by Substrate/Polkadot nodes to the Backend Core"; +const ABOUT: &str = "This is the Telemetry Backend Shard that forwards the \ + data sent by Substrate/Polkadot nodes to the Backend Core"; -#[derive(Clap, Debug)] -#[clap(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] +#[derive(StructOpt, Debug)] +#[structopt(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] struct Opts { - 
#[clap( - short = 'l', + /// This is the socket address that this shard is listening to. This is restricted to + /// localhost (127.0.0.1) by default and should be fine for most use cases. If + /// you are using Telemetry in a container, you likely want to set this to '0.0.0.0:8000' + #[structopt( + short = "l", long = "listen", default_value = "127.0.0.1:8001", - about = "This is the socket address Telemetry is listening to. This is restricted to localhost (127.0.0.1) by default and should be fine for most use cases. If you are using Telemetry in a container, you likely want to set this to '0.0.0.0:8000'" )] socket: std::net::SocketAddr, - #[clap( - arg_enum, + /// The desired log level; one of 'error', 'warn', 'info', 'debug' or 'trace', where + /// 'error' only logs errors and 'trace' logs everything. + #[structopt( required = false, long = "log", default_value = "info", about = "Log level." )] log_level: LogLevel, - #[clap( - short = 'c', + /// Url to the Backend Core endpoint accepting shard connections + #[structopt( + short = "c", long = "core", default_value = "ws://127.0.0.1:8000/shard_submit/", - about = "Url to the Backend Core endpoint accepting shard connections" )] core_url: Uri, } -#[derive(Clap, Debug, PartialEq)] -enum LogLevel { - Error, - Warn, - Info, - Debug, - Trace, -} - -impl From<&LogLevel> for log::LevelFilter { - fn from(log_level: &LogLevel) -> Self { - match log_level { - LogLevel::Error => log::LevelFilter::Error, - LogLevel::Warn => log::LevelFilter::Warn, - LogLevel::Info => log::LevelFilter::Info, - LogLevel::Debug => log::LevelFilter::Debug, - LogLevel::Trace => log::LevelFilter::Trace, - } - } -} - -/// Entry point for connecting nodes -#[get("/submit")] -async fn node_route( - req: HttpRequest, - stream: web::Payload, - aggregator: web::Data>, -) -> Result { - let ip = req - .connection_info() - .realip_remote_addr() - .and_then(|mut addr| { - if let Some(port_idx) = addr.find(':') { - addr = &addr[..port_idx]; - } - 
addr.parse::().ok() - }); - - let mut res = ws::handshake(&req)?; - let aggregator = aggregator.get_ref().clone(); - - Ok(res.streaming(ws::WebsocketContext::with_codec( - NodeConnector::new(aggregator, ip), - stream, - Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit - ))) -} - -/// Telemetry entry point. Listening by default on 127.0.0.1:8000. -/// This can be changed using the `PORT` and `BIND` ENV variables. -#[actix_web::main] -async fn main() -> std::io::Result<()> { - let opts = Opts::parse(); +#[tokio::main] +async fn main() { + let opts = Opts::from_args(); let log_level = &opts.log_level; + SimpleLogger::new() .with_level(log_level.into()) .init() .expect("Must be able to start a logger"); - println!("URL? {:?} {:?}", opts.core_url.host(), opts.core_url.port_u16()); - - let aggregator = Aggregator::new(opts.core_url).start(); - log::info!( "Starting Telemetry Shard version: {}", - env!("CARGO_PKG_VERSION") + VERSION ); - HttpServer::new(move || { - App::new() - .wrap(middleware::NormalizePath::default()) - .data(aggregator.clone()) - .service(node_route) - }) - .bind(opts.socket)? - .run() - .await + + if let Err(e) = start_server(opts).await { + log::error!("Error starting server: {}", e); + } } + +/// Declare our routes and start the server. +async fn start_server(opts: Opts) -> anyhow::Result<()> { + + let aggregator = Aggregator::spawn(opts.core_url).await?; + + // Handle requests to /health by returning OK. + let health_route = + warp::path("health") + .map(|| "OK"); + + // Handle websocket requests to /submit. + let ws_route = + warp::path("submit") + .and(warp::ws()) + .and(warp::filters::addr::remote()) + .map(move |ws: ws::Ws, addr: Option| { + // Send messages from the websocket connection to this sink + // to have them pass to the aggregator. 
+ let tx_to_aggregator = aggregator.subscribe_node(); + log::info!("Opening /submit connection from {:?}", addr); + ws.on_upgrade(move |websocket| async move { + handle_websocket_connection(websocket, tx_to_aggregator, addr).await; + log::info!("Closing /submit connection from {:?}", addr); + }) + }); + + // Merge the routes and start our server: + let routes = ws_route.or(health_route); + warp::serve(routes).run(opts.socket).await; + Ok(()) +} + +/// This takes care of handling messages from an established socket connection. +async fn handle_websocket_connection(websocket: ws::WebSocket, mut tx_to_aggregator: S, addr: Option) + where S: futures::Sink + Unpin +{ + let mut websocket = websocket.fuse(); + + // This could be a oneshot channel, but it's useful to be able to clone + // messages, and we can't clone oneshot channel senders. + let (close_connection_tx, mut close_connection_rx) = mpsc::channel(0); + + // First, we wait until we receive a SystemConnected message. + // Until this turns up, we ignore other messages. We could buffer + // a few quite easily if we liked. + while let Some(msg) = websocket.next().await { + let node_message = match deserialize_ws_message(msg) { + Ok(Some(msg)) => msg, + Ok(None) => continue, + Err(e) => { log::error!("{}", e); break } + }; + + let message_id = node_message.id(); + let payload = node_message.into_payload(); + + if let node::Payload::SystemConnected(info) = payload { + let _ = tx_to_aggregator.send(FromWebsocket::Add { + message_id, + ip: addr.map(|a| a.ip()), + node: info.node, + close_connection: close_connection_tx, + }).await; + break; + } + } + + // Now, the node has been added, so we forward messages along as updates. + // We keep an eye on the close_connection channel; if that resolves, then + // end this loop and let the connection close gracefully. + loop { + futures::select_biased! 
{ + // The close channel has fired, so end the loop: + _ = close_connection_rx.next() => { + break + }, + // A message was received; handle it: + msg = websocket.next() => { + let msg = match msg { + Some(msg) => msg, + None => break + }; + + let node_message = match deserialize_ws_message(msg) { + Ok(Some(msg)) => msg, + Ok(None) => continue, + Err(e) => { log::error!("{}", e); break } + }; + + let message_id = node_message.id(); + let payload = node_message.into_payload(); + + if let Err(e) = tx_to_aggregator.send(FromWebsocket::Update { message_id, payload } ).await { + log::error!("Failed to send node message to aggregator: {}", e); + continue; + } + } + } + } + + // loops ended; attempt to close the connection gracefully: + let _ = websocket.close().await; +} + +/// Deserialize an incoming websocket message, returning an error if something +/// fatal went wrong, [`Some`] message if all went well, and [`None`] if a non-fatal +/// issue was encountered and the message should simply be ignored. +fn deserialize_ws_message(msg: Result) -> anyhow::Result> { + // If we see any errors, log them and end our loop: + let msg = match msg { + Err(e) => { + return Err(anyhow::anyhow!("Error in node websocket connection: {}", e)); + }, + Ok(msg) => msg + }; + + // If the message isn't something we want to handle, just ignore it. + // This includes system messages like "pings" and such, so don't log anything. 
+ if !msg.is_binary() && !msg.is_text() { + return Ok(None); + } + + // Deserialize from JSON, warning if deserialization fails: + let bytes = msg.as_bytes(); + let node_message: json::NodeMessage = match serde_json::from_slice(bytes) { + Ok(node_message) => node_message, + Err(_e) => { + // let bytes: &[u8] = bytes.get(..512).unwrap_or_else(|| &bytes); + // let msg_start = std::str::from_utf8(bytes).unwrap_or_else(|_| "INVALID UTF8"); + // log::warn!("Failed to parse node message ({}): {}", msg_start, e); + return Ok(None) + } + }; + + // Pull relevant details from the message: + let node_message: node::NodeMessage = node_message.into(); + Ok(Some(node_message)) +} \ No newline at end of file diff --git a/backend/shard/src/node.rs b/backend/shard/src/node.rs deleted file mode 100644 index f6f3474..0000000 --- a/backend/shard/src/node.rs +++ /dev/null @@ -1,203 +0,0 @@ -use std::collections::BTreeMap; -use std::net::Ipv4Addr; -use std::time::{Duration, Instant}; - -use crate::aggregator::{AddNode, Aggregator, ChainMessage}; -// use crate::chain::{Chain, RemoveNode, UpdateNode}; -use actix::prelude::*; -use actix_web_actors::ws::{self, CloseReason}; -use common::node::{NodeMessage, Payload}; -use common::types::{ConnId, NodeId}; -use common::json; -use common::ws::{MultipartHandler, WsMessage}; -use tokio::sync::mpsc::UnboundedSender; - -/// How often heartbeat pings are sent -const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20); -/// How long before lack of client response causes a timeout -const CLIENT_TIMEOUT: Duration = Duration::from_secs(60); - -pub struct NodeConnector { - /// Multiplexing connections by id - multiplex: BTreeMap, - /// Client must send ping at least once every 60 seconds (CLIENT_TIMEOUT), - hb: Instant, - /// Aggregator actor address - aggregator: Addr, - /// IP address of the node this connector is responsible for - ip: Option, - /// Helper for handling continuation messages - multipart: MultipartHandler, -} - -enum ConnMultiplex { - 
Connected { - /// Id of the node this multiplex connector is responsible for handling - nid: NodeId, - /// Chain address to which this multiplex connector is delegating messages - chain: UnboundedSender, - }, - Waiting { - /// Backlog of messages to be sent once we get a recipient handle to the chain - backlog: Vec, - }, -} - -impl Default for ConnMultiplex { - fn default() -> Self { - ConnMultiplex::Waiting { - backlog: Vec::new(), - } - } -} - -impl Actor for NodeConnector { - type Context = ws::WebsocketContext; - - fn started(&mut self, ctx: &mut Self::Context) { - self.heartbeat(ctx); - } - - fn stopped(&mut self, _: &mut Self::Context) { - // for mx in self.multiplex.values() { - // if let ConnMultiplex::Connected { chain, nid } = mx { - // chain.do_send(RemoveNode(*nid)); - // } - // } - } -} - -impl NodeConnector { - pub fn new(aggregator: Addr, ip: Option) -> Self { - Self { - multiplex: BTreeMap::new(), - hb: Instant::now(), - aggregator, - ip, - multipart: MultipartHandler::default(), - } - } - - fn heartbeat(&self, ctx: &mut ::Context) { - ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| { - // check client heartbeats - if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT { - // stop actor - ctx.close(Some(CloseReason { - code: ws::CloseCode::Abnormal, - description: Some("Missed heartbeat".into()), - })); - ctx.stop(); - } - }); - } - - fn handle_message( - &mut self, - msg: json::NodeMessage, - ctx: &mut ::Context, - ) { - let msg: NodeMessage = msg.into(); - let conn_id = msg.id(); - let payload = msg.into(); - - match self.multiplex.entry(conn_id).or_default() { - ConnMultiplex::Connected { nid, chain } => { - // TODO: error handle - let _ = chain.send(ChainMessage::UpdateNode(*nid, payload)); - } - ConnMultiplex::Waiting { backlog } => { - if let Payload::SystemConnected(connected) = payload { - println!("Node connected {:?}", connected.node); - self.aggregator.do_send(AddNode { - genesis_hash: connected.genesis_hash, - ip: self.ip, - node: 
connected.node, - conn_id, - node_connector: ctx.address(), - }); - } else { - if backlog.len() >= 10 { - backlog.remove(0); - } - - backlog.push(payload); - } - } - } - } -} - -#[derive(Message)] -#[rtype(result = "()")] -pub struct Initialize { - pub nid: NodeId, - pub conn_id: ConnId, - pub chain: UnboundedSender, -} - -impl Handler for NodeConnector { - type Result = (); - - fn handle(&mut self, msg: Initialize, _: &mut Self::Context) { - let Initialize { - nid, - conn_id, - chain, - } = msg; - log::trace!(target: "NodeConnector::Initialize", "Initializing a node, nid={}, on conn_id={}", nid, conn_id); - let mx = self.multiplex.entry(conn_id).or_default(); - - if let ConnMultiplex::Waiting { backlog } = mx { - for payload in backlog.drain(..) { - // TODO: error handle. - let _ = chain.send(ChainMessage::UpdateNode(nid, payload)); - } - - *mx = ConnMultiplex::Connected { - nid, - chain, - }; - }; - } -} - -impl StreamHandler> for NodeConnector { - fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { - self.hb = Instant::now(); - - let data = match msg.map(|msg| self.multipart.handle(msg)) { - Ok(WsMessage::Nop) => return, - Ok(WsMessage::Ping(msg)) => { - ctx.pong(&msg); - return; - } - Ok(WsMessage::Data(data)) => data, - Ok(WsMessage::Close(reason)) => { - ctx.close(reason); - ctx.stop(); - return; - } - Err(error) => { - log::error!("{:?}", error); - ctx.stop(); - return; - } - }; - - match serde_json::from_slice(&data) { - Ok(msg) => self.handle_message(msg, ctx), - #[cfg(debug)] - Err(err) => { - let data: &[u8] = data.get(..512).unwrap_or_else(|| &data); - log::warn!( - "Failed to parse node message: {} {}", - err, - std::str::from_utf8(data).unwrap_or_else(|_| "INVALID UTF8") - ) - } - #[cfg(not(debug))] - Err(_) => (), - } - } -} diff --git a/backend/telemetry/Cargo.toml b/backend/telemetry/Cargo.toml new file mode 100644 index 0000000..08f5a14 --- /dev/null +++ b/backend/telemetry/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "telemetry" 
+version = "0.1.0" +authors = ["Parity Technologies Ltd. "] +edition = "2018" + +[dependencies] +anyhow = "1.0.41" +bincode = "1.3.3" +common = { path = "../common" } +futures = "0.3.15" +hex = "0.4.3" +http = "0.2.4" +log = "0.4.14" +primitive-types = { version = "0.9.0", features = ["serde"] } +serde = { version = "1.0.126", features = ["derive"] } +serde_json = "1.0.64" +simple_logger = "1.11.0" +soketto = "0.6.0" +structopt = "0.3.21" +thiserror = "1.0.25" +tokio = { version = "1.7.0", features = ["full"] } +tokio-util = { version = "0.6", features = ["compat"] } +warp = "0.3.1" diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs new file mode 100644 index 0000000..ad86fc0 --- /dev/null +++ b/backend/telemetry/src/aggregator.rs @@ -0,0 +1,202 @@ +use common::{internal_messages::{self, LocalId}, node}; +use std::{str::FromStr, sync::Arc}; +use std::sync::atomic::AtomicU64; +use futures::channel::{ mpsc, oneshot }; +use futures::{ Sink, SinkExt, StreamExt }; +use tokio::net::TcpStream; +use tokio_util::compat::{ TokioAsyncReadCompatExt }; +use std::collections::{ HashMap, HashSet }; + +/// A unique Id is assigned per websocket connection (or more accurately, +/// per feed socket and per shard socket). This can be combined with the +/// [`LocalId`] of messages to give us a global ID. +type ConnId = u64; + +/// Incoming messages come via subscriptions, and end up looking like this. +#[derive(Debug)] +enum ToAggregator { + FromShardWebsocket(ConnId, FromShardWebsocket), + FromFeedWebsocket(ConnId, FromFeedWebsocket), +} + +/// An incoming shard connection can send these messages to the aggregator. +#[derive(Debug)] +pub enum FromShardWebsocket { + /// When the socket is opened, it'll send this first + /// so that we have a way to communicate back to it. + Initialize { + channel: mpsc::Sender, + }, + /// Tell the aggregator about a new node. 
+ Add { + local_id: LocalId, + ip: Option, + node: common::types::NodeDetails, + }, + /// Update/pass through details about a node. + Update { + local_id: LocalId, + payload: node::Payload + } +} + +/// The aggregator can send these messages back to a shard connection. +#[derive(Debug)] +pub enum ToShardWebsocket { + /// Mute messages to the core by passing the shard-local ID of them. + Mute { + local_id: LocalId + } +} + +/// An incoming feed connection can send these messages to the aggregator. +#[derive(Debug)] +pub enum FromFeedWebsocket { + /// When the socket is opened, it'll send this first + /// so that we have a way to communicate back to it. + Initialize { + channel: mpsc::Sender, + }, + /// The feed can subscribe to a chain to receive + /// messages relating to it. + Subscribe { + chain: Box + }, + /// The feed wants finality info for the chain, too. + SendFinality { + chain: Box + }, + /// The feed doesn't want any more finality info for the chain. + NoMoreFinality { + chain: Box + }, + /// An explicit ping message. + Ping { + chain: Box + } +} + +// The frontend sends text based commands; parse them into these messages: +impl FromStr for FromFeedWebsocket { + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + let (cmd, chain) = match s.find(':') { + Some(idx) => (&s[..idx], s[idx+1..].into()), + None => return Err(anyhow::anyhow!("Expecting format `CMD:CHAIN_NAME`")) + }; + match cmd { + "ping" => Ok(FromFeedWebsocket::Ping { chain }), + "subscribe" => Ok(FromFeedWebsocket::Subscribe { chain }), + "send-finality" => Ok(FromFeedWebsocket::SendFinality { chain }), + "no-more-finality" => Ok(FromFeedWebsocket::NoMoreFinality { chain }), + _ => return Err(anyhow::anyhow!("Command {} not recognised", cmd)) + } + } +} + +/// The aggregator can send these messages back to a feed connection. 
+#[derive(Debug)] +pub enum ToFeedWebsocket { + +} + +#[derive(Clone)] +pub struct Aggregator(Arc); + +struct AggregatorInternal { + /// Shards that connect are each assigned a unique connection ID. + /// This helps us know who to send messages back to (especially in + /// conjunction with the [`LocalId`] that messages will come with). + shard_conn_id: AtomicU64, + /// Feeds that connect have their own unique connection ID, too. + feed_conn_id: AtomicU64, + /// Send messages in to the aggregator from the outside via this. This is + /// stored here so that anybody holding an `Aggregator` handle can + /// make use of it. + tx_to_aggregator: mpsc::Sender +} + +impl Aggregator { + /// Spawn a new Aggregator. This connects to the telemetry backend + pub async fn spawn(denylist: Vec) -> anyhow::Result { + let (tx_to_aggregator, rx_from_external) = mpsc::channel(10); + + // Handle any incoming messages in our handler loop: + tokio::spawn(Aggregator::handle_messages(rx_from_external, denylist)); + + // Return a handle to our aggregator: + Ok(Aggregator(Arc::new(AggregatorInternal { + shard_conn_id: AtomicU64::new(1), + feed_conn_id: AtomicU64::new(1), + tx_to_aggregator, + }))) + } + + // This is spawned into a separate task and handles any messages coming + // in to the aggregator. If nobody is holding the tx side of the channel + // any more, this task will gracefully end. + async fn handle_messages(mut rx_from_external: mpsc::Receiver, denylist: Vec) { + + // Temporary: if we drop channels to shards, they will be booted: + let mut to_shards = vec![]; + + // Now, loop and receive messages to handle. 
+ while let Some(msg) = rx_from_external.next().await { + match msg { + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { channel }) => { + + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Ping { chain }) => { + + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Subscribe { chain }) => { + + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality { chain }) => { + + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality { chain }) => { + + }, + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Initialize { channel }) => { + to_shards.push(channel); + }, + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node }) => { + + }, + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { + + }, + } + } + } + + /// Return a sink that a shard can send messages into to be handled by the aggregator. + pub fn subscribe_shard(&self) -> impl Sink + Unpin { + // Assign a unique aggregator-local ID to each connection that subscribes, and pass + // that along with every message to the aggregator loop: + let shard_conn_id: ConnId = self.0.shard_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let tx_to_aggregator = self.0.tx_to_aggregator.clone(); + + // Calling `send` on this Sink requires Unpin. There may be a nicer way than this, + // but pinning by boxing is the easy solution for now: + Box::pin(tx_to_aggregator.with(move |msg| async move { + Ok(ToAggregator::FromShardWebsocket(shard_conn_id, msg)) + })) + } + + /// Return a sink that a feed can send messages into to be handled by the aggregator. 
+ pub fn subscribe_feed(&self) -> impl Sink + Unpin { + // Assign a unique aggregator-local ID to each connection that subscribes, and pass + // that along with every message to the aggregator loop: + let feed_conn_id: ConnId = self.0.feed_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let tx_to_aggregator = self.0.tx_to_aggregator.clone(); + + // Calling `send` on this Sink requires Unpin. There may be a nicer way than this, + // but pinning by boxing is the easy solution for now: + Box::pin(tx_to_aggregator.with(move |msg| async move { + Ok(ToAggregator::FromFeedWebsocket(feed_conn_id, msg)) + })) + } + +} \ No newline at end of file diff --git a/backend/core/src/feed/mod.rs b/backend/telemetry/src/feed_message.rs similarity index 85% rename from backend/core/src/feed/mod.rs rename to backend/telemetry/src/feed_message.rs index c4aca8b..e0ea5d6 100644 --- a/backend/core/src/feed/mod.rs +++ b/backend/telemetry/src/feed_message.rs @@ -1,3 +1,6 @@ +//! This module provides the messages that will be +//! sent to subscribing feeds. + use serde::ser::{SerializeTuple, Serializer}; use serde::Serialize; use std::mem; @@ -9,10 +12,6 @@ use common::types::{ Timestamp, NodeDetails, }; -pub mod connector; - -use connector::Serialized; - pub trait FeedMessage { const ACTION: u8; } @@ -66,16 +65,16 @@ impl FeedMessageSerializer { let _ = to_writer(&mut self.buffer, value); } - pub fn finalize(&mut self) -> Option { + pub fn finalize(&mut self) -> Option> { if self.buffer.is_empty() { return None; } self.buffer.push(b']'); - let bytes = mem::replace(&mut self.buffer, Vec::with_capacity(BUFCAP)).into(); + let bytes = mem::replace(&mut self.buffer, Vec::with_capacity(BUFCAP)); - Some(Serialized(bytes)) + Some(bytes) } } @@ -90,28 +89,28 @@ macro_rules! actions { } actions! 
{ - 0x00: Version, - 0x01: BestBlock, - 0x02: BestFinalized, - 0x03: AddedNode<'_>, - 0x04: RemovedNode, - 0x05: LocatedNode<'_>, - 0x06: ImportedBlock<'_>, - 0x07: FinalizedBlock, - 0x08: NodeStatsUpdate<'_>, - 0x09: Hardware<'_>, - 0x0A: TimeSync, - 0x0B: AddedChain<'_>, - 0x0C: RemovedChain<'_>, - 0x0D: SubscribedTo<'_>, - 0x0E: UnsubscribedFrom<'_>, - 0x0F: Pong<'_>, - 0x10: AfgFinalized, - 0x11: AfgReceivedPrevote, - 0x12: AfgReceivedPrecommit, - 0x13: AfgAuthoritySet, - 0x14: StaleNode, - 0x15: NodeIOUpdate<'_>, + 0: Version, + 1: BestBlock, + 2: BestFinalized, + 3: AddedNode<'_>, + 4: RemovedNode, + 5: LocatedNode<'_>, + 6: ImportedBlock<'_>, + 7: FinalizedBlock, + 8: NodeStatsUpdate<'_>, + 9: Hardware<'_>, + 10: TimeSync, + 11: AddedChain<'_>, + 12: RemovedChain<'_>, + 13: SubscribedTo<'_>, + 14: UnsubscribedFrom<'_>, + 15: Pong<'_>, + 16: AfgFinalized, + 17: AfgReceivedPrevote, + 18: AfgReceivedPrecommit, + 19: AfgAuthoritySet, + 20: StaleNode, + 21: NodeIOUpdate<'_>, } #[derive(Serialize)] diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs new file mode 100644 index 0000000..23fc200 --- /dev/null +++ b/backend/telemetry/src/main.rs @@ -0,0 +1,285 @@ +mod aggregator; +mod feed_message; +mod node; + +use std::net::SocketAddr; +use std::str::FromStr; + +use bincode::Options; +use common::internal_messages; +use structopt::StructOpt; +use simple_logger::SimpleLogger; +use futures::{StreamExt, SinkExt, channel::mpsc}; +use warp::Filter; +use warp::filters::ws; +use common::{log_level::LogLevel}; +use aggregator::{ Aggregator, FromFeedWebsocket, ToFeedWebsocket, FromShardWebsocket, ToShardWebsocket }; + +const VERSION: &str = env!("CARGO_PKG_VERSION"); +const AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); +const NAME: &str = "Substrate Telemetry Backend Core"; +const ABOUT: &str = "This is the Telemetry Backend Core that receives telemetry messages \ + from Substrate/Polkadot nodes and provides the data to a subsribed feed"; + 
+#[derive(StructOpt, Debug)] +#[structopt(name = NAME, version = VERSION, author = AUTHORS, about = ABOUT)] +struct Opts { + /// This is the socket address that Telemetry is listening to. This is restricted to + /// localhost (127.0.0.1) by default and should be fine for most use cases. If + /// you are using Telemetry in a container, you likely want to set this to '0.0.0.0:8000' + #[structopt( + short = "l", + long = "listen", + default_value = "127.0.0.1:8000", + )] + socket: std::net::SocketAddr, + /// The desired log level; one of 'error', 'warn', 'info', 'debug' or 'trace', where + /// 'error' only logs errors and 'trace' logs everything. + #[structopt( + required = false, + long = "log", + default_value = "info", + about = "Log level." + )] + log_level: LogLevel, + /// Space delimited list of the names of chains that are not allowed to connect to + /// telemetry. Case sensitive. + #[structopt( + required = false, + long = "denylist", + )] + denylist: Vec, +} + +#[tokio::main] +async fn main() { + let opts = Opts::from_args(); + let log_level = &opts.log_level; + + SimpleLogger::new() + .with_level(log_level.into()) + .init() + .expect("Must be able to start a logger"); + + log::info!( + "Starting Telemetry Core version: {}", + VERSION + ); + + if let Err(e) = start_server(opts).await { + log::error!("Error starting server: {}", e); + } +} + +/// Declare our routes and start the server. +async fn start_server(opts: Opts) -> anyhow::Result<()> { + + let shard_aggregator = Aggregator::spawn(opts.denylist).await?; + let feed_aggregator = shard_aggregator.clone(); + + // Handle requests to /health by returning OK. + let health_route = + warp::path("health") + .map(|| "OK"); + + // Handle websocket requests from shards. 
+ let ws_shard_submit_route = + warp::path("shard_submit") + .and(warp::ws()) + .and(warp::filters::addr::remote()) + .map(move |ws: ws::Ws, addr: Option| { + let tx_to_aggregator = shard_aggregator.subscribe_shard(); + log::info!("Opening /shard_submit connection from {:?}", addr); + ws.on_upgrade(move |websocket| async move { + let websocket = handle_shard_websocket_connection(websocket, tx_to_aggregator).await; + log::info!("Closing /shard_submit connection from {:?}", addr); + let _ = websocket.close().await; + }) + }); + + // Handle websocket requests from frontends. + let ws_feed_route = + warp::path("feed") + .and(warp::ws()) + .and(warp::filters::addr::remote()) + .map(move |ws: ws::Ws, addr: Option| { + let tx_to_aggregator = feed_aggregator.subscribe_feed(); + log::info!("Opening /feed connection from {:?}", addr); + ws.on_upgrade(move |websocket| async move { + let websocket = handle_feed_websocket_connection(websocket, tx_to_aggregator).await; + log::info!("Closing /feed connection from {:?}", addr); + let _ = websocket.close().await; + }) + }); + + // Merge the routes and start our server: + let routes = ws_shard_submit_route + .or(ws_feed_route) + .or(health_route); + warp::serve(routes).run(opts.socket).await; + Ok(()) +} + +/// This handles messages coming to/from a shard connection +async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> ws::WebSocket + where S: futures::Sink + Unpin +{ + let (tx_to_shard_conn, mut rx_from_aggregator) = mpsc::channel(10); + + // Tell the aggregator about this new connection, and give it a way to send messages to us: + let init_msg = FromShardWebsocket::Initialize { + channel: tx_to_shard_conn + }; + if let Err(e) = tx_to_aggregator.send(init_msg).await { + log::error!("Error sending message to aggregator: {}", e); + return websocket; + } + + // Loop, handling new messages from the shard or from the aggregator: + loop { + tokio::select! 
{ + // AGGREGATOR -> SHARD + msg = rx_from_aggregator.next() => { + // End the loop when connection from aggregator ends: + let msg = match msg { + Some(msg) => msg, + None => break + }; + + let internal_msg = match msg { + ToShardWebsocket::Mute { local_id } => { + internal_messages::FromTelemetryCore::Mute { local_id } + } + }; + + let bytes = bincode::options() + .serialize(&internal_msg) + .expect("message to shard should serialize"); + + if let Err(e) = websocket.send(ws::Message::binary(bytes)).await { + log::error!("Error sending message to shard; booting it: {}", e); + break + } + } + // SHARD -> AGGREGATOR + msg = websocket.next() => { + // End the loop when connection from shard ends: + let msg = match msg { + Some(msg) => msg, + None => break + }; + + let msg = match msg { + Err(e) => { + log::error!("Error receiving message from shard; booting it: {}", e); + break; + }, + Ok(msg) => msg + }; + + // If the message isn't something we want to handle, just ignore it. + // This includes system messages like "pings" and such, so don't log anything. 
+ if !msg.is_binary() && !msg.is_text() { + continue; + } + + let bytes = msg.as_bytes(); + let msg: internal_messages::FromShardAggregator = match bincode::options().deserialize(bytes) { + Ok(msg) => msg, + Err(e) => { + log::error!("Failed to deserialize message from shard; booting it: {}", e); + break; + } + }; + + // Convert and send to the aggregator: + let aggregator_msg = match msg { + internal_messages::FromShardAggregator::AddNode { ip, node, local_id } => { + FromShardWebsocket::Add { ip, node, local_id } + }, + internal_messages::FromShardAggregator::UpdateNode { payload, local_id } => { + FromShardWebsocket::Update { local_id, payload } + }, + }; + if let Err(e) = tx_to_aggregator.send(aggregator_msg).await { + log::error!("Failed to send message to aggregator; closing shard: {}", e); + break; + } + } + } + } + + // loop ended; give socket back to parent: + websocket +} + +/// This handles messages coming from a feed connection +async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> ws::WebSocket + where S: futures::Sink + Unpin +{ + let (tx_to_feed_conn, mut rx_from_aggregator) = mpsc::channel(10); + + // Tell the aggregator about this new connection, and give it a way to send messages to us: + let init_msg = FromFeedWebsocket::Initialize { + channel: tx_to_feed_conn + }; + if let Err(e) = tx_to_aggregator.send(init_msg).await { + log::error!("Error sending message to aggregator: {}", e); + return websocket; + } + + // Loop, handling new messages from the shard or from the aggregator: + loop { + tokio::select! 
{ + // AGGREGATOR -> FRONTEND + msg = rx_from_aggregator.next() => { + // End the loop when connection from aggregator ends: + let msg = match msg { + Some(msg) => msg, + None => break + }; + + println!("TODO: encode message and send down feed websocket: {:?}", msg); + } + // FRONTEND -> AGGREGATOR + msg = websocket.next() => { + // End the loop when connection from feed ends: + let msg = match msg { + Some(msg) => msg, + None => break + }; + + // If we see any errors, log them and end our loop: + let msg = match msg { + Err(e) => { + log::error!("Error in node websocket connection: {}", e); + break; + }, + Ok(msg) => msg + }; + + // We ignore all but text messages from the frontend: + let text = match msg.to_str() { + Ok(s) => s, + Err(_) => continue + }; + + // Parse the message into a command we understand and send it to the aggregator: + let cmd = match FromFeedWebsocket::from_str(text) { + Ok(cmd) => cmd, + Err(e) => { + log::warn!("Ignoring invalid command '{}' from the frontend: {}", text, e); + continue + } + }; + if let Err(e) = tx_to_aggregator.send(cmd).await { + log::error!("Failed to send message to aggregator; closing feed: {}", e); + break; + } + } + } + } + + // loop ended; give socket back to parent: + websocket +} diff --git a/backend/core/src/node.rs b/backend/telemetry/src/node.rs similarity index 100% rename from backend/core/src/node.rs rename to backend/telemetry/src/node.rs From 486418e5e9af1981931463252577763b1826efc6 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 10:54:46 +0100 Subject: [PATCH 010/134] Fix CI --- .github/workflows/backend.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index f63e5e6..7bf7fb5 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -16,8 +16,8 @@ jobs: - uses: actions/checkout@v2 - name: Build telemetry executables (in debug mode) - working-directory: ./backend/core - run: 
cargo build --verbose + working-directory: ./backend + run: cargo build --bins --verbose - name: Run tests working-directory: ./backend From 20524ac8aefaa6d11acdc47313ff2ea4356061c8 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 14:18:44 +0100 Subject: [PATCH 011/134] wrap assigning local/global IDs into struct to avoid things getting out of sync --- backend/common/src/assign_id.rs | 72 +++++++++++++++++++ backend/common/src/internal_messages.rs | 5 +- backend/common/src/lib.rs | 3 +- backend/shard/src/aggregator.rs | 26 +++---- backend/telemetry/src/aggregator.rs | 16 ++++- backend/telemetry/src/main.rs | 3 +- backend/telemetry/src/state/chain.rs | 3 + .../telemetry/src/{ => state}/feed_message.rs | 2 +- backend/telemetry/src/state/mod.rs | 7 ++ backend/telemetry/src/{ => state}/node.rs | 0 backend/telemetry/src/state/state.rs | 14 ++++ 11 files changed, 125 insertions(+), 26 deletions(-) create mode 100644 backend/common/src/assign_id.rs create mode 100644 backend/telemetry/src/state/chain.rs rename backend/telemetry/src/{ => state}/feed_message.rs (99%) create mode 100644 backend/telemetry/src/state/mod.rs rename backend/telemetry/src/{ => state}/node.rs (100%) create mode 100644 backend/telemetry/src/state/state.rs diff --git a/backend/common/src/assign_id.rs b/backend/common/src/assign_id.rs new file mode 100644 index 0000000..ed60d78 --- /dev/null +++ b/backend/common/src/assign_id.rs @@ -0,0 +1,72 @@ +use std::{collections::HashMap, hash::Hash}; +use serde::{Serialize,Deserialize}; + +#[derive(Clone,Copy,Debug,Hash,PartialEq,Eq,Serialize,Deserialize)] +pub struct Id(usize); + +impl std::convert::From for usize { + fn from(id: Id) -> usize { + id.0 + } +} + +/// A struct that allows you to assign ID to an arbitrary set of +/// details (so long as they are Eq+Hash+Clone), and then access +/// the assigned ID given those details or access the details given +/// the ID. +#[derive(Debug)] +pub struct AssignId
{ + current_id: Id, + from_details: HashMap, + from_id: HashMap +} + +impl
AssignId
where Details: Eq + Hash + Clone { + pub fn new() -> Self { + Self { + current_id: Id(0), + from_details: HashMap::new(), + from_id: HashMap::new() + } + } + + pub fn assign_id(&mut self, details: Details) -> Id { + let this_id = self.current_id; + self.current_id.0 += 1; + + self.from_details.insert(details.clone(), this_id); + self.from_id.insert(this_id, details); + + this_id + } + + pub fn get_details(&mut self, id: Id) -> Option<&Details> { + self.from_id.get(&id) + } + + pub fn get_id(&mut self, details: &Details) -> Option { + self.from_details.get(details).map(|id| *id) + } + + pub fn remove_by_id(&mut self, id: Id) -> Option
{ + if let Some(details) = self.from_id.remove(&id) { + self.from_details.remove(&details); + Some(details) + } else { + None + } + } + + pub fn remove_by_details(&mut self, details: &Details) -> Option { + if let Some(id) = self.from_details.remove(&details) { + self.from_id.remove(&id); + Some(id) + } else { + None + } + } + + pub fn clear(&mut self) { + *self = AssignId::new() + } +} \ No newline at end of file diff --git a/backend/common/src/internal_messages.rs b/backend/common/src/internal_messages.rs index 593ea9b..1aebab0 100644 --- a/backend/common/src/internal_messages.rs +++ b/backend/common/src/internal_messages.rs @@ -2,14 +2,15 @@ use std::net::IpAddr; use crate::node::Payload; use crate::types::{NodeDetails}; +use crate::assign_id::Id; use serde::{Deserialize, Serialize}; /// The shard-local ID of a given node, where a single connection /// might send data on behalf of more than one chain. -pub type LocalId = u64; +pub type LocalId = Id; /// A global ID assigned to messages from each different pair of ConnId+LocalId. 
-pub type GlobalId = u64; +pub type GlobalId = Id; /// Message sent from the shard to the backend core #[derive(Deserialize, Serialize, Debug, Clone)] diff --git a/backend/common/src/lib.rs b/backend/common/src/lib.rs index 78c887a..ec5b967 100644 --- a/backend/common/src/lib.rs +++ b/backend/common/src/lib.rs @@ -3,4 +3,5 @@ pub mod internal_messages; pub mod types; pub mod util; pub mod json; -pub mod log_level; \ No newline at end of file +pub mod log_level; +pub mod assign_id; \ No newline at end of file diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index a2461a9..5c80a87 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -1,9 +1,9 @@ -use common::{internal_messages::{self, LocalId}, node}; +use common::{internal_messages::{self, LocalId}, node, assign_id::AssignId}; use std::sync::Arc; use std::sync::atomic::AtomicU64; use futures::{channel::mpsc, future}; use futures::{ Sink, SinkExt, StreamExt }; -use std::collections::{ HashMap, HashSet }; +use std::collections::{ HashSet }; use crate::connection::{ create_ws_connection, Message }; /// A unique Id is assigned per websocket connection (or more accurately, @@ -100,8 +100,6 @@ impl Aggregator { async fn handle_messages(mut rx_from_external: mpsc::Receiver, mut tx_to_telemetry_core: mpsc::Sender) { use internal_messages::{ FromShardAggregator, FromTelemetryCore }; - let mut next_local_id: LocalId = 1; - // Just as an optimisation, we can keep track of whether we're connected to the backend // or not, and ignore incoming messages while we aren't. let mut connected_to_telemetry_core = false; @@ -112,8 +110,7 @@ impl Aggregator { // Maintain mappings from the connection ID and node message ID to the "local ID" which we // broadcast to the telemetry core. 
- let mut to_local_id: HashMap<(ConnId, node::NodeMessageId), LocalId> = HashMap::new(); - let mut from_local_id: HashMap = HashMap::new(); + let mut to_local_id = AssignId::new(); // Any messages coming from nodes that have been muted are ignored: let mut muted: HashSet = HashSet::new(); @@ -132,9 +129,9 @@ impl Aggregator { // We've told everything to disconnect. Now, reset our state: close_connections = vec![]; - to_local_id = HashMap::new(); - from_local_id = HashMap::new(); - muted = HashSet::new(); + to_local_id.clear(); + muted.clear(); + connected_to_telemetry_core = true; log::info!("Connected to telemetry core"); }, @@ -151,12 +148,7 @@ impl Aggregator { if !connected_to_telemetry_core { continue } // Generate a new "local ID" for messages from this connection: - let local_id = next_local_id; - next_local_id += 1; - - // Store mapping to/from local_id to conn/message ID paid: - to_local_id.insert((conn_id, message_id), local_id); - from_local_id.insert(local_id, (conn_id, message_id)); + let local_id = to_local_id.assign_id((conn_id, message_id)); // Send the message to the telemetry core with this local ID: let _ = tx_to_telemetry_core.send(FromShardAggregator::AddNode { @@ -170,8 +162,8 @@ impl Aggregator { if !connected_to_telemetry_core { continue } // Get the local ID, ignoring the message if none match: - let local_id = match to_local_id.get(&(conn_id, message_id)) { - Some(id) => *id, + let local_id = match to_local_id.get_id(&(conn_id, message_id)) { + Some(id) => id, None => continue }; diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index ad86fc0..7f7c22c 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -1,4 +1,4 @@ -use common::{internal_messages::{self, LocalId}, node}; +use common::{internal_messages::{GlobalId, LocalId}, node, assign_id::AssignId}; use std::{str::FromStr, sync::Arc}; use std::sync::atomic::AtomicU64; use futures::channel::{ mpsc, oneshot }; 
@@ -6,6 +6,7 @@ use futures::{ Sink, SinkExt, StreamExt }; use tokio::net::TcpStream; use tokio_util::compat::{ TokioAsyncReadCompatExt }; use std::collections::{ HashMap, HashSet }; +use crate::state::State; /// A unique Id is assigned per websocket connection (or more accurately, /// per feed socket and per shard socket). This can be combined with the @@ -137,6 +138,12 @@ impl Aggregator { // any more, this task will gracefully end. async fn handle_messages(mut rx_from_external: mpsc::Receiver, denylist: Vec) { + let mut nodes_state = State::new(); + + // Maintain mappings from the shard connection ID and local ID of messages to a global ID + // that uniquely identifies nodes in our node state. + let mut to_global_id = AssignId::new(); + // Temporary: if we drop channels to shards, they will be booted: let mut to_shards = vec![]; @@ -162,10 +169,13 @@ impl Aggregator { to_shards.push(channel); }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node }) => { - + let global_id = to_global_id.assign_id((shard_conn_id, local_id)); }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { - + let global_id = match to_global_id.get_id(&(shard_conn_id, local_id)) { + Some(id) => id, + None => continue + }; }, } } diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs index 23fc200..de8f0f5 100644 --- a/backend/telemetry/src/main.rs +++ b/backend/telemetry/src/main.rs @@ -1,6 +1,5 @@ mod aggregator; -mod feed_message; -mod node; +mod state; use std::net::SocketAddr; use std::str::FromStr; diff --git a/backend/telemetry/src/state/chain.rs b/backend/telemetry/src/state/chain.rs new file mode 100644 index 0000000..a44d50a --- /dev/null +++ b/backend/telemetry/src/state/chain.rs @@ -0,0 +1,3 @@ +pub struct Chain { + +} \ No newline at end of file diff --git a/backend/telemetry/src/feed_message.rs b/backend/telemetry/src/state/feed_message.rs similarity index 99% rename 
from backend/telemetry/src/feed_message.rs rename to backend/telemetry/src/state/feed_message.rs index e0ea5d6..9a02944 100644 --- a/backend/telemetry/src/feed_message.rs +++ b/backend/telemetry/src/state/feed_message.rs @@ -5,7 +5,7 @@ use serde::ser::{SerializeTuple, Serializer}; use serde::Serialize; use std::mem; -use crate::node::Node; +use super::node::Node; use serde_json::to_writer; use common::types::{ Address, BlockDetails, BlockHash, BlockNumber, NodeHardware, NodeIO, NodeId, NodeStats, diff --git a/backend/telemetry/src/state/mod.rs b/backend/telemetry/src/state/mod.rs new file mode 100644 index 0000000..d4c16a9 --- /dev/null +++ b/backend/telemetry/src/state/mod.rs @@ -0,0 +1,7 @@ +mod node; +mod chain; +mod feed_message; + +mod state; + +pub use state::State; \ No newline at end of file diff --git a/backend/telemetry/src/node.rs b/backend/telemetry/src/state/node.rs similarity index 100% rename from backend/telemetry/src/node.rs rename to backend/telemetry/src/state/node.rs diff --git a/backend/telemetry/src/state/state.rs b/backend/telemetry/src/state/state.rs new file mode 100644 index 0000000..2b7060e --- /dev/null +++ b/backend/telemetry/src/state/state.rs @@ -0,0 +1,14 @@ +use super::chain::Chain; +use std::collections::HashMap; + +pub struct State { + chains: HashMap, Chain> +} + +impl State { + pub fn new() -> State { + State { + chains: HashMap::new() + } + } +} \ No newline at end of file From 06d131bf3f0d51baa945be5d203e03fde26ed48b Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 14:23:29 +0100 Subject: [PATCH 012/134] Add a note about closing ws with statuscode+reason --- backend/shard/src/main.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/shard/src/main.rs b/backend/shard/src/main.rs index e4e8d8a..1f7c374 100644 --- a/backend/shard/src/main.rs +++ b/backend/shard/src/main.rs @@ -167,7 +167,9 @@ async fn handle_websocket_connection(websocket: ws::WebSocket, mut tx_to_aggr } } - // loops 
ended; attempt to close the connection gracefully: + // loops ended; attempt to close the connection gracefully. + // Note: IF we want to close with a status code and reason, we need to construct + // a ws::Message using `ws::Message::close_with`, rather than using this method: let _ = websocket.close().await; } From 19ef458e5be8e84c60fb474bbf515f53ce2fc7a7 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 16:27:42 +0100 Subject: [PATCH 013/134] Allow multiple SystemConnects to be handled from a single node in the shard --- backend/common/src/assign_id.rs | 4 ++ backend/common/src/internal_messages.rs | 4 ++ backend/shard/src/aggregator.rs | 40 ++++++++++---- backend/shard/src/main.rs | 72 ++++++++++++------------- backend/telemetry/src/aggregator.rs | 56 ++++++++++++++----- backend/telemetry/src/main.rs | 3 ++ 6 files changed, 122 insertions(+), 57 deletions(-) diff --git a/backend/common/src/assign_id.rs b/backend/common/src/assign_id.rs index ed60d78..a85db06 100644 --- a/backend/common/src/assign_id.rs +++ b/backend/common/src/assign_id.rs @@ -69,4 +69,8 @@ impl
AssignId
where Details: Eq + Hash + Clone { pub fn clear(&mut self) { *self = AssignId::new() } + + pub fn iter(&self) -> impl Iterator { + self.from_id.iter().map(|(id, details)| (*id, details)) + } } \ No newline at end of file diff --git a/backend/common/src/internal_messages.rs b/backend/common/src/internal_messages.rs index 1aebab0..1024b65 100644 --- a/backend/common/src/internal_messages.rs +++ b/backend/common/src/internal_messages.rs @@ -26,6 +26,10 @@ pub enum FromShardAggregator { local_id: LocalId, payload: Payload, }, + /// Inform the core that a node has been removed + RemoveNode { + local_id: LocalId + } } /// Message sent form the backend core to the shard diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index 5c80a87..e54234b 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -29,22 +29,27 @@ enum ToAggregator { /// messages from it will be ignored. #[derive(Clone,Debug)] pub enum FromWebsocket { - /// Tell the aggregator about a new node. - Add { - message_id: node::NodeMessageId, - ip: Option, - node: common::types::NodeDetails, + /// Fire this when the connection is established. + Initialize { /// When a message is sent back up this channel, we terminate /// the websocket connection and force the node to reconnect /// so that it sends its system info again incase the telemetry /// core has restarted. close_connection: mpsc::Sender<()> }, + /// Tell the aggregator about a new node. + Add { + message_id: node::NodeMessageId, + ip: Option, + node: common::types::NodeDetails, + }, /// Update/pass through details about a node. Update { message_id: node::NodeMessageId, payload: node::Payload - } + }, + /// Make a note when the node disconnects. 
+ Disconnected } pub type FromAggregator = internal_messages::FromShardAggregator; @@ -139,10 +144,13 @@ impl Aggregator { connected_to_telemetry_core = false; log::info!("Disconnected from telemetry core"); }, - ToAggregator::FromWebsocket(conn_id, FromWebsocket::Add { message_id, ip, node, close_connection }) => { - // Keep the close_connection channel incase we need it: + ToAggregator::FromWebsocket(_conn_id, FromWebsocket::Initialize { close_connection }) => { + // We boot all connections on a reconnect-to-core to force new systemconnected + // messages to be sent. We could boot on muting, but need to be careful not to boot + // connections where we mute one set of messages it sends and not others. close_connections.push(close_connection); - + }, + ToAggregator::FromWebsocket(conn_id, FromWebsocket::Add { message_id, ip, node }) => { // Don't bother doing anything else if we're disconnected, since we'll force the // ndoe to reconnect anyway when the backend does: if !connected_to_telemetry_core { continue } @@ -178,6 +186,20 @@ impl Aggregator { payload }).await; }, + ToAggregator::FromWebsocket(disconnected_conn_id, FromWebsocket::Disconnected) => { + // Find all of the local IDs corresponding to the disconnected connection ID and + // remove them, telling Telemetry Core about them too. This could be more efficient, + // but the mapping isn't currently cached and it's not a super frequent op. 
+ let local_ids_disconnected: Vec<_> = to_local_id.iter() + .filter(|(_, &(conn_id, _))| disconnected_conn_id == conn_id) + .map(|(local_id, _)| local_id) + .collect(); + + for local_id in local_ids_disconnected { + to_local_id.remove_by_id(local_id); + let _ = tx_to_telemetry_core.send(FromShardAggregator::RemoveNode { local_id }).await; + } + }, ToAggregator::FromTelemetryCore(FromTelemetryCore::Mute { local_id }) => { // Ignore incoming messages if we're not connected to the backend: if !connected_to_telemetry_core { continue } diff --git a/backend/shard/src/main.rs b/backend/shard/src/main.rs index 1f7c374..b1ae7b7 100644 --- a/backend/shard/src/main.rs +++ b/backend/shard/src/main.rs @@ -89,8 +89,13 @@ async fn start_server(opts: Opts) -> anyhow::Result<()> { let tx_to_aggregator = aggregator.subscribe_node(); log::info!("Opening /submit connection from {:?}", addr); ws.on_upgrade(move |websocket| async move { - handle_websocket_connection(websocket, tx_to_aggregator, addr).await; + let (mut tx_to_aggregator, websocket) = handle_websocket_connection(websocket, tx_to_aggregator, addr).await; log::info!("Closing /submit connection from {:?}", addr); + // Tell the aggregator that this connection has closed, so it can tidy up. + let _ = tx_to_aggregator.send(FromWebsocket::Disconnected).await; + // Note: IF we want to close with a status code and reason, we need to construct + // a ws::Message using `ws::Message::close_with`, rather than using this method: + let _ = websocket.close().await; }) }); @@ -101,53 +106,38 @@ async fn start_server(opts: Opts) -> anyhow::Result<()> { } /// This takes care of handling messages from an established socket connection. 
-async fn handle_websocket_connection(websocket: ws::WebSocket, mut tx_to_aggregator: S, addr: Option) +async fn handle_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S, addr: Option) -> (S, ws::WebSocket) where S: futures::Sink + Unpin { - let mut websocket = websocket.fuse(); - // This could be a oneshot channel, but it's useful to be able to clone // messages, and we can't clone oneshot channel senders. let (close_connection_tx, mut close_connection_rx) = mpsc::channel(0); - // First, we wait until we receive a SystemConnected message. - // Until this turns up, we ignore other messages. We could buffer - // a few quite easily if we liked. - while let Some(msg) = websocket.next().await { - let node_message = match deserialize_ws_message(msg) { - Ok(Some(msg)) => msg, - Ok(None) => continue, - Err(e) => { log::error!("{}", e); break } - }; - - let message_id = node_message.id(); - let payload = node_message.into_payload(); - - if let node::Payload::SystemConnected(info) = payload { - let _ = tx_to_aggregator.send(FromWebsocket::Add { - message_id, - ip: addr.map(|a| a.ip()), - node: info.node, - close_connection: close_connection_tx, - }).await; - break; - } + // Tell the aggregator about this new connection, and give it a way to close this connection: + let init_msg = FromWebsocket::Initialize { + close_connection: close_connection_tx + }; + if let Err(e) = tx_to_aggregator.send(init_msg).await { + log::error!("Error sending message to aggregator: {}", e); + return (tx_to_aggregator, websocket); } - // Now, the node has been added, so we forward messages along as updates. - // We keep an eye on the close_connection channel; if that resolves, then - // end this loop and let the connection close gracefully. + // Now we've "initialized", wait for messages from the node. 
Messages will + // either be `SystemConnected` type messages that inform us that a new set + // of messages with some message ID will be sent (a node could have more + // than one of these), or updates linked to a specific message_id. loop { - futures::select_biased! { + tokio::select! { // The close channel has fired, so end the loop: _ = close_connection_rx.next() => { + log::info!("connection to {:?} being closed by aggregator", addr); break }, // A message was received; handle it: msg = websocket.next() => { let msg = match msg { Some(msg) => msg, - None => break + None => { log::warn!("Websocket connection from {:?} closed", addr); break } }; let node_message = match deserialize_ws_message(msg) { @@ -159,7 +149,19 @@ async fn handle_websocket_connection(websocket: ws::WebSocket, mut tx_to_aggr let message_id = node_message.id(); let payload = node_message.into_payload(); - if let Err(e) = tx_to_aggregator.send(FromWebsocket::Update { message_id, payload } ).await { + // Until the aggregator receives an `Add` message, which we can create once + // we see one of these SystemConnected ones, it will ignore messages with + // the corresponding message_id. + if let node::Payload::SystemConnected(info) = payload { + let _ = tx_to_aggregator.send(FromWebsocket::Add { + message_id, + ip: addr.map(|a| a.ip()), + node: info.node, + }).await; + } + // Anything that's not an "Add" is an Update. The aggregator will ignore + // updates against a message_id that hasn't first been Added, above. + else if let Err(e) = tx_to_aggregator.send(FromWebsocket::Update { message_id, payload } ).await { log::error!("Failed to send node message to aggregator: {}", e); continue; } @@ -167,10 +169,8 @@ async fn handle_websocket_connection(websocket: ws::WebSocket, mut tx_to_aggr } } - // loops ended; attempt to close the connection gracefully. 
- // Note: IF we want to close with a status code and reason, we need to construct - // a ws::Message using `ws::Message::close_with`, rather than using this method: - let _ = websocket.close().await; + // Return what we need to close the connection gracefully: + (tx_to_aggregator, websocket) } /// Deserialize an incoming websocket message, returning an error if something diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index 7f7c22c..c750bc4 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -38,6 +38,10 @@ pub enum FromShardWebsocket { Update { local_id: LocalId, payload: node::Payload + }, + /// Tell the aggregator that a node has been removed when it disconnects. + Remove { + local_id: LocalId, } } @@ -138,44 +142,72 @@ impl Aggregator { // any more, this task will gracefully end. async fn handle_messages(mut rx_from_external: mpsc::Receiver, denylist: Vec) { - let mut nodes_state = State::new(); + let mut node_state = State::new(); // Maintain mappings from the shard connection ID and local ID of messages to a global ID // that uniquely identifies nodes in our node state. - let mut to_global_id = AssignId::new(); + let mut to_global_node_id = AssignId::new(); - // Temporary: if we drop channels to shards, they will be booted: - let mut to_shards = vec![]; + // Keep track of channels to communicate with feeds and shards: + let mut feed_channels = HashMap::new(); + let mut shard_channels = HashMap::new(); + + // What chains have aour feeds subscribed to (one at a time at the mo): + let mut feed_conn_id_to_chain: HashMap> = HashMap::new(); + let mut chain_to_feed_conn_ids: HashMap, HashSet> = HashMap::new(); + let mut feed_conn_id_finality: HashSet = HashSet::new(); // Now, loop and receive messages to handle. 
while let Some(msg) = rx_from_external.next().await { match msg { ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { channel }) => { + feed_channels.insert(feed_conn_id, channel); + // TODO: `feed::AddedChain` message to tell feed about current chains. }, ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Ping { chain }) => { - + // TODO: Return with feed::Pong(chain) feed message. }, ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Subscribe { chain }) => { + // Unsubscribe from previous chain if subscribed to one: + if let Some(feed_ids) = chain_to_feed_conn_ids.get_mut(&chain) { + feed_ids.remove(&feed_conn_id); + } + + // Subscribe to the new chain: + feed_conn_id_to_chain.insert(feed_conn_id, chain.clone()); + chain_to_feed_conn_ids.entry(chain).or_default().insert(feed_conn_id); }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality { chain }) => { - + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality { chain: _ }) => { + feed_conn_id_finality.insert(feed_conn_id); + // TODO: Do we care about the chain here? }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality { chain }) => { - + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality { chain: _ }) => { + feed_conn_id_finality.remove(&feed_conn_id); + // TODO: Do we care about the chain here? }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Initialize { channel }) => { - to_shards.push(channel); + shard_channels.insert(shard_conn_id, channel); }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node }) => { - let global_id = to_global_id.assign_id((shard_conn_id, local_id)); + let global_node_id = to_global_node_id.assign_id((shard_conn_id, local_id)); + + // TODO: node_state.add_node. Every feed should know about node count changes. 
+ }, + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Remove { local_id }) => { + println!("Removed node! {:?}", local_id); + if let Some(id) = to_global_node_id.remove_by_details(&(shard_conn_id, local_id)) { + // TODO: node_state.remove_node, Every feed should know about node count changes. + } }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { - let global_id = match to_global_id.get_id(&(shard_conn_id, local_id)) { + let global_node_id = match to_global_node_id.get_id(&(shard_conn_id, local_id)) { Some(id) => id, None => continue }; + + // TODO: node_state.update_node, then handle returned diffs }, } } diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs index de8f0f5..cefa094 100644 --- a/backend/telemetry/src/main.rs +++ b/backend/telemetry/src/main.rs @@ -199,6 +199,9 @@ async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut internal_messages::FromShardAggregator::UpdateNode { payload, local_id } => { FromShardWebsocket::Update { local_id, payload } }, + internal_messages::FromShardAggregator::RemoveNode { local_id } => { + FromShardWebsocket::Remove { local_id } + }, }; if let Err(e) = tx_to_aggregator.send(aggregator_msg).await { log::error!("Failed to send message to aggregator; closing shard: {}", e); From 83e2cee295951cdc7c93019b0271c6dbdb4efb8b Mon Sep 17 00:00:00 2001 From: James Wilson Date: Mon, 21 Jun 2021 16:28:04 +0100 Subject: [PATCH 014/134] Remove 'remove' logging --- backend/telemetry/src/aggregator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index c750bc4..2895640 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -196,7 +196,6 @@ impl Aggregator { // TODO: node_state.add_node. Every feed should know about node count changes. 
}, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Remove { local_id }) => { - println!("Removed node! {:?}", local_id); if let Some(id) = to_global_node_id.remove_by_details(&(shard_conn_id, local_id)) { // TODO: node_state.remove_node, Every feed should know about node count changes. } From 63283195addb9941850488029985655ed07904ea Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 23 Jun 2021 09:34:56 +0100 Subject: [PATCH 015/134] WIP filling in core aggregator match arms and various other tweaks --- backend/Cargo.lock | 2 + backend/common/src/internal_messages.rs | 5 +- backend/shard/src/aggregator.rs | 6 +- backend/shard/src/main.rs | 1 + backend/telemetry/Cargo.toml | 2 + backend/telemetry/src/aggregator.rs | 185 ++++++++++++++---- .../telemetry/src/{state => }/feed_message.rs | 21 +- backend/telemetry/src/main.rs | 13 +- backend/telemetry/src/state/chain.rs | 58 +++++- backend/telemetry/src/state/mod.rs | 6 +- backend/telemetry/src/state/state.rs | 125 +++++++++++- 11 files changed, 370 insertions(+), 54 deletions(-) rename backend/telemetry/src/{state => }/feed_message.rs (88%) diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 4329710..8b89673 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -1193,10 +1193,12 @@ dependencies = [ "hex", "http", "log", + "once_cell", "primitive-types", "serde", "serde_json", "simple_logger", + "smallvec", "soketto", "structopt", "thiserror", diff --git a/backend/common/src/internal_messages.rs b/backend/common/src/internal_messages.rs index 1024b65..b2b1b9b 100644 --- a/backend/common/src/internal_messages.rs +++ b/backend/common/src/internal_messages.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use crate::node::Payload; -use crate::types::{NodeDetails}; +use crate::types::{NodeDetails, BlockHash}; use crate::assign_id::Id; use serde::{Deserialize, Serialize}; @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize}; pub type LocalId = Id; /// A global ID assigned to messages from each 
different pair of ConnId+LocalId. -pub type GlobalId = Id; +pub type GlobalId = usize; /// Message sent from the shard to the backend core #[derive(Deserialize, Serialize, Debug, Clone)] @@ -20,6 +20,7 @@ pub enum FromShardAggregator { ip: Option, node: NodeDetails, local_id: LocalId, + genesis_hash: BlockHash }, /// Send a message payload to update details for a node UpdateNode { diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index e54234b..a6e5f1a 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -1,4 +1,4 @@ -use common::{internal_messages::{self, LocalId}, node, assign_id::AssignId}; +use common::{internal_messages::{self, LocalId}, node, assign_id::AssignId, types::BlockHash}; use std::sync::Arc; use std::sync::atomic::AtomicU64; use futures::{channel::mpsc, future}; @@ -42,6 +42,7 @@ pub enum FromWebsocket { message_id: node::NodeMessageId, ip: Option, node: common::types::NodeDetails, + genesis_hash: BlockHash }, /// Update/pass through details about a node. Update { @@ -150,7 +151,7 @@ impl Aggregator { // connections where we mute one set of messages it sends and not others. 
close_connections.push(close_connection); }, - ToAggregator::FromWebsocket(conn_id, FromWebsocket::Add { message_id, ip, node }) => { + ToAggregator::FromWebsocket(conn_id, FromWebsocket::Add { message_id, ip, node, genesis_hash }) => { // Don't bother doing anything else if we're disconnected, since we'll force the // ndoe to reconnect anyway when the backend does: if !connected_to_telemetry_core { continue } @@ -162,6 +163,7 @@ impl Aggregator { let _ = tx_to_telemetry_core.send(FromShardAggregator::AddNode { ip, node, + genesis_hash, local_id }).await; }, diff --git a/backend/shard/src/main.rs b/backend/shard/src/main.rs index b1ae7b7..a1ed08e 100644 --- a/backend/shard/src/main.rs +++ b/backend/shard/src/main.rs @@ -157,6 +157,7 @@ async fn handle_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_ message_id, ip: addr.map(|a| a.ip()), node: info.node, + genesis_hash: info.genesis_hash, }).await; } // Anything that's not an "Add" is an Update. The aggregator will ignore diff --git a/backend/telemetry/Cargo.toml b/backend/telemetry/Cargo.toml index 08f5a14..0f29d50 100644 --- a/backend/telemetry/Cargo.toml +++ b/backend/telemetry/Cargo.toml @@ -12,10 +12,12 @@ futures = "0.3.15" hex = "0.4.3" http = "0.2.4" log = "0.4.14" +once_cell = "1.8.0" primitive-types = { version = "0.9.0", features = ["serde"] } serde = { version = "1.0.126", features = ["derive"] } serde_json = "1.0.64" simple_logger = "1.11.0" +smallvec = "1.6.1" soketto = "0.6.0" structopt = "0.3.21" thiserror = "1.0.25" diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index 2895640..d4c1ee3 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -1,4 +1,9 @@ -use common::{internal_messages::{GlobalId, LocalId}, node, assign_id::AssignId}; +use common::{ + internal_messages::{GlobalId, LocalId}, + node, + assign_id::AssignId, + util::now +}; use std::{str::FromStr, sync::Arc}; use std::sync::atomic::AtomicU64; use 
futures::channel::{ mpsc, oneshot }; @@ -7,6 +12,7 @@ use tokio::net::TcpStream; use tokio_util::compat::{ TokioAsyncReadCompatExt }; use std::collections::{ HashMap, HashSet }; use crate::state::State; +use crate::feed_message::{ self, FeedMessageSerializer }; /// A unique Id is assigned per websocket connection (or more accurately, /// per feed socket and per shard socket). This can be combined with the @@ -33,6 +39,7 @@ pub enum FromShardWebsocket { local_id: LocalId, ip: Option, node: common::types::NodeDetails, + genesis_hash: common::types::BlockHash }, /// Update/pass through details about a node. Update { @@ -68,13 +75,9 @@ pub enum FromFeedWebsocket { chain: Box }, /// The feed wants finality info for the chain, too. - SendFinality { - chain: Box - }, + SendFinality, /// The feed doesn't want any more finality info for the chain. - NoMoreFinality { - chain: Box - }, + NoMoreFinality, /// An explicit ping message. Ping { chain: Box @@ -92,8 +95,8 @@ impl FromStr for FromFeedWebsocket { match cmd { "ping" => Ok(FromFeedWebsocket::Ping { chain }), "subscribe" => Ok(FromFeedWebsocket::Subscribe { chain }), - "send-finality" => Ok(FromFeedWebsocket::SendFinality { chain }), - "no-more-finality" => Ok(FromFeedWebsocket::NoMoreFinality { chain }), + "send-finality" => Ok(FromFeedWebsocket::SendFinality), + "no-more-finality" => Ok(FromFeedWebsocket::NoMoreFinality), _ => return Err(anyhow::anyhow!("Command {} not recognised", cmd)) } } @@ -102,7 +105,7 @@ impl FromStr for FromFeedWebsocket { /// The aggregator can these messages back to a feed connection. #[derive(Debug)] pub enum ToFeedWebsocket { - + Bytes(Vec) } #[derive(Clone)] @@ -142,7 +145,7 @@ impl Aggregator { // any more, this task will gracefully end. 
async fn handle_messages(mut rx_from_external: mpsc::Receiver, denylist: Vec) { - let mut node_state = State::new(); + let mut node_state = State::new(denylist); // Maintain mappings from the shard connection ID and local ID of messages to a global ID // that uniquely identifies nodes in our node state. @@ -152,46 +155,124 @@ impl Aggregator { let mut feed_channels = HashMap::new(); let mut shard_channels = HashMap::new(); - // What chains have aour feeds subscribed to (one at a time at the mo): + // What chains have our feeds subscribed to (one at a time at the mo)? + // Both of these need to be kept in sync (should move to own struct eventually). let mut feed_conn_id_to_chain: HashMap> = HashMap::new(); let mut chain_to_feed_conn_ids: HashMap, HashSet> = HashMap::new(); + + // Which feeds want finality info too? let mut feed_conn_id_finality: HashSet = HashSet::new(); // Now, loop and receive messages to handle. while let Some(msg) = rx_from_external.next().await { match msg { - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { channel }) => { - feed_channels.insert(feed_conn_id, channel); + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { mut channel }) => { + feed_channels.insert(feed_conn_id, channel.clone()); - // TODO: `feed::AddedChain` message to tell feed about current chains. - }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Ping { chain }) => { - // TODO: Return with feed::Pong(chain) feed message. 
- }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Subscribe { chain }) => { - // Unsubscribe from previous chain if subscribed to one: - if let Some(feed_ids) = chain_to_feed_conn_ids.get_mut(&chain) { - feed_ids.remove(&feed_conn_id); + // Tell the new feed subscription some basic things to get it going: + let mut feed_serializer = FeedMessageSerializer::new(); + feed_serializer.push(feed_message::Version(31)); + for chain in node_state.iter_chains() { + feed_serializer.push(feed_message::AddedChain( + chain.label(), + chain.node_count() + )); } - // Subscribe to the new chain: - feed_conn_id_to_chain.insert(feed_conn_id, chain.clone()); - chain_to_feed_conn_ids.entry(chain).or_default().insert(feed_conn_id); + // Send this to the channel that subscribed: + if let Some(bytes) = feed_serializer.into_finalized() { + let _ = channel.send(ToFeedWebsocket::Bytes(bytes)).await; + } + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Ping { chain }) => { + let feed_channel = match feed_channels.get_mut(&feed_conn_id) { + Some(chan) => chan, + None => continue + }; + // Pong! + let mut feed_serializer = FeedMessageSerializer::new(); + feed_serializer.push(feed_message::Pong(&chain)); + if let Some(bytes) = feed_serializer.into_finalized() { + let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; + } }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality { chain: _ }) => { - feed_conn_id_finality.insert(feed_conn_id); - // TODO: Do we care about the chain here? 
- }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality { chain: _ }) => { + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Subscribe { chain }) => { + let feed_channel = match feed_channels.get_mut(&feed_conn_id) { + Some(chan) => chan, + None => continue + }; + + // Unsubscribe from previous chain if subscribed to one: + let old_chain_label = feed_conn_id_to_chain.remove(&feed_conn_id); + if let Some(old_chain_label) = &old_chain_label { + if let Some(map) = chain_to_feed_conn_ids.get_mut(old_chain_label) { + map.remove(&feed_conn_id); + } + } + + // Untoggle request for finality feeds: + feed_conn_id_finality.remove(&feed_conn_id); + + // Get the chain we're subscribing to, ignoring the rest if it doesn't exist. + let chain = match node_state.get_chain_by_label(&chain) { + Some(chain) => chain, + None => continue + }; + + // Send messages to the feed about the new chain: + let mut feed_serializer = FeedMessageSerializer::new(); + if let Some(old_chain_label) = old_chain_label { + feed_serializer.push(feed_message::UnsubscribedFrom(&old_chain_label)); + } + feed_serializer.push(feed_message::SubscribedTo(chain.label())); + feed_serializer.push(feed_message::TimeSync(now())); + feed_serializer.push(feed_message::BestBlock ( + chain.best_block().height, + chain.timestamp(), + chain.average_block_time() + )); + feed_serializer.push(feed_message::BestFinalized ( + chain.finalized_block().height, + chain.finalized_block().hash + )); + for (idx, (gid, node)) in node_state.get_nodes_in_chain(chain).enumerate() { + // Send subscription confirmation and chain head before doing all the nodes, + // and continue sending batches of 32 nodes a time over the wire subsequently + if idx % 32 == 0 { + if let Some(bytes) = feed_serializer.finalize() { + let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; + } + } + feed_serializer.push(feed_message::AddedNode(gid, node)); + 
feed_serializer.push(feed_message::FinalizedBlock( + gid, + node.finalized().height, + node.finalized().hash, + )); + if node.stale() { + feed_serializer.push(feed_message::StaleNode(gid)); + } + } + if let Some(bytes) = feed_serializer.into_finalized() { + let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; + } + + // Actually make a note of the new chain subscription: + feed_conn_id_to_chain.insert(feed_conn_id, chain.label().into()); + chain_to_feed_conn_ids.entry(chain.label().into()).or_default().insert(feed_conn_id); + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality) => { + feed_conn_id_finality.insert(feed_conn_id); + }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality) => { feed_conn_id_finality.remove(&feed_conn_id); - // TODO: Do we care about the chain here? }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Initialize { channel }) => { shard_channels.insert(shard_conn_id, channel); }, - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node, genesis_hash }) => { - let global_node_id = to_global_node_id.assign_id((shard_conn_id, local_id)); + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node, genesis_hash }) => { + // Get globalId from add_node and store that against shard/local_id. // TODO: node_state.add_node. Every feed should know about node count changes. @@ -201,11 +282,47 @@ impl Aggregator { } }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { + // TODO: Fill this all in... 
let global_node_id = match to_global_node_id.get_id(&(shard_conn_id, local_id)) { Some(id) => id, None => continue }; + if let Some(block) = payload.best_block() { + + } + + match payload { + node::Payload::SystemInterval(system_interval) => { + + }, + node::Payload::AfgAuthoritySet(_) => { + + }, + node::Payload::AfgFinalized(_) => { + + }, + node::Payload::AfgReceivedPrecommit(_) => { + + }, + node::Payload::AfgReceivedPrevote(_) => { + + }, + // This message should have been handled before the payload made it this far: + node::Payload::SystemConnected(_) => { + unreachable!("SystemConnected message seen in Telemetry Core, but should have been handled in shard"); + }, + // The following messages aren't handled at the moment. List them explicitly so + // that we have to make an explicit choice for any new messages: + node::Payload::BlockImport(_) | + node::Payload::NotifyFinalized(_) | + node::Payload::AfgReceivedCommit(_) | + node::Payload::TxPoolImport | + node::Payload::AfgFinalizedBlocksUpTo | + node::Payload::AuraPreSealedBlock | + node::Payload::PreparedBlockForProposing => {}, + } + // TODO: node_state.update_node, then handle returned diffs }, } diff --git a/backend/telemetry/src/state/feed_message.rs b/backend/telemetry/src/feed_message.rs similarity index 88% rename from backend/telemetry/src/state/feed_message.rs rename to backend/telemetry/src/feed_message.rs index 9a02944..80d422e 100644 --- a/backend/telemetry/src/state/feed_message.rs +++ b/backend/telemetry/src/feed_message.rs @@ -1,15 +1,14 @@ -//! This module provides the messages that will be -//! sent to subscribing feeds. +//! This module provides a way of encoding the various messages that we'll +//! send to subscribed feeds (browsers). 
-use serde::ser::{SerializeTuple, Serializer}; use serde::Serialize; use std::mem; -use super::node::Node; +use crate::state::Node; use serde_json::to_writer; use common::types::{ Address, BlockDetails, BlockHash, BlockNumber, NodeHardware, NodeIO, NodeId, NodeStats, - Timestamp, NodeDetails, + Timestamp }; pub trait FeedMessage { @@ -65,6 +64,8 @@ impl FeedMessageSerializer { let _ = to_writer(&mut self.buffer, value); } + /// Return the bytes we've serialized so far and prepare a new buffer. If you're + /// finished serializing data, prefer [`FeedMessageSerializer::into_finalized`] pub fn finalize(&mut self) -> Option> { if self.buffer.is_empty() { return None; @@ -76,6 +77,16 @@ impl FeedMessageSerializer { Some(bytes) } + + /// Return the bytes that we've serialized so far, consuming the serializer. + pub fn into_finalized(mut self) -> Option> { + if self.buffer.is_empty() { + return None; + } + + self.buffer.push(b']'); + Some(self.buffer) + } } macro_rules! actions { diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs index cefa094..9ce32c1 100644 --- a/backend/telemetry/src/main.rs +++ b/backend/telemetry/src/main.rs @@ -1,5 +1,6 @@ mod aggregator; mod state; +mod feed_message; use std::net::SocketAddr; use std::str::FromStr; @@ -193,8 +194,8 @@ async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut // Convert and send to the aggregator: let aggregator_msg = match msg { - internal_messages::FromShardAggregator::AddNode { ip, node, local_id } => { - FromShardWebsocket::Add { ip, node, local_id } + internal_messages::FromShardAggregator::AddNode { ip, node, local_id, genesis_hash } => { + FromShardWebsocket::Add { ip, node, genesis_hash, local_id } }, internal_messages::FromShardAggregator::UpdateNode { payload, local_id } => { FromShardWebsocket::Update { local_id, payload } @@ -241,7 +242,13 @@ async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut t None => break }; - println!("TODO: encode 
message and send down feed websocket: {:?}", msg); + // Send messages to the client (currently the only message is + // pre-serialized bytes that we send as binary): + match msg { + ToFeedWebsocket::Bytes(bytes) => { + let _ = websocket.send(ws::Message::binary(bytes)).await; + } + } } // FRONTEND -> AGGREGATOR msg = websocket.next() => { diff --git a/backend/telemetry/src/state/chain.rs b/backend/telemetry/src/state/chain.rs index a44d50a..ce6fcc8 100644 --- a/backend/telemetry/src/state/chain.rs +++ b/backend/telemetry/src/state/chain.rs @@ -1,3 +1,59 @@ -pub struct Chain { +use std::sync::Arc; +use std::collections::{ HashSet, HashMap }; +use common::types::{ BlockHash }; +use common::internal_messages::{ GlobalId }; +use super::node::Node; +use common::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; +use common::util::{now, DenseMap, NumStats}; +use common::node::Payload; +use std::iter::IntoIterator; +pub type ChainId = usize; +pub type Label = Arc; + +pub struct Chain { + /// Label of this chain, along with count of nodes that use this label + label: (Label, usize), + /// Chain genesis hash + genesis_hash: BlockHash, + /// Set of nodes that are in this chain + nodes: HashSet, + /// Best block + best: Block, + /// Finalized block + finalized: Block, + /// Block times history, stored so we can calculate averages + block_times: NumStats, + /// Calculated average block time + average_block_time: Option, + /// When the best block first arrived + timestamp: Option, + /// Some nodes might manifest a different label, note them here + labels: HashMap, + /// How many nodes are allowed in this chain + max_nodes: usize +} + +impl Chain { + pub fn label(&self) -> &str { + &self.label.0 + } + pub fn node_count(&self) -> usize { + self.nodes.len() + } + pub fn node_ids(&self) -> impl Iterator + '_ { + self.nodes.iter().copied() + } + pub fn best_block(&self) -> &Block { + &self.best + } + pub fn timestamp(&self) -> Timestamp { + self.timestamp.unwrap_or(0) + } 
+ pub fn average_block_time(&self) -> Option { + self.average_block_time + } + pub fn finalized_block(&self) -> &Block { + &self.finalized + } } \ No newline at end of file diff --git a/backend/telemetry/src/state/mod.rs b/backend/telemetry/src/state/mod.rs index d4c16a9..7f81315 100644 --- a/backend/telemetry/src/state/mod.rs +++ b/backend/telemetry/src/state/mod.rs @@ -1,7 +1,9 @@ mod node; mod chain; -mod feed_message; +// mod feed_message; +// mod diff; mod state; -pub use state::State; \ No newline at end of file +pub use state::State; +pub use node::Node; \ No newline at end of file diff --git a/backend/telemetry/src/state/state.rs b/backend/telemetry/src/state/state.rs index 2b7060e..29c3b75 100644 --- a/backend/telemetry/src/state/state.rs +++ b/backend/telemetry/src/state/state.rs @@ -1,14 +1,129 @@ -use super::chain::Chain; -use std::collections::HashMap; +use std::sync::Arc; +use std::collections::{ HashSet, HashMap }; +use common::types::{ BlockHash }; +use common::internal_messages::{ GlobalId }; +use super::node::Node; +use once_cell::sync::Lazy; +use common::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; +use common::util::{now, DenseMap, NumStats}; +use common::node::Payload; +use std::iter::IntoIterator; +use super::chain::Chain; + +pub type ChainId = usize; +pub type Label = Arc; + +/// Our state constains node and chain information pub struct State { - chains: HashMap, Chain> + chains: DenseMap, + nodes: HashMap, + chains_by_genesis_hash: HashMap, + chains_by_label: HashMap, + /// Denylist for networks we do not want to allow connecting. + denylist: HashSet, +} + +/// Labels of chains we consider "first party". These chains allow any +/// number of nodes to connect. +static FIRST_PARTY_NETWORKS: Lazy> = Lazy::new(|| { + let mut set = HashSet::new(); + set.insert("Polkadot"); + set.insert("Kusama"); + set.insert("Westend"); + set.insert("Rococo"); + set +}); + +/// Max number of nodes allowed to connect to the telemetry server. 
+const THIRD_PARTY_NETWORKS_MAX_NODES: usize = 500; + +/// Adding a node to a chain leads to this result: +pub enum AddNodeResult { + /// The chain is on the "deny list", so we can't add the node + ChainOnDenyList, + /// The chain is over quota (too many nodes connected), so can't add the node + ChainOverQuota, + /// The node was added to the chain + NodeAddedToChain(NodeAddedToChain) +} + +pub struct NodeAddedToChain { + /// The label for the chain (which may have changed as a result of adding the node): + chain_label: Arc, + /// Has the chain label been updated? + has_chain_label_changed: bool, + // How many nodes now exist in the chain? + chain_node_count: usize +} + +pub struct RemoveNodeResult { + /// How many nodes remain on the chain (0 if the chain was removed): + chain_node_count: usize } impl State { - pub fn new() -> State { + pub fn new>(denylist: T) -> State { State { - chains: HashMap::new() + chains: DenseMap::new(), + nodes: HashMap::new(), + chains_by_genesis_hash: HashMap::new(), + chains_by_label: HashMap::new(), + denylist: denylist.into_iter().collect() } } + + pub fn iter_chains(&self) -> impl Iterator { + self.chains + .iter() + .map(|(_,chain)| chain) + } + + pub fn get_chain_by_label(&self, label: &str) -> Option<&Chain> { + self.chains_by_label + .get(label) + .and_then(|chain_id| self.chains.get(*chain_id)) + } + + pub fn get_nodes_in_chain<'s>(&'s self, chain: &'s Chain) -> impl Iterator { + chain.node_ids() + .filter_map(move |id| self.nodes.get(&id).map(|node| (id, node))) + } + + // /// Add a new node to our state. + // pub fn add_node(&mut self, id: GlobalId, genesis_hash: BlockHash, node: &NodeDetails) -> AddNodeResult { + // if self.denylist.contains(&*node.chain) { + // return AddNodeResult::ChainOnDenyList; + // } + // let chain_id = self.chains.get_or_create(genesis_hash, &node.chain); + + // return Ok(()) + // } + + // /// Remove a node from our state. 
+ // pub fn remove_node(&mut self, id: GlobalId) -> RemoveNodeResult { + + // } + + // /// Update a node with new data. This needs breaking down into parts so + // /// that we can emit a useful result in each case to inform the aggregator + // /// what messages it needs to emit. + // pub fn update_node(&mut self, id: GlobalId, payload: Payload) { + + // } + + // fn get_or_create_chain(genesis_hash: BlockHash, chain: &str) -> ChainId { + + // } +} + +/// First party networks (Polkadot, Kusama etc) are allowed any number of nodes. +/// Third party networks are allowed `THIRD_PARTY_NETWORKS_MAX_NODES` nodes and +/// no more. +fn max_nodes(label: &str) -> usize { + if FIRST_PARTY_NETWORKS.contains(label) { + usize::MAX + } else { + THIRD_PARTY_NETWORKS_MAX_NODES + } } \ No newline at end of file From 7dfc582a201601c1061a70459f7258f0a5a0460d Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 23 Jun 2021 09:55:28 +0100 Subject: [PATCH 016/134] feed/shard disconnects can be handled, and unbounded output to feeds --- backend/telemetry/src/aggregator.rs | 27 ++++++++++++++++++++++++--- backend/telemetry/src/main.rs | 23 ++++++++++++++--------- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index d4c1ee3..a9b1828 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -49,7 +49,9 @@ pub enum FromShardWebsocket { /// Tell the aggregator that a node has been removed when it disconnects. Remove { local_id: LocalId, - } + }, + /// The shard is disconnected. + Disconnected } /// The aggregator can these messages back to a shard connection. @@ -66,8 +68,10 @@ pub enum ToShardWebsocket { pub enum FromFeedWebsocket { /// When the socket is opened, it'll send this first /// so that we have a way to communicate back to it. + /// Unbounded so that slow feeds don't block aggregato + /// progress. 
Initialize { - channel: mpsc::Sender, + channel: mpsc::UnboundedSender, }, /// The feed can subscribe to a chain to receive /// messages relating to it. @@ -81,7 +85,9 @@ pub enum FromFeedWebsocket { /// An explicit ping message. Ping { chain: Box - } + }, + /// The feed is disconnected. + Disconnected } // The frontend sends text based commands; parse them into these messages: @@ -166,6 +172,7 @@ impl Aggregator { // Now, loop and receive messages to handle. while let Some(msg) = rx_from_external.next().await { match msg { + // FROM FEED ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { mut channel }) => { feed_channels.insert(feed_conn_id, channel.clone()); @@ -268,6 +275,16 @@ impl Aggregator { ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality) => { feed_conn_id_finality.remove(&feed_conn_id); }, + ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Disconnected) => { + // The feed has disconnected; clean up references to it: + if let Some(chain) = feed_conn_id_to_chain.remove(&feed_conn_id) { + chain_to_feed_conn_ids.remove(&chain); + } + feed_channels.remove(&feed_conn_id); + feed_conn_id_finality.remove(&feed_conn_id); + }, + + // FROM SHARD ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Initialize { channel }) => { shard_channels.insert(shard_conn_id, channel); }, @@ -325,6 +342,10 @@ impl Aggregator { // TODO: node_state.update_node, then handle returned diffs }, + ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Disconnected) => { + // The shard has disconnected; remove the shard channel, but also + // remove any nodes associated with the shard, firing the relevant feed messages. 
+ } } } } diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs index 9ce32c1..125b634 100644 --- a/backend/telemetry/src/main.rs +++ b/backend/telemetry/src/main.rs @@ -91,8 +91,10 @@ async fn start_server(opts: Opts) -> anyhow::Result<()> { let tx_to_aggregator = shard_aggregator.subscribe_shard(); log::info!("Opening /shard_submit connection from {:?}", addr); ws.on_upgrade(move |websocket| async move { - let websocket = handle_shard_websocket_connection(websocket, tx_to_aggregator).await; + let (mut tx_to_aggregator, websocket) = handle_shard_websocket_connection(websocket, tx_to_aggregator).await; log::info!("Closing /shard_submit connection from {:?}", addr); + // Tell the aggregator that this connection has closed, so it can tidy up. + let _ = tx_to_aggregator.send(FromShardWebsocket::Disconnected).await; let _ = websocket.close().await; }) }); @@ -106,8 +108,10 @@ async fn start_server(opts: Opts) -> anyhow::Result<()> { let tx_to_aggregator = feed_aggregator.subscribe_feed(); log::info!("Opening /feed connection from {:?}", addr); ws.on_upgrade(move |websocket| async move { - let websocket = handle_feed_websocket_connection(websocket, tx_to_aggregator).await; + let (mut tx_to_aggregator, websocket) = handle_feed_websocket_connection(websocket, tx_to_aggregator).await; log::info!("Closing /feed connection from {:?}", addr); + // Tell the aggregator that this connection has closed, so it can tidy up. 
+ let _ = tx_to_aggregator.send(FromFeedWebsocket::Disconnected).await; let _ = websocket.close().await; }) }); @@ -121,7 +125,7 @@ async fn start_server(opts: Opts) -> anyhow::Result<()> { } /// This handles messages coming to/from a shard connection -async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> ws::WebSocket +async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> (S, ws::WebSocket) where S: futures::Sink + Unpin { let (tx_to_shard_conn, mut rx_from_aggregator) = mpsc::channel(10); @@ -132,7 +136,7 @@ async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut }; if let Err(e) = tx_to_aggregator.send(init_msg).await { log::error!("Error sending message to aggregator: {}", e); - return websocket; + return (tx_to_aggregator, websocket); } // Loop, handling new messages from the shard or from the aggregator: @@ -213,14 +217,15 @@ async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut } // loop ended; give socket back to parent: - websocket + (tx_to_aggregator, websocket) } /// This handles messages coming from a feed connection -async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> ws::WebSocket +async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut tx_to_aggregator: S) -> (S, ws::WebSocket) where S: futures::Sink + Unpin { - let (tx_to_feed_conn, mut rx_from_aggregator) = mpsc::channel(10); + // unbounded channel so that slow feeds don't block aggregator progress: + let (tx_to_feed_conn, mut rx_from_aggregator) = mpsc::unbounded(); // Tell the aggregator about this new connection, and give it a way to send messages to us: let init_msg = FromFeedWebsocket::Initialize { @@ -228,7 +233,7 @@ async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut t }; if let Err(e) = tx_to_aggregator.send(init_msg).await { log::error!("Error sending message to 
aggregator: {}", e); - return websocket; + return (tx_to_aggregator, websocket); } // Loop, handling new messages from the shard or from the aggregator: @@ -290,5 +295,5 @@ async fn handle_feed_websocket_connection(mut websocket: ws::WebSocket, mut t } // loop ended; give socket back to parent: - websocket + (tx_to_aggregator, websocket) } From 2db2677217ac8ab058502d5774fc0841e51eb50a Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 23 Jun 2021 10:12:50 +0100 Subject: [PATCH 017/134] bimap to store global ID mappings: we'll assign them in node state --- backend/Cargo.lock | 8 ++++++ backend/common/Cargo.toml | 1 + backend/common/src/assign_id.rs | 43 ++++++++++++----------------- backend/telemetry/Cargo.toml | 1 + backend/telemetry/src/aggregator.rs | 8 +++--- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/backend/Cargo.lock b/backend/Cargo.lock index 8b89673..d05e8e9 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -44,6 +44,12 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "bimap" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" + [[package]] name = "bincode" version = "1.3.3" @@ -157,6 +163,7 @@ dependencies = [ name = "common" version = "0.1.0" dependencies = [ + "bimap", "bincode", "bytes", "fnv", @@ -1187,6 +1194,7 @@ name = "telemetry" version = "0.1.0" dependencies = [ "anyhow", + "bimap", "bincode", "common", "futures", diff --git a/backend/common/Cargo.toml b/backend/common/Cargo.toml index e873306..391cbbd 100644 --- a/backend/common/Cargo.toml +++ b/backend/common/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies Ltd. 
"] edition = "2018" [dependencies] +bimap = "0.6.1" bytes = "1.0.1" fnv = "1.0.7" hex = "0.4.3" diff --git a/backend/common/src/assign_id.rs b/backend/common/src/assign_id.rs index a85db06..9f4e0a7 100644 --- a/backend/common/src/assign_id.rs +++ b/backend/common/src/assign_id.rs @@ -1,5 +1,6 @@ -use std::{collections::HashMap, hash::Hash}; +use std::hash::Hash; use serde::{Serialize,Deserialize}; +use bimap::BiMap; #[derive(Clone,Copy,Debug,Hash,PartialEq,Eq,Serialize,Deserialize)] pub struct Id(usize); @@ -9,6 +10,11 @@ impl std::convert::From for usize { id.0 } } +impl std::convert::From for Id { + fn from(n: usize) -> Id { + Id(n) + } +} /// A struct that allows you to assign ID to an arbitrary set of /// details (so long as they are Eq+Hash+Clone), and then access @@ -17,60 +23,45 @@ impl std::convert::From for usize { #[derive(Debug)] pub struct AssignId
{ current_id: Id, - from_details: HashMap, - from_id: HashMap + mapping: BiMap } -impl
AssignId
where Details: Eq + Hash + Clone { +impl
AssignId
where Details: Eq + Hash { pub fn new() -> Self { Self { current_id: Id(0), - from_details: HashMap::new(), - from_id: HashMap::new() + mapping: BiMap::new() } } pub fn assign_id(&mut self, details: Details) -> Id { let this_id = self.current_id; self.current_id.0 += 1; - - self.from_details.insert(details.clone(), this_id); - self.from_id.insert(this_id, details); - + self.mapping.insert(this_id, details); this_id } pub fn get_details(&mut self, id: Id) -> Option<&Details> { - self.from_id.get(&id) + self.mapping.get_by_left(&id) } pub fn get_id(&mut self, details: &Details) -> Option { - self.from_details.get(details).map(|id| *id) + self.mapping.get_by_right(details).map(|id| *id) } pub fn remove_by_id(&mut self, id: Id) -> Option
{ - if let Some(details) = self.from_id.remove(&id) { - self.from_details.remove(&details); - Some(details) - } else { - None - } + self.mapping.remove_by_left(&id).map(|(_,details)| details) } pub fn remove_by_details(&mut self, details: &Details) -> Option { - if let Some(id) = self.from_details.remove(&details) { - self.from_id.remove(&id); - Some(id) - } else { - None - } + self.mapping.remove_by_right(&details).map(|(id,_)| id) } pub fn clear(&mut self) { - *self = AssignId::new() + *self = AssignId::new(); } pub fn iter(&self) -> impl Iterator { - self.from_id.iter().map(|(id, details)| (*id, details)) + self.mapping.iter().map(|(id, details)| (*id, details)) } } \ No newline at end of file diff --git a/backend/telemetry/Cargo.toml b/backend/telemetry/Cargo.toml index 0f29d50..bfe352a 100644 --- a/backend/telemetry/Cargo.toml +++ b/backend/telemetry/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] anyhow = "1.0.41" +bimap = "0.6.1" bincode = "1.3.3" common = { path = "../common" } futures = "0.3.15" diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs index a9b1828..8b1e227 100644 --- a/backend/telemetry/src/aggregator.rs +++ b/backend/telemetry/src/aggregator.rs @@ -1,9 +1,9 @@ use common::{ internal_messages::{GlobalId, LocalId}, node, - assign_id::AssignId, util::now }; +use bimap::BiMap; use std::{str::FromStr, sync::Arc}; use std::sync::atomic::AtomicU64; use futures::channel::{ mpsc, oneshot }; @@ -155,7 +155,7 @@ impl Aggregator { // Maintain mappings from the shard connection ID and local ID of messages to a global ID // that uniquely identifies nodes in our node state. - let mut to_global_node_id = AssignId::new(); + let mut global_ids: BiMap = BiMap::new(); // Keep track of channels to communicate with feeds and shards: let mut feed_channels = HashMap::new(); @@ -294,13 +294,13 @@ impl Aggregator { // TODO: node_state.add_node. Every feed should know about node count changes. 
}, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Remove { local_id }) => { - if let Some(id) = to_global_node_id.remove_by_details(&(shard_conn_id, local_id)) { + if let Some(id) = global_ids.remove_by_right(&(shard_conn_id, local_id)) { // TODO: node_state.remove_node, Every feed should know about node count changes. } }, ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { // TODO: Fill this all in... - let global_node_id = match to_global_node_id.get_id(&(shard_conn_id, local_id)) { + let global_node_id = match global_ids.get_by_right(&(shard_conn_id, local_id)) { Some(id) => id, None => continue }; From 47c12ce210d73c88330f7354c6bcea71d79f4865 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 23 Jun 2021 18:03:11 +0100 Subject: [PATCH 018/134] Lots more refactoring, finish add node (and almost the location updating) --- backend/Cargo.lock | 311 +++++++++++++ backend/common/src/assign_id.rs | 2 +- backend/common/src/internal_messages.rs | 13 +- backend/common/src/lib.rs | 3 +- backend/common/src/most_seen.rs | 109 +++++ backend/shard/src/aggregator.rs | 2 +- backend/telemetry/Cargo.toml | 3 + backend/telemetry/src/aggregator.rs | 381 ---------------- .../telemetry/src/aggregator/aggregator.rs | 83 ++++ .../telemetry/src/aggregator/find_location.rs | 206 +++++++++ .../telemetry/src/aggregator/inner_loop.rs | 423 ++++++++++++++++++ backend/telemetry/src/aggregator/mod.rs | 8 + backend/telemetry/src/main.rs | 4 +- backend/telemetry/src/state/chain.rs | 100 ++++- backend/telemetry/src/state/mod.rs | 6 +- backend/telemetry/src/state/state.rs | 102 +++-- 16 files changed, 1297 insertions(+), 459 deletions(-) create mode 100644 backend/common/src/most_seen.rs delete mode 100644 backend/telemetry/src/aggregator.rs create mode 100644 backend/telemetry/src/aggregator/aggregator.rs create mode 100644 backend/telemetry/src/aggregator/find_location.rs create mode 100644 
backend/telemetry/src/aggregator/inner_loop.rs create mode 100644 backend/telemetry/src/aggregator/mod.rs diff --git a/backend/Cargo.lock b/backend/Cargo.lock index d05e8e9..7c52f7c 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -96,6 +96,12 @@ dependencies = [ "safemem", ] +[[package]] +name = "bumpalo" +version = "3.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" + [[package]] name = "byte-slice-cast" version = "1.0.0" @@ -114,6 +120,12 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +[[package]] +name = "cc" +version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" + [[package]] name = "cfg-if" version = "1.0.0" @@ -177,6 +189,22 @@ dependencies = [ "thiserror", ] +[[package]] +name = "core-foundation" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + [[package]] name = "cpufeatures" version = "0.1.4" @@ -201,6 +229,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "encoding_rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +dependencies = [ + "cfg-if", +] + [[package]] name = "fixed-hash" version = "0.7.0" @@ -219,6 +256,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.0.1" @@ -493,6 +545,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + [[package]] name = "idna" version = "0.2.3" @@ -550,12 +615,27 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "ipnet" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" + [[package]] name = "itoa" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +[[package]] +name = "js-sys" +version = "0.3.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +dependencies = [ + "wasm-bindgen", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -654,6 +734,24 @@ dependencies = [ "twoway", ] +[[package]] +name = "native-tls" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", 
+ "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "ntapi" version = "0.3.6" @@ -704,6 +802,39 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl" +version = "0.10.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "549430950c79ae24e6d02e0b7404534ecf311d94cc9f861e9e4020187d13d885" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-sys", +] + +[[package]] +name = "openssl-probe" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" + +[[package]] +name = "openssl-sys" +version = "0.9.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a7907e3bfa08bb85105209cdfcb6c63d109f8f6c1ed6ca318fff5c1853fbc1d" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "parity-scale-codec" version = "2.1.3" @@ -779,6 +910,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkg-config" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" + [[package]] name = "ppv-lite86" version = "0.2.10" @@ -962,6 +1099,41 @@ dependencies = [ "winapi", ] +[[package]] +name = "reqwest" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body", + 
"hyper", + "hyper-tls", + "ipnet", + "js-sys", + "lazy_static", + "log", + "mime", + "native-tls", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "rustc-hash" version = "1.1.0" @@ -986,6 +1158,16 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + [[package]] name = "scoped-tls" version = "1.0.0" @@ -998,6 +1180,29 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "security-framework" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e4effb91b4b8b6fb7732e670b6cee160278ff8e6bf485c7805d9e319d76e284" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "serde" version = "1.0.126" @@ -1202,7 +1407,10 @@ dependencies = [ "http", "log", "once_cell", + "parking_lot", "primitive-types", + "reqwest", + "rustc-hash", "serde", "serde_json", "simple_logger", @@ -1314,6 +1522,16 @@ dependencies = [ "syn", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-stream" version = "0.1.6" @@ -1495,6 +1713,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + [[package]] name = "vec_map" version = "0.8.2" @@ -1558,6 +1782,84 @@ version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +[[package]] +name = "wasm-bindgen" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +dependencies = [ + "cfg-if", + "serde", + "serde_json", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +dependencies 
= [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" + +[[package]] +name = "web-sys" +version = "0.3.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "winapi" version = "0.3.9" @@ -1580,6 +1882,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +dependencies = [ + "winapi", +] + [[package]] name = "wyz" version = "0.2.0" diff --git a/backend/common/src/assign_id.rs b/backend/common/src/assign_id.rs index 9f4e0a7..e52c41f 100644 --- a/backend/common/src/assign_id.rs +++ b/backend/common/src/assign_id.rs @@ -16,7 +16,7 @@ impl std::convert::From for Id { } } -/// A struct that allows you to assign ID to an arbitrary set of +/// A struct that allows you to assign an ID to an arbitrary set of /// details (so long as they are Eq+Hash+Clone), and then access /// the assigned ID given those details or access the details given /// the ID. 
diff --git a/backend/common/src/internal_messages.rs b/backend/common/src/internal_messages.rs index b2b1b9b..a9c1d7d 100644 --- a/backend/common/src/internal_messages.rs +++ b/backend/common/src/internal_messages.rs @@ -9,9 +9,6 @@ use serde::{Deserialize, Serialize}; /// might send data on behalf of more than one chain. pub type LocalId = Id; -/// A global ID assigned to messages from each different pair of ConnId+LocalId. -pub type GlobalId = usize; - /// Message sent from the shard to the backend core #[derive(Deserialize, Serialize, Debug, Clone)] pub enum FromShardAggregator { @@ -37,6 +34,14 @@ pub enum FromShardAggregator { #[derive(Deserialize, Serialize, Debug, Clone)] pub enum FromTelemetryCore { Mute { - local_id: LocalId + local_id: LocalId, + reason: MuteReason } } + +/// Why is the thing being muted? +#[derive(Deserialize, Serialize, Debug, Clone)] +pub enum MuteReason { + Overquota, + ChainNotAllowed +} \ No newline at end of file diff --git a/backend/common/src/lib.rs b/backend/common/src/lib.rs index ec5b967..f0c068c 100644 --- a/backend/common/src/lib.rs +++ b/backend/common/src/lib.rs @@ -4,4 +4,5 @@ pub mod types; pub mod util; pub mod json; pub mod log_level; -pub mod assign_id; \ No newline at end of file +pub mod assign_id; +pub mod most_seen; \ No newline at end of file diff --git a/backend/common/src/most_seen.rs b/backend/common/src/most_seen.rs new file mode 100644 index 0000000..e2b0c64 --- /dev/null +++ b/backend/common/src/most_seen.rs @@ -0,0 +1,109 @@ +use std::collections::HashMap; +use std::hash::Hash; + +/// Add items to this, and it will keep track of what the item +/// seen the most is. 
+#[derive(Debug)] +pub struct MostSeen { + current_best: T, + current_count: usize, + others: HashMap +} + +impl MostSeen { + pub fn new(item: T) -> Self { + Self { + current_best: item, + current_count: 1, + others: HashMap::new() + } + } + pub fn best(&self) -> &T { + &self.current_best + } +} + +impl MostSeen { + pub fn insert(&mut self, item: &T) -> ChangeResult { + if &self.current_best == item { + // Item already the best one; bump count. + self.current_count += 1; + return ChangeResult::NoChange; + } + + // Item not the best; increment count in map + let item_count = self.others.entry(item.clone()).or_default(); + *item_count += 1; + + // Is item now the best? + if *item_count > self.current_count { + let (item, count) = self.others + .remove_entry(item) + .expect("item added above"); + self.current_best = item; + self.current_count = count; + + ChangeResult::NewMostSeenItem + } else { + ChangeResult::NoChange + } + } + pub fn remove(&mut self, item: &T) -> ChangeResult { + if &self.current_best == item { + // Item already the best one; reduce count + self.current_count -= 1; + + // Is there a new best? + let other_best = self.others + .iter() + .max_by_key(|f| f.1); + + let (other_item, &other_count) = match other_best { + Some(item) => item, + None => { return ChangeResult::NoChange } + }; + + if other_count > self.current_count { + // Clone item to unborrow self.others so that we can remove + // the item from it. We could pre-emptively remove and reinsert + // instead, but most of the time there is no change, so I'm + // aiming to keep that path cheaper. + let other_item = other_item.clone(); + let (other_item, other_count) = self.others + .remove_entry(&other_item) + .expect("item returned above, so def exists"); + + self.current_best = other_item; + self.current_count = other_count; + + return ChangeResult::NewMostSeenItem; + } else { + return ChangeResult::NoChange; + } + } + + // Item is in the map; not the best anyway. decrement count. 
+ if let Some(count) = self.others.get_mut(item) { + *count += 1; + } + ChangeResult::NoChange + } +} + +/// Record the result of adding/removing an entry +#[derive(Clone,Copy)] +pub enum ChangeResult { + /// The best item has remained the same. + NoChange, + /// There is a new best item now. + NewMostSeenItem +} + +impl ChangeResult { + pub fn has_changed(self) -> bool { + match self { + ChangeResult::NewMostSeenItem => true, + ChangeResult::NoChange => false + } + } +} \ No newline at end of file diff --git a/backend/shard/src/aggregator.rs b/backend/shard/src/aggregator.rs index a6e5f1a..3c89a7e 100644 --- a/backend/shard/src/aggregator.rs +++ b/backend/shard/src/aggregator.rs @@ -202,7 +202,7 @@ impl Aggregator { let _ = tx_to_telemetry_core.send(FromShardAggregator::RemoveNode { local_id }).await; } }, - ToAggregator::FromTelemetryCore(FromTelemetryCore::Mute { local_id }) => { + ToAggregator::FromTelemetryCore(FromTelemetryCore::Mute { local_id, reason: _ }) => { // Ignore incoming messages if we're not connected to the backend: if !connected_to_telemetry_core { continue } diff --git a/backend/telemetry/Cargo.toml b/backend/telemetry/Cargo.toml index bfe352a..ace4369 100644 --- a/backend/telemetry/Cargo.toml +++ b/backend/telemetry/Cargo.toml @@ -14,7 +14,10 @@ hex = "0.4.3" http = "0.2.4" log = "0.4.14" once_cell = "1.8.0" +parking_lot = "0.11.1" primitive-types = { version = "0.9.0", features = ["serde"] } +reqwest = { version = "0.11.4", features = ["json"] } +rustc-hash = "1.1.0" serde = { version = "1.0.126", features = ["derive"] } serde_json = "1.0.64" simple_logger = "1.11.0" diff --git a/backend/telemetry/src/aggregator.rs b/backend/telemetry/src/aggregator.rs deleted file mode 100644 index 8b1e227..0000000 --- a/backend/telemetry/src/aggregator.rs +++ /dev/null @@ -1,381 +0,0 @@ -use common::{ - internal_messages::{GlobalId, LocalId}, - node, - util::now -}; -use bimap::BiMap; -use std::{str::FromStr, sync::Arc}; -use std::sync::atomic::AtomicU64; 
-use futures::channel::{ mpsc, oneshot }; -use futures::{ Sink, SinkExt, StreamExt }; -use tokio::net::TcpStream; -use tokio_util::compat::{ TokioAsyncReadCompatExt }; -use std::collections::{ HashMap, HashSet }; -use crate::state::State; -use crate::feed_message::{ self, FeedMessageSerializer }; - -/// A unique Id is assigned per websocket connection (or more accurately, -/// per feed socket and per shard socket). This can be combined with the -/// [`LocalId`] of messages to give us a global ID. -type ConnId = u64; - -/// Incoming messages come via subscriptions, and end up looking like this. -#[derive(Debug)] -enum ToAggregator { - FromShardWebsocket(ConnId, FromShardWebsocket), - FromFeedWebsocket(ConnId, FromFeedWebsocket), -} - -/// An incoming shard connection can send these messages to the aggregator. -#[derive(Debug)] -pub enum FromShardWebsocket { - /// When the socket is opened, it'll send this first - /// so that we have a way to communicate back to it. - Initialize { - channel: mpsc::Sender, - }, - /// Tell the aggregator about a new node. - Add { - local_id: LocalId, - ip: Option, - node: common::types::NodeDetails, - genesis_hash: common::types::BlockHash - }, - /// Update/pass through details about a node. - Update { - local_id: LocalId, - payload: node::Payload - }, - /// Tell the aggregator that a node has been removed when it disconnects. - Remove { - local_id: LocalId, - }, - /// The shard is disconnected. - Disconnected -} - -/// The aggregator can these messages back to a shard connection. -#[derive(Debug)] -pub enum ToShardWebsocket { - /// Mute messages to the core by passing the shard-local ID of them. - Mute { - local_id: LocalId - } -} - -/// An incoming feed connection can send these messages to the aggregator. -#[derive(Debug)] -pub enum FromFeedWebsocket { - /// When the socket is opened, it'll send this first - /// so that we have a way to communicate back to it. - /// Unbounded so that slow feeds don't block aggregato - /// progress. 
- Initialize { - channel: mpsc::UnboundedSender, - }, - /// The feed can subscribe to a chain to receive - /// messages relating to it. - Subscribe { - chain: Box - }, - /// The feed wants finality info for the chain, too. - SendFinality, - /// The feed doesn't want any more finality info for the chain. - NoMoreFinality, - /// An explicit ping message. - Ping { - chain: Box - }, - /// The feed is disconnected. - Disconnected -} - -// The frontend sends text based commands; parse them into these messages: -impl FromStr for FromFeedWebsocket { - type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - let (cmd, chain) = match s.find(':') { - Some(idx) => (&s[..idx], s[idx+1..].into()), - None => return Err(anyhow::anyhow!("Expecting format `CMD:CHAIN_NAME`")) - }; - match cmd { - "ping" => Ok(FromFeedWebsocket::Ping { chain }), - "subscribe" => Ok(FromFeedWebsocket::Subscribe { chain }), - "send-finality" => Ok(FromFeedWebsocket::SendFinality), - "no-more-finality" => Ok(FromFeedWebsocket::NoMoreFinality), - _ => return Err(anyhow::anyhow!("Command {} not recognised", cmd)) - } - } -} - -/// The aggregator can these messages back to a feed connection. -#[derive(Debug)] -pub enum ToFeedWebsocket { - Bytes(Vec) -} - -#[derive(Clone)] -pub struct Aggregator(Arc); - -struct AggregatorInternal { - /// Shards that connect are each assigned a unique connection ID. - /// This helps us know who to send messages back to (especially in - /// conjunction with the [`LocalId`] that messages will come with). - shard_conn_id: AtomicU64, - /// Feeds that connect have their own unique connection ID, too. - feed_conn_id: AtomicU64, - /// Send messages in to the aggregator from the outside via this. This is - /// stored here so that anybody holding an `Aggregator` handle can - /// make use of it. - tx_to_aggregator: mpsc::Sender -} - -impl Aggregator { - /// Spawn a new Aggregator. 
This connects to the telemetry backend - pub async fn spawn(denylist: Vec) -> anyhow::Result { - let (tx_to_aggregator, rx_from_external) = mpsc::channel(10); - - // Handle any incoming messages in our handler loop: - tokio::spawn(Aggregator::handle_messages(rx_from_external, denylist)); - - // Return a handle to our aggregator: - Ok(Aggregator(Arc::new(AggregatorInternal { - shard_conn_id: AtomicU64::new(1), - feed_conn_id: AtomicU64::new(1), - tx_to_aggregator, - }))) - } - - // This is spawned into a separate task and handles any messages coming - // in to the aggregator. If nobody is tolding the tx side of the channel - // any more, this task will gracefully end. - async fn handle_messages(mut rx_from_external: mpsc::Receiver, denylist: Vec) { - - let mut node_state = State::new(denylist); - - // Maintain mappings from the shard connection ID and local ID of messages to a global ID - // that uniquely identifies nodes in our node state. - let mut global_ids: BiMap = BiMap::new(); - - // Keep track of channels to communicate with feeds and shards: - let mut feed_channels = HashMap::new(); - let mut shard_channels = HashMap::new(); - - // What chains have our feeds subscribed to (one at a time at the mo)? - // Both of these need to be kept in sync (should move to own struct eventually). - let mut feed_conn_id_to_chain: HashMap> = HashMap::new(); - let mut chain_to_feed_conn_ids: HashMap, HashSet> = HashMap::new(); - - // Which feeds want finality info too? - let mut feed_conn_id_finality: HashSet = HashSet::new(); - - // Now, loop and receive messages to handle. 
- while let Some(msg) = rx_from_external.next().await { - match msg { - // FROM FEED - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Initialize { mut channel }) => { - feed_channels.insert(feed_conn_id, channel.clone()); - - // Tell the new feed subscription some basic things to get it going: - let mut feed_serializer = FeedMessageSerializer::new(); - feed_serializer.push(feed_message::Version(31)); - for chain in node_state.iter_chains() { - feed_serializer.push(feed_message::AddedChain( - chain.label(), - chain.node_count() - )); - } - - // Send this to the channel that subscribed: - if let Some(bytes) = feed_serializer.into_finalized() { - let _ = channel.send(ToFeedWebsocket::Bytes(bytes)).await; - } - }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Ping { chain }) => { - let feed_channel = match feed_channels.get_mut(&feed_conn_id) { - Some(chan) => chan, - None => continue - }; - - // Pong! - let mut feed_serializer = FeedMessageSerializer::new(); - feed_serializer.push(feed_message::Pong(&chain)); - if let Some(bytes) = feed_serializer.into_finalized() { - let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; - } - }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Subscribe { chain }) => { - let feed_channel = match feed_channels.get_mut(&feed_conn_id) { - Some(chan) => chan, - None => continue - }; - - // Unsubscribe from previous chain if subscribed to one: - let old_chain_label = feed_conn_id_to_chain.remove(&feed_conn_id); - if let Some(old_chain_label) = &old_chain_label { - if let Some(map) = chain_to_feed_conn_ids.get_mut(old_chain_label) { - map.remove(&feed_conn_id); - } - } - - // Untoggle request for finality feeds: - feed_conn_id_finality.remove(&feed_conn_id); - - // Get the chain we're subscribing to, ignoring the rest if it doesn't exist. 
- let chain = match node_state.get_chain_by_label(&chain) { - Some(chain) => chain, - None => continue - }; - - // Send messages to the feed about the new chain: - let mut feed_serializer = FeedMessageSerializer::new(); - if let Some(old_chain_label) = old_chain_label { - feed_serializer.push(feed_message::UnsubscribedFrom(&old_chain_label)); - } - feed_serializer.push(feed_message::SubscribedTo(chain.label())); - feed_serializer.push(feed_message::TimeSync(now())); - feed_serializer.push(feed_message::BestBlock ( - chain.best_block().height, - chain.timestamp(), - chain.average_block_time() - )); - feed_serializer.push(feed_message::BestFinalized ( - chain.finalized_block().height, - chain.finalized_block().hash - )); - for (idx, (gid, node)) in node_state.get_nodes_in_chain(chain).enumerate() { - // Send subscription confirmation and chain head before doing all the nodes, - // and continue sending batches of 32 nodes a time over the wire subsequently - if idx % 32 == 0 { - if let Some(bytes) = feed_serializer.finalize() { - let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; - } - } - feed_serializer.push(feed_message::AddedNode(gid, node)); - feed_serializer.push(feed_message::FinalizedBlock( - gid, - node.finalized().height, - node.finalized().hash, - )); - if node.stale() { - feed_serializer.push(feed_message::StaleNode(gid)); - } - } - if let Some(bytes) = feed_serializer.into_finalized() { - let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await; - } - - // Actually make a note of the new chain subsciption: - feed_conn_id_to_chain.insert(feed_conn_id, chain.label().into()); - chain_to_feed_conn_ids.entry(chain.label().into()).or_default().insert(feed_conn_id); - }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::SendFinality) => { - feed_conn_id_finality.insert(feed_conn_id); - }, - ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::NoMoreFinality) => { - feed_conn_id_finality.remove(&feed_conn_id); - }, - 
ToAggregator::FromFeedWebsocket(feed_conn_id, FromFeedWebsocket::Disconnected) => { - // The feed has disconnected; clean up references to it: - if let Some(chain) = feed_conn_id_to_chain.remove(&feed_conn_id) { - chain_to_feed_conn_ids.remove(&chain); - } - feed_channels.remove(&feed_conn_id); - feed_conn_id_finality.remove(&feed_conn_id); - }, - - // FROM SHARD - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Initialize { channel }) => { - shard_channels.insert(shard_conn_id, channel); - }, - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Add { local_id, ip, node, genesis_hash }) => { - // Get globalId from add_node and store that against shard/local_id. - - // TODO: node_state.add_node. Every feed should know about node count changes. - }, - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Remove { local_id }) => { - if let Some(id) = global_ids.remove_by_right(&(shard_conn_id, local_id)) { - // TODO: node_state.remove_node, Every feed should know about node count changes. - } - }, - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Update { local_id, payload }) => { - // TODO: Fill this all in... - let global_node_id = match global_ids.get_by_right(&(shard_conn_id, local_id)) { - Some(id) => id, - None => continue - }; - - if let Some(block) = payload.best_block() { - - } - - match payload { - node::Payload::SystemInterval(system_interval) => { - - }, - node::Payload::AfgAuthoritySet(_) => { - - }, - node::Payload::AfgFinalized(_) => { - - }, - node::Payload::AfgReceivedPrecommit(_) => { - - }, - node::Payload::AfgReceivedPrevote(_) => { - - }, - // This message should have been handled before the payload made it this far: - node::Payload::SystemConnected(_) => { - unreachable!("SystemConnected message seen in Telemetry Core, but should have been handled in shard"); - }, - // The following messages aren't handled at the moment. 
List them explicitly so - // that we have to make an explicit choice for any new messages: - node::Payload::BlockImport(_) | - node::Payload::NotifyFinalized(_) | - node::Payload::AfgReceivedCommit(_) | - node::Payload::TxPoolImport | - node::Payload::AfgFinalizedBlocksUpTo | - node::Payload::AuraPreSealedBlock | - node::Payload::PreparedBlockForProposing => {}, - } - - // TODO: node_state.update_node, then handle returned diffs - }, - ToAggregator::FromShardWebsocket(shard_conn_id, FromShardWebsocket::Disconnected) => { - // The shard has disconnected; remove the shard channel, but also - // remove any nodes associated with the shard, firing the relevant feed messages. - } - } - } - } - - /// Return a sink that a shard can send messages into to be handled by the aggregator. - pub fn subscribe_shard(&self) -> impl Sink + Unpin { - // Assign a unique aggregator-local ID to each connection that subscribes, and pass - // that along with every message to the aggregator loop: - let shard_conn_id: ConnId = self.0.shard_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - let tx_to_aggregator = self.0.tx_to_aggregator.clone(); - - // Calling `send` on this Sink requires Unpin. There may be a nicer way than this, - // but pinning by boxing is the easy solution for now: - Box::pin(tx_to_aggregator.with(move |msg| async move { - Ok(ToAggregator::FromShardWebsocket(shard_conn_id, msg)) - })) - } - - /// Return a sink that a feed can send messages into to be handled by the aggregator. - pub fn subscribe_feed(&self) -> impl Sink + Unpin { - // Assign a unique aggregator-local ID to each connection that subscribes, and pass - // that along with every message to the aggregator loop: - let feed_conn_id: ConnId = self.0.feed_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - let tx_to_aggregator = self.0.tx_to_aggregator.clone(); - - // Calling `send` on this Sink requires Unpin. 
There may be a nicer way than this, - // but pinning by boxing is the easy solution for now: - Box::pin(tx_to_aggregator.with(move |msg| async move { - Ok(ToAggregator::FromFeedWebsocket(feed_conn_id, msg)) - })) - } - -} \ No newline at end of file diff --git a/backend/telemetry/src/aggregator/aggregator.rs b/backend/telemetry/src/aggregator/aggregator.rs new file mode 100644 index 0000000..90e3b1f --- /dev/null +++ b/backend/telemetry/src/aggregator/aggregator.rs @@ -0,0 +1,83 @@ +use std::sync::Arc; +use std::sync::atomic::AtomicU64; +use futures::channel::mpsc; +use futures::{ Sink, SinkExt }; +use super::inner_loop; + +/// A unique Id is assigned per websocket connection (or more accurately, +/// per feed socket and per shard socket). This can be combined with the +/// [`LocalId`] of messages to give us a global ID. +type ConnId = u64; + +#[derive(Clone)] +pub struct Aggregator(Arc); + +struct AggregatorInternal { + /// Shards that connect are each assigned a unique connection ID. + /// This helps us know who to send messages back to (especially in + /// conjunction with the [`LocalId`] that messages will come with). + shard_conn_id: AtomicU64, + /// Feeds that connect have their own unique connection ID, too. + feed_conn_id: AtomicU64, + /// Send messages in to the aggregator from the outside via this. This is + /// stored here so that anybody holding an `Aggregator` handle can + /// make use of it. + tx_to_aggregator: mpsc::Sender +} + +impl Aggregator { + /// Spawn a new Aggregator. 
This connects to the telemetry backend + pub async fn spawn(denylist: Vec) -> anyhow::Result { + let (tx_to_aggregator, rx_from_external) = mpsc::channel(10); + + // Handle any incoming messages in our handler loop: + tokio::spawn(Aggregator::handle_messages(rx_from_external, tx_to_aggregator.clone(), denylist)); + + // Return a handle to our aggregator: + Ok(Aggregator(Arc::new(AggregatorInternal { + shard_conn_id: AtomicU64::new(1), + feed_conn_id: AtomicU64::new(1), + tx_to_aggregator, + }))) + } + + // This is spawned into a separate task and handles any messages coming + // in to the aggregator. If nobody is tolding the tx side of the channel + // any more, this task will gracefully end. + async fn handle_messages( + rx_from_external: mpsc::Receiver, + tx_to_aggregator: mpsc::Sender, + denylist: Vec + ) { + inner_loop::InnerLoop::new(rx_from_external, tx_to_aggregator, denylist).handle().await; + } + + /// Return a sink that a shard can send messages into to be handled by the aggregator. + pub fn subscribe_shard(&self) -> impl Sink + Unpin { + // Assign a unique aggregator-local ID to each connection that subscribes, and pass + // that along with every message to the aggregator loop: + let shard_conn_id: ConnId = self.0.shard_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let tx_to_aggregator = self.0.tx_to_aggregator.clone(); + + // Calling `send` on this Sink requires Unpin. There may be a nicer way than this, + // but pinning by boxing is the easy solution for now: + Box::pin(tx_to_aggregator.with(move |msg| async move { + Ok(inner_loop::ToAggregator::FromShardWebsocket(shard_conn_id, msg)) + })) + } + + /// Return a sink that a feed can send messages into to be handled by the aggregator. 
+ pub fn subscribe_feed(&self) -> impl Sink + Unpin { + // Assign a unique aggregator-local ID to each connection that subscribes, and pass + // that along with every message to the aggregator loop: + let feed_conn_id: ConnId = self.0.feed_conn_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let tx_to_aggregator = self.0.tx_to_aggregator.clone(); + + // Calling `send` on this Sink requires Unpin. There may be a nicer way than this, + // but pinning by boxing is the easy solution for now: + Box::pin(tx_to_aggregator.with(move |msg| async move { + Ok(inner_loop::ToAggregator::FromFeedWebsocket(feed_conn_id, msg)) + })) + } + +} \ No newline at end of file diff --git a/backend/telemetry/src/aggregator/find_location.rs b/backend/telemetry/src/aggregator/find_location.rs new file mode 100644 index 0000000..4aa2834 --- /dev/null +++ b/backend/telemetry/src/aggregator/find_location.rs @@ -0,0 +1,206 @@ +use std::net::Ipv4Addr; +use std::sync::Arc; + +use parking_lot::RwLock; +use rustc_hash::FxHashMap; +use serde::Deserialize; +use futures::{Sink, SinkExt, StreamExt}; +use futures::channel::mpsc; + +use common::types::NodeLocation; +use tokio::sync::Semaphore; + +/// The returned location is optional; it may be None if not found. +pub type Location = Option>; + +/// This is responsible for taking an IP address and attempting +/// to find a geographical location from this +pub fn find_location(response_chan: R) -> mpsc::UnboundedSender<(Id, Ipv4Addr)> +where + R: Sink<(Id, Option>)> + Unpin + Send + Clone + 'static, + Id: Clone + Send + 'static +{ + let (tx, mut rx) = mpsc::unbounded(); + + // cache entries + let mut cache: FxHashMap>> = FxHashMap::default(); + + // Default entry for localhost + cache.insert( + Ipv4Addr::new(127, 0, 0, 1), + Some(Arc::new(NodeLocation { + latitude: 52.516_6667, + longitude: 13.4, + city: "Berlin".into(), + })), + ); + + // Create a locator with our cache. This is used to obtain locations. 
+ let locator = Locator::new(cache); + + // Spawn a loop to handle location requests + tokio::spawn(async move { + + // Allow 4 requests at a time. acquiring a token will block while the + // number of concurrent location requests is more than this. + let semaphore = Arc::new(Semaphore::new(4)); + + loop { + while let Some((id, ip_address)) = rx.next().await { + + let permit = semaphore.clone().acquire_owned().await.unwrap(); + let mut response_chan = response_chan.clone(); + let locator = locator.clone(); + + // Once we have acquired our permit, spawn a task to avoid + // blocking this loop so that we can handle concurrent requests. + tokio::spawn(async move { + match locator.locate(ip_address).await { + Ok(loc) => { + let _ = response_chan.send((id,loc)).await; + }, + Err(e) => { + log::debug!("GET error for ip location: {:?}", e); + } + }; + + // ensure permit is moved into task by dropping it explicitly: + drop(permit); + }); + } + } + }); + + tx +} + +/// This struct can be used to make location requests, given +/// an IPV4 address. 
+#[derive(Clone)] +struct Locator { + client: reqwest::Client, + cache: Arc>>>>, +} + +impl Locator { + pub fn new(cache: FxHashMap>>) -> Self { + let client = reqwest::Client::new(); + + Locator { + client, + cache: Arc::new(RwLock::new(cache)) + } + } + + pub async fn locate(&self, ip: Ipv4Addr) -> Result>, reqwest::Error> { + // Return location quickly if it's cached: + let cached_loc = { + let cache_reader = self.cache.read(); + cache_reader.get(&ip).map(|o| o.clone()) + }; + if let Some(loc) = cached_loc { + return Ok(loc); + } + + // Look it up via the location services if not cached: + let location = self.iplocate_ipapi_co(ip).await?; + let location = match location { + Some(location) => Ok(Some(location)), + None => self.iplocate_ipinfo_io(ip).await, + }?; + + self.cache.write().insert(ip, location.clone()); + Ok(location) + } + + async fn iplocate_ipapi_co(&self, ip: Ipv4Addr) -> Result>, reqwest::Error> { + let location = self + .query(&format!("https://ipapi.co/{}/json", ip)) + .await? + .map(Arc::new); + + Ok(location) + } + + async fn iplocate_ipinfo_io(&self, ip: Ipv4Addr) -> Result>, reqwest::Error> { + let location = self + .query(&format!("https://ipinfo.io/{}/json", ip)) + .await? + .and_then(|loc: IPApiLocate| loc.into_node_location().map(Arc::new)); + + Ok(location) + } + + async fn query(&self, url: &str) -> Result, reqwest::Error> + where for<'de> T: Deserialize<'de> + { + match self.client.get(url).send().await?.json::().await { + Ok(result) => Ok(Some(result)), + Err(err) => { + log::debug!("JSON error for ip location: {:?}", err); + Ok(None) + } + } + } +} + +/// This is the format returned from ipinfo.co, so we do +/// a little conversion to get it into the shape we want. 
+#[derive(Deserialize)] +struct IPApiLocate { + city: Box, + loc: Box, +} + +impl IPApiLocate { + fn into_node_location(self) -> Option { + let IPApiLocate { city, loc } = self; + + let mut loc = loc.split(',').map(|n| n.parse()); + + let latitude = loc.next()?.ok()?; + let longitude = loc.next()?.ok()?; + + // Guarantee that the iterator has been exhausted + if loc.next().is_some() { + return None; + } + + Some(NodeLocation { + latitude, + longitude, + city, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ipapi_locate_to_node_location() { + let ipapi = IPApiLocate { + loc: "12.5,56.25".into(), + city: "Foobar".into(), + }; + + let location = ipapi.into_node_location().unwrap(); + + assert_eq!(location.latitude, 12.5); + assert_eq!(location.longitude, 56.25); + assert_eq!(&*location.city, "Foobar"); + } + + #[test] + fn ipapi_locate_to_node_location_too_many() { + let ipapi = IPApiLocate { + loc: "12.5,56.25,1.0".into(), + city: "Foobar".into(), + }; + + let location = ipapi.into_node_location(); + + assert!(location.is_none()); + } +} \ No newline at end of file diff --git a/backend/telemetry/src/aggregator/inner_loop.rs b/backend/telemetry/src/aggregator/inner_loop.rs new file mode 100644 index 0000000..cc77daf --- /dev/null +++ b/backend/telemetry/src/aggregator/inner_loop.rs @@ -0,0 +1,423 @@ +use common::{ + internal_messages::{ + self, + LocalId, + MuteReason + }, + node, + util::now +}; +use bimap::BiMap; +use std::{iter::FromIterator, net::Ipv4Addr, str::FromStr}; +use futures::channel::{ mpsc }; +use futures::{ future, SinkExt, StreamExt }; +use std::collections::{ HashMap, HashSet }; +use crate::state::{ self, State, NodeId }; +use crate::feed_message::{ self, FeedMessageSerializer }; +use super::find_location::{ self, find_location }; + +/// A unique Id is assigned per websocket connection (or more accurately, +/// per feed socket and per shard socket). 
This can be combined with the
+/// [`LocalId`] of messages to give us a global ID.
+type ConnId = u64;
+
+/// Incoming messages come via subscriptions, and end up looking like this.
+#[derive(Clone,Debug)]
+pub enum ToAggregator {
+    FromShardWebsocket(ConnId, FromShardWebsocket),
+    FromFeedWebsocket(ConnId, FromFeedWebsocket),
+    FromFindLocation(NodeId, find_location::Location)
+}
+
+/// An incoming shard connection can send these messages to the aggregator.
+#[derive(Clone,Debug)]
+pub enum FromShardWebsocket {
+    /// When the socket is opened, it'll send this first
+    /// so that we have a way to communicate back to it.
+    Initialize {
+        // NOTE(review): generic parameter lost in recovery; presumably
+        // `mpsc::Sender<ToShardWebsocket>` — confirm.
+        channel: mpsc::Sender,
+    },
+    /// Tell the aggregator about a new node.
+    Add {
+        local_id: LocalId,
+        // NOTE(review): presumably `Option<Ipv4Addr>` — confirm.
+        ip: Option,
+        node: common::types::NodeDetails,
+        genesis_hash: common::types::BlockHash
+    },
+    /// Update/pass through details about a node.
+    Update {
+        local_id: LocalId,
+        payload: node::Payload
+    },
+    /// Tell the aggregator that a node has been removed when it disconnects.
+    Remove {
+        local_id: LocalId,
+    },
+    /// The shard is disconnected.
+    Disconnected
+}
+
+/// The aggregator can send these messages back to a shard connection.
+#[derive(Debug)]
+pub enum ToShardWebsocket {
+    /// Mute messages to the core by passing the shard-local ID of them.
+    Mute {
+        local_id: LocalId,
+        reason: internal_messages::MuteReason
+    }
+}
+
+/// An incoming feed connection can send these messages to the aggregator.
+#[derive(Clone,Debug)]
+pub enum FromFeedWebsocket {
+    /// When the socket is opened, it'll send this first
+    /// so that we have a way to communicate back to it.
+    /// Unbounded so that slow feeds don't block aggregator
+    /// progress.
+    Initialize {
+        // NOTE(review): presumably `mpsc::UnboundedSender<ToFeedWebsocket>` — confirm.
+        channel: mpsc::UnboundedSender,
+    },
+    /// The feed can subscribe to a chain to receive
+    /// messages relating to it.
+    Subscribe {
+        // NOTE(review): presumably `Box<str>` — confirm.
+        chain: Box
+    },
+    /// The feed wants finality info for the chain, too.
+    SendFinality,
+    /// The feed doesn't want any more finality info for the chain.
+    NoMoreFinality,
+    /// An explicit ping message.
+    Ping {
+        // NOTE(review): presumably `Box<str>` — confirm.
+        chain: Box
+    },
+    /// The feed is disconnected.
+    Disconnected
+}
+
+// The frontend sends text based commands; parse them into these messages:
+impl FromStr for FromFeedWebsocket {
+    type Err = anyhow::Error;
+    // NOTE(review): return type lost its generics in recovery; presumably
+    // `Result<Self, Self::Err>` — confirm.
+    fn from_str(s: &str) -> Result {
+        let (cmd, chain) = match s.find(':') {
+            Some(idx) => (&s[..idx], s[idx+1..].into()),
+            None => return Err(anyhow::anyhow!("Expecting format `CMD:CHAIN_NAME`"))
+        };
+        match cmd {
+            "ping" => Ok(FromFeedWebsocket::Ping { chain }),
+            "subscribe" => Ok(FromFeedWebsocket::Subscribe { chain }),
+            "send-finality" => Ok(FromFeedWebsocket::SendFinality),
+            "no-more-finality" => Ok(FromFeedWebsocket::NoMoreFinality),
+            _ => return Err(anyhow::anyhow!("Command {} not recognised", cmd))
+        }
+    }
+}
+
+/// The aggregator can send these messages back to a feed connection.
+#[derive(Clone,Debug)]
+pub enum ToFeedWebsocket {
+    // NOTE(review): presumably `Vec<u8>` (serialized feed messages) — confirm.
+    Bytes(Vec)
+}
+
+/// Instances of this are responsible for handling incoming and
+/// outgoing messages in the main aggregator loop.
+// NOTE(review): the field types below all lost their generic parameters in
+// recovery (e.g. `HashMap<ConnId, ...>`, `BiMap<NodeId, (ConnId, LocalId)>`);
+// confirm against the original before relying on them.
+pub struct InnerLoop {
+    /// Messages from the outside world come into this:
+    rx_from_external: mpsc::Receiver,
+
+    /// The state of our chains and nodes lives here:
+    node_state: State,
+    /// We maintain a mapping between NodeId and ConnId+LocalId, so that we know
+    /// which messages are about which nodes.
+    node_ids: BiMap,
+
+    /// Keep track of how to send messages out to feeds.
+    feed_channels: HashMap>,
+    /// Keep track of how to send messages out to shards.
+    shard_channels: HashMap>,
+
+    /// Which chain is a feed subscribed to?
+    feed_conn_id_to_chain: HashMap>,
+    /// Which feeds are subscribed to a given chain (needs to stay in sync with above)?
+    chain_to_feed_conn_ids: HashMap, HashSet>,
+
+    /// These feeds want finality info, too.
+    feed_conn_id_finality: HashSet,
+
+    /// Send messages here to make location requests, which are sent back into the loop.
+    tx_to_locator: mpsc::UnboundedSender<(NodeId, Ipv4Addr)>
+}
+
+impl InnerLoop {
+    /// Create a new inner loop handler with the various state it needs.
+    // NOTE(review): parameter types lost their generics in recovery; presumably
+    // `mpsc::Receiver<ToAggregator>`, `mpsc::Sender<ToAggregator>` and
+    // `Vec<String>` — confirm.
+    pub fn new(
+        rx_from_external: mpsc::Receiver,
+        tx_to_aggregator: mpsc::Sender,
+        denylist: Vec
+    ) -> Self {
+
+        // Location results come back tagged with the node they belong to and
+        // are fed straight back into the aggregator loop as messages.
+        let tx_to_locator = find_location(tx_to_aggregator.with(|(node_id, msg)| {
+            future::ok::<_,mpsc::SendError>(ToAggregator::FromFindLocation(node_id, msg))
+        }));
+
+        InnerLoop {
+            rx_from_external,
+            node_state: State::new(denylist),
+            node_ids: BiMap::new(),
+            feed_channels: HashMap::new(),
+            shard_channels: HashMap::new(),
+            feed_conn_id_to_chain: HashMap::new(),
+            chain_to_feed_conn_ids: HashMap::new(),
+            feed_conn_id_finality: HashSet::new(),
+            tx_to_locator
+        }
+    }
+
+    /// Start handling and responding to incoming messages.
+    pub async fn handle(mut self) {
+        while let Some(msg) = self.rx_from_external.next().await {
+            match msg {
+                ToAggregator::FromFeedWebsocket(feed_conn_id, msg) => {
+                    self.handle_from_feed(feed_conn_id, msg).await
+                },
+                ToAggregator::FromShardWebsocket(shard_conn_id, msg) => {
+                    self.handle_from_shard(shard_conn_id, msg).await
+                },
+                ToAggregator::FromFindLocation(node_id, location) => {
+                    self.handle_from_find_location(node_id, location).await
+                }
+            }
+        }
+    }
+
+    /// Record a completed location lookup against the node it belongs to.
+    async fn handle_from_find_location(&mut self, node_id: NodeId, location: find_location::Location) {
+        // TODO: Update node location here
+    }
+
+    /// Handle messages coming from shards.
+    async fn handle_from_shard(&mut self, shard_conn_id: ConnId, msg: FromShardWebsocket) {
+        match msg {
+            FromShardWebsocket::Initialize { channel } => {
+                self.shard_channels.insert(shard_conn_id, channel);
+            },
+            FromShardWebsocket::Add { local_id, ip, node, genesis_hash } => {
+                match self.node_state.add_node(genesis_hash, node) {
+                    // Node rejected: tell the shard to mute it so we stop
+                    // receiving updates for it.
+                    state::AddNodeResult::ChainOnDenyList => {
+                        if let Some(shard_conn) = self.shard_channels.get_mut(&shard_conn_id) {
+                            let _ = shard_conn.send(ToShardWebsocket::Mute {
+                                local_id,
+                                reason: MuteReason::ChainNotAllowed
+                            }).await;
+                        }
+                    },
+                    state::AddNodeResult::ChainOverQuota => {
+                        if let Some(shard_conn) = self.shard_channels.get_mut(&shard_conn_id) {
+                            let _ = shard_conn.send(ToShardWebsocket::Mute {
+                                local_id,
+                                reason: MuteReason::Overquota
+                            }).await;
+                        }
+                    },
+                    state::AddNodeResult::NodeAddedToChain(details) => {
+                        let node_id = details.id;
+                        // Note the ID so that we know what node other messages are referring to:
+                        self.node_ids.insert(node_id, (shard_conn_id, local_id));
+
+                        // Tell feeds watching this chain about the new node:
+                        let mut feed_serializer = FeedMessageSerializer::new();
+                        feed_serializer.push(feed_message::AddedNode(node_id, details.node));
+                        let chain_label = details.chain.label().to_owned();
+
+                        if let Some(bytes) = feed_serializer.into_finalized() {
+                            self.broadcast_to_chain_feeds(
+                                &chain_label,
+                                ToFeedWebsocket::Bytes(bytes)
+                            ).await
+                        }
+
+                        // TODO: The node has been added. Use its IP to find a location.
+                    },
+                }
+            },
+            FromShardWebsocket::Remove { local_id } => {
+                if let Some(node_id) = self.node_ids.remove_by_right(&(shard_conn_id, local_id)) {
+                    // TODO: node_state.remove_node; every feed should know about node count changes.
+                }
+            },
+            FromShardWebsocket::Update { local_id, payload } => {
+                // TODO: Fill this all in...
+                let node_id = match self.node_ids.get_by_right(&(shard_conn_id, local_id)) {
+                    Some(id) => id,
+                    None => return
+                };
+
+                if let Some(block) = payload.best_block() {
+
+                }
+
+                match payload {
+                    node::Payload::SystemInterval(system_interval) => {
+
+                    },
+                    node::Payload::AfgAuthoritySet(_) => {
+
+                    },
+                    node::Payload::AfgFinalized(_) => {
+
+                    },
+                    node::Payload::AfgReceivedPrecommit(_) => {
+
+                    },
+                    node::Payload::AfgReceivedPrevote(_) => {
+
+                    },
+                    // This message should have been handled before the payload made it this far:
+                    node::Payload::SystemConnected(_) => {
+                        unreachable!("SystemConnected message seen in Telemetry Core, but should have been handled in shard");
+                    },
+                    // The following messages aren't handled at the moment. List them explicitly so
+                    // that we have to make an explicit choice for any new messages:
+                    node::Payload::BlockImport(_) |
+                    node::Payload::NotifyFinalized(_) |
+                    node::Payload::AfgReceivedCommit(_) |
+                    node::Payload::TxPoolImport |
+                    node::Payload::AfgFinalizedBlocksUpTo |
+                    node::Payload::AuraPreSealedBlock |
+                    node::Payload::PreparedBlockForProposing => {},
+                }
+
+                // TODO: node_state.update_node, then handle returned diffs
+            },
+            FromShardWebsocket::Disconnected => {
+                // The shard has disconnected; remove the shard channel, but also
+                // remove any nodes associated with the shard, firing the relevant feed messages.
+            }
+        }
+    }
+
+    /// Handle messages coming from feeds.
+    async fn handle_from_feed(&mut self, feed_conn_id: ConnId, msg: FromFeedWebsocket) {
+        match msg {
+            FromFeedWebsocket::Initialize { mut channel } => {
+                self.feed_channels.insert(feed_conn_id, channel.clone());
+
+                // Tell the new feed subscription some basic things to get it going:
+                let mut feed_serializer = FeedMessageSerializer::new();
+                feed_serializer.push(feed_message::Version(31));
+                for chain in self.node_state.iter_chains() {
+                    feed_serializer.push(feed_message::AddedChain(
+                        chain.label(),
+                        chain.node_count()
+                    ));
+                }
+
+                // Send this to the channel that subscribed:
+                if let Some(bytes) = feed_serializer.into_finalized() {
+                    let _ = channel.send(ToFeedWebsocket::Bytes(bytes)).await;
+                }
+            },
+            FromFeedWebsocket::Ping { chain } => {
+                let feed_channel = match self.feed_channels.get_mut(&feed_conn_id) {
+                    Some(chan) => chan,
+                    None => return
+                };
+
+                // Pong!
+                let mut feed_serializer = FeedMessageSerializer::new();
+                feed_serializer.push(feed_message::Pong(&chain));
+                if let Some(bytes) = feed_serializer.into_finalized() {
+                    let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await;
+                }
+            },
+            FromFeedWebsocket::Subscribe { chain } => {
+                let feed_channel = match self.feed_channels.get_mut(&feed_conn_id) {
+                    Some(chan) => chan,
+                    None => return
+                };
+
+                // Unsubscribe from previous chain if subscribed to one:
+                let old_chain_label = self.feed_conn_id_to_chain.remove(&feed_conn_id);
+                if let Some(old_chain_label) = &old_chain_label {
+                    if let Some(map) = self.chain_to_feed_conn_ids.get_mut(old_chain_label) {
+                        map.remove(&feed_conn_id);
+                    }
+                }
+
+                // Untoggle request for finality feeds:
+                self.feed_conn_id_finality.remove(&feed_conn_id);
+
+                // Get the chain we're subscribing to, ignoring the rest if it doesn't exist.
+                let chain = match self.node_state.get_chain_by_label(&chain) {
+                    Some(chain) => chain,
+                    None => return
+                };
+
+                // Send messages to the feed about the new chain:
+                let mut feed_serializer = FeedMessageSerializer::new();
+                if let Some(old_chain_label) = old_chain_label {
+                    feed_serializer.push(feed_message::UnsubscribedFrom(&old_chain_label));
+                }
+                feed_serializer.push(feed_message::SubscribedTo(chain.label()));
+                feed_serializer.push(feed_message::TimeSync(now()));
+                feed_serializer.push(feed_message::BestBlock (
+                    chain.best_block().height,
+                    chain.timestamp(),
+                    chain.average_block_time()
+                ));
+                feed_serializer.push(feed_message::BestFinalized (
+                    chain.finalized_block().height,
+                    chain.finalized_block().hash
+                ));
+                for (idx, (gid, node)) in chain.nodes().enumerate() {
+                    // Send subscription confirmation and chain head before doing all the nodes,
+                    // and continue sending batches of 32 nodes a time over the wire subsequently
+                    if idx % 32 == 0 {
+                        if let Some(bytes) = feed_serializer.finalize() {
+                            let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await;
+                        }
+                    }
+                    feed_serializer.push(feed_message::AddedNode(gid, node));
+                    feed_serializer.push(feed_message::FinalizedBlock(
+                        gid,
+                        node.finalized().height,
+                        node.finalized().hash,
+                    ));
+                    if node.stale() {
+                        feed_serializer.push(feed_message::StaleNode(gid));
+                    }
+                }
+                if let Some(bytes) = feed_serializer.into_finalized() {
+                    let _ = feed_channel.send(ToFeedWebsocket::Bytes(bytes)).await;
+                }
+
+                // Actually make a note of the new chain subscription:
+                self.feed_conn_id_to_chain.insert(feed_conn_id, chain.label().into());
+                self.chain_to_feed_conn_ids.entry(chain.label().into()).or_default().insert(feed_conn_id);
+            },
+            FromFeedWebsocket::SendFinality => {
+                self.feed_conn_id_finality.insert(feed_conn_id);
+            },
+            FromFeedWebsocket::NoMoreFinality => {
+                self.feed_conn_id_finality.remove(&feed_conn_id);
+            },
+            FromFeedWebsocket::Disconnected => {
+                // The feed has disconnected; clean up references to
it:
+                if let Some(chain) = self.feed_conn_id_to_chain.remove(&feed_conn_id) {
+                    // NOTE(review): this removes the chain's *entire* feed set, dropping
+                    // every other feed subscribed to the same chain. The Subscribe arm
+                    // only removes this feed_conn_id from the set — presumably that is
+                    // what was intended here too; confirm.
+                    self.chain_to_feed_conn_ids.remove(&chain);
+                }
+                self.feed_channels.remove(&feed_conn_id);
+                self.feed_conn_id_finality.remove(&feed_conn_id);
+            },
+        }
+    }
+
+    /// Send a message to all chain feeds.
+    async fn broadcast_to_chain_feeds(&mut self, chain: &str, message: ToFeedWebsocket) {
+        if let Some(feeds) = self.chain_to_feed_conn_ids.get(chain) {
+            for &feed_id in feeds {
+                // How much faster would it be if we processed these in parallel?
+                if let Some(chan) = self.feed_channels.get_mut(&feed_id) {
+                    // NOTE(review): the send Result is silently dropped here without a
+                    // `let _ =`, which trips the unused_must_use lint — confirm intent.
+                    chan.send(message.clone()).await;
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/backend/telemetry/src/aggregator/mod.rs b/backend/telemetry/src/aggregator/mod.rs
new file mode 100644
index 0000000..30622d3
--- /dev/null
+++ b/backend/telemetry/src/aggregator/mod.rs
@@ -0,0 +1,8 @@
+mod aggregator;
+mod inner_loop;
+mod find_location;
+
+// Expose the various message types that can be worked with externally:
+pub use inner_loop::{ FromFeedWebsocket, FromShardWebsocket, ToFeedWebsocket, ToShardWebsocket };
+
+pub use aggregator::*;
\ No newline at end of file
diff --git a/backend/telemetry/src/main.rs b/backend/telemetry/src/main.rs
index 125b634..434f3b1 100644
--- a/backend/telemetry/src/main.rs
+++ b/backend/telemetry/src/main.rs
@@ -151,8 +151,8 @@ async fn handle_shard_websocket_connection(mut websocket: ws::WebSocket, mut
     };
 
     let internal_msg = match msg {
-        ToShardWebsocket::Mute { local_id } => {
-            internal_messages::FromTelemetryCore::Mute { local_id }
+        ToShardWebsocket::Mute { local_id, reason } => {
+            internal_messages::FromTelemetryCore::Mute { local_id, reason }
         }
     };
 
diff --git a/backend/telemetry/src/state/chain.rs b/backend/telemetry/src/state/chain.rs
index ce6fcc8..b4fb611 100644
--- a/backend/telemetry/src/state/chain.rs
+++ b/backend/telemetry/src/state/chain.rs
@@ -1,23 +1,25 @@
 use std::sync::Arc;
 use std::collections::{ HashSet, HashMap };
 use
common::types::{ BlockHash }; -use common::internal_messages::{ GlobalId }; -use super::node::Node; -use common::types::{Block, NodeDetails, NodeId, NodeLocation, Timestamp}; +use common::types::{Block, NodeDetails, NodeLocation, Timestamp}; use common::util::{now, DenseMap, NumStats}; +use common::most_seen::{ MostSeen, self }; use common::node::Payload; use std::iter::IntoIterator; +use once_cell::sync::Lazy; + +use super::node::Node; +use super::NodeId; pub type ChainId = usize; -pub type Label = Arc; +pub type Label = Box; pub struct Chain { - /// Label of this chain, along with count of nodes that use this label - label: (Label, usize), - /// Chain genesis hash - genesis_hash: BlockHash, + /// Labels that nodes use for this chain. We keep track of + /// the most commonly used label as nodes are added/removed. + labels: MostSeen