Rust backend (#185)

This commit is contained in:
Maciej Hirsz
2019-11-07 10:52:38 +01:00
committed by GitHub
parent 31784131d6
commit a3b6f6a5a1
26 changed files with 3194 additions and 808 deletions
+10 -4
View File
@@ -1,7 +1,9 @@
# Polkadot Telemetry
## Getting Started
After cloning the repo, make sure to grab the latest stable version of node and install dependencies before doing anything.
To run the backend, you will need `cargo` to build the binary. We recommend using [`rustup`](https://rustup.rs/).
To run the frontend make sure to grab the latest stable version of node and install dependencies before doing anything:
```
nvm install stable
@@ -10,7 +12,9 @@ yarn
### Terminal 1 - Backend
```
yarn start:backend
cd backend
cargo build --release
./target/release/telemetry
```
### Terminal 2 - Frontend
```
@@ -18,8 +22,10 @@ yarn start:frontend
```
### Terminal 3 - Node
Follow the installation instructions from the [Polkadot repo](https://github.com/paritytech/polkadot)
```
./target/debug/polkadot --dev --telemetry-url ws://localhost:1024
./target/release/polkadot --dev --telemetry-url ws://localhost:8000/submit
```
### Run via Docker
@@ -31,5 +37,5 @@ docker-compose up --build -d
- -d stands for detach; if you would like to see logs, I recommend using [Kitematic](https://kitematic.com/) or not using the -d flag
- --build will build the images and rebuild, but this is not required every time
- If you want to make UI changes, there is no need to rebuild the image as the files are being copied in via volumes.
Now navigate to localhost:3000 in your browser to view the app.
+1219 -613
View File
File diff suppressed because it is too large Load Diff
+29 -7
View File
@@ -1,14 +1,36 @@
[package]
name = "backend"
name = "telemetry"
version = "0.1.0"
authors = ["Maciej Hirsz <maciej.hirsz@gmail.com>"]
authors = ["Parity Technologies Ltd. <admin@parity.io>"]
edition = "2018"
license = "GPL-3.0"
[dependencies]
actix = "0.8"
actix-web = "1.0.0-rc"
actix-web-actors = "1.0.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
actix-web = { git = "https://github.com/maciejhirsz/actix-web" }
actix-web-actors = { git = "https://github.com/maciejhirsz/actix-web" }
actix-http = { git = "https://github.com/maciejhirsz/actix-web" }
# actix-web = "1.0.8"
# actix-web-actors = "1.0.2"
# actix-http = "0.2.10"
bytes = "0.4"
chrono = { version = "0.4", features = ["serde"] }
primitive-types = { version = "0.3.0", features = ["serde"] }
fnv = "1.0.6"
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["raw_value"] }
primitive-types = { version = "0.5.0", features = ["serde"] }
log = "0.4"
simple_logger = "1.3.0"
num-traits = "0.2"
parking_lot = "0.9"
reqwest = "0.9.18"
rustc-hash = "1.0.1"
[profile.release]
lto = true
panic = "abort"
[patch.crates-io]
actix-web = { git = "https://github.com/maciejhirsz/actix-web" }
actix-web-actors = { git = "https://github.com/maciejhirsz/actix-web" }
actix-http = { git = "https://github.com/maciejhirsz/actix-web" }
+230
View File
@@ -0,0 +1,230 @@
use std::collections::HashMap;
use actix::prelude::*;
use crate::node::connector::Initialize;
use crate::feed::connector::{FeedConnector, Connected, FeedId};
use crate::util::DenseMap;
use crate::feed::{self, FeedMessageSerializer};
use crate::chain::{self, Chain, ChainId, Label, GetNodeNetworkState};
use crate::types::{NodeDetails, NodeId};
/// Root actor of the backend. Owns the set of known chains and the set of
/// connected feeds (browser clients), and routes messages between them.
pub struct Aggregator {
    /// Lookup table from a chain's label to its id in `chains`.
    labels: HashMap<Label, ChainId>,
    /// Dense mapping of ChainId -> ChainEntry.
    chains: DenseMap<ChainEntry>,
    /// Dense mapping of FeedId -> feed actor address.
    feeds: DenseMap<Addr<FeedConnector>>,
    /// Serializer reused for payloads broadcast to all feeds.
    serializer: FeedMessageSerializer,
}

/// Aggregator-side bookkeeping for a single chain actor.
pub struct ChainEntry {
    /// Address of the running `Chain` actor.
    addr: Addr<Chain>,
    /// Label of the chain.
    label: Label,
    /// Number of nodes currently connected to this chain.
    nodes: usize,
}
impl Aggregator {
    /// Create an Aggregator with no chains and no feeds.
    pub fn new() -> Self {
        Aggregator {
            labels: HashMap::new(),
            chains: DenseMap::new(),
            feeds: DenseMap::new(),
            serializer: FeedMessageSerializer::new(),
        }
    }

    /// Get an address to the chain actor by name. If the address is not found,
    /// or the address is disconnected (actor dropped), create a new one.
    pub fn lazy_chain(&mut self, label: Label, ctx: &mut <Self as Actor>::Context) -> &mut ChainEntry {
        let (cid, found) = self.labels
            .get(&label)
            .map(|&cid| (cid, true))
            .unwrap_or_else(|| {
                // New chain: announce it to every connected feed with an
                // initial node count of 1 (the node that triggered creation).
                self.serializer.push(feed::AddedChain(&label, 1));

                let addr = ctx.address();
                let label = label.clone();
                // `add_with` hands the new ChainId to the closure before the
                // entry is stored, so the Chain actor can know its own id.
                let cid = self.chains.add_with(move |cid| {
                    ChainEntry {
                        addr: Chain::new(cid, addr, label.clone()).start(),
                        label,
                        nodes: 1,
                    }
                });

                self.broadcast();

                (cid, false)
            });

        // Insert outside the closure: `label` must only be moved into the map
        // when no entry existed before.
        if !found {
            self.labels.insert(label, cid);
        }

        self.chains.get_mut(cid).expect("Entry just created above; qed")
    }

    /// Look up a chain entry by label, if one exists.
    fn get_chain(&mut self, label: &str) -> Option<&mut ChainEntry> {
        let chains = &mut self.chains;

        self.labels.get(label).and_then(move |&cid| chains.get_mut(cid))
    }

    /// Flush the serializer and send the resulting payload to every connected
    /// feed. No-op if nothing was pushed since the last finalize.
    fn broadcast(&mut self) {
        if let Some(msg) = self.serializer.finalize() {
            for (_, feed) in self.feeds.iter() {
                feed.do_send(msg.clone());
            }
        }
    }
}
impl Actor for Aggregator {
    type Context = Context<Self>;
}

/// Message sent from the NodeConnector to the Aggregator upon getting all node details
#[derive(Message)]
pub struct AddNode {
    pub node: NodeDetails,
    pub chain: Label,
    /// Recipient used to hand the assigned NodeId (and chain address) back
    /// to the connector.
    pub rec: Recipient<Initialize>,
}

/// Message sent from the Chain to the Aggregator when the Chain loses all nodes
#[derive(Message)]
pub struct DropChain(pub Label);

/// Message sent from the FeedConnector to the Aggregator when subscribing to a new chain
pub struct Subscribe {
    pub chain: Label,
    pub feed: Addr<FeedConnector>,
}

impl Message for Subscribe {
    /// `true` if the chain existed and the subscription was forwarded.
    type Result = bool;
}

/// Message sent from the FeedConnector to the Aggregator when first connected
#[derive(Message)]
pub struct Connect(pub Addr<FeedConnector>);

/// Message sent from the FeedConnector to the Aggregator when disconnecting
#[derive(Message)]
pub struct Disconnect(pub FeedId);

/// Message sent from the Chain to the Aggregator when the node count on the chain changes
#[derive(Message)]
pub struct NodeCount(pub ChainId, pub usize);

/// Message sent to the Aggregator to get the network state of a particular node
pub struct GetNetworkState(pub Box<str>, pub NodeId);

impl Message for GetNetworkState {
    type Result = Option<Request<Chain, GetNodeNetworkState>>;
}
impl Handler<AddNode> for Aggregator {
    type Result = ();

    /// Resolve (or spawn) the chain actor for this label, then forward the
    /// node details on to it.
    fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) {
        let AddNode { node, chain, rec } = msg;

        let entry = self.lazy_chain(chain, ctx);
        let forwarded = chain::AddNode { node, rec };

        entry.addr.do_send(forwarded);
    }
}
impl Handler<DropChain> for Aggregator {
    type Result = ();

    /// Forget a chain whose actor is shutting down and tell every feed that
    /// it is gone.
    fn handle(&mut self, msg: DropChain, _: &mut Self::Context) {
        let DropChain(label) = msg;

        match self.labels.remove(&label) {
            Some(cid) => {
                self.chains.remove(cid);
                self.serializer.push(feed::RemovedChain(&label));
                self.broadcast();
            }
            None => (),
        }

        info!("Dropped chain [{}] from the aggregator", label);
    }
}
impl Handler<Subscribe> for Aggregator {
    type Result = bool;

    /// Hand the feed over to the requested chain; returns whether the chain
    /// was found.
    fn handle(&mut self, msg: Subscribe, _: &mut Self::Context) -> bool {
        let Subscribe { chain, feed } = msg;

        match self.get_chain(&chain) {
            Some(entry) => {
                entry.addr.do_send(chain::Subscribe(feed));
                true
            }
            None => false,
        }
    }
}
impl Handler<Connect> for Aggregator {
    type Result = ();

    /// Register a freshly connected feed, hand it its id, and send it the
    /// current list of chains with their node counts.
    fn handle(&mut self, msg: Connect, _: &mut Self::Context) {
        let Connect(addr) = msg;
        let feed_id = self.feeds.add(addr.clone());

        info!("Feed #{} connected", feed_id);

        addr.do_send(Connected(feed_id));

        self.serializer.push(feed::Version(27));

        // TODO: keep track on number of nodes connected to each chain
        for (_, entry) in self.chains.iter() {
            self.serializer.push(feed::AddedChain(&entry.label, entry.nodes));
        }

        // The snapshot goes only to this feed; nothing is broadcast here.
        if let Some(payload) = self.serializer.finalize() {
            addr.do_send(payload);
        }
    }
}
impl Handler<Disconnect> for Aggregator {
    type Result = ();

    /// Forget a feed that went away.
    fn handle(&mut self, msg: Disconnect, _: &mut Self::Context) {
        let Disconnect(feed_id) = msg;

        info!("Feed #{} disconnected", feed_id);

        self.feeds.remove(feed_id);
    }
}
impl Handler<NodeCount> for Aggregator {
    type Result = ();

    /// Update the cached node count for a chain and republish it to feeds
    /// (the `AddedChain` message doubles as a count update).
    fn handle(&mut self, msg: NodeCount, _: &mut Self::Context) {
        let NodeCount(cid, count) = msg;

        let entry = match self.chains.get_mut(cid) {
            Some(entry) => entry,
            None => return,
        };

        entry.nodes = count;

        // A count of zero means the chain is about to drop itself; the
        // DropChain handler takes care of informing feeds in that case.
        if count == 0 {
            return;
        }

        self.serializer.push(feed::AddedChain(&entry.label, count));
        self.broadcast();
    }
}
impl Handler<GetNetworkState> for Aggregator {
    type Result = <GetNetworkState as Message>::Result;

    /// Forward the network-state request to the chain actor, if the chain
    /// is known.
    fn handle(&mut self, msg: GetNetworkState, _: &mut Self::Context) -> Self::Result {
        let GetNetworkState(chain, nid) = msg;

        let entry = self.get_chain(&*chain)?;

        Some(entry.addr.send(GetNodeNetworkState(nid)))
    }
}
+371 -1
View File
@@ -1,7 +1,377 @@
use actix::prelude::*;
use std::sync::Arc;
use bytes::Bytes;
pub struct Chain;
use crate::aggregator::{Aggregator, DropChain, NodeCount};
use crate::node::{Node, connector::Initialize, message::{NodeMessage, Details}};
use crate::feed::connector::{FeedId, FeedConnector, Subscribed, Unsubscribed};
use crate::feed::{self, FeedMessageSerializer};
use crate::util::{DenseMap, NumStats, now};
use crate::types::{NodeId, NodeDetails, NodeLocation, Block, Timestamp};
/// Nodes that have not reported a new best block within this window (ms) are
/// considered stale.
const STALE_TIMEOUT: u64 = 2 * 60 * 1000; // 2 minutes

/// Id of a chain within the Aggregator's dense map.
pub type ChainId = usize;
/// Chain label; `Arc<str>` so clones are cheap reference-count bumps.
pub type Label = Arc<str>;

pub struct Chain {
    /// Id assigned by the Aggregator
    cid: ChainId,
    /// Who to inform if we Chain drops itself
    aggregator: Addr<Aggregator>,
    /// Label of this chain
    label: Label,
    /// Dense mapping of NodeId -> Node
    nodes: DenseMap<Node>,
    /// Dense mapping of FeedId -> Addr<FeedConnector>,
    feeds: DenseMap<Addr<FeedConnector>>,
    /// Best block
    best: Block,
    /// Finalized block
    finalized: Block,
    /// Block times history, stored so we can calculate averages
    block_times: NumStats<u64>,
    /// Calculated average block time
    average_block_time: Option<u64>,
    /// Message serializer
    serializer: FeedMessageSerializer,
    /// When the best block first arrived
    timestamp: Option<Timestamp>,
}
impl Chain {
    /// Create a new Chain actor state (not yet started).
    pub fn new(cid: ChainId, aggregator: Addr<Aggregator>, label: Label) -> Self {
        info!("[{}] Created", label);

        Chain {
            cid,
            aggregator,
            label,
            nodes: DenseMap::new(),
            feeds: DenseMap::new(),
            best: Block::zero(),
            finalized: Block::zero(),
            block_times: NumStats::new(50),
            average_block_time: None,
            serializer: FeedMessageSerializer::new(),
            timestamp: None,
        }
    }

    /// Flush the serializer and send the payload to every subscribed feed.
    /// No-op if nothing was pushed since the last finalize.
    fn broadcast(&mut self) {
        if let Some(msg) = self.serializer.finalize() {
            for (_, feed) in self.feeds.iter() {
                feed.do_send(msg.clone());
            }
        }
    }

    /// Triggered when the number of nodes in this chain has changed, Aggregator will
    /// propagate new counts to all connected feeds
    fn update_count(&self) {
        self.aggregator.do_send(NodeCount(self.cid, self.nodes.len()));
    }

    /// Push the time since the previous best block into the rolling window
    /// and refresh the cached average. No-op until a first block has arrived.
    fn update_average_block_time(&mut self, now: u64) {
        if let Some(timestamp) = self.timestamp {
            self.block_times.push(now - timestamp);
            self.average_block_time = Some(self.block_times.average());
        }
    }

    /// Check if the chain is stale (has not received a new best block in a while).
    /// If so, find a new best block, ignoring any stale nodes and marking them as such.
    fn update_stale_nodes(&mut self, now: u64) {
        let threshold = now - STALE_TIMEOUT;
        let timestamp = match self.timestamp {
            Some(ts) => ts,
            None => return,
        };

        if timestamp > threshold {
            // Timestamp is in range, nothing to do
            return;
        }

        let mut best = Block::zero();
        let mut finalized = Block::zero();

        for (nid, node) in self.nodes.iter_mut() {
            if !node.update_stale(threshold) {
                // Node is live: let it vote on the chain-wide best/finalized.
                if node.best().height > best.height {
                    best = *node.best();
                }

                if node.finalized().height > finalized.height {
                    finalized = *node.finalized();
                }
            } else {
                self.serializer.push(feed::StaleNode(nid));
            }
        }

        // NOTE(review): this guard reads the *old* best/finalized heights, so
        // a chain that never progressed past zero keeps its state untouched;
        // confirm this is intended rather than checking the recomputed values.
        if self.best.height != 0 || self.finalized.height != 0 {
            self.best = best;
            self.finalized = finalized;
            self.block_times.reset();
            // Clearing the timestamp suppresses repeated stale sweeps until a
            // fresh best block arrives.
            self.timestamp = None;
            self.serializer.push(feed::BestBlock(self.best.height, now, None));
            self.serializer.push(feed::BestFinalized(finalized.height, finalized.hash));
        }
    }
}
impl Actor for Chain {
    type Context = Context<Self>;

    /// On shutdown, tell the Aggregator to forget this chain and notify all
    /// subscribed feeds that their subscription is gone.
    fn stopped(&mut self, _: &mut Self::Context) {
        self.aggregator.do_send(DropChain(self.label.clone()));

        for (_, feed) in self.feeds.iter() {
            feed.do_send(Unsubscribed)
        }
    }
}

/// Message sent from the Aggregator to the Chain when new Node is connected
#[derive(Message)]
pub struct AddNode {
    pub node: NodeDetails,
    pub rec: Recipient<Initialize>,
}

/// Message sent from the NodeConnector to the Chain when it receives new telemetry data
#[derive(Message)]
pub struct UpdateNode {
    pub nid: NodeId,
    pub msg: NodeMessage,
    /// Raw bytes of the original payload; kept so the network state can be
    /// stored without re-serializing it.
    pub raw: Option<Bytes>,
}

/// Message sent from the NodeConnector to the Chain when the connector disconnects
#[derive(Message)]
pub struct RemoveNode(pub NodeId);

/// Message sent from the Aggregator to the Chain when the connector wants to subscribe to that chain
#[derive(Message)]
pub struct Subscribe(pub Addr<FeedConnector>);

/// Message sent from the FeedConnector before it subscribes to a new chain, or if it disconnects
#[derive(Message)]
pub struct Unsubscribe(pub FeedId);

/// Message sent from the NodeConnector to the Chain when it receives location data
#[derive(Message)]
pub struct LocateNode {
    pub nid: NodeId,
    pub location: Arc<NodeLocation>,
}

/// Message sent to the Chain to fetch a node's most recent raw network state.
pub struct GetNodeNetworkState(pub NodeId);

impl Message for GetNodeNetworkState {
    type Result = Option<Bytes>;
}
impl Handler<AddNode> for Chain {
type Result = ();
fn handle(&mut self, msg: AddNode, ctx: &mut Self::Context) {
let nid = self.nodes.add(Node::new(msg.node));
if let Err(_) = msg.rec.do_send(Initialize(nid, ctx.address())) {
self.nodes.remove(nid);
} else if let Some(node) = self.nodes.get(nid) {
self.serializer.push(feed::AddedNode(
nid,
node.details(),
node.stats(),
node.hardware(),
node.block_details(),
node.location(),
));
self.broadcast();
}
self.update_count();
}
}
impl Handler<UpdateNode> for Chain {
    type Result = ();

    /// Process a telemetry message from a node: track best/finalized blocks,
    /// hardware and stats updates, and (optionally) the raw network state
    /// payload, then broadcast any resulting feed updates.
    fn handle(&mut self, msg: UpdateNode, _: &mut Self::Context) {
        let UpdateNode { nid, msg, raw } = msg;

        if let Some(block) = msg.details.best_block() {
            let mut propagation_time = 0;
            let now = now();

            // Stale-node detection is piggy-backed on incoming block updates.
            self.update_stale_nodes(now);

            if block.height > self.best.height {
                self.best = *block;
                info!(
                    "[{}] [{}/{}] new best block ({}) {:?}",
                    self.label,
                    self.nodes.len(),
                    self.feeds.len(),
                    self.best.height,
                    self.best.hash,
                );
                self.update_average_block_time(now);
                self.timestamp = Some(now);
                self.serializer.push(feed::BestBlock(self.best.height, now, self.average_block_time));
            } else if block.height == self.best.height {
                // Same height as the current best: measure how long after the
                // first sighting this node reported it.
                if let Some(timestamp) = self.timestamp {
                    propagation_time = now - timestamp;
                }
            }

            if let Some(node) = self.nodes.get_mut(nid) {
                if let Some(details) = node.update_block(*block, now, propagation_time) {
                    self.serializer.push(feed::ImportedBlock(nid, details));
                }
            }
        }

        if let Some(node) = self.nodes.get_mut(nid) {
            match msg.details {
                Details::SystemInterval(ref interval) => {
                    // Keep the raw payload only when it actually carries a
                    // network state, so `network_state()` can slice it later.
                    if interval.network_state.is_some() {
                        if let Some(raw) = raw {
                            node.set_network_state(raw);
                        }
                    }

                    if node.update_hardware(interval) {
                        self.serializer.push(feed::Hardware(nid, node.hardware()));
                    }

                    if let Some(stats) = node.update_stats(interval) {
                        self.serializer.push(feed::NodeStatsUpdate(nid, stats));
                    }
                }
                Details::SystemNetworkState(_) => {
                    if let Some(raw) = raw {
                        node.set_network_state(raw);
                    }
                }
                _ => (),
            }

            if let Some(block) = msg.details.finalized_block() {
                if let Some(finalized) = node.update_finalized(block) {
                    self.serializer.push(feed::FinalizedBlock(nid, finalized.height, finalized.hash));

                    // Promote the chain-wide finalized block if this node is ahead.
                    if finalized.height > self.finalized.height {
                        self.finalized = *finalized;
                        self.serializer.push(feed::BestFinalized(finalized.height, finalized.hash));
                    }
                }
            }
        }

        self.broadcast();
    }
}
impl Handler<LocateNode> for Chain {
    type Result = ();

    /// Attach resolved location data to a node and tell the feeds about it.
    fn handle(&mut self, msg: LocateNode, _: &mut Self::Context) {
        let LocateNode { nid, location } = msg;

        let node = match self.nodes.get_mut(nid) {
            Some(node) => node,
            None => return,
        };

        self.serializer.push(feed::LocatedNode(
            nid,
            location.latitude,
            location.longitude,
            &location.city,
        ));

        node.update_location(location);
    }
}
impl Handler<RemoveNode> for Chain {
    type Result = ();

    /// Drop a node whose connector went away; the chain actor stops itself
    /// once the last node is gone.
    fn handle(&mut self, msg: RemoveNode, ctx: &mut Self::Context) {
        let RemoveNode(node_id) = msg;

        self.nodes.remove(node_id);

        let no_nodes_left = self.nodes.is_empty();
        if no_nodes_left {
            info!("[{}] Lost all nodes, dropping...", self.label);
            ctx.stop();
        }

        self.serializer.push(feed::RemovedNode(node_id));

        self.broadcast();
        self.update_count();
    }
}
impl Handler<Subscribe> for Chain {
    type Result = ();

    /// Attach a feed to this chain and send it a full snapshot: current best
    /// and finalized blocks plus an `AddedNode` entry for every known node.
    fn handle(&mut self, msg: Subscribe, ctx: &mut Self::Context) {
        let Subscribe(feed) = msg;
        let fid = self.feeds.add(feed.clone());

        // Give the feed a handle it can use to unsubscribe later.
        feed.do_send(Subscribed(fid, ctx.address().recipient()));

        self.serializer.push(feed::SubscribedTo(&self.label));
        self.serializer.push(feed::TimeSync(now()));
        self.serializer.push(feed::BestBlock(
            self.best.height,
            // `unwrap_or` instead of `unwrap_or_else`: the fallback is a
            // plain literal, no lazy evaluation needed.
            self.timestamp.unwrap_or(0),
            self.average_block_time,
        ));
        self.serializer.push(feed::BestFinalized(self.finalized.height, self.finalized.hash));

        for (nid, node) in self.nodes.iter() {
            self.serializer.push(feed::AddedNode(
                nid,
                node.details(),
                node.stats(),
                node.hardware(),
                node.block_details(),
                node.location(),
            ));
            self.serializer.push(feed::FinalizedBlock(nid, node.finalized().height, node.finalized().hash));
            if node.stale() {
                self.serializer.push(feed::StaleNode(nid));
            }
        }

        // The snapshot goes only to the newly subscribed feed, not broadcast.
        if let Some(serialized) = self.serializer.finalize() {
            feed.do_send(serialized);
        }
    }
}
impl Handler<Unsubscribe> for Chain {
    type Result = ();

    /// Say goodbye to a feed (if it is still reachable) and forget it.
    fn handle(&mut self, msg: Unsubscribe, _: &mut Self::Context) {
        let Unsubscribe(fid) = msg;

        if let Some(addr) = self.feeds.get(fid) {
            self.serializer.push(feed::UnsubscribedFrom(&self.label));

            if let Some(serialized) = self.serializer.finalize() {
                addr.do_send(serialized);
            }
        }

        self.feeds.remove(fid);
    }
}
impl Handler<GetNodeNetworkState> for Chain {
    type Result = <GetNodeNetworkState as Message>::Result;

    /// Look up a node and return its stored network state, if any.
    fn handle(&mut self, msg: GetNodeNetworkState, _: &mut Self::Context) -> Self::Result {
        let GetNodeNetworkState(nid) = msg;

        let node = self.nodes.get(nid)?;

        node.network_state()
    }
}
+140
View File
@@ -0,0 +1,140 @@
use serde::Serialize;
use serde_json::to_writer;
use crate::types::{
NodeId, NodeDetails, NodeStats, NodeHardware, NodeLocation,
BlockNumber, BlockHash, BlockDetails, Timestamp,
};
pub mod connector;
use connector::Serialized;
/// Trait implemented by every message type that can be sent to the frontend
/// feed. `ACTION` is the numeric opcode prefixed to the serialized payload.
pub trait FeedMessage: Serialize {
    const ACTION: u8;
}
/// Accumulates feed messages into a single JSON array payload of the form
/// `[action, payload, action, payload, ...]`.
pub struct FeedMessageSerializer {
    /// Current buffer,
    buffer: Vec<u8>,
}

impl FeedMessageSerializer {
    /// Create a serializer with an empty buffer.
    pub fn new() -> Self {
        Self {
            buffer: Vec::new(),
        }
    }

    /// Discard anything pushed since the last `finalize`.
    pub fn clear(&mut self) {
        self.buffer.clear();
    }

    /// Append `msg` as an `ACTION,payload` pair, opening the JSON array on
    /// the first push.
    pub fn push<Message>(&mut self, msg: Message)
    where
        Message: FeedMessage,
    {
        let glue = match self.buffer.len() {
            0 => b'[',
            _ => b',',
        };

        self.buffer.push(glue);
        // Writing to a Vec<u8> cannot fail, so the results are safe to ignore.
        let _ = to_writer(&mut self.buffer, &Message::ACTION);
        self.buffer.push(b',');
        let _ = to_writer(&mut self.buffer, &msg);
    }

    /// Close the JSON array and hand back the serialized payload, resetting
    /// the buffer. Returns `None` when nothing was pushed.
    pub fn finalize(&mut self) -> Option<Serialized> {
        // `is_empty` over `len() == 0` (clippy::len_zero).
        if self.buffer.is_empty() {
            return None;
        }

        self.buffer.push(b']');
        let bytes = self.buffer[..].into();
        self.clear();

        Some(Serialized(bytes))
    }
}
/// Implements `FeedMessage` for each listed type, assigning it the given
/// action opcode. Opcodes must stay in sync with the frontend decoder.
macro_rules! actions {
    ($($action:literal: $t:ty,)*) => {
        $(
            impl FeedMessage for $t {
                const ACTION: u8 = $action;
            }
        )*
    }
}

actions! {
    0x00: Version,
    0x01: BestBlock,
    0x02: BestFinalized,
    0x03: AddedNode<'_>,
    0x04: RemovedNode,
    0x05: LocatedNode<'_>,
    0x06: ImportedBlock<'_>,
    0x07: FinalizedBlock,
    0x08: NodeStatsUpdate<'_>,
    0x09: Hardware<'_>,
    0x0A: TimeSync,
    0x0B: AddedChain<'_>,
    0x0C: RemovedChain<'_>,
    0x0D: SubscribedTo<'_>,
    0x0E: UnsubscribedFrom<'_>,
    0x0F: Pong<'_>,
    // NOTE(review): 0x10-0x13 are unassigned here — presumably reserved;
    // confirm against the frontend before reusing them.
    0x14: StaleNode,
}
/// Feed protocol version, sent on connect.
#[derive(Serialize)]
pub struct Version(pub usize);

/// Chain-wide best block: (height, arrival timestamp, average block time).
#[derive(Serialize)]
pub struct BestBlock(pub BlockNumber, pub Timestamp, pub Option<u64>);

/// Chain-wide best finalized block: (height, hash).
#[derive(Serialize)]
pub struct BestFinalized(pub BlockNumber, pub BlockHash);

/// Full description of a newly added node.
#[derive(Serialize)]
pub struct AddedNode<'a>(pub NodeId, pub &'a NodeDetails, pub &'a NodeStats, pub NodeHardware<'a>,
                         pub &'a BlockDetails, pub Option<&'a NodeLocation>);

/// A node was removed from the chain.
#[derive(Serialize)]
pub struct RemovedNode(pub NodeId);

/// A node's physical location: (id, latitude, longitude, city).
#[derive(Serialize)]
pub struct LocatedNode<'a>(pub NodeId, pub f32, pub f32, pub &'a str);

/// A node imported a new best block.
#[derive(Serialize)]
pub struct ImportedBlock<'a>(pub NodeId, pub &'a BlockDetails);

/// A node finalized a block: (id, height, hash).
#[derive(Serialize)]
pub struct FinalizedBlock(pub NodeId, pub BlockNumber, pub BlockHash);

/// A node's peer/transaction stats changed.
#[derive(Serialize)]
pub struct NodeStatsUpdate<'a>(pub NodeId, pub &'a NodeStats);

/// A node's hardware chart series gained new data.
#[derive(Serialize)]
pub struct Hardware<'a>(pub NodeId, pub NodeHardware<'a>);

/// Server timestamp used by the frontend to sync clocks.
#[derive(Serialize)]
pub struct TimeSync(pub u64);

/// A chain appeared, or its node count changed: (label, node count).
#[derive(Serialize)]
pub struct AddedChain<'a>(pub &'a str, pub usize);

/// A chain lost all of its nodes and was removed.
#[derive(Serialize)]
pub struct RemovedChain<'a>(pub &'a str);

/// Confirmation that the feed is now subscribed to the given chain.
#[derive(Serialize)]
pub struct SubscribedTo<'a>(pub &'a str);

/// Notification that the feed is no longer subscribed to the given chain.
#[derive(Serialize)]
pub struct UnsubscribedFrom<'a>(pub &'a str);

/// Echo of a feed-level ping payload.
#[derive(Serialize)]
pub struct Pong<'a>(pub &'a str);

/// A node stopped reporting new best blocks.
#[derive(Serialize)]
pub struct StaleNode(pub NodeId);
+195
View File
@@ -0,0 +1,195 @@
use std::time::{Duration, Instant};
use bytes::Bytes;
use actix::prelude::*;
use actix_web_actors::ws;
use crate::aggregator::{Aggregator, Connect, Disconnect, Subscribe};
use crate::chain::Unsubscribe;
use crate::feed::{FeedMessageSerializer, Pong};
use crate::util::fnv;
/// Id of a feed within the Aggregator's (or a Chain's) dense map.
pub type FeedId = usize;

/// How often heartbeat pings are sent
const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20);
/// How long before lack of client response causes a timeout
const CLIENT_TIMEOUT: Duration = Duration::from_secs(60);

/// WebSocket actor handling a single frontend (browser) connection.
pub struct FeedConnector {
    /// FeedId that Aggregator holds of this actor
    fid_aggregator: FeedId,
    /// FeedId that Chain holds of this actor
    fid_chain: FeedId,
    /// Time of the last ping/pong from the client; the connection is dropped
    /// if no heartbeat is seen within CLIENT_TIMEOUT.
    hb: Instant,
    /// Aggregator actor address
    aggregator: Addr<Aggregator>,
    /// Chain actor address
    chain: Option<Recipient<Unsubscribe>>,
    /// FNV hash of the chain label, optimization to avoid double-subscribing
    chain_hash: u64,
    /// Message serializer
    serializer: FeedMessageSerializer,
}
impl Actor for FeedConnector {
    type Context = ws::WebsocketContext<Self>;

    /// Start the heartbeat timer and register with the Aggregator.
    fn started(&mut self, ctx: &mut Self::Context) {
        self.heartbeat(ctx);
        self.aggregator.do_send(Connect(ctx.address()));
    }

    /// Unsubscribe from the current chain (if any) and deregister from the
    /// Aggregator when the WebSocket goes away.
    fn stopped(&mut self, _: &mut Self::Context) {
        if let Some(chain) = self.chain.take() {
            let _ = chain.do_send(Unsubscribe(self.fid_chain));
        }

        self.aggregator.do_send(Disconnect(self.fid_aggregator));
    }
}
impl FeedConnector {
    /// Create a connector that is not yet registered with the Aggregator
    /// nor subscribed to any chain.
    pub fn new(aggregator: Addr<Aggregator>) -> Self {
        Self {
            // Garbage id, will be replaced by the Connected message
            fid_aggregator: !0,
            // Garbage id, will be replaced by the Subscribed message
            fid_chain: !0,
            hb: Instant::now(),
            aggregator,
            chain: None,
            chain_hash: 0,
            serializer: FeedMessageSerializer::new(),
        }
    }

    /// Ping the client every HEARTBEAT_INTERVAL; stop the actor when no
    /// ping/pong has been seen for CLIENT_TIMEOUT.
    fn heartbeat(&self, ctx: &mut <Self as Actor>::Context) {
        ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| {
            // check client heartbeats
            if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT {
                // stop actor
                ctx.stop();
            } else {
                ctx.ping("")
            }
        });
    }

    /// Dispatch a `cmd:payload` text frame received from the frontend.
    fn handle_cmd(&mut self, cmd: &str, payload: &str, ctx: &mut <Self as Actor>::Context) {
        match cmd {
            "subscribe" => {
                // Hash-compare the label so we don't re-subscribe to the
                // chain we are already on.
                match fnv(payload) {
                    hash if hash == self.chain_hash => return,
                    hash => self.chain_hash = hash,
                }

                self.aggregator.send(Subscribe {
                    chain: payload.into(),
                    feed: ctx.address(),
                })
                .into_actor(self)
                .then(|res, actor, _| {
                    match res {
                        Ok(true) => (),
                        // Chain not found, reset hash
                        _ => actor.chain_hash = 0,
                    }
                    fut::ok(())
                })
                .wait(ctx);
            }
            "ping" => {
                // Echo the payload straight back as a feed-level Pong.
                self.serializer.push(Pong(payload));

                if let Some(serialized) = self.serializer.finalize() {
                    ctx.binary(serialized.0);
                }
            }
            _ => (),
        }
    }
}
/// Message sent from Chain to the FeedConnector upon successful subscription
#[derive(Message)]
pub struct Subscribed(pub FeedId, pub Recipient<Unsubscribe>);

/// Message sent from Chain to the FeedConnector when the chain goes away
/// while this feed is still subscribed.
#[derive(Message)]
pub struct Unsubscribed;

/// Message sent from Aggregator to FeedConnector upon successful connection
#[derive(Message)]
pub struct Connected(pub FeedId);

/// Message sent from either Aggregator or Chain to FeedConnector containing
/// serialized message(s) for the frontend
///
/// Since Bytes is ARC'ed, this is cheap to clone
#[derive(Message, Clone)]
pub struct Serialized(pub Bytes);
impl StreamHandler<ws::Message, ws::ProtocolError> for FeedConnector {
    /// React to raw WebSocket frames from the browser.
    fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
        match msg {
            ws::Message::Ping(payload) => {
                self.hb = Instant::now();
                ctx.pong(&payload);
            }
            ws::Message::Pong(_) => {
                self.hb = Instant::now();
            }
            ws::Message::Text(text) => {
                // Commands arrive as `cmd:payload` pairs.
                if let Some(idx) = text.find(':') {
                    let (cmd, rest) = text.split_at(idx);
                    self.handle_cmd(cmd, &rest[1..], ctx);
                }
            }
            ws::Message::Close(_) => ctx.stop(),
            _ => (),
        }
    }
}
impl Handler<Subscribed> for FeedConnector {
    type Result = ();

    /// Remember the new chain subscription, letting go of the previous one.
    fn handle(&mut self, msg: Subscribed, _: &mut Self::Context) {
        let Subscribed(feed_id, chain) = msg;

        // Unsubscribe from the previous chain using the old id, before it
        // gets overwritten below.
        if let Some(previous) = self.chain.take() {
            let _ = previous.do_send(Unsubscribe(self.fid_chain));
        }

        self.fid_chain = feed_id;
        self.chain = Some(chain);
    }
}

impl Handler<Unsubscribed> for FeedConnector {
    type Result = ();

    /// The chain dropped us; clear subscription state so a fresh
    /// `subscribe` command will go through again.
    fn handle(&mut self, _: Unsubscribed, _: &mut Self::Context) {
        self.chain = None;
        self.chain_hash = 0;
    }
}

impl Handler<Connected> for FeedConnector {
    type Result = ();

    /// Store the id the Aggregator assigned to this feed.
    fn handle(&mut self, msg: Connected, _: &mut Self::Context) {
        let Connected(feed_id) = msg;

        self.fid_aggregator = feed_id;
    }
}

impl Handler<Serialized> for FeedConnector {
    type Result = ();

    /// Forward a pre-serialized payload to the client as a binary frame.
    fn handle(&mut self, msg: Serialized, ctx: &mut Self::Context) {
        let Serialized(payload) = msg;

        ctx.binary(payload);
    }
}
+76 -11
View File
@@ -1,39 +1,104 @@
#[macro_use]
extern crate log;
use std::net::Ipv4Addr;
use actix::prelude::*;
use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Error};
use actix_web_actors::ws;
use actix_http::ws::Codec;
mod node_connector;
mod node_message;
mod types;
mod aggregator;
mod chain;
mod node;
mod feed;
mod util;
use node_connector::NodeConnector;
use chain::Chain;
use node::connector::NodeConnector;
use feed::connector::FeedConnector;
use aggregator::{Aggregator, GetNetworkState};
use util::{Locator, LocatorFactory};
use types::NodeId;
/// Entry point for connecting nodes
fn node_route(
req: HttpRequest,
stream: web::Payload,
chain: web::Data<Addr<Chain>>,
aggregator: web::Data<Addr<Aggregator>>,
locator: web::Data<Addr<Locator>>,
) -> Result<HttpResponse, Error> {
println!("Connection!");
let ip = req.connection_info().remote().and_then(|mut addr| {
if let Some(port_idx) = addr.find(":") {
addr = &addr[..port_idx];
}
addr.parse::<Ipv4Addr>().ok()
});
let mut res = ws::handshake(&req)?;
let aggregator = aggregator.get_ref().clone();
let locator = locator.get_ref().clone().recipient();
Ok(res.streaming(ws::WebsocketContext::with_codec(
NodeConnector::new(aggregator, locator, ip),
stream,
Codec::new().max_size(10 * 1024 * 1024), // 10mb frame limit
)))
}
/// Entry point for connecting feeds
fn feed_route(
req: HttpRequest,
stream: web::Payload,
aggregator: web::Data<Addr<Aggregator>>,
) -> Result<HttpResponse, Error> {
ws::start(
NodeConnector::new(chain.get_ref().clone()),
FeedConnector::new(aggregator.get_ref().clone()),
&req,
stream,
)
}
fn state_route(
path: web::Path<(Box<str>, NodeId)>,
aggregator: web::Data<Addr<Aggregator>>
) -> impl Future<Item = HttpResponse, Error = Error> {
let (chain, nid) = path.into_inner();
aggregator
.send(GetNetworkState(chain, nid))
.flatten()
.from_err()
.and_then(|data| {
match data.and_then(|nested| nested) {
Some(body) => HttpResponse::Ok().content_type("application/json").body(body),
None => HttpResponse::Ok().body("Node has disconnected or has not submitted its network state yet"),
}
})
}
fn main() -> std::io::Result<()> {
use web::{resource, get};
simple_logger::init_with_level(log::Level::Info).expect("Must be able to start a logger");
let sys = System::new("substrate-telemetry");
let chain = Chain.start();
let aggregator = Aggregator::new().start();
let factory = LocatorFactory::new();
let locator = SyncArbiter::start(4, move || factory.create());
HttpServer::new(move || {
App::new()
.data(chain.clone())
.service(web::resource("/submit").route(web::get().to(node_route)))
.data(aggregator.clone())
.data(locator.clone())
.service(resource("/submit").route(get().to(node_route)))
.service(resource("/submit/").route(get().to(node_route)))
.service(resource("/feed").route(get().to(feed_route)))
.service(resource("/feed/").route(get().to(feed_route)))
.service(resource("/network_state/{chain}/{nid}").route(get().to_async(state_route)))
.service(resource("/network_state/{chain}/{nid}/").route(get().to_async(state_route)))
})
.bind("127.0.0.1:8080")?
.bind("0.0.0.0:8000")?
.start();
sys.run()
+207
View File
@@ -0,0 +1,207 @@
use bytes::Bytes;
use std::sync::Arc;
use crate::types::{NodeId, NodeDetails, NodeStats, NodeHardware, NodeLocation, BlockDetails, Block};
use crate::util::{MeanList, now};
pub mod message;
pub mod connector;
use message::SystemInterval;
/// If the time between consecutive best blocks falls below this threshold (ms),
/// block updates sent to the browser get throttled.
const THROTTLE_THRESHOLD: u64 = 100;
/// Minimum interval (ms) between block updates sent to the browser while throttled.
const THROTTLE_INTERVAL: u64 = 1000;

/// Per-node state tracked by a `Chain` actor.
pub struct Node {
    /// Static details
    details: NodeDetails,
    /// Basic stats
    stats: NodeStats,
    /// Best block
    best: BlockDetails,
    /// Finalized block
    finalized: Block,
    /// Timer for throttling block updates
    throttle: u64,
    /// Rolling means of CPU use
    cpu: MeanList<f32>,
    /// Rolling means of memory use
    memory: MeanList<f32>,
    /// Rolling means of upload bandwidth
    upload: MeanList<f64>,
    /// Rolling means of download bandwidth
    download: MeanList<f64>,
    /// Timestamps at which the hardware samples above were recorded
    chart_stamps: MeanList<f64>,
    /// Physical location details
    location: Option<Arc<NodeLocation>>,
    /// Flag marking if the node is stale (not syncing or producing blocks)
    stale: bool,
    /// Network state
    pub network_state: Option<Bytes>,
}
impl Node {
    /// Create tracking state for a freshly connected node.
    pub fn new(details: NodeDetails) -> Self {
        Node {
            details,
            stats: NodeStats {
                txcount: 0,
                peers: 0,
            },
            best: BlockDetails {
                block: Block::zero(),
                block_timestamp: now(),
                block_time: 0,
                propagation_time: 0,
            },
            finalized: Block::zero(),
            throttle: 0,
            cpu: MeanList::new(),
            memory: MeanList::new(),
            upload: MeanList::new(),
            download: MeanList::new(),
            chart_stamps: MeanList::new(),
            location: None,
            stale: false,
            network_state: None,
        }
    }

    /// Static details the node reported on connect.
    pub fn details(&self) -> &NodeDetails {
        &self.details
    }

    /// Latest peer/transaction stats.
    pub fn stats(&self) -> &NodeStats {
        &self.stats
    }

    /// This node's current best block.
    pub fn best(&self) -> &Block {
        &self.best.block
    }

    /// This node's current finalized block.
    pub fn finalized(&self) -> &Block {
        &self.finalized
    }

    /// Snapshot of the hardware chart series:
    /// (memory, cpu, upload, download, timestamps).
    pub fn hardware(&self) -> NodeHardware {
        (
            self.memory.slice(),
            self.cpu.slice(),
            self.upload.slice(),
            self.download.slice(),
            self.chart_stamps.slice(),
        )
    }

    /// Physical location, if the locator has resolved one.
    pub fn location(&self) -> Option<&NodeLocation> {
        match self.location {
            Some(ref location) => Some(&**location),
            None => None
        }
    }

    pub fn update_location(&mut self, location: Arc<NodeLocation>) {
        self.location = Some(location);
    }

    /// Timing details for the current best block.
    pub fn block_details(&self) -> &BlockDetails {
        &self.best
    }

    /// Record a (potentially) new best block. Returns the updated details if
    /// the block advanced our best height AND the update is not throttled,
    /// `None` otherwise.
    pub fn update_block(&mut self, block: Block, timestamp: u64, propagation_time: u64) -> Option<&BlockDetails> {
        if block.height > self.best.block.height {
            self.stale = false;
            self.best.block = block;
            self.best.block_time = timestamp - self.best.block_timestamp;
            self.best.block_timestamp = timestamp;
            self.best.propagation_time = propagation_time;

            if self.throttle < timestamp {
                // Blocks arriving faster than THROTTLE_THRESHOLD suppress
                // further browser updates for THROTTLE_INTERVAL ms.
                if self.best.block_time <= THROTTLE_THRESHOLD {
                    self.throttle = timestamp + THROTTLE_INTERVAL;
                }

                return Some(&self.best);
            }
        }

        None
    }

    /// Fold a `SystemInterval` report into the hardware chart series.
    /// Returns `true` if any series gained a new data point.
    ///
    /// Note: `self.stats` is deliberately NOT assigned here. Assigning it
    /// (as this method previously did) made the comparison in
    /// `update_stats` — which runs right after this in `Chain`'s
    /// `UpdateNode` handler — always see equal values, so
    /// `NodeStatsUpdate` messages were never emitted.
    pub fn update_hardware(&mut self, interval: &SystemInterval) -> bool {
        let mut changed = false;

        if let Some(cpu) = interval.cpu {
            changed |= self.cpu.push(cpu);
        }
        if let Some(memory) = interval.memory {
            changed |= self.memory.push(memory);
        }
        if let Some(upload) = interval.bandwidth_upload {
            changed |= self.upload.push(upload);
        }
        if let Some(download) = interval.bandwidth_download {
            changed |= self.download.push(download);
        }
        self.chart_stamps.push(now() as f64);

        changed
    }

    /// Update peer/transaction stats; returns the new stats if they changed.
    pub fn update_stats(&mut self, interval: &SystemInterval) -> Option<&NodeStats> {
        if self.stats != interval.stats {
            self.stats = interval.stats;
            Some(&self.stats)
        } else {
            None
        }
    }

    /// Update the finalized block; returns it if it advanced.
    pub fn update_finalized(&mut self, block: Block) -> Option<&Block> {
        if block.height > self.finalized.height {
            self.finalized = block;
            Some(self.finalized())
        } else {
            None
        }
    }

    /// Mark the node stale if its last best block predates `threshold`.
    /// Returns the (possibly updated) stale flag.
    pub fn update_stale(&mut self, threshold: u64) -> bool {
        if self.best.block_timestamp < threshold {
            self.stale = true;
        }

        self.stale
    }

    pub fn stale(&self) -> bool {
        self.stale
    }

    /// Store the raw JSON payload carrying this node's network state.
    pub fn set_network_state(&mut self, state: Bytes) {
        self.network_state = Some(state);
    }

    /// Extract the `state` (or `network_state`) JSON fragment from the
    /// stored raw payload, if present and parseable.
    pub fn network_state(&self) -> Option<Bytes> {
        use serde::Deserialize;
        use serde_json::value::RawValue;

        // Borrow the nested value without copying the whole payload.
        #[derive(Deserialize)]
        struct Wrapper<'a> {
            #[serde(borrow)]
            state: Option<&'a RawValue>,
            #[serde(borrow)]
            network_state: Option<&'a RawValue>,
        }

        let raw = self.network_state.as_ref()?;
        let wrap: Wrapper = serde_json::from_slice(raw).ok()?;
        let state = wrap.state.or(wrap.network_state)?;

        Some(state.get().into())
    }
}
+151
View File
@@ -0,0 +1,151 @@
use std::time::{Duration, Instant};
use std::net::Ipv4Addr;
use bytes::Bytes;
use actix::prelude::*;
use actix_web_actors::ws;
use crate::aggregator::{Aggregator, AddNode};
use crate::chain::{Chain, UpdateNode, RemoveNode};
use crate::node::NodeId;
use crate::node::message::{NodeMessage, Details, SystemConnected};
use crate::util::LocateRequest;
/// How often we check that the client is still sending messages (this actor does not send pings itself)
const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(20);
/// How long before lack of client response causes a timeout
const CLIENT_TIMEOUT: Duration = Duration::from_secs(60);
/// Websocket actor handling a single node's telemetry connection.
pub struct NodeConnector {
/// Id of the node this connector is responsible for handling
nid: NodeId,
/// Time of the last message received; if nothing arrives within
/// CLIENT_TIMEOUT (60s) the connection is considered dead
hb: Instant,
/// Aggregator actor address
aggregator: Addr<Aggregator>,
/// Chain actor address
chain: Option<Addr<Chain>>,
/// Backlog of messages to be sent once we get a recipient handle to the chain
backlog: Vec<NodeMessage>,
/// IP address of the node this connector is responsible for
ip: Option<Ipv4Addr>,
/// Actix address of location services
locator: Recipient<LocateRequest>,
}
impl Actor for NodeConnector {
    type Context = ws::WebsocketContext<Self>;

    /// Kick off the liveness check as soon as the connection is up.
    fn started(&mut self, ctx: &mut Self::Context) {
        self.heartbeat(ctx);
    }

    /// On disconnect, tell the chain (if we ever joined one) to drop this node.
    fn stopped(&mut self, _: &mut Self::Context) {
        match self.chain {
            Some(ref chain) => chain.do_send(RemoveNode(self.nid)),
            None => (),
        }
    }
}
impl NodeConnector {
    pub fn new(aggregator: Addr<Aggregator>, locator: Recipient<LocateRequest>, ip: Option<Ipv4Addr>) -> Self {
        Self {
            // Garbage id, will be replaced by the Initialize message
            nid: !0,
            hb: Instant::now(),
            aggregator,
            chain: None,
            backlog: Vec::new(),
            ip,
            locator,
        }
    }

    /// Periodically verify the client is still alive; stop the actor when
    /// nothing has been received for longer than `CLIENT_TIMEOUT`.
    fn heartbeat(&self, ctx: &mut <Self as Actor>::Context) {
        ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| {
            let idle = Instant::now().duration_since(act.hb);
            if idle > CLIENT_TIMEOUT {
                // stop actor
                ctx.stop();
            }
        });
    }

    /// Route a parsed node message: forward to the chain once it is known,
    /// otherwise either perform the initial handshake or buffer the message.
    fn handle_message(&mut self, msg: NodeMessage, data: Bytes, ctx: &mut <Self as Actor>::Context) {
        if let Some(chain) = self.chain.as_ref() {
            chain.do_send(UpdateNode {
                nid: self.nid,
                msg,
                raw: Some(data),
            });
        } else if let Details::SystemConnected(connected) = msg.details {
            // Handshake message: register with the aggregator, which will
            // reply with an `Initialize` carrying the chain address.
            let SystemConnected { chain, node } = connected;
            let rec = ctx.address().recipient();
            let chain = chain.into();

            self.aggregator.do_send(AddNode { rec, chain, node });
        } else {
            // Not initialized yet; keep a bounded backlog of at most 10
            // messages, dropping the oldest first.
            if self.backlog.len() >= 10 {
                self.backlog.remove(0);
            }
            self.backlog.push(msg);
        }
    }
}
/// Sent by the aggregator once the node is registered with a chain:
/// carries the assigned node id and the chain actor's address.
#[derive(Message)]
pub struct Initialize(pub NodeId, pub Addr<Chain>);
impl Handler<Initialize> for NodeConnector {
    type Result = ();

    fn handle(&mut self, msg: Initialize, _: &mut Self::Context) {
        let Initialize(nid, chain) = msg;

        // Flush everything that queued up while the chain was unknown.
        for msg in self.backlog.drain(..) {
            chain.do_send(UpdateNode { nid, msg, raw: None });
        }

        self.nid = nid;
        self.chain = Some(chain.clone());

        // Acquire the node's physical location
        if let Some(ip) = self.ip {
            let _ = self.locator.do_send(LocateRequest { ip, nid, chain });
        }
    }
}
impl StreamHandler<ws::Message, ws::ProtocolError> for NodeConnector {
    fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
        // Any inbound traffic counts as a heartbeat.
        self.hb = Instant::now();

        let data = match msg {
            ws::Message::Ping(msg) => {
                ctx.pong(&msg);
                return;
            }
            ws::Message::Pong(_) => return,
            ws::Message::Text(text) => text.into(),
            ws::Message::Binary(data) => data,
            ws::Message::Close(_) => {
                ctx.stop();
                return;
            }
            ws::Message::Nop => return,
        };

        match serde_json::from_slice(&data) {
            Ok(msg) => self.handle_message(msg, data, ctx),
            Err(err) => {
                // Log at most the first 256 bytes of the offending payload.
                // (`unwrap_or` instead of `unwrap_or_else`: arguments are
                // free to evaluate, no closures needed.)
                let data: &[u8] = data.get(..256).unwrap_or(&data);
                warn!("Failed to parse node message: {} {}", err, std::str::from_utf8(data).unwrap_or("INVALID UTF8"))
            },
        }
    }
}
+120
View File
@@ -0,0 +1,120 @@
use actix::prelude::*;
use chrono::{DateTime, Utc};
use serde::Deserialize;
use serde::de::IgnoredAny;
use crate::node::{NodeDetails, NodeStats};
use crate::types::{Block, BlockNumber, BlockHash};
/// A single telemetry message received from a node over the websocket.
#[derive(Deserialize, Debug, Message)]
pub struct NodeMessage {
/// Log level; only "INFO" deserializes (see `Level`).
pub level: Level,
/// Timestamp the node attached to the message.
pub ts: DateTime<Utc>,
/// Payload, discriminated by the JSON `msg` field.
#[serde(flatten)]
pub details: Details,
}
/// Message log level. Only "INFO" is accepted; deserialization of any
/// other level fails, which filters those messages out upstream.
#[derive(Deserialize, Debug)]
pub enum Level {
#[serde(rename = "INFO")]
Info,
}
/// Telemetry payload, tagged by the JSON `msg` field. Variants carrying
/// `IgnoredAny` are recognized but their payload is discarded on parse.
#[derive(Deserialize, Debug)]
#[serde(tag = "msg")]
pub enum Details {
#[serde(rename = "node.start")]
NodeStart(Block),
#[serde(rename = "system.connected")]
SystemConnected(SystemConnected),
#[serde(rename = "system.interval")]
SystemInterval(SystemInterval),
#[serde(rename = "system.network_state")]
SystemNetworkState(IgnoredAny),
#[serde(rename = "block.import")]
BlockImport(Block),
#[serde(rename = "notify.finalized")]
NotifyFinalized(Finalized),
#[serde(rename = "txpool.import")]
TxPoolImport(IgnoredAny),
#[serde(rename = "afg.finalized")]
AfgFinalized(IgnoredAny),
#[serde(rename = "afg.received_precommit")]
AfgReceivedPrecommit(IgnoredAny),
#[serde(rename = "afg.received_prevote")]
AfgReceivedPrevote(IgnoredAny),
#[serde(rename = "afg.received_commit")]
AfgReceivedCommit(IgnoredAny),
#[serde(rename = "afg.authority_set")]
AfgAuthoritySet(IgnoredAny),
#[serde(rename = "aura.pre_sealed_block")]
AuraPreSealedBlock(IgnoredAny),
#[serde(rename = "prepared_block_for_proposing")]
PreparedBlockForProposing(IgnoredAny),
}
/// Handshake payload of a `system.connected` message.
#[derive(Deserialize, Debug)]
pub struct SystemConnected {
/// Name of the chain the node runs on.
pub chain: Box<str>,
/// Remaining top-level fields describe the node itself.
#[serde(flatten)]
pub node: NodeDetails,
}
/// Periodic `system.interval` report carrying stats, optional hardware
/// metrics, and the node's current best (and possibly finalized) block.
#[derive(Deserialize, Debug)]
pub struct SystemInterval {
#[serde(flatten)]
pub stats: NodeStats,
pub memory: Option<f32>,
pub cpu: Option<f32>,
pub bandwidth_upload: Option<f64>,
pub bandwidth_download: Option<f64>,
/// Latest finalized block height, if the node reported one.
pub finalized_height: Option<BlockNumber>,
pub finalized_hash: Option<BlockHash>,
/// Current best block (flattened `best`/`height` fields).
#[serde(flatten)]
pub block: Block,
/// Present when the node bundled network state; contents are ignored here.
pub network_state: Option<IgnoredAny>,
}
/// Payload of a `notify.finalized` message.
#[derive(Deserialize, Debug)]
pub struct Finalized {
/// Hash of the finalized block (sent under the `best` key).
#[serde(rename = "best")]
pub hash: BlockHash,
/// Block height as a decimal string; parsed in `Details::finalized_block`.
pub height: Box<str>,
}
impl Block {
    /// Placeholder block: all-zero hash at height 0.
    pub fn zero() -> Self {
        Block {
            hash: BlockHash::zero(),
            height: 0,
        }
    }
}
impl Details {
    /// Best block carried by this message, if it has one.
    pub fn best_block(&self) -> Option<&Block> {
        match self {
            Details::BlockImport(block) => Some(block),
            Details::SystemInterval(SystemInterval { block, .. }) => Some(block),
            _ => None,
        }
    }

    /// Finalized block carried by this message, if any. Returns `None` when
    /// the relevant fields are absent or the height string fails to parse.
    pub fn finalized_block(&self) -> Option<Block> {
        match self {
            Details::SystemInterval(interval) => Some(Block {
                hash: interval.finalized_hash?,
                height: interval.finalized_height?,
            }),
            Details::NotifyFinalized(finalized) => Some(Block {
                hash: finalized.hash,
                height: finalized.height.parse().ok()?,
            }),
            _ => None,
        }
    }
}
-84
View File
@@ -1,84 +0,0 @@
use std::time::{Duration, Instant};
use actix::prelude::*;
use actix_web_actors::ws;
use crate::chain::Chain;
use crate::node_message::NodeMessage;
/// How often heartbeat pings are sent
const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
/// How long before lack of client response causes a timeout
const CLIENT_TIMEOUT: Duration = Duration::from_secs(10);
pub struct NodeConnector {
/// Client must send ping at least once per 10 seconds (CLIENT_TIMEOUT),
hb: Instant,
/// Chain actor address
addr: Addr<Chain>,
}
impl Actor for NodeConnector {
type Context = ws::WebsocketContext<Self>;
/// Method is called on actor start. We start the heartbeat process here.
fn started(&mut self, ctx: &mut Self::Context) {
self.hb(ctx);
}
}
impl NodeConnector {
pub fn new(addr: Addr<Chain>) -> Self {
Self {
hb: Instant::now(),
addr,
}
}
/// Send ping every 5 seconds
fn hb(&self, ctx: &mut <Self as Actor>::Context) {
ctx.run_interval(HEARTBEAT_INTERVAL, |act, ctx| {
// check client heartbeats
if Instant::now().duration_since(act.hb) > CLIENT_TIMEOUT {
// heartbeat timed out
println!("NodeConnector timeout!");
// stop actor
ctx.stop();
} else {
ctx.ping("")
}
});
}
}
/// Handler for `ws::Message`
impl StreamHandler<ws::Message, ws::ProtocolError> for NodeConnector {
fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
// process websocket messages
match msg {
ws::Message::Ping(msg) => {
self.hb = Instant::now();
ctx.pong(&msg);
}
ws::Message::Pong(_) => {
self.hb = Instant::now();
}
ws::Message::Text(text) => {
match serde_json::from_str::<NodeMessage>(&text) {
Ok(msg) => println!("GOT\t{:?}\nFROM:\t{}\n", msg, text),
_ => (),
// Err(err) => println!("\t{:?}\n\t{}", err, text),
}
// ctx.text(test); // echo
}
ws::Message::Binary(bin) => {
println!("Binary message: {} bytes", bin.len());
// ctx.binary(bin); // echo
}
ws::Message::Close(_) => {
ctx.stop();
}
ws::Message::Nop => (),
}
}
}
-61
View File
@@ -1,61 +0,0 @@
use chrono::{DateTime, Utc};
use serde::Deserialize;
pub use primitive_types::H256 as BlockHash;
pub type BlockNumber = u64;
#[derive(Deserialize, Debug)]
pub struct NodeMessage {
level: Level,
ts: DateTime<Utc>,
#[serde(flatten)]
details: Details,
}
#[derive(Deserialize, Debug)]
pub enum Level {
#[serde(rename = "INFO")]
Info,
}
#[derive(Deserialize, Debug)]
#[serde(tag = "msg")]
pub enum Details {
#[serde(rename = "node.start")]
NodeStart(BestBlock),
#[serde(rename = "system.connected")]
SystemConnected(SystemConnected),
#[serde(rename = "system.interval")]
SystemInterval(SystemInterval),
#[serde(rename = "block.import")]
BlockImport(BestBlock),
}
#[derive(Deserialize, Debug)]
pub struct SystemConnected {
pub name: Box<str>,
pub chain: Box<str>,
pub implementation: Box<str>,
pub version: Box<str>,
pub config: Option<Box<str>>,
}
#[derive(Deserialize, Debug)]
pub struct SystemInterval {
pub txcount: u64,
pub peers: u64,
pub memory: Option<f64>,
pub cpu: Option<f64>,
pub bandwidth_upload: Option<f64>,
pub bandwidth_download: Option<f64>,
pub finalized_height: Option<BlockNumber>,
pub finalized_hash: Option<BlockHash>,
#[serde(flatten)]
pub best_block: BestBlock,
}
#[derive(Deserialize, Debug)]
pub struct BestBlock {
pub best: BlockHash,
pub height: BlockNumber,
}
+100
View File
@@ -0,0 +1,100 @@
use serde::ser::{Serialize, Serializer, SerializeTuple};
use serde::Deserialize;
pub type NodeId = usize;
pub type BlockNumber = u64;
pub type Timestamp = u64;
pub use primitive_types::H256 as BlockHash;
/// Static information a node reports about itself on connect.
#[derive(Deserialize, Debug)]
pub struct NodeDetails {
pub name: Box<str>,
pub implementation: Box<str>,
pub version: Box<str>,
}
/// Live counters reported with every `system.interval` message.
#[derive(Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub struct NodeStats {
/// Number of connected peers.
pub peers: u64,
/// Number of transactions in the pool.
pub txcount: u64,
}
/// A block identified by hash and height; deserialized from the node's
/// flattened `best`/`height` JSON fields.
#[derive(Deserialize, Debug, Clone, Copy)]
pub struct Block {
#[serde(rename = "best")]
pub hash: BlockHash,
pub height: BlockNumber,
}
/// A best block plus its timing metadata.
#[derive(Debug, Clone, Copy)]
pub struct BlockDetails {
pub block: Block,
/// Time between this block and the previous best (ms — derived from
/// `block_timestamp` deltas).
pub block_time: u64,
/// Unix-ms timestamp at which this block was recorded (see `util::now`).
pub block_timestamp: u64,
/// Propagation time reported for this block.
pub propagation_time: u64,
}
/// Slices of a node's rolling hardware metrics — two `f32` series and three
/// `f64` series (presumably cpu, memory, upload, download, chart timestamps,
/// matching the field types on `Node` — TODO confirm ordering).
pub type NodeHardware<'a> = (&'a [f32], &'a [f32], &'a [f64], &'a [f64], &'a [f64]);
/// Geographical location of a node, deserialized from the ip lookup response.
#[derive(Deserialize, Debug, Clone)]
pub struct NodeLocation {
pub latitude: f32,
pub longitude: f32,
pub city: Box<str>,
}
impl Serialize for NodeDetails {
    /// Wire format: a 6-tuple; the last three slots are placeholders
    /// reserved for future fields.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut tup = serializer.serialize_tuple(6)?;
        tup.serialize_element(&self.name)?;
        tup.serialize_element(&self.implementation)?;
        tup.serialize_element(&self.version)?;
        tup.serialize_element(&None::<String>)?; // TODO Maybe<Address>
        tup.serialize_element(&None::<usize>)?; // TODO Maybe<NetworkId>
        tup.serialize_element("")?; // TODO Address
        tup.end()
    }
}
impl Serialize for NodeStats {
    /// Wire format: a `[peers, txcount]` pair.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // serde serializes a 2-tuple via `serialize_tuple(2)` with both
        // elements pushed in order — identical to the manual form.
        (self.peers, self.txcount).serialize(serializer)
    }
}
impl Serialize for BlockDetails {
    /// Wire format: `[height, hash, block_time, block_timestamp, propagation_time]`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // A 5-tuple serializes exactly like `serialize_tuple(5)` with the
        // elements pushed in order.
        (
            self.block.height,
            &self.block.hash,
            self.block_time,
            self.block_timestamp,
            self.propagation_time,
        )
            .serialize(serializer)
    }
}
impl Serialize for NodeLocation {
    /// Wire format: `[latitude, longitude, city]`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // A 3-tuple serializes exactly like `serialize_tuple(3)`; `&*self.city`
        // serializes the city as a plain string.
        (self.latitude, self.longitude, &*self.city).serialize(serializer)
    }
}
+29
View File
@@ -0,0 +1,29 @@
mod dense_map;
mod mean_list;
mod num_stats;
mod location;
pub use mean_list::MeanList;
pub use location::{Locator, LocatorFactory, LocateRequest};
pub use dense_map::DenseMap;
pub use num_stats::NumStats;
/// Hash arbitrary bytes with the FNV hasher.
pub fn fnv<D: AsRef<[u8]>>(data: D) -> u64 {
    use std::hash::Hasher;

    use fnv::FnvHasher;

    let mut state = FnvHasher::default();
    state.write(data.as_ref());
    state.finish()
}
/// Returns current unix time in ms (compatible with JS Date.now())
pub fn now() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};

    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("System time must be configured to be post Unix Epoch start; qed");

    since_epoch.as_millis() as u64
}
+78
View File
@@ -0,0 +1,78 @@
pub type Id = usize;

/// A map from dense integer ids to values. Ids of removed items are
/// recycled, keeping the id space compact as items come and go.
pub struct DenseMap<T> {
    /// List of retired indexes that can be re-used
    retired: Vec<Id>,
    /// All items
    items: Vec<Option<T>>,
}

impl<T> DenseMap<T> {
    pub fn new() -> Self {
        DenseMap {
            retired: Vec::new(),
            items: Vec::new(),
        }
    }

    /// Insert an item, returning its assigned id.
    pub fn add(&mut self, item: T) -> Id {
        self.add_with(|_| item)
    }

    /// Insert an item built from its own id, preferring a recycled slot
    /// over growing the backing vector.
    pub fn add_with<F>(&mut self, f: F) -> Id
    where
        F: FnOnce(Id) -> T,
    {
        if let Some(id) = self.retired.pop() {
            self.items[id] = Some(f(id));
            id
        } else {
            let id = self.items.len();
            self.items.push(Some(f(id)));
            id
        }
    }

    pub fn get(&self, id: Id) -> Option<&T> {
        self.items.get(id)?.as_ref()
    }

    pub fn get_mut(&mut self, id: Id) -> Option<&mut T> {
        self.items.get_mut(id)?.as_mut()
    }

    /// Remove and return the item under `id`, retiring the slot for reuse.
    pub fn remove(&mut self, id: Id) -> Option<T> {
        let removed = self.items.get_mut(id)?.take();
        if removed.is_some() {
            // Something was actually removed — make the id available again.
            self.retired.push(id);
        }
        removed
    }

    /// Iterate over `(id, &item)` pairs of live items.
    pub fn iter(&self) -> impl Iterator<Item = (Id, &T)> + '_ {
        self.items
            .iter()
            .enumerate()
            .filter_map(|(id, item)| item.as_ref().map(|item| (id, item)))
    }

    /// Iterate over `(id, &mut item)` pairs of live items.
    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id, &mut T)> + '_ {
        self.items
            .iter_mut()
            .enumerate()
            .filter_map(|(id, item)| item.as_mut().map(|item| (id, item)))
    }

    /// Number of live items (total slots minus retired ones).
    pub fn len(&self) -> usize {
        self.items.len() - self.retired.len()
    }

    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
+89
View File
@@ -0,0 +1,89 @@
use std::net::Ipv4Addr;
use std::sync::Arc;
use actix::prelude::*;
use rustc_hash::FxHashMap;
use parking_lot::RwLock;
use crate::chain::{Chain, LocateNode};
use crate::types::{NodeId, NodeLocation};
/// IP-geolocation worker. All `Locator`s created by the same
/// `LocatorFactory` share one response cache.
#[derive(Clone)]
pub struct Locator {
client: reqwest::Client,
// `None` entries cache failed lookups so they are not retried.
cache: Arc<RwLock<FxHashMap<Ipv4Addr, Option<Arc<NodeLocation>>>>>,
}
/// Factory producing `Locator`s that share a single location cache.
pub struct LocatorFactory {
cache: Arc<RwLock<FxHashMap<Ipv4Addr, Option<Arc<NodeLocation>>>>>,
}
impl LocatorFactory {
    pub fn new() -> Self {
        // Default entry for localhost
        let berlin = NodeLocation {
            latitude: 52.5166667,
            longitude: 13.4,
            city: "Berlin".into(),
        };

        let mut cache = FxHashMap::default();
        cache.insert(Ipv4Addr::new(127, 0, 0, 1), Some(Arc::new(berlin)));

        LocatorFactory {
            cache: Arc::new(RwLock::new(cache)),
        }
    }

    /// Create a `Locator` sharing this factory's cache.
    pub fn create(&self) -> Locator {
        Locator {
            client: reqwest::Client::new(),
            cache: self.cache.clone(),
        }
    }
}
// SyncContext: runs on a dedicated thread pool — presumably because the
// reqwest lookups in the handler block; confirm against the arbiter setup.
impl Actor for Locator {
type Context = SyncContext<Self>;
}
/// Request to resolve `ip` and report the resulting location for node
/// `nid` back to the given chain actor.
#[derive(Message)]
pub struct LocateRequest {
pub ip: Ipv4Addr,
pub nid: NodeId,
pub chain: Addr<Chain>,
}
impl Handler<LocateRequest> for Locator {
type Result = ();
fn handle(&mut self, msg: LocateRequest, _: &mut Self::Context) {
let LocateRequest { ip, nid, chain } = msg;
if let Some(item) = self.cache.read().get(&ip) {
if let Some(location) = item {
return chain.do_send(LocateNode { nid, location: location.clone() });
}
return
}
let ip_req = format!("https://ipapi.co/{}/json", ip);
let mut response = match self.client.post(&ip_req).send() {
Ok(response) => response,
Err(err) => return debug!("POST error for ip location: {:?}", err),
};
let location = match response.json::<NodeLocation>() {
Ok(location) => Some(Arc::new(location)),
Err(err) => {
debug!("JSON error for ip location: {:?}", err);
None
}
};
self.cache.write().insert(ip, location.clone());
if let Some(location) = location {
chain.do_send(LocateNode { nid, location });
}
}
}
+68
View File
@@ -0,0 +1,68 @@
use num_traits::{Float, Zero};
use std::ops::AddAssign;
/// Rolling list of up to 20 means. Each mean aggregates `ticks_per_mean`
/// raw samples; when the list fills, neighbouring means are merged and the
/// aggregation window doubles (up to 32 ticks per mean).
pub struct MeanList<T> where T: Float + AddAssign + Zero + From<u8> {
/// Sum of samples in the current (unfinished) period.
period_sum: T,
/// Number of samples accumulated in the current period.
period_count: u8,
/// Number of finished means stored in `means`.
mean_index: u8,
means: [T; 20],
/// Samples per mean; doubles on each squash, capped at 32.
ticks_per_mean: u8,
}
impl<T> MeanList<T> where T: Float + AddAssign + Zero + From<u8> {
pub fn new() -> MeanList<T> {
MeanList {
period_sum: T::zero(),
period_count: 0,
mean_index: 0,
means: [T::zero(); 20],
ticks_per_mean: 1,
}
}
/// The finished means, oldest first.
pub fn slice(&self) -> &[T] {
&self.means[..usize::from(self.mean_index)]
}
/// Add a raw sample. Returns `true` when this sample completed a period
/// and a new mean was appended to the list.
pub fn push(&mut self, val: T) -> bool {
// List is full but the window can still grow: merge neighbouring means
// (halving the list) and double `ticks_per_mean`.
if self.mean_index == 20 && self.ticks_per_mean < 32 {
self.squash_means();
}
self.period_sum += val;
self.period_count += 1;
if self.period_count == self.ticks_per_mean {
self.push_mean();
true
} else {
false
}
}
// Finish the current period: store its mean and reset the accumulator.
fn push_mean(&mut self) {
let mean = self.period_sum / std::convert::From::from(self.period_count);
if self.mean_index == 20 && self.ticks_per_mean == 32 {
// Window is maxed out: drop the oldest mean instead of growing.
self.means.rotate_left(1);
self.means[19] = mean;
} else {
self.means[usize::from(self.mean_index)] = mean;
self.mean_index += 1;
}
self.period_sum = T::zero();
self.period_count = 0;
}
// Merge each pair of neighbouring means, halving the list to 10 entries
// and doubling the number of ticks each mean represents.
fn squash_means(&mut self) {
self.ticks_per_mean *= 2;
self.mean_index = 10;
for i in 0..10 {
let i2 = i * 2;
self.means[i] = (self.means[i2] + self.means[i2 + 1]) / std::convert::From::from(2)
}
}
}
+43
View File
@@ -0,0 +1,43 @@
use num_traits::{Zero, NumOps, Bounded};
use std::iter::Sum;
use std::convert::TryFrom;
/// Keep track of last N numbers pushed onto internal stack.
/// Provides means to get an average of said numbers.
pub struct NumStats<T> {
/// Ring buffer of the last N values.
stack: Box<[T]>,
/// Total number of pushes so far (monotonic, not wrapped).
index: usize,
/// Running sum of the values currently held in `stack`.
sum: T,
}
impl<T: NumOps + Zero + Bounded + Copy + Sum + TryFrom<usize>> NumStats<T> {
    /// Create a tracker that averages over the last `size` values.
    pub fn new(size: usize) -> Self {
        NumStats {
            stack: vec![T::zero(); size].into_boxed_slice(),
            index: 0,
            sum: T::zero(),
        }
    }

    /// Push a value, evicting the oldest one once the buffer is full.
    /// Maintains the running sum incrementally (subtract evicted, add new).
    pub fn push(&mut self, val: T) {
        let slot = &mut self.stack[self.index % self.stack.len()];
        self.sum = self.sum - *slot + val;
        *slot = val;
        self.index += 1;
    }

    /// Average of the values currently held. Returns zero when no value
    /// has been pushed yet (previously this divided by zero).
    pub fn average(&self) -> T {
        let cap = std::cmp::min(self.index, self.stack.len());
        if cap == 0 {
            return T::zero();
        }
        let cap = T::try_from(cap).unwrap_or_else(|_| T::max_value());
        self.sum / cap
    }

    /// Clear all state. The buffer slots must be zeroed too: `push`
    /// subtracts the evicted slot from `sum`, so stale values left behind
    /// by a reset would corrupt every subsequent average.
    pub fn reset(&mut self) {
        self.index = 0;
        self.sum = T::zero();
        for slot in self.stack.iter_mut() {
            *slot = T::zero();
        }
    }
}
+1 -1
View File
@@ -9,4 +9,4 @@ import * as FeedMessage from './feed';
export { Types, FeedMessage };
// Increment this if breaking changes were made to types in `feed.ts`
export const VERSION: Types.FeedVersion = 25 as Types.FeedVersion;
export const VERSION: Types.FeedVersion = 27 as Types.FeedVersion;
+15 -7
View File
@@ -11,6 +11,7 @@ import './App.css';
export default class App extends React.Component<{}, State> {
public state: State;
private chainsCache: ChainData[] = [];
private readonly settings: PersistentObject<State.Settings>;
private readonly pins: PersistentSet<Types.NodeName>;
private readonly connection: Promise<Connection>;
@@ -80,6 +81,8 @@ export default class App extends React.Component<{}, State> {
return this.state;
});
setInterval(() => this.chainsCache = [], 10000); // Wipe sorted chains cache every 10 seconds
}
public render() {
@@ -154,22 +157,27 @@ export default class App extends React.Component<{}, State> {
}
private chains(): ChainData[] {
return stable
if (this.chainsCache.length === this.state.chains.size) {
return this.chainsCache;
}
this.chainsCache = stable
.inplace(
Array.from(this.state.chains.entries()),
Array.from(this.state.chains.values()),
(a, b) => {
if (a[0] === PINNED_CHAIN) {
if (a.label === PINNED_CHAIN) {
return -1;
}
if (b[0] === PINNED_CHAIN) {
if (b.label === PINNED_CHAIN) {
return 1;
}
return b[1] - a[1];
return b.nodeCount - a.nodeCount;
}
)
.map(([label, nodeCount]) => ({ label, nodeCount }));
);
return this.chainsCache;
}
}
+17 -13
View File
@@ -1,5 +1,5 @@
import { VERSION, timestamp, FeedMessage, Types, Maybe, sleep } from '@dotstats/common';
import { State, Update, Node, UpdateBound, PINNED_CHAIN } from './state';
import { State, Update, Node, UpdateBound, ChainData, PINNED_CHAIN } from './state';
import { PersistentSet } from './persist';
import { getHashData, setHashData } from './utils';
import { AfgHandling } from './AfgHandling';
@@ -19,7 +19,7 @@ export class Connection {
private static readonly address = window.location.protocol === 'https:'
? `wss://${window.location.hostname}/feed/`
: `ws://${window.location.hostname}:8080`;
: `ws://127.0.0.1:8000/feed`;
// private static readonly address = 'wss://telemetry.polkadot.io/feed/';
@@ -234,7 +234,13 @@ export class Connection {
case Actions.AddedChain: {
const [label, nodeCount] = message.payload;
chains.set(label, nodeCount);
const chain = chains.get(label);
if (chain) {
chain.nodeCount = nodeCount;
} else {
chains.set(label, { label, nodeCount });
}
this.state = this.update({ chains });
@@ -412,23 +418,21 @@ export class Connection {
}
}
let topLabel: Maybe<Types.ChainLabel> = null;
let topCount: Types.NodeCount = 0 as Types.NodeCount;
let topChain: Maybe<ChainData> = null;
for (const [label, count] of chains.entries()) {
if (label === PINNED_CHAIN) {
topLabel = label;
for (const chain of chains.values()) {
if (chain.label === PINNED_CHAIN) {
topChain = chain;
break;
}
if (count > topCount) {
topLabel = label;
topCount = count;
if (!topChain || chain.nodeCount > topChain.nodeCount) {
topChain = chain;
}
}
if (topLabel) {
this.subscribe(topLabel);
if (topChain) {
this.subscribe(topChain.label);
}
}
@@ -135,7 +135,7 @@ function formatCPU(cpu: number, stamp: Maybe<Types.Timestamp>): string {
const URI_BASE = window.location.protocol === 'https:'
? `/network_state/`
: `http://${window.location.hostname}:8081/network_state/`;
: `http://${window.location.hostname}:8000/network_state/`;
export class Row extends React.Component<Row.Props, Row.State> {
public static readonly columns: Column[] = [
+3 -3
View File
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0" x="0" y="0" width="131" height="131">
<rect width="131" height="131" x="0" y="0" stroke="white" stroke-width="10"/>
<rect width="131" height="40" x="0" y="45" fill="white"/>
<svg xmlns="http://www.w3.org/2000/svg" width="131" height="131" viewBox="0 0 131 131">
<rect width="131" height="131" x="0" y="0" stroke="currentColor" stroke-width="10" fill="transparent"/>
<rect width="131" height="40" x="0" y="45" fill="currentColor"/>
</svg>

Before

Width:  |  Height:  |  Size: 338 B

After

Width:  |  Height:  |  Size: 324 B

+1 -1
View File
@@ -217,7 +217,7 @@ export interface State {
blockAverage: Maybe<Types.Milliseconds>;
timeDiff: Types.Milliseconds;
subscribed: Maybe<Types.ChainLabel>;
chains: Map<Types.ChainLabel, Types.NodeCount>;
chains: Map<Types.ChainLabel, ChainData>;
nodes: SortedCollection<Types.NodeId, Node>;
settings: Readonly<State.Settings>;
pins: Readonly<Set<Types.NodeName>>;
+1 -1
View File
@@ -1 +1 @@
yarn build:all && pm2 restart all
yarn build:all && pm2 restart index