rpc: bump jsonrpsee v0.22 and fix race in rpc v2 chain_head (#3230)

Close #2992 

Breaking changes:
- rpc server grafana metric `substrate_rpc_requests_started` is removed
(not possible to implement anymore)
- rpc server grafana metric `substrate_rpc_requests_finished` is removed
(not possible to implement anymore)
- rpc server: if a websocket ping/pong is not ACKed within 30 seconds more
than three times, the connection will be closed

Added:
- rpc server grafana metric `substrate_rpc_sessions_time` is added to
record the duration of each websocket session
This commit is contained in:
Niklas Adolfsson
2024-02-14 23:18:22 +01:00
committed by GitHub
parent 7e7c488ba8
commit c7c4fe0184
53 changed files with 777 additions and 468 deletions
+85 -16
View File
@@ -22,21 +22,32 @@
pub mod middleware;
use std::{error::Error as StdError, net::SocketAddr, time::Duration};
use std::{convert::Infallible, error::Error as StdError, net::SocketAddr, time::Duration};
use http::header::HeaderValue;
use hyper::{
server::conn::AddrStream,
service::{make_service_fn, service_fn},
};
use jsonrpsee::{
server::middleware::{HostFilterLayer, ProxyGetRequestLayer},
RpcModule,
server::{
middleware::{
http::{HostFilterLayer, ProxyGetRequestLayer},
rpc::RpcServiceBuilder,
},
stop_channel, ws, PingConfig, StopHandle, TowerServiceBuilder,
},
Methods, RpcModule,
};
use tokio::net::TcpListener;
use tower::Service;
use tower_http::cors::{AllowOrigin, CorsLayer};
pub use crate::middleware::RpcMetrics;
pub use jsonrpsee::core::{
id_providers::{RandomIntegerIdProvider, RandomStringIdProvider},
traits::IdProvider,
};
pub use middleware::{MetricsLayer, RpcMetrics};
const MEGABYTE: u32 = 1024 * 1024;
@@ -92,7 +103,7 @@ pub async fn start_server<M: Send + Sync + 'static>(
let local_addr = std_listener.local_addr().ok();
let host_filter = hosts_filtering(cors.is_some(), local_addr);
let middleware = tower::ServiceBuilder::new()
let http_middleware = tower::ServiceBuilder::new()
.option_layer(host_filter)
// Proxy `GET /health` requests to internal `system_health` method.
.layer(ProxyGetRequestLayer::new("/health", "system_health")?)
@@ -103,10 +114,15 @@ pub async fn start_server<M: Send + Sync + 'static>(
.max_response_body_size(max_payload_out_mb.saturating_mul(MEGABYTE))
.max_connections(max_connections)
.max_subscriptions_per_connection(max_subs_per_conn)
.ping_interval(Duration::from_secs(30))
.set_middleware(middleware)
.enable_ws_ping(
PingConfig::new()
.ping_interval(Duration::from_secs(30))
.inactive_limit(Duration::from_secs(60))
.max_failures(3),
)
.set_http_middleware(http_middleware)
.set_message_buffer_capacity(message_buffer_capacity)
.custom_tokio_runtime(tokio_handle);
.custom_tokio_runtime(tokio_handle.clone());
if let Some(provider) = id_provider {
builder = builder.set_id_provider(provider);
@@ -114,22 +130,66 @@ pub async fn start_server<M: Send + Sync + 'static>(
builder = builder.set_id_provider(RandomStringIdProvider::new(16));
};
let rpc_api = build_rpc_api(rpc_api);
let handle = if let Some(metrics) = metrics {
let server = builder.set_logger(metrics).build_from_tcp(std_listener)?;
server.start(rpc_api)
} else {
let server = builder.build_from_tcp(std_listener)?;
server.start(rpc_api)
let (stop_handle, server_handle) = stop_channel();
let cfg = PerConnection {
methods: build_rpc_api(rpc_api).into(),
service_builder: builder.to_service_builder(),
metrics,
tokio_handle,
stop_handle: stop_handle.clone(),
};
let make_service = make_service_fn(move |_conn: &AddrStream| {
let cfg = cfg.clone();
async move {
let cfg = cfg.clone();
Ok::<_, Infallible>(service_fn(move |req| {
let PerConnection { service_builder, metrics, tokio_handle, stop_handle, methods } =
cfg.clone();
let is_websocket = ws::is_upgrade_request(&req);
let transport_label = if is_websocket { "ws" } else { "http" };
let metrics = metrics.map(|m| MetricsLayer::new(m, transport_label));
let rpc_middleware = RpcServiceBuilder::new().option_layer(metrics.clone());
let mut svc =
service_builder.set_rpc_middleware(rpc_middleware).build(methods, stop_handle);
async move {
if is_websocket {
let on_disconnect = svc.on_session_closed();
// Spawn a task to handle when the connection is closed.
tokio_handle.spawn(async move {
let now = std::time::Instant::now();
metrics.as_ref().map(|m| m.ws_connect());
on_disconnect.await;
metrics.as_ref().map(|m| m.ws_disconnect(now));
});
}
svc.call(req).await
}
}))
}
});
let server = hyper::Server::from_tcp(std_listener)?.serve(make_service);
tokio::spawn(async move {
let graceful = server.with_graceful_shutdown(async move { stop_handle.shutdown().await });
let _ = graceful.await;
});
log::info!(
"Running JSON-RPC server: addr={}, allowed origins={}",
local_addr.map_or_else(|| "unknown".to_string(), |a| a.to_string()),
format_cors(cors)
);
Ok(handle)
Ok(server_handle)
}
fn hosts_filtering(enabled: bool, addr: Option<SocketAddr>) -> Option<HostFilterLayer> {
@@ -185,3 +245,12 @@ fn format_cors(maybe_cors: Option<&Vec<String>>) -> String {
format!("{:?}", ["*"])
}
}
/// Per-connection state, cloned for every incoming connection so each
/// connection can build its own tower service with its own RPC middleware.
#[derive(Clone)]
struct PerConnection<RpcMiddleware, HttpMiddleware> {
/// The JSON-RPC methods registered on the server (built from the `RpcModule`).
methods: Methods,
/// Handle awaited on for graceful shutdown of the server.
stop_handle: StopHandle,
/// Optional metrics; when present, wrapped in a `MetricsLayer` tagged with
/// the transport label ("ws" or "http") for each request.
metrics: Option<RpcMetrics>,
/// Tokio runtime handle used to spawn the per-session disconnect-tracking task.
tokio_handle: tokio::runtime::Handle,
/// Builder producing the per-connection jsonrpsee tower service
/// (`.build(methods, stop_handle)` after setting the RPC middleware).
service_builder: TowerServiceBuilder<RpcMiddleware, HttpMiddleware>,
}