feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit 286de54384
6841 changed files with 1848356 additions and 0 deletions
@@ -0,0 +1,44 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::arg_enums::Database;
use clap::Args;
/// Parameters for database
// NOTE(review): the `///` doc comments on the fields below are rendered by clap's
// derive macro as the `--help` text, i.e. they are user-facing strings, not plain
// comments — do not edit them casually.
#[derive(Debug, Clone, PartialEq, Args)]
pub struct DatabaseParams {
	/// Select database backend to use.
	// Parsed case-insensitively and also reachable via the `--db` alias.
	#[arg(long, alias = "db", value_name = "DB", ignore_case = true, value_enum)]
	pub database: Option<Database>,
	/// Limit the memory the database cache can use.
	// Value is interpreted in MiB; `None` leaves the backend default in place.
	#[arg(long = "db-cache", value_name = "MiB")]
	pub database_cache_size: Option<usize>,
}
impl DatabaseParams {
	/// The database backend selected on the command line, if any.
	pub fn database(&self) -> Option<Database> {
		let Self { database, .. } = self;
		*database
	}

	/// The configured database cache size limit (in MiB), if any.
	pub fn database_cache_size(&self) -> Option<usize> {
		let Self { database_cache_size, .. } = self;
		*database_cache_size
	}
}
@@ -0,0 +1,204 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
arg_enums::{
ExecutionStrategy, WasmExecutionMethod, WasmtimeInstantiationStrategy,
DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, DEFAULT_WASM_EXECUTION_METHOD,
},
params::{DatabaseParams, PruningParams},
};
use clap::{Args, ValueEnum};
use std::path::PathBuf;
/// Parameters for block import.
// NOTE(review): all `///` field docs below double as clap `--help` output and are
// therefore user-facing strings.
#[derive(Debug, Clone, Args)]
pub struct ImportParams {
	// Pruning-related CLI flags, merged into this command's flag set.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub pruning_params: PruningParams,
	// Database-related CLI flags, merged into this command's flag set.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub database_params: DatabaseParams,
	/// Method for executing Wasm runtime code.
	#[arg(
		long = "wasm-execution",
		value_name = "METHOD",
		value_enum,
		ignore_case = true,
		default_value_t = DEFAULT_WASM_EXECUTION_METHOD,
	)]
	pub wasm_method: WasmExecutionMethod,
	/// The WASM instantiation method to use.
	///
	/// Only has an effect when `wasm-execution` is set to `compiled`.
	/// The copy-on-write strategies are only supported on Linux.
	/// If the copy-on-write variant of a strategy is unsupported
	/// the executor will fall back to the non-CoW equivalent.
	/// The fastest (and the default) strategy available is `pooling-copy-on-write`.
	/// The `legacy-instance-reuse` strategy is deprecated and will
	/// be removed in the future. It should only be used in case of
	/// issues with the default instantiation strategy.
	#[arg(
		long,
		value_name = "STRATEGY",
		default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY,
		value_enum,
	)]
	pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy,
	/// Specify the path where local WASM runtimes are stored.
	///
	/// These runtimes will override on-chain runtimes when the version matches.
	#[arg(long, value_name = "PATH")]
	pub wasm_runtime_overrides: Option<PathBuf>,
	// Deprecated execution-strategy flags; kept only so old invocations still parse.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub execution_strategies: ExecutionStrategiesParams,
	/// Specify the state cache size.
	///
	/// Providing `0` will disable the cache.
	// Default is 1 GiB (1024 * 1024 * 1024 bytes).
	#[arg(long, value_name = "Bytes", default_value_t = 1024 * 1024 * 1024)]
	pub trie_cache_size: usize,
	/// Warm up the trie cache.
	///
	/// No warmup if flag is not present. Using flag without value chooses non-blocking warmup.
	// `num_args = 0..=1` lets the flag appear bare; the bare form maps to `non-blocking`.
	#[arg(long, value_name = "STRATEGY", value_enum, num_args = 0..=1, default_missing_value = "non-blocking")]
	pub warm_up_trie_cache: Option<TrieCacheWarmUpStrategy>,
}
/// Warmup strategy for the trie cache.
// NOTE(review): variant `///` docs are rendered as `--help` text for this `ValueEnum`;
// the `#[clap(name = ...)]` attributes fix the on-CLI spelling of each variant.
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum TrieCacheWarmUpStrategy {
	/// Warm up the cache in a non-blocking way.
	#[clap(name = "non-blocking")]
	NonBlocking,
	/// Warm up the cache in a blocking way (not recommended for production use).
	///
	/// When enabled, the trie cache warm-up will block the node startup until complete.
	/// This is not recommended for production use as it can significantly delay node startup.
	/// Only enable this option for testing or debugging purposes.
	#[clap(name = "blocking")]
	Blocking,
}
impl From<TrieCacheWarmUpStrategy> for sc_service::config::TrieCacheWarmUpStrategy {
	/// Map the CLI-level warm-up strategy onto its service-level counterpart.
	fn from(strategy: TrieCacheWarmUpStrategy) -> Self {
		use sc_service::config::TrieCacheWarmUpStrategy as ServiceStrategy;
		match strategy {
			TrieCacheWarmUpStrategy::NonBlocking => ServiceStrategy::NonBlocking,
			TrieCacheWarmUpStrategy::Blocking => ServiceStrategy::Blocking,
		}
	}
}
impl ImportParams {
	/// Maximum trie cache size in bytes, or `None` when the cache is disabled.
	pub fn trie_cache_maximum_size(&self) -> Option<usize> {
		// A configured size of zero means "no cache at all".
		(self.trie_cache_size != 0).then_some(self.trie_cache_size)
	}

	/// The trie-cache warm-up strategy requested on the command line, if any.
	pub fn warm_up_trie_cache(&self) -> Option<TrieCacheWarmUpStrategy> {
		self.warm_up_trie_cache
	}

	/// Resolve the Wasm execution method from the CLI flags.
	///
	/// Also emits deprecation warnings for any legacy execution-strategy flags
	/// that were still passed.
	pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod {
		self.execution_strategies.check_usage_and_print_deprecation_warning();
		crate::execution_method_from_cli(self.wasm_method, self.wasmtime_instantiation_strategy)
	}

	/// Path to locally stored Wasm runtimes that override on-chain ones, if set.
	pub fn wasm_runtime_overrides(&self) -> Option<PathBuf> {
		self.wasm_runtime_overrides.as_ref().cloned()
	}
}
/// Execution strategies parameters.
// NOTE(review): these flags are deprecated no-ops (see
// `check_usage_and_print_deprecation_warning`); the struct exists so old command
// lines still parse. The `///` docs double as clap `--help` text.
#[derive(Debug, Clone, Args)]
pub struct ExecutionStrategiesParams {
	/// Runtime execution strategy for importing blocks during initial sync.
	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
	pub execution_syncing: Option<ExecutionStrategy>,
	/// Runtime execution strategy for general block import (including locally authored blocks).
	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
	pub execution_import_block: Option<ExecutionStrategy>,
	/// Runtime execution strategy for constructing blocks.
	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
	pub execution_block_construction: Option<ExecutionStrategy>,
	/// Runtime execution strategy for offchain workers.
	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
	pub execution_offchain_worker: Option<ExecutionStrategy>,
	/// Runtime execution strategy when not syncing, importing or constructing blocks.
	#[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)]
	pub execution_other: Option<ExecutionStrategy>,
	/// The execution strategy that should be used by all execution contexts.
	// `conflicts_with_all` keeps the catch-all flag mutually exclusive with the
	// per-context flags above.
	#[arg(
		long,
		value_name = "STRATEGY",
		value_enum,
		ignore_case = true,
		conflicts_with_all = &[
			"execution_other",
			"execution_offchain_worker",
			"execution_block_construction",
			"execution_import_block",
			"execution_syncing",
		]
	)]
	pub execution: Option<ExecutionStrategy>,
}
impl ExecutionStrategiesParams {
	/// Emit a deprecation warning for every execution-strategy flag that was passed.
	fn check_usage_and_print_deprecation_warning(&self) {
		// Pair each flag's "was it passed?" state with its CLI spelling.
		let flags: [(bool, &str); 6] = [
			(self.execution_syncing.is_some(), "execution-syncing"),
			(self.execution_import_block.is_some(), "execution-import-block"),
			(self.execution_block_construction.is_some(), "execution-block-construction"),
			(self.execution_offchain_worker.is_some(), "execution-offchain-worker"),
			(self.execution_other.is_some(), "execution-other"),
			(self.execution.is_some(), "execution"),
		];
		for name in flags.iter().filter(|(passed, _)| *passed).map(|(_, name)| name) {
			eprintln!(
				"CLI parameter `--{name}` has no effect anymore and will be removed in the future!"
			);
		}
	}
}
@@ -0,0 +1,103 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{error, error::Result};
use clap::Args;
use sc_service::config::KeystoreConfig;
use sp_core::crypto::SecretString;
use std::{
fs,
path::{Path, PathBuf},
};
/// default sub directory for the key store
// Joined onto the node's config directory when `--keystore-path` is not given.
const DEFAULT_KEYSTORE_CONFIG_PATH: &str = "keystore";

/// Parameters of the keystore
// NOTE(review): the `///` field docs below are rendered as clap `--help` text.
// The three password sources are mutually exclusive via `conflicts_with_all`.
#[derive(Debug, Clone, Args)]
pub struct KeystoreParams {
	/// Specify custom keystore path.
	#[arg(long, value_name = "PATH")]
	pub keystore_path: Option<PathBuf>,
	/// Use interactive shell for entering the password used by the keystore.
	#[arg(long, conflicts_with_all = &["password", "password_filename"])]
	pub password_interactive: bool,
	/// Password used by the keystore.
	///
	/// This allows appending an extra user-defined secret to the seed.
	// Parsed through `secret_string_from_str` so the raw value is wrapped in a
	// `SecretString` immediately.
	#[arg(
		long,
		value_parser = secret_string_from_str,
		conflicts_with_all = &["password_interactive", "password_filename"]
	)]
	pub password: Option<SecretString>,
	/// File that contains the password used by the keystore.
	#[arg(
		long,
		value_name = "PATH",
		conflicts_with_all = &["password_interactive", "password"]
	)]
	pub password_filename: Option<PathBuf>,
}
/// Parse a secret string, returning a displayable error.
pub fn secret_string_from_str(s: &str) -> std::result::Result<SecretString, String> {
	// The target type is driven by the return annotation; the underlying parse
	// error is deliberately discarded so no secret material leaks into messages.
	s.parse().map_err(|_| "Could not get SecretString".to_string())
}
impl KeystoreParams {
	/// Get the keystore configuration for the parameters.
	///
	/// The password is resolved with the following precedence: interactive
	/// prompt, password file, then the `--password` value itself (the three
	/// flags are mutually exclusive at parse time anyway).
	pub fn keystore_config(&self, config_dir: &Path) -> Result<KeystoreConfig> {
		let password = match (self.password_interactive, self.password_filename.as_ref()) {
			// Ask the user directly on the terminal.
			(true, _) => Some(SecretString::new(input_keystore_password()?)),
			// Read the password from the given file.
			(false, Some(file)) => {
				let contents = fs::read_to_string(file).map_err(|e| format!("{}", e))?;
				Some(SecretString::new(contents))
			},
			// Fall back to a password passed directly on the command line, if any.
			(false, None) => self.password.clone(),
		};
		// Default the keystore location to `<config_dir>/keystore`.
		let path = self
			.keystore_path
			.clone()
			.unwrap_or_else(|| config_dir.join(DEFAULT_KEYSTORE_CONFIG_PATH));
		Ok(KeystoreConfig::Path { path, password })
	}

	/// helper method to fetch password from `KeyParams` or read from stdin
	pub fn read_password(&self) -> error::Result<Option<SecretString>> {
		if self.password_interactive {
			let typed = rpassword::prompt_password("Key password: ")?;
			Ok(Some(SecretString::new(typed)))
		} else {
			Ok(self.password.clone())
		}
	}
}
/// Prompt the user on the terminal for the keystore password.
fn input_keystore_password() -> Result<String> {
	match rpassword::prompt_password("Keystore password: ") {
		Ok(password) => Ok(password),
		Err(err) => Err(format!("{:?}", err).into()),
	}
}
@@ -0,0 +1,120 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Params to configure how a message should be passed into a command.
use crate::error::Error;
use array_bytes::{hex2bytes, hex_bytes2hex_str};
use clap::Args;
use std::io::BufRead;
/// Params to configure how a message should be passed into a command.
// NOTE(review): the `///` field docs below double as clap `--help` text.
#[derive(Debug, Clone, Args)]
pub struct MessageParams {
	/// Message to process. Will be read from STDIN otherwise.
	/// The message is assumed to be raw bytes per default. Use `--hex` for hex input. Can
	/// optionally be prefixed with `0x` in the hex case.
	#[arg(long)]
	message: Option<String>,
	/// The message is hex-encoded data.
	#[arg(long)]
	hex: bool,
}
impl MessageParams {
	/// Produce the message bytes, either from the `--message` value or by
	/// draining the reader built by `create_reader` (normally stdin).
	///
	/// This function should only be called once and the result cached.
	pub(crate) fn message_from<F, R>(&self, create_reader: F) -> Result<Vec<u8>, Error>
	where
		R: BufRead,
		F: FnOnce() -> R,
	{
		// Prefer the immediate value; otherwise read everything from the reader.
		let raw = if let Some(msg) = &self.message {
			msg.as_bytes().to_vec()
		} else {
			let mut buf = vec![];
			create_reader().read_to_end(&mut buf)?;
			buf
		};
		if !self.hex {
			return Ok(raw)
		}
		// Hex mode: normalize (optional `0x` prefix) and decode.
		hex2bytes(hex_bytes2hex_str(&raw)?).map_err(Into::into)
	}
}
#[cfg(test)]
mod tests {
	use super::*;

	/// Check a single decode outcome against the expected result.
	fn check(name: &str, message: Result<Vec<u8>, Error>, output: Option<&[u8]>) {
		match output {
			Some(output) => {
				let message = message.expect(&format!("{}: should decode but did not", name));
				assert_eq!(message, output, "{}: decoded a wrong message", name);
			},
			None => {
				message.err().expect(&format!("{}: should not decode but did", name));
			},
		}
	}

	/// Test that decoding an immediate message works.
	#[test]
	fn message_decode_immediate() {
		for (name, input, hex, output) in test_closures() {
			println!("Testing: immediate_{}", name);
			let params = MessageParams { message: Some(input.into()), hex };
			check(name, params.message_from(|| std::io::stdin().lock()), output);
		}
	}

	/// Test that decoding a message from a stream works.
	#[test]
	fn message_decode_stream() {
		for (name, input, hex, output) in test_closures() {
			println!("Testing: stream_{}", name);
			let params = MessageParams { message: None, hex };
			check(name, params.message_from(|| input.as_bytes()), output);
		}
	}

	/// Returns (test_name, input, hex, output).
	fn test_closures() -> Vec<(&'static str, &'static str, bool, Option<&'static [u8]>)> {
		vec![
			("decode_no_hex_works", "Hello this is not hex", false, Some(b"Hello this is not hex")),
			("decode_no_hex_with_hex_string_works", "0xffffffff", false, Some(b"0xffffffff")),
			("decode_hex_works", "0x00112233", true, Some(&[0, 17, 34, 51])),
			("decode_hex_without_prefix_works", "00112233", true, Some(&[0, 17, 34, 51])),
			("decode_hex_uppercase_works", "0xaAbbCCDd", true, Some(&[170, 187, 204, 221])),
			("decode_hex_wrong_len_errors", "0x0011223", true, None),
		]
	}
}
@@ -0,0 +1,67 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::Args;
use sp_core::H256;
use std::str::FromStr;
/// Parse a 64-hex-character string into a 32-byte key-exchange secret.
fn parse_kx_secret(s: &str) -> Result<sc_mixnet::KxSecret, String> {
	match H256::from_str(s) {
		Ok(hash) => Ok(hash.to_fixed_bytes()),
		Err(err) => Err(err.to_string()),
	}
}
/// Parameters used to create the mixnet configuration.
// NOTE(review): the `///` field docs below double as clap `--help` text.
#[derive(Debug, Clone, Args)]
pub struct MixnetParams {
	/// Enable the mixnet service.
	///
	/// This will make the mixnet RPC methods available. If the node is running as a validator, it
	/// will also attempt to register and operate as a mixnode.
	#[arg(long)]
	pub mixnet: bool,
	/// The mixnet key-exchange secret to use in session 0.
	///
	/// Should be 64 hex characters, giving a 32-byte secret.
	///
	/// WARNING: Secrets provided as command-line arguments are easily exposed. Use of this option
	/// should be limited to development and testing.
	// Validated by `parse_kx_secret` at argument-parse time.
	#[arg(long, value_name = "SECRET", value_parser = parse_kx_secret)]
	pub mixnet_session_0_kx_secret: Option<sc_mixnet::KxSecret>,
}
impl MixnetParams {
	/// Returns the mixnet configuration, or `None` if the mixnet is disabled.
	pub fn config(&self, is_authority: bool) -> Option<sc_mixnet::Config> {
		if !self.mixnet {
			return None
		}
		// Start from the defaults and plug in the session-0 key-exchange secret,
		// if one was supplied on the command line.
		let mut config = sc_mixnet::Config {
			core: sc_mixnet::CoreConfig {
				session_0_kx_secret: self.mixnet_session_0_kx_secret,
				..Default::default()
			},
			..Default::default()
		};
		if !is_authority {
			// Non-authorities can never be mixnodes, so don't try to register...
			config.substrate.register = false;
			// ...and don't reserve slots for incoming non-mixnode connections.
			config.substrate.num_gateway_slots = 0;
		}
		Some(config)
	}
}
+203
View File
@@ -0,0 +1,203 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
mod database_params;
mod import_params;
mod keystore_params;
mod message_params;
mod mixnet_params;
mod network_params;
mod node_key_params;
mod offchain_worker_params;
mod prometheus_params;
mod pruning_params;
mod rpc_params;
mod runtime_params;
mod shared_params;
mod telemetry_params;
mod transaction_pool_params;
use crate::arg_enums::{CryptoScheme, OutputType};
use clap::Args;
use sc_service::config::{IpNetwork, RpcBatchRequestConfig};
use sp_core::crypto::{Ss58AddressFormat, Ss58AddressFormatRegistry};
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, NumberFor},
};
use std::{fmt::Debug, str::FromStr};
pub use crate::params::{
database_params::*, import_params::*, keystore_params::*, message_params::*, mixnet_params::*,
network_params::*, node_key_params::*, offchain_worker_params::*, prometheus_params::*,
pruning_params::*, rpc_params::*, runtime_params::*, shared_params::*, telemetry_params::*,
transaction_pool_params::*,
};
/// Parse an `Ss58AddressFormat` from its registered variant name.
pub fn parse_ss58_address_format(x: &str) -> Result<Ss58AddressFormat, String> {
	Ss58AddressFormatRegistry::try_from(x).map(Into::into).map_err(|_| {
		// List the valid variant names so the user can correct the input.
		format!(
			"Unable to parse variant. Known variants: {:?}",
			Ss58AddressFormat::all_names()
		)
	})
}
/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a
/// decimal.
#[derive(Debug, Clone)]
pub struct GenericNumber(String);

impl FromStr for GenericNumber {
	type Err = String;

	fn from_str(block_number: &str) -> Result<Self, Self::Err> {
		// Accept only decimal digits; report the position of the first offender.
		match block_number.chars().position(|c| !c.is_ascii_digit()) {
			Some(pos) =>
				Err(format!("Expected block number, found illegal digit at position: {}", pos)),
			None => Ok(Self(block_number.to_owned())),
		}
	}
}

impl GenericNumber {
	/// Wrapper on top of `std::str::parse<N>` but with `Error` as a `String`
	///
	/// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate
	/// documentation.
	pub fn parse<N>(&self) -> Result<N, String>
	where
		N: FromStr,
		N::Err: std::fmt::Debug,
	{
		self.0.parse().map_err(|e| format!("Failed to parse block number: {:?}", e))
	}
}
/// Wrapper type that is either a `Hash` or the number of a `Block`.
#[derive(Debug, Clone)]
pub struct BlockNumberOrHash(String);

impl FromStr for BlockNumberOrHash {
	type Err = String;

	fn from_str(block_number: &str) -> Result<Self, Self::Err> {
		let rest = match block_number.strip_prefix("0x") {
			Some(rest) => rest,
			// No hex prefix: treat the input as a decimal block number.
			None => return GenericNumber::from_str(block_number).map(|v| Self(v.0)),
		};
		match rest.chars().position(|c| !c.is_ascii_hexdigit()) {
			Some(pos) => Err(format!(
				"Expected block hash, found illegal hex character at position: {}",
				// Offset by the two characters of the stripped `0x` prefix.
				2 + pos,
			)),
			None => Ok(Self(block_number.into())),
		}
	}
}
impl BlockNumberOrHash {
	/// Parse the inner value as `BlockId`.
	///
	/// A `0x`-prefixed value is interpreted as a block hash, anything else as a
	/// decimal block number.
	pub fn parse<B: BlockT>(&self) -> Result<BlockId<B>, String>
	where
		<B::Hash as FromStr>::Err: std::fmt::Debug,
		NumberFor<B>: FromStr,
		<NumberFor<B> as FromStr>::Err: std::fmt::Debug,
	{
		match self.0.strip_prefix("0x") {
			Some(hash) => FromStr::from_str(hash)
				.map(BlockId::Hash)
				.map_err(|e| format!("Failed to parse block hash: {:?}", e)),
			None => GenericNumber(self.0.clone()).parse().map(BlockId::Number),
		}
	}
}
/// Optional flag for specifying crypto algorithm
// NOTE(review): the field's `///` doc is the clap `--help` text; default is sr25519.
#[derive(Debug, Clone, Args)]
pub struct CryptoSchemeFlag {
	/// cryptography scheme
	#[arg(long, value_name = "SCHEME", value_enum, ignore_case = true, default_value_t = CryptoScheme::Sr25519)]
	pub scheme: CryptoScheme,
}
/// Optional flag for specifying output type
// NOTE(review): the field's `///` doc is the clap `--help` text; default is plain text.
#[derive(Debug, Clone, Args)]
pub struct OutputTypeFlag {
	/// output format
	#[arg(long, value_name = "FORMAT", value_enum, ignore_case = true, default_value_t = OutputType::Text)]
	pub output_type: OutputType,
}
/// Optional flag for specifying network scheme
// NOTE(review): the field's `///` doc is the clap `--help` text. The value is parsed
// through `parse_ss58_address_format`, so invalid names are rejected at parse time.
#[derive(Debug, Clone, Args)]
pub struct NetworkSchemeFlag {
	/// network address format
	#[arg(
		short = 'n',
		long,
		value_name = "NETWORK",
		ignore_case = true,
		value_parser = parse_ss58_address_format,
	)]
	pub network: Option<Ss58AddressFormat>,
}
#[cfg(test)]
mod tests {
	use super::*;

	type Header = sp_runtime::generic::Header<u32, sp_runtime::traits::BlakeTwo256>;
	type Block = sp_runtime::generic::Block<Header, sp_runtime::OpaqueExtrinsic>;

	/// A decimal input parses into a `BlockId::Number`.
	#[test]
	fn parse_block_number() {
		let parsed = BlockNumberOrHash::from_str("1234").unwrap().parse::<Block>().unwrap();
		assert_eq!(BlockId::Number(1234), parsed);
	}

	/// A `0x`-prefixed hash parses into a `BlockId::Hash`.
	#[test]
	fn parse_block_hash() {
		let hash = sp_core::H256::default();
		let as_text = format!("{:?}", hash);
		let parsed =
			BlockNumberOrHash::from_str(&as_text).unwrap().parse::<Block>().unwrap();
		assert_eq!(BlockId::Hash(hash), parsed);
	}

	/// Illegal hex characters after the `0x` prefix are rejected.
	#[test]
	fn parse_block_hash_fails() {
		let err = BlockNumberOrHash::from_str("0xHello").unwrap_err();
		assert_eq!("Expected block hash, found illegal hex character at position: 2", err);
	}

	/// Non-digit characters in a decimal block number are rejected.
	#[test]
	fn parse_block_number_fails() {
		let err = BlockNumberOrHash::from_str("345Hello").unwrap_err();
		assert_eq!("Expected block number, found illegal digit at position: 3", err);
	}
}
@@ -0,0 +1,342 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
arg_enums::{NetworkBackendType, SyncMode},
params::node_key_params::NodeKeyParams,
};
use clap::Args;
use sc_network::{
config::{
NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig,
DEFAULT_IDLE_CONNECTION_TIMEOUT,
},
multiaddr::Protocol,
};
use sc_service::{
config::{Multiaddr, MultiaddrWithPeerId},
ChainSpec, ChainType,
};
use std::{borrow::Cow, num::NonZeroUsize, path::PathBuf};
/// Parameters used to create the network configuration.
// NOTE(review): the `///` field docs below double as clap `--help` text.
// FIX: the RFC1918 links in the `no_private_ip`/`allow_private_ip` help text had a
// doubled closing parenthesis (`rfc1918))`), breaking the rendered markdown link.
#[derive(Debug, Clone, Args)]
pub struct NetworkParams {
	/// Specify a list of bootnodes.
	#[arg(long, value_name = "ADDR", num_args = 1..)]
	pub bootnodes: Vec<MultiaddrWithPeerId>,
	/// Specify a list of reserved node addresses.
	#[arg(long, value_name = "ADDR", num_args = 1..)]
	pub reserved_nodes: Vec<MultiaddrWithPeerId>,
	/// Whether to only synchronize the chain with reserved nodes.
	///
	/// Also disables automatic peer discovery.
	/// TCP connections might still be established with non-reserved nodes.
	/// In particular, if you are a validator your node might still connect to other
	/// validator nodes and collator nodes regardless of whether they are defined as
	/// reserved nodes.
	#[arg(long)]
	pub reserved_only: bool,
	/// Public address that other nodes will use to connect to this node.
	///
	/// This can be used if there's a proxy in front of this node.
	#[arg(long, value_name = "PUBLIC_ADDR", num_args = 1..)]
	pub public_addr: Vec<Multiaddr>,
	/// Listen on this multiaddress.
	///
	/// By default:
	/// If `--validator` is passed: `/ip4/0.0.0.0/tcp/<port>` and `/ip6/[::]/tcp/<port>`.
	/// Otherwise: `/ip4/0.0.0.0/tcp/<port>/ws` and `/ip6/[::]/tcp/<port>/ws`.
	#[arg(long, value_name = "LISTEN_ADDR", num_args = 1..)]
	pub listen_addr: Vec<Multiaddr>,
	/// Specify p2p protocol TCP port.
	// Mutually exclusive with `--listen-addr`, which already encodes a port.
	#[arg(long, value_name = "PORT", conflicts_with_all = &[ "listen_addr" ])]
	pub port: Option<u16>,
	/// Always forbid connecting to private IPv4/IPv6 addresses.
	///
	/// The option doesn't apply to addresses passed with `--reserved-nodes` or
	/// `--bootnodes`. Enabled by default for chains marked as "live" in their chain
	/// specifications.
	///
	/// Address allocation for private networks is specified by
	/// [RFC1918](https://tools.ietf.org/html/rfc1918).
	#[arg(long, alias = "no-private-ipv4", conflicts_with_all = &["allow_private_ip"])]
	pub no_private_ip: bool,
	/// Always accept connecting to private IPv4/IPv6 addresses.
	///
	/// Enabled by default for chains marked as "local" in their chain specifications,
	/// or when `--dev` is passed.
	///
	/// Address allocation for private networks is specified by
	/// [RFC1918](https://tools.ietf.org/html/rfc1918).
	#[arg(long, alias = "allow-private-ipv4", conflicts_with_all = &["no_private_ip"])]
	pub allow_private_ip: bool,
	/// Number of outgoing connections we're trying to maintain.
	#[arg(long, value_name = "COUNT", default_value_t = 8)]
	pub out_peers: u32,
	/// Maximum number of inbound full nodes peers.
	#[arg(long, value_name = "COUNT", default_value_t = 32)]
	pub in_peers: u32,
	/// Maximum number of inbound light nodes peers.
	#[arg(long, value_name = "COUNT", default_value_t = 100)]
	pub in_peers_light: u32,
	/// Disable mDNS discovery (default: true).
	///
	/// By default, the network will use mDNS to discover other nodes on the
	/// local network. This disables it. Automatically implied when using --dev.
	#[arg(long)]
	pub no_mdns: bool,
	/// Maximum number of peers from which to ask for the same blocks in parallel.
	///
	/// This allows downloading announced blocks from multiple peers.
	/// Decrease to save traffic and risk increased latency.
	#[arg(long, value_name = "COUNT", default_value_t = 5)]
	pub max_parallel_downloads: u32,
	// Node-key related CLI flags, merged into this command's flag set.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub node_key_params: NodeKeyParams,
	/// Enable peer discovery on local networks.
	///
	/// By default this option is `true` for `--dev` or when the chain type is
	/// `Local`/`Development` and false otherwise.
	#[arg(long)]
	pub discover_local: bool,
	/// Require iterative Kademlia DHT queries to use disjoint paths.
	///
	/// Disjoint paths increase resiliency in the presence of potentially adversarial nodes.
	///
	/// See the S/Kademlia paper for more information on the high level design as well as its
	/// security improvements.
	#[arg(long)]
	pub kademlia_disjoint_query_paths: bool,
	/// Kademlia replication factor.
	///
	/// Determines to how many closest peers a record is replicated to.
	///
	/// Discovery mechanism requires successful replication to all
	/// `kademlia_replication_factor` peers to consider record successfully put.
	#[arg(long, default_value = "20")]
	pub kademlia_replication_factor: NonZeroUsize,
	/// Join the IPFS network and serve transactions over bitswap protocol.
	#[arg(long)]
	pub ipfs_server: bool,
	/// Blockchain syncing mode.
	#[arg(
		long,
		value_enum,
		value_name = "SYNC_MODE",
		default_value_t = SyncMode::Full,
		ignore_case = true,
		verbatim_doc_comment
	)]
	pub sync: SyncMode,
	/// Maximum number of blocks per request.
	///
	/// Try reducing this number from the default value if you have a slow network connection
	/// and observe block requests timing out.
	#[arg(long, value_name = "COUNT", default_value_t = 64)]
	pub max_blocks_per_request: u32,
	/// Network backend used for P2P networking.
	///
	/// Litep2p is a lightweight alternative to libp2p, that is designed to be more
	/// efficient and easier to use. At the same time, litep2p brings performance
	/// improvements and reduces the CPU usage significantly.
	///
	/// Libp2p is the old network backend, that may still be used for compatibility
	/// reasons until the whole ecosystem is migrated to litep2p.
	#[arg(
		long,
		value_enum,
		value_name = "NETWORK_BACKEND",
		default_value_t = NetworkBackendType::Litep2p,
		ignore_case = true,
		verbatim_doc_comment
	)]
	pub network_backend: NetworkBackendType,
}
impl NetworkParams {
	/// Fill the given `NetworkConfiguration` by looking at the cli parameters.
	///
	/// `default_listen_port` is used when `--port` was not passed; `node_key`,
	/// `client_id` and `node_name` are forwarded into the configuration verbatim.
	pub fn network_config(
		&self,
		chain_spec: &Box<dyn ChainSpec>,
		is_dev: bool,
		is_validator: bool,
		net_config_path: Option<PathBuf>,
		client_id: &str,
		node_name: &str,
		node_key: NodeKeyConfig,
		default_listen_port: u16,
	) -> NetworkConfiguration {
		// Fall back to the caller-provided default port when `--port` was not given.
		let port = self.port.unwrap_or(default_listen_port);
		let listen_addresses = if self.listen_addr.is_empty() {
			if is_validator || is_dev {
				// Validators (and dev nodes) listen on plain TCP, on all
				// IPv6 and IPv4 interfaces.
				vec![
					Multiaddr::empty()
						.with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into()))
						.with(Protocol::Tcp(port)),
					Multiaddr::empty()
						.with(Protocol::Ip4([0, 0, 0, 0].into()))
						.with(Protocol::Tcp(port)),
				]
			} else {
				// Non-validators additionally wrap the transport in WebSocket.
				vec![
					Multiaddr::empty()
						.with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into()))
						.with(Protocol::Tcp(port))
						.with(Protocol::Ws(Cow::Borrowed("/"))),
					Multiaddr::empty()
						.with(Protocol::Ip4([0, 0, 0, 0].into()))
						.with(Protocol::Tcp(port))
						.with(Protocol::Ws(Cow::Borrowed("/"))),
				]
			}
		} else {
			// Explicit `--listen-addr` values override the computed defaults.
			self.listen_addr.clone()
		};
		let public_addresses = self.public_addr.clone();
		// Chain-spec bootnodes come first; CLI `--bootnodes` are appended.
		let mut boot_nodes = chain_spec.boot_nodes().to_vec();
		boot_nodes.extend(self.bootnodes.clone());
		let chain_type = chain_spec.chain_type();
		// Activate if the user explicitly requested local discovery, `--dev` is given or the
		// chain type is `Local`/`Development`
		let allow_non_globals_in_dht =
			self.discover_local ||
			is_dev || matches!(chain_type, ChainType::Local | ChainType::Development);
		let allow_private_ip = match (self.allow_private_ip, self.no_private_ip) {
			// clap's `conflicts_with_all` prevents both flags being set together.
			(true, true) => unreachable!("`*_private_ip` flags are mutually exclusive; qed"),
			(true, false) => true,
			(false, true) => false,
			// Neither flag given: allow private IPs only for dev/local chains.
			(false, false) =>
				is_dev || matches!(chain_type, ChainType::Local | ChainType::Development),
		};
		NetworkConfiguration {
			boot_nodes,
			net_config_path,
			default_peers_set: SetConfig {
				// Full and light inbound budgets are pooled into one set here.
				in_peers: self.in_peers + self.in_peers_light,
				out_peers: self.out_peers,
				reserved_nodes: self.reserved_nodes.clone(),
				non_reserved_mode: if self.reserved_only {
					NonReservedPeerMode::Deny
				} else {
					NonReservedPeerMode::Accept
				},
			},
			default_peers_set_num_full: self.in_peers + self.out_peers,
			listen_addresses,
			public_addresses,
			node_key,
			node_name: node_name.to_string(),
			client_version: client_id.to_string(),
			transport: TransportConfig::Normal {
				// mDNS is pointless in dev mode and disabled via `--no-mdns`.
				enable_mdns: !is_dev && !self.no_mdns,
				allow_private_ip,
			},
			idle_connection_timeout: DEFAULT_IDLE_CONNECTION_TIMEOUT,
			max_parallel_downloads: self.max_parallel_downloads,
			max_blocks_per_request: self.max_blocks_per_request,
			min_peers_to_start_warp_sync: None,
			// `--reserved-only` also turns off random DHT walks (no discovery).
			enable_dht_random_walk: !self.reserved_only,
			allow_non_globals_in_dht,
			kademlia_disjoint_query_paths: self.kademlia_disjoint_query_paths,
			kademlia_replication_factor: self.kademlia_replication_factor,
			ipfs_server: self.ipfs_server,
			sync_mode: self.sync.into(),
			network_backend: self.network_backend.into(),
		}
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use clap::Parser;

	/// Thin wrapper so `NetworkParams` can be parsed in isolation.
	#[derive(Parser)]
	struct Cli {
		#[clap(flatten)]
		network_params: NetworkParams,
	}

	#[test]
	fn reserved_nodes_multiple_values_and_occurrences() {
		const ADDRS: [&str; 3] = [
			"/ip4/0.0.0.0/tcp/501/p2p/12D3KooWEBo1HUPQJwiBmM5kSeg4XgiVxEArArQdDarYEsGxMfbS",
			"/ip4/0.0.0.0/tcp/502/p2p/12D3KooWEBo1HUPQJwiBmM5kSeg4XgiVxEArArQdDarYEsGxMfbS",
			"/ip4/0.0.0.0/tcp/503/p2p/12D3KooWEBo1HUPQJwiBmM5kSeg4XgiVxEArArQdDarYEsGxMfbS",
		];
		// Two values after one `--reserved-nodes` occurrence plus a third after a
		// repeated occurrence: all three must be collected in order.
		let cli = Cli::try_parse_from([
			"",
			"--reserved-nodes",
			ADDRS[0],
			ADDRS[1],
			"--reserved-nodes",
			ADDRS[2],
		])
		.expect("Parses network params");
		let expected: Vec<MultiaddrWithPeerId> = ADDRS
			.iter()
			.map(|addr| MultiaddrWithPeerId::try_from(addr.to_string()).unwrap())
			.collect();
		assert_eq!(expected, cli.network_params.reserved_nodes);
	}

	#[test]
	fn sync_ignores_case() {
		// `--sync` is declared with `ignore_case`, so mixed case must parse.
		let cli = Cli::try_parse_from(["", "--sync", "wArP"]).expect("Parses network params");
		assert_eq!(SyncMode::Warp, cli.network_params.sync);
	}
}
@@ -0,0 +1,268 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::Args;
use sc_network::config::{ed25519, NodeKeyConfig};
use sc_service::Role;
use sp_core::H256;
use std::{path::PathBuf, str::FromStr};
use crate::{arg_enums::NodeKeyType, error, Error};
/// The file name of the node's Ed25519 secret key inside the chain-specific
/// network config directory, if neither `--node-key` nor `--node-key-file`
/// is specified in combination with `--node-key-type=ed25519`.
///
/// Used by [`NodeKeyParams::node_key`] as the default key-file location.
pub(crate) const NODE_KEY_ED25519_FILE: &str = "secret_ed25519";
/// Parameters used to create the `NodeKeyConfig`, which determines the keypair
/// used for libp2p networking.
#[derive(Debug, Clone, Args)]
pub struct NodeKeyParams {
	/// Secret key to use for p2p networking.
	///
	/// The value is a string that is parsed according to the choice of
	/// `--node-key-type` as follows:
	///
	/// - `ed25519`: the value is parsed as a hex-encoded Ed25519 32 byte secret key (64 hex
	/// chars)
	///
	/// The value of this option takes precedence over `--node-key-file`.
	///
	/// WARNING: Secrets provided as command-line arguments are easily exposed.
	/// Use of this option should be limited to development and testing. To use
	/// an externally managed secret key, use `--node-key-file` instead.
	#[arg(long, value_name = "KEY")]
	pub node_key: Option<String>,
	/// Crypto primitive to use for p2p networking.
	///
	/// The secret key of the node is obtained as follows:
	///
	/// - If the `--node-key` option is given, the value is parsed as a secret key according to the
	///   type. See the documentation for `--node-key`.
	///
	/// - If the `--node-key-file` option is given, the secret key is read from the specified file.
	///   See the documentation for `--node-key-file`.
	///
	/// - Otherwise, the secret key is read from a file with a predetermined, type-specific name
	///   from the chain-specific network config directory inside the base directory specified by
	///   `--base-dir`. If this file does not exist, it is created with a newly generated secret
	///   key of the chosen type.
	///
	/// The node's secret key determines the corresponding public key and hence the
	/// node's peer ID in the context of libp2p.
	#[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)]
	pub node_key_type: NodeKeyType,
	/// File from which to read the node's secret key to use for p2p networking.
	///
	/// The contents of the file are parsed according to the choice of `--node-key-type`
	/// as follows:
	///
	/// - `ed25519`: the file must contain an unencoded 32 byte or hex encoded Ed25519 secret key.
	///
	/// If the file does not exist, it is created with a newly generated secret key of
	/// the chosen type.
	#[arg(long, value_name = "FILE")]
	pub node_key_file: Option<PathBuf>,
	/// Forces key generation if the `--node-key-file` does not exist.
	///
	/// This is an unsafe feature for production networks, because as an active authority
	/// other authorities may depend on your node having a stable identity and they might
	/// not be able to reach you if your identity changes after entering the active set.
	///
	/// For minimal node downtime, if no custom `node-key-file` argument is provided
	/// the network key is usually persisted across node restarts
	/// in the `network` folder of the directory provided in `--base-path`.
	///
	/// Warning!! If you ever run the node with this argument, make sure
	/// you remove it for the subsequent restarts.
	#[arg(long)]
	pub unsafe_force_node_key_generation: bool,
}
impl NodeKeyParams {
/// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context
/// of an optional network config storage directory.
pub fn node_key(
&self,
net_config_dir: &PathBuf,
role: Role,
is_dev: bool,
) -> error::Result<NodeKeyConfig> {
Ok(match self.node_key_type {
NodeKeyType::Ed25519 => {
let secret = if let Some(node_key) = self.node_key.as_ref() {
parse_ed25519_secret(node_key)?
} else {
let key_path = self
.node_key_file
.clone()
.unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE));
if !self.unsafe_force_node_key_generation &&
role.is_authority() &&
!is_dev && !key_path.exists()
{
return Err(Error::NetworkKeyNotFound(key_path));
}
sc_network::config::Secret::File(key_path)
};
NodeKeyConfig::Ed25519(secret)
},
})
}
}
/// Create an error caused by an invalid node key argument.
fn invalid_node_key(e: impl std::fmt::Display) -> error::Error {
error::Error::Input(format!("Invalid node key: {}", e))
}
/// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`.
fn parse_ed25519_secret(hex: &str) -> error::Result<sc_network::config::Ed25519Secret> {
	// First decode the 32-byte hex value, then interpret it as an Ed25519 secret;
	// either failure is reported as an "Invalid node key" input error.
	let bytes = H256::from_str(hex).map_err(invalid_node_key)?;
	let secret = ed25519::SecretKey::try_from_bytes(bytes).map_err(invalid_node_key)?;
	Ok(sc_network::config::Secret::Input(secret))
}
#[cfg(test)]
mod tests {
	use super::*;
	use clap::ValueEnum;
	use sc_network::config::ed25519;
	use std::fs::{self, File};
	use tempfile::TempDir;
	// A hex secret passed via `--node-key` must end up as an in-memory
	// (`Secret::Input`) key holding exactly the supplied bytes.
	#[test]
	fn test_node_key_config_input() {
		fn secret_input(net_config_dir: &PathBuf) -> error::Result<()> {
			NodeKeyType::value_variants().iter().try_for_each(|t| {
				let node_key_type = *t;
				let sk = match node_key_type {
					NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec(),
				};
				let params = NodeKeyParams {
					node_key_type,
					// Hex-encode the generated secret exactly as the CLI would receive it.
					node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))),
					node_key_file: None,
					unsafe_force_node_key_generation: false,
				};
				params.node_key(net_config_dir, Role::Authority, false).and_then(|c| match c {
					NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski))
						if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() =>
						Ok(()),
					_ => Err(error::Error::Input("Unexpected node key config".into())),
				})
			})
		}
		assert!(secret_input(&PathBuf::from_str("x").unwrap()).is_ok());
	}
	// `--node-key-file` must accept both a hex-encoded file and a raw-byte file
	// and yield a keypair with the expected secret.
	#[test]
	fn test_node_key_config_file() {
		fn check_key(file: PathBuf, key: &ed25519::SecretKey) {
			let params = NodeKeyParams {
				node_key_type: NodeKeyType::Ed25519,
				node_key: None,
				node_key_file: Some(file),
				unsafe_force_node_key_generation: false,
			};
			let node_key = params
				.node_key(&PathBuf::from("not-used"), Role::Authority, false)
				.expect("Creates node key config")
				.into_keypair()
				.expect("Creates node key pair");
			if node_key.secret().as_ref() != key.as_ref() {
				panic!("Invalid key")
			}
		}
		let tmp = tempfile::Builder::new().prefix("alice").tempdir().expect("Creates tempfile");
		let file = tmp.path().join("mysecret").to_path_buf();
		let key = ed25519::SecretKey::generate();
		// First write the key hex-encoded, then overwrite with the raw bytes:
		// both encodings must round-trip to the same keypair.
		fs::write(&file, array_bytes::bytes2hex("", key.as_ref())).expect("Writes secret key");
		check_key(file.clone(), &key);
		fs::write(&file, &key).expect("Writes secret key");
		check_key(file.clone(), &key);
	}
	// Fallback path (neither `--node-key` nor `--node-key-file`): the default
	// key file inside the net config dir is used, and a missing file is only an
	// error for a non-dev authority without `--unsafe-force-node-key-generation`.
	#[test]
	fn test_node_key_config_default() {
		fn with_def_params<F>(f: F, unsafe_force_node_key_generation: bool) -> error::Result<()>
		where
			F: Fn(NodeKeyParams) -> error::Result<()>,
		{
			NodeKeyType::value_variants().iter().try_for_each(|t| {
				let node_key_type = *t;
				f(NodeKeyParams {
					node_key_type,
					node_key: None,
					node_key_file: None,
					unsafe_force_node_key_generation,
				})
			})
		}
		fn some_config_dir(
			net_config_dir: &PathBuf,
			unsafe_force_node_key_generation: bool,
			role: Role,
			is_dev: bool,
		) -> error::Result<()> {
			with_def_params(
				|params| {
					let dir = PathBuf::from(net_config_dir.clone());
					let typ = params.node_key_type;
					// Expect a `Secret::File` pointing at the well-known file name.
					params.node_key(net_config_dir, role, is_dev).and_then(move |c| match c {
						NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f))
							if typ == NodeKeyType::Ed25519 &&
								f == &dir.join(NODE_KEY_ED25519_FILE) =>
							Ok(()),
						_ => Err(error::Error::Input("Unexpected node key config".into())),
					})
				},
				unsafe_force_node_key_generation,
			)
		}
		assert!(some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Full, false).is_ok());
		assert!(
			some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Authority, true).is_ok()
		);
		assert!(
			some_config_dir(&PathBuf::from_str("x").unwrap(), true, Role::Authority, false).is_ok()
		);
		// Non-dev authority with a missing key file and no forced generation must fail.
		assert!(matches!(
			some_config_dir(&PathBuf::from_str("x").unwrap(), false, Role::Authority, false),
			Err(Error::NetworkKeyNotFound(_))
		));
		// Once the default key file exists, the same authority setup succeeds.
		let tempdir = TempDir::new().unwrap();
		let _file = File::create(tempdir.path().join(NODE_KEY_ED25519_FILE)).unwrap();
		assert!(some_config_dir(&tempdir.path().into(), false, Role::Authority, false).is_ok());
	}
}
@@ -0,0 +1,65 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Offchain worker related configuration parameters.
//!
//! A subset of configuration parameters which are relevant to
//! the inner working of offchain workers. The usage is solely
//! targeted at handling input parameter parsing providing
//! a reasonable abstraction.
use clap::{ArgAction, Args};
use sc_network::config::Role;
use sc_service::config::OffchainWorkerConfig;
use crate::{error, OffchainWorkerEnabled};
/// Offchain worker related parameters.
#[derive(Debug, Clone, Args)]
pub struct OffchainWorkerParams {
	/// Execute offchain workers on every block.
	#[arg(
		long = "offchain-worker",
		value_name = "ENABLED",
		value_enum,
		ignore_case = true,
		default_value_t = OffchainWorkerEnabled::WhenAuthority
	)]
	pub enabled: OffchainWorkerEnabled,
	/// Enable offchain indexing API.
	///
	/// Allows the runtime to write directly to offchain workers DB during block import.
	// `ArgAction::Set` makes the flag take an explicit boolean value
	// (`--enable-offchain-indexing true`) rather than acting as a bare switch.
	#[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING", default_value_t = false, action = ArgAction::Set)]
	pub indexing_enabled: bool,
}
impl OffchainWorkerParams {
	/// Load spec to `Configuration` from `OffchainWorkerParams` and spec factory.
	pub fn offchain_worker(&self, role: &Role) -> error::Result<OffchainWorkerConfig> {
		// `Always`/`Never` override the role; `WhenAuthority` enables the worker
		// only when the node runs as an authority.
		let enabled = match &self.enabled {
			OffchainWorkerEnabled::Always => true,
			OffchainWorkerEnabled::Never => false,
			OffchainWorkerEnabled::WhenAuthority => matches!(role, Role::Authority { .. }),
		};
		let indexing_enabled = self.indexing_enabled;
		Ok(OffchainWorkerConfig { enabled, indexing_enabled })
	}
}
@@ -0,0 +1,63 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::Args;
use sc_service::config::PrometheusConfig;
use std::net::{Ipv4Addr, SocketAddr};
/// Parameters used to configure Prometheus.
#[derive(Debug, Clone, Args)]
pub struct PrometheusParams {
	/// Specify Prometheus exporter TCP Port.
	#[arg(long, value_name = "PORT")]
	pub prometheus_port: Option<u16>,
	/// Expose Prometheus exporter on all interfaces.
	///
	/// Default is local.
	#[arg(long)]
	pub prometheus_external: bool,
	/// Do not expose a Prometheus exporter endpoint.
	///
	/// Prometheus metric endpoint is enabled by default.
	#[arg(long)]
	pub no_prometheus: bool,
}
impl PrometheusParams {
	/// Creates [`PrometheusConfig`].
	///
	/// Returns `None` when the exporter was disabled via `--no-prometheus`.
	pub fn prometheus_config(
		&self,
		default_listen_port: u16,
		chain_id: String,
	) -> Option<PrometheusConfig> {
		if self.no_prometheus {
			return None
		}
		// Bind on all interfaces only when explicitly requested; default to loopback.
		let ip =
			if self.prometheus_external { Ipv4Addr::UNSPECIFIED } else { Ipv4Addr::LOCALHOST };
		let port = self.prometheus_port.unwrap_or(default_listen_port);
		Some(PrometheusConfig::new_with_default_registry(
			SocketAddr::new(ip.into(), port),
			chain_id,
		))
	}
}
@@ -0,0 +1,165 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::error;
use clap::Args;
use sc_service::{BlocksPruning, PruningMode};
/// Parameters to define the pruning mode
#[derive(Debug, Clone, Args)]
pub struct PruningParams {
	/// Specify the state pruning mode.
	///
	/// This mode specifies when the block's state (ie, storage)
	/// should be pruned (ie, removed) from the database.
	/// This setting can only be set on the first creation of the database. Every subsequent run
	/// will load the pruning mode from the database and will error if the stored mode doesn't
	/// match this CLI value. It is fine to drop this CLI flag for subsequent runs. The only
	/// exception is that `NUMBER` can change between subsequent runs (increasing it will not
	/// lead to restoring pruned state).
	///
	/// Possible values:
	///
	/// - archive: Keep the data of all blocks.
	///
	/// - archive-canonical: Keep only the data of finalized blocks.
	///
	/// - NUMBER: Keep the data of the last NUMBER of finalized blocks.
	///
	/// [default: 256]
	// `alias = "pruning"` keeps the historical `--pruning` flag working.
	#[arg(alias = "pruning", long, value_name = "PRUNING_MODE")]
	pub state_pruning: Option<DatabasePruningMode>,
	/// Specify the blocks pruning mode.
	///
	/// This mode specifies when the block's body (including justifications)
	/// should be pruned (ie, removed) from the database.
	///
	/// Possible values:
	///
	/// - archive: Keep the data of all blocks.
	///
	/// - archive-canonical: Keep only the data of finalized blocks.
	///
	/// - NUMBER: Keep the data of the last NUMBER of finalized blocks.
	// `alias = "keep-blocks"` keeps the historical `--keep-blocks` flag working.
	#[arg(
		alias = "keep-blocks",
		long,
		value_name = "PRUNING_MODE",
		default_value = "archive-canonical"
	)]
	pub blocks_pruning: DatabasePruningMode,
}
impl PruningParams {
	/// The state-pruning mode selected on the command line, if any.
	pub fn state_pruning(&self) -> error::Result<Option<PruningMode>> {
		Ok(self.state_pruning.map(Into::into))
	}

	/// The blocks-pruning mode selected on the command line.
	pub fn blocks_pruning(&self) -> error::Result<BlocksPruning> {
		Ok(self.blocks_pruning.into())
	}
}
/// Specifies the pruning mode of the database.
///
/// This specifies when the block's data (either state via `--state-pruning`
/// or body via `--blocks-pruning`) should be pruned (ie, removed) from
/// the database.
// `Eq` is derived alongside `PartialEq`: equality on this enum is total
// (clippy: `derive_partial_eq_without_eq`), and it lets the type be used
// where full equivalence is required.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DatabasePruningMode {
	/// Keep the data of all blocks.
	Archive,
	/// Keep only the data of finalized blocks.
	ArchiveCanonical,
	/// Keep the data of the last number of finalized blocks.
	Custom(u32),
}
impl std::str::FromStr for DatabasePruningMode {
	type Err = String;

	/// Parse a `--state-pruning`/`--blocks-pruning` CLI value.
	fn from_str(input: &str) -> Result<Self, Self::Err> {
		// The two named modes are recognized first; anything else must be a
		// plain block count.
		if input == "archive" {
			return Ok(Self::Archive)
		}
		if input == "archive-canonical" {
			return Ok(Self::ArchiveCanonical)
		}
		input
			.parse::<u32>()
			.map(Self::Custom)
			.map_err(|_| "Invalid pruning mode specified".to_string())
	}
}
// Implemented as `From` rather than a hand-written `Into`: the standard
// library's blanket impl derives `Into<PruningMode>` automatically, so all
// existing `.into()` call sites keep working (clippy: `from_over_into`).
impl From<DatabasePruningMode> for PruningMode {
	/// Map the CLI-level pruning mode onto the client's state-pruning mode.
	fn from(mode: DatabasePruningMode) -> Self {
		match mode {
			DatabasePruningMode::Archive => PruningMode::ArchiveAll,
			DatabasePruningMode::ArchiveCanonical => PruningMode::ArchiveCanonical,
			DatabasePruningMode::Custom(n) => PruningMode::blocks_pruning(n),
		}
	}
}
// Implemented as `From` rather than a hand-written `Into`: the standard
// library's blanket impl derives `Into<BlocksPruning>` automatically, so all
// existing `.into()` call sites keep working (clippy: `from_over_into`).
impl From<DatabasePruningMode> for BlocksPruning {
	/// Map the CLI-level pruning mode onto the client's blocks-pruning mode.
	fn from(mode: DatabasePruningMode) -> Self {
		match mode {
			DatabasePruningMode::Archive => BlocksPruning::KeepAll,
			DatabasePruningMode::ArchiveCanonical => BlocksPruning::KeepFinalized,
			DatabasePruningMode::Custom(n) => BlocksPruning::Some(n),
		}
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use clap::Parser;

	/// Minimal CLI wrapper so `PruningParams` can be parsed in isolation.
	#[derive(Parser)]
	struct Cli {
		#[clap(flatten)]
		pruning: PruningParams,
	}

	// Leftover `dbg!` invocations were removed: they spammed stderr on every
	// test run and served no assertion purpose.
	#[test]
	fn pruning_params_parse_works() {
		// Numeric values map to `Custom`.
		let Cli { pruning } =
			Cli::parse_from(["", "--state-pruning=1000", "--blocks-pruning=1000"]);
		assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::Custom(1000))));
		assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Custom(1000)));

		// The named `archive` mode.
		let Cli { pruning } =
			Cli::parse_from(["", "--state-pruning=archive", "--blocks-pruning=archive"]);
		assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::Archive)));
		assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::Archive));

		// The named `archive-canonical` mode.
		let Cli { pruning } = Cli::parse_from([
			"",
			"--state-pruning=archive-canonical",
			"--blocks-pruning=archive-canonical",
		]);
		assert!(matches!(pruning.state_pruning, Some(DatabasePruningMode::ArchiveCanonical)));
		assert!(matches!(pruning.blocks_pruning, DatabasePruningMode::ArchiveCanonical));
	}
}
@@ -0,0 +1,681 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::{
arg_enums::{Cors, RpcMethods},
params::{IpNetwork, RpcBatchRequestConfig},
RPC_DEFAULT_MAX_CONNECTIONS, RPC_DEFAULT_MAX_REQUEST_SIZE_MB, RPC_DEFAULT_MAX_RESPONSE_SIZE_MB,
RPC_DEFAULT_MAX_SUBS_PER_CONN, RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN,
};
use clap::Args;
use std::{
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
num::NonZeroU32,
};
// Option keys accepted inside the comma-separated value of
// `--experimental-rpc-endpoint` (e.g. `listen-addr=127.0.0.1:9944,methods=safe`).
// They mirror the flat `--rpc-*` flags; presumably consumed by the
// `RpcEndpoint` parser defined elsewhere — not visible in this file chunk.
const RPC_LISTEN_ADDR: &str = "listen-addr";
const RPC_CORS: &str = "cors";
const RPC_MAX_CONNS: &str = "max-connections";
const RPC_MAX_REQUEST_SIZE: &str = "max-request-size";
const RPC_MAX_RESPONSE_SIZE: &str = "max-response-size";
const RPC_MAX_SUBS_PER_CONN: &str = "max-subscriptions-per-connection";
const RPC_MAX_BUF_CAP_PER_CONN: &str = "max-buffer-capacity-per-connection";
const RPC_RATE_LIMIT: &str = "rate-limit";
const RPC_RATE_LIMIT_TRUST_PROXY_HEADERS: &str = "rate-limit-trust-proxy-headers";
const RPC_RATE_LIMIT_WHITELISTED_IPS: &str = "rate-limit-whitelisted-ips";
const RPC_RETRY_RANDOM_PORT: &str = "retry-random-port";
const RPC_METHODS: &str = "methods";
const RPC_OPTIONAL: &str = "optional";
const RPC_DISABLE_BATCH: &str = "disable-batch-requests";
const RPC_BATCH_LIMIT: &str = "max-batch-request-len";
/// Parameters of RPC.
#[derive(Debug, Clone, Args)]
pub struct RpcParams {
	/// Listen to all RPC interfaces (default: local).
	///
	/// Not all RPC methods are safe to be exposed publicly.
	///
	/// Use an RPC proxy server to filter out dangerous methods. More details:
	/// <https://docs.pezkuwichain.io/build/remote-procedure-calls/#public-rpc-interfaces>.
	///
	/// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks.
	#[arg(long)]
	pub rpc_external: bool,
	/// Listen to all RPC interfaces.
	///
	/// Same as `--rpc-external`.
	#[arg(long)]
	pub unsafe_rpc_external: bool,
	/// RPC methods to expose.
	#[arg(
		long,
		value_name = "METHOD SET",
		value_enum,
		ignore_case = true,
		default_value_t = RpcMethods::Auto,
		verbatim_doc_comment
	)]
	pub rpc_methods: RpcMethods,
	/// RPC rate limiting (calls/minute) for each connection.
	///
	/// This is disabled by default.
	///
	/// For example `--rpc-rate-limit 10` will allow a maximum of
	/// 10 calls per minute per connection.
	#[arg(long)]
	pub rpc_rate_limit: Option<NonZeroU32>,
	/// Disable RPC rate limiting for certain ip addresses.
	///
	/// Each IP address must be in CIDR notation such as `1.2.3.4/24`.
	#[arg(long, num_args = 1..)]
	pub rpc_rate_limit_whitelisted_ips: Vec<IpNetwork>,
	/// Trust proxy headers to disable rate limiting.
	///
	/// By default the rpc server will not trust headers such as `X-Real-IP`, `X-Forwarded-For`
	/// and `Forwarded`; this option makes the rpc server trust these headers.
	///
	/// For instance this may be safe if the rpc server is behind a reverse proxy and the
	/// proxy always sets these headers.
	#[arg(long)]
	pub rpc_rate_limit_trust_proxy_headers: bool,
	/// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
	#[arg(long, default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB)]
	pub rpc_max_request_size: u32,
	/// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
	#[arg(long, default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB)]
	pub rpc_max_response_size: u32,
	/// Set the maximum concurrent subscriptions per connection.
	#[arg(long, default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN)]
	pub rpc_max_subscriptions_per_connection: u32,
	/// Specify JSON-RPC server TCP port.
	#[arg(long, value_name = "PORT")]
	pub rpc_port: Option<u16>,
	/// EXPERIMENTAL: Specify the JSON-RPC server interface; this option can be enabled
	/// several times if you want to expose several RPC interfaces with different configurations.
	///
	/// The format for this option is:
	/// `--experimental-rpc-endpoint "listen-addr=<ip:port>,<key=value>,..."` where each option is
	/// separated by a comma and `listen-addr` is the only required param.
	///
	/// The following options are available:
	/// • listen-addr: The socket address (ip:port) to listen on. Be careful to not expose the
	/// server to the public internet unless you know what you're doing. (required)
	/// • disable-batch-requests: Disable batch requests (optional)
	/// • max-connections: The maximum number of concurrent connections that the server will
	/// accept (optional)
	/// • max-request-size: The maximum size of a request body in megabytes (optional)
	/// • max-response-size: The maximum size of a response body in megabytes (optional)
	/// • max-subscriptions-per-connection: The maximum number of subscriptions per connection
	/// (optional)
	/// • max-buffer-capacity-per-connection: The maximum buffer capacity per connection
	/// (optional)
	/// • max-batch-request-len: The maximum number of requests in a batch (optional)
	/// • cors: The CORS allowed origins, this can be enabled more than once (optional)
	/// • methods: Which RPC methods to allow, valid values are "safe", "unsafe" and "auto"
	/// (optional)
	/// • optional: If the listen address is optional i.e the interface is not required to be
	/// available For example this may be useful if some platforms doesn't support ipv6
	/// (optional)
	/// • rate-limit: The rate limit in calls per minute for each connection (optional)
	/// • rate-limit-trust-proxy-headers: Trust proxy headers to disable rate limiting (optional)
	/// • rate-limit-whitelisted-ips: Disable rate limiting for certain ip addresses, this can be
	/// enabled more than once (optional)
	/// • retry-random-port: If the port is already in use, retry with a random port (optional)
	///
	/// Use with care, this flag is unstable and subject to change.
	#[arg(
		long,
		num_args = 1..,
		verbatim_doc_comment,
		conflicts_with_all = &["rpc_external", "unsafe_rpc_external", "rpc_port", "rpc_cors", "rpc_rate_limit_trust_proxy_headers", "rpc_rate_limit", "rpc_rate_limit_whitelisted_ips", "rpc_message_buffer_capacity_per_connection", "rpc_disable_batch_requests", "rpc_max_subscriptions_per_connection", "rpc_max_request_size", "rpc_max_response_size"]
	)]
	pub experimental_rpc_endpoint: Vec<RpcEndpoint>,
	/// Maximum number of RPC server connections.
	#[arg(long, value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS)]
	pub rpc_max_connections: u32,
	/// The number of messages the RPC server is allowed to keep in memory.
	///
	/// If the buffer becomes full then the server will not process
	/// new messages until the connected client starts reading the
	/// underlying messages.
	///
	/// This applies per connection, which includes both
	/// JSON-RPC method calls and subscriptions.
	#[arg(long, default_value_t = RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN)]
	pub rpc_message_buffer_capacity_per_connection: u32,
	/// Disable RPC batch requests.
	#[arg(long, alias = "rpc_no_batch_requests", conflicts_with_all = &["rpc_max_batch_request_len"])]
	pub rpc_disable_batch_requests: bool,
	/// Limit the max length per RPC batch request.
	#[arg(long, conflicts_with_all = &["rpc_disable_batch_requests"], value_name = "LEN")]
	pub rpc_max_batch_request_len: Option<u32>,
	/// Specify browser *origins* allowed to access the HTTP & WS RPC servers.
	///
	/// A comma-separated list of origins (protocol://domain or special `null`
	/// value). Value of `all` will disable origin validation. Default is to
	/// allow localhost and <https://polkadot.js.org> origins. When running in
	/// `--dev` mode the default is to allow all origins.
	#[arg(long, value_name = "ORIGINS")]
	pub rpc_cors: Option<Cors>,
}
impl RpcParams {
/// Returns the RPC CORS configuration.
pub fn rpc_cors(&self, is_dev: bool) -> crate::Result<Option<Vec<String>>> {
Ok(self
.rpc_cors
.clone()
.unwrap_or_else(|| {
if is_dev {
log::warn!("Running in --dev mode, RPC CORS has been disabled.");
Cors::All
} else {
Cors::List(vec![
"http://localhost:*".into(),
"http://127.0.0.1:*".into(),
"https://localhost:*".into(),
"https://127.0.0.1:*".into(),
"https://polkadot.js.org".into(),
])
}
})
.into())
}
/// Returns the RPC endpoints.
pub fn rpc_addr(
&self,
is_dev: bool,
is_validator: bool,
default_listen_port: u16,
) -> crate::Result<Option<Vec<RpcEndpoint>>> {
if !self.experimental_rpc_endpoint.is_empty() {
for endpoint in &self.experimental_rpc_endpoint {
// Technically, `0.0.0.0` isn't a public IP address, but it's a way to listen on
// all interfaces. Thus, we consider it as a public endpoint and warn about it.
if endpoint.rpc_methods == RpcMethods::Unsafe && endpoint.is_global() ||
endpoint.listen_addr.ip().is_unspecified()
{
eprintln!(
"It isn't safe to expose RPC publicly without a proxy server that filters \
available set of RPC methods."
);
}
}
return Ok(Some(self.experimental_rpc_endpoint.clone()));
}
let (ipv4, ipv6) = rpc_interface(
self.rpc_external,
self.unsafe_rpc_external,
self.rpc_methods,
is_validator,
)?;
let cors = self.rpc_cors(is_dev)?;
let port = self.rpc_port.unwrap_or(default_listen_port);
Ok(Some(vec![
RpcEndpoint {
batch_config: self.rpc_batch_config()?,
max_connections: self.rpc_max_connections,
listen_addr: SocketAddr::new(std::net::IpAddr::V4(ipv4), port),
rpc_methods: self.rpc_methods,
rate_limit: self.rpc_rate_limit,
rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers,
rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(),
max_payload_in_mb: self.rpc_max_request_size,
max_payload_out_mb: self.rpc_max_response_size,
max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection,
max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection,
cors: cors.clone(),
retry_random_port: true,
is_optional: false,
},
RpcEndpoint {
batch_config: self.rpc_batch_config()?,
max_connections: self.rpc_max_connections,
listen_addr: SocketAddr::new(std::net::IpAddr::V6(ipv6), port),
rpc_methods: self.rpc_methods,
rate_limit: self.rpc_rate_limit,
rate_limit_trust_proxy_headers: self.rpc_rate_limit_trust_proxy_headers,
rate_limit_whitelisted_ips: self.rpc_rate_limit_whitelisted_ips.clone(),
max_payload_in_mb: self.rpc_max_request_size,
max_payload_out_mb: self.rpc_max_response_size,
max_subscriptions_per_connection: self.rpc_max_subscriptions_per_connection,
max_buffer_capacity_per_connection: self.rpc_message_buffer_capacity_per_connection,
cors: cors.clone(),
retry_random_port: true,
is_optional: true,
},
]))
}
/// Returns the configuration for batch RPC requests.
pub fn rpc_batch_config(&self) -> crate::Result<RpcBatchRequestConfig> {
	// Disabling batch requests takes precedence over any configured length limit.
	Ok(if self.rpc_disable_batch_requests {
		RpcBatchRequestConfig::Disabled
	} else if let Some(limit) = self.rpc_max_batch_request_len {
		RpcBatchRequestConfig::Limit(limit)
	} else {
		RpcBatchRequestConfig::Unlimited
	})
}
}
/// Pick the IPv4/IPv6 interfaces the RPC server should bind to.
///
/// Exposed endpoints (`--rpc-external` / `--unsafe-rpc-external`) bind the
/// unspecified ("all interfaces") addresses; otherwise only loopback is used.
/// Combining `--rpc-external` with a validator role is rejected unless unsafe
/// RPC methods were explicitly requested.
fn rpc_interface(
	is_external: bool,
	is_unsafe_external: bool,
	rpc_methods: RpcMethods,
	is_validator: bool,
) -> crate::Result<(Ipv4Addr, Ipv6Addr)> {
	if is_external && is_validator && rpc_methods != RpcMethods::Unsafe {
		return Err(crate::Error::Input(
			"--rpc-external option shouldn't be used if the node is running as \
			 a validator. Use `--unsafe-rpc-external` or `--rpc-methods=unsafe` if you understand \
			 the risks. See the options description for more information."
				.to_owned(),
		));
	}

	if !(is_external || is_unsafe_external) {
		// Not exposed: bind loopback only.
		return Ok((Ipv4Addr::LOCALHOST, Ipv6Addr::LOCALHOST))
	}

	if rpc_methods == RpcMethods::Unsafe {
		eprintln!(
			"It isn't safe to expose RPC publicly without a proxy server that filters \
			 available set of RPC methods."
		);
	}

	Ok((Ipv4Addr::UNSPECIFIED, Ipv6Addr::UNSPECIFIED))
}
/// Represent a single RPC endpoint with its configuration.
///
/// Constructed either from the legacy flags or parsed from an
/// `--experimental-rpc-endpoint` string via the [`std::str::FromStr`] impl below.
#[derive(Debug, Clone)]
pub struct RpcEndpoint {
	/// Listen address.
	pub listen_addr: SocketAddr,
	/// Batch request configuration.
	pub batch_config: RpcBatchRequestConfig,
	/// Maximum number of connections.
	pub max_connections: u32,
	/// Maximum inbound payload size in MB.
	pub max_payload_in_mb: u32,
	/// Maximum outbound payload size in MB.
	pub max_payload_out_mb: u32,
	/// Maximum number of subscriptions per connection.
	pub max_subscriptions_per_connection: u32,
	/// Maximum buffer capacity per connection.
	pub max_buffer_capacity_per_connection: u32,
	/// Rate limit per minute.
	pub rate_limit: Option<NonZeroU32>,
	/// Whether to trust proxy headers for rate limiting.
	pub rate_limit_trust_proxy_headers: bool,
	/// Whitelisted IPs for rate limiting.
	pub rate_limit_whitelisted_ips: Vec<IpNetwork>,
	/// CORS.
	pub cors: Option<Vec<String>>,
	/// RPC methods to expose.
	pub rpc_methods: RpcMethods,
	/// Whether it's an optional listening address, i.e., it's ignored if it fails to bind.
	/// For example substrate tries to bind both ipv4 and ipv6 addresses but some platforms
	/// may not support ipv6.
	pub is_optional: bool,
	/// Whether to retry with a random port if the provided port is already in use.
	pub retry_random_port: bool,
}
impl std::str::FromStr for RpcEndpoint {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut listen_addr = None;
let mut max_connections = None;
let mut max_payload_in_mb = None;
let mut max_payload_out_mb = None;
let mut max_subscriptions_per_connection = None;
let mut max_buffer_capacity_per_connection = None;
let mut cors: Option<Vec<String>> = None;
let mut rpc_methods = None;
let mut is_optional = None;
let mut disable_batch_requests = None;
let mut max_batch_request_len = None;
let mut rate_limit = None;
let mut rate_limit_trust_proxy_headers = None;
let mut rate_limit_whitelisted_ips = Vec::new();
let mut retry_random_port = None;
for input in s.split(',') {
let (key, val) = input.trim().split_once('=').ok_or_else(|| invalid_input(input))?;
let key = key.trim();
let val = val.trim();
match key {
RPC_LISTEN_ADDR => {
if listen_addr.is_some() {
return Err(only_once_err(RPC_LISTEN_ADDR));
}
let val: SocketAddr =
val.parse().map_err(|_| invalid_value(RPC_LISTEN_ADDR, &val))?;
listen_addr = Some(val);
},
RPC_CORS => {
if val.is_empty() {
return Err(invalid_value(RPC_CORS, &val));
}
if let Some(cors) = cors.as_mut() {
cors.push(val.to_string());
} else {
cors = Some(vec![val.to_string()]);
}
},
RPC_MAX_CONNS => {
if max_connections.is_some() {
return Err(only_once_err(RPC_MAX_CONNS));
}
let val = val.parse().map_err(|_| invalid_value(RPC_MAX_CONNS, &val))?;
max_connections = Some(val);
},
RPC_MAX_REQUEST_SIZE => {
if max_payload_in_mb.is_some() {
return Err(only_once_err(RPC_MAX_REQUEST_SIZE));
}
let val =
val.parse().map_err(|_| invalid_value(RPC_MAX_RESPONSE_SIZE, &val))?;
max_payload_in_mb = Some(val);
},
RPC_MAX_RESPONSE_SIZE => {
if max_payload_out_mb.is_some() {
return Err(only_once_err(RPC_MAX_RESPONSE_SIZE));
}
let val =
val.parse().map_err(|_| invalid_value(RPC_MAX_RESPONSE_SIZE, &val))?;
max_payload_out_mb = Some(val);
},
RPC_MAX_SUBS_PER_CONN => {
if max_subscriptions_per_connection.is_some() {
return Err(only_once_err(RPC_MAX_SUBS_PER_CONN));
}
let val =
val.parse().map_err(|_| invalid_value(RPC_MAX_SUBS_PER_CONN, &val))?;
max_subscriptions_per_connection = Some(val);
},
RPC_MAX_BUF_CAP_PER_CONN => {
if max_buffer_capacity_per_connection.is_some() {
return Err(only_once_err(RPC_MAX_BUF_CAP_PER_CONN));
}
let val =
val.parse().map_err(|_| invalid_value(RPC_MAX_BUF_CAP_PER_CONN, &val))?;
max_buffer_capacity_per_connection = Some(val);
},
RPC_RATE_LIMIT => {
if rate_limit.is_some() {
return Err(only_once_err("rate-limit"));
}
let val = val.parse().map_err(|_| invalid_value(RPC_RATE_LIMIT, &val))?;
rate_limit = Some(val);
},
RPC_RATE_LIMIT_TRUST_PROXY_HEADERS => {
if rate_limit_trust_proxy_headers.is_some() {
return Err(only_once_err(RPC_RATE_LIMIT_TRUST_PROXY_HEADERS));
}
let val = val
.parse()
.map_err(|_| invalid_value(RPC_RATE_LIMIT_TRUST_PROXY_HEADERS, &val))?;
rate_limit_trust_proxy_headers = Some(val);
},
RPC_RATE_LIMIT_WHITELISTED_IPS => {
let ip: IpNetwork = val
.parse()
.map_err(|_| invalid_value(RPC_RATE_LIMIT_WHITELISTED_IPS, &val))?;
rate_limit_whitelisted_ips.push(ip);
},
RPC_RETRY_RANDOM_PORT => {
if retry_random_port.is_some() {
return Err(only_once_err(RPC_RETRY_RANDOM_PORT));
}
let val =
val.parse().map_err(|_| invalid_value(RPC_RETRY_RANDOM_PORT, &val))?;
retry_random_port = Some(val);
},
RPC_METHODS => {
if rpc_methods.is_some() {
return Err(only_once_err("methods"));
}
let val = val.parse().map_err(|_| invalid_value(RPC_METHODS, &val))?;
rpc_methods = Some(val);
},
RPC_OPTIONAL => {
if is_optional.is_some() {
return Err(only_once_err(RPC_OPTIONAL));
}
let val = val.parse().map_err(|_| invalid_value(RPC_OPTIONAL, &val))?;
is_optional = Some(val);
},
RPC_DISABLE_BATCH => {
if disable_batch_requests.is_some() {
return Err(only_once_err(RPC_DISABLE_BATCH));
}
let val = val.parse().map_err(|_| invalid_value(RPC_DISABLE_BATCH, &val))?;
disable_batch_requests = Some(val);
},
RPC_BATCH_LIMIT => {
if max_batch_request_len.is_some() {
return Err(only_once_err(RPC_BATCH_LIMIT));
}
let val = val.parse().map_err(|_| invalid_value(RPC_BATCH_LIMIT, &val))?;
max_batch_request_len = Some(val);
},
_ => return Err(invalid_key(key)),
}
}
let listen_addr = listen_addr.ok_or("`listen-addr` must be specified exactly once")?;
let batch_config = match (disable_batch_requests, max_batch_request_len) {
(Some(true), Some(_)) => {
return Err(format!("`{RPC_BATCH_LIMIT}` and `{RPC_DISABLE_BATCH}` are mutually exclusive and can't be used together"));
},
(Some(false), None) => RpcBatchRequestConfig::Disabled,
(None, Some(len)) => RpcBatchRequestConfig::Limit(len),
_ => RpcBatchRequestConfig::Unlimited,
};
Ok(Self {
listen_addr,
batch_config,
max_connections: max_connections.unwrap_or(RPC_DEFAULT_MAX_CONNECTIONS),
max_payload_in_mb: max_payload_in_mb.unwrap_or(RPC_DEFAULT_MAX_REQUEST_SIZE_MB),
max_payload_out_mb: max_payload_out_mb.unwrap_or(RPC_DEFAULT_MAX_RESPONSE_SIZE_MB),
cors,
max_buffer_capacity_per_connection: max_buffer_capacity_per_connection
.unwrap_or(RPC_DEFAULT_MESSAGE_CAPACITY_PER_CONN),
max_subscriptions_per_connection: max_subscriptions_per_connection
.unwrap_or(RPC_DEFAULT_MAX_SUBS_PER_CONN),
rpc_methods: rpc_methods.unwrap_or(RpcMethods::Auto),
rate_limit,
rate_limit_trust_proxy_headers: rate_limit_trust_proxy_headers.unwrap_or(false),
rate_limit_whitelisted_ips,
is_optional: is_optional.unwrap_or(false),
retry_random_port: retry_random_port.unwrap_or(false),
})
}
}
// Implement `From` rather than `Into` (clippy `from_over_into`): the blanket
// impl still provides `Into` for existing `.into()` callers, and `From` is
// usable in generic bounds.
impl From<RpcEndpoint> for sc_service::config::RpcEndpoint {
	/// Convert the CLI endpoint description into the service configuration
	/// type, field by field. `rpc_methods` goes through its own conversion.
	fn from(endpoint: RpcEndpoint) -> Self {
		Self {
			batch_config: endpoint.batch_config,
			listen_addr: endpoint.listen_addr,
			max_buffer_capacity_per_connection: endpoint.max_buffer_capacity_per_connection,
			max_connections: endpoint.max_connections,
			max_payload_in_mb: endpoint.max_payload_in_mb,
			max_payload_out_mb: endpoint.max_payload_out_mb,
			max_subscriptions_per_connection: endpoint.max_subscriptions_per_connection,
			rpc_methods: endpoint.rpc_methods.into(),
			rate_limit: endpoint.rate_limit,
			rate_limit_trust_proxy_headers: endpoint.rate_limit_trust_proxy_headers,
			rate_limit_whitelisted_ips: endpoint.rate_limit_whitelisted_ips,
			cors: endpoint.cors,
			retry_random_port: endpoint.retry_random_port,
			is_optional: endpoint.is_optional,
		}
	}
}
impl RpcEndpoint {
	/// Returns whether the endpoint is globally exposed.
	///
	/// The listen address is wrapped in an `IpNetwork`, which provides the
	/// global-reachability check.
	pub fn is_global(&self) -> bool {
		let network = IpNetwork::from(self.listen_addr.ip());
		network.is_global()
	}
}
/// Error message for a key that may only appear once but was repeated.
// NOTE(review): "allowed be specified" reads oddly, but the wording is kept
// verbatim since it is user-visible output.
fn only_once_err(reason: &str) -> String {
	format!("`{}` is only allowed be specified once", reason)
}
/// Error message for an entry that is not of the `key=value` form.
fn invalid_input(input: &str) -> String {
	format!("`{}`, expects: `key=value`", input)
}
/// Error message for a value that failed to parse for the given key.
fn invalid_value(key: &str, value: &str) -> String {
	format!("value=`{}` key=`{}`", value, key)
}
/// Error message for a key that is not recognized at all.
fn invalid_key(key: &str) -> String {
	format!("unknown key=`{}`, see `--help` for available options", key)
}
#[cfg(test)]
mod tests {
	use super::*;
	use std::{num::NonZeroU32, str::FromStr};

	// Smoke tests: minimal valid endpoint strings parse; unknown keys
	// (note the deliberate `listen-addrs` typo) and empty `cors` values
	// are rejected.
	#[test]
	fn parse_rpc_endpoint_works() {
		assert!(RpcEndpoint::from_str("listen-addr=127.0.0.1:9944").is_ok());
		assert!(RpcEndpoint::from_str("listen-addr=[::1]:9944").is_ok());
		assert!(RpcEndpoint::from_str("listen-addr=127.0.0.1:9944,methods=auto").is_ok());
		assert!(RpcEndpoint::from_str("listen-addr=[::1]:9944,methods=auto").is_ok());
		assert!(RpcEndpoint::from_str(
			"listen-addr=127.0.0.1:9944,methods=auto,cors=*,optional=true"
		)
		.is_ok());
		assert!(RpcEndpoint::from_str("listen-addrs=127.0.0.1:9944,foo=*").is_err());
		assert!(RpcEndpoint::from_str("listen-addrs=127.0.0.1:9944,cors=").is_err());
	}

	// Exercises every supported key in one string, including the repeatable
	// `rate-limit-whitelisted-ips`, and checks each parsed field.
	#[test]
	fn parse_rpc_endpoint_all() {
		let endpoint = RpcEndpoint::from_str(
			"listen-addr=127.0.0.1:9944,methods=unsafe,cors=*,optional=true,retry-random-port=true,rate-limit=99,\
			max-batch-request-len=100,rate-limit-trust-proxy-headers=true,max-connections=33,max-request-size=4,\
			max-response-size=3,max-subscriptions-per-connection=7,max-buffer-capacity-per-connection=8,\
			rate-limit-whitelisted-ips=192.168.1.0/24,rate-limit-whitelisted-ips=ff01::0/32"
		).unwrap();
		assert_eq!(endpoint.listen_addr, ([127, 0, 0, 1], 9944).into());
		assert_eq!(endpoint.rpc_methods, RpcMethods::Unsafe);
		assert_eq!(endpoint.cors, Some(vec!["*".to_string()]));
		assert_eq!(endpoint.is_optional, true);
		assert_eq!(endpoint.retry_random_port, true);
		assert_eq!(endpoint.rate_limit, Some(NonZeroU32::new(99).unwrap()));
		assert!(matches!(endpoint.batch_config, RpcBatchRequestConfig::Limit(l) if l == 100));
		assert_eq!(endpoint.rate_limit_trust_proxy_headers, true);
		assert_eq!(
			endpoint.rate_limit_whitelisted_ips,
			vec![
				IpNetwork::V4("192.168.1.0/24".parse().unwrap()),
				IpNetwork::V6("ff01::0/32".parse().unwrap())
			]
		);
		assert_eq!(endpoint.max_connections, 33);
		assert_eq!(endpoint.max_payload_in_mb, 4);
		assert_eq!(endpoint.max_payload_out_mb, 3);
		assert_eq!(endpoint.max_subscriptions_per_connection, 7);
		assert_eq!(endpoint.max_buffer_capacity_per_connection, 8);
	}

	// Repeated `cors` keys accumulate origins in order of appearance.
	#[test]
	fn parse_rpc_endpoint_multiple_cors() {
		let addr = RpcEndpoint::from_str(
			"listen-addr=127.0.0.1:9944,methods=auto,cors=https://polkadot.js.org,cors=*,cors=localhost:*",
		)
		.unwrap();

		assert_eq!(
			addr.cors,
			Some(vec![
				"https://polkadot.js.org".to_string(),
				"*".to_string(),
				"localhost:*".to_string()
			])
		);
	}

	// Whitespace around keys and values is trimmed before parsing.
	#[test]
	fn parse_rpc_endpoint_whitespaces() {
		let addr = RpcEndpoint::from_str(
			"  listen-addr = 127.0.0.1:9944,   methods    =   auto,  optional   =  true ",
		)
		.unwrap();
		assert_eq!(addr.rpc_methods, RpcMethods::Auto);
		assert_eq!(addr.is_optional, true);
	}

	// `disable-batch-requests=true` and `max-batch-request-len` cannot be
	// combined; the parser must reject the pair.
	#[test]
	fn parse_rpc_endpoint_batch_options_mutually_exclusive() {
		assert!(RpcEndpoint::from_str(
			"listen-addr = 127.0.0.1:9944,disable-batch-requests=true,max-batch-request-len=100",
		)
		.is_err());
	}
}
@@ -0,0 +1,45 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::Args;
use std::str::FromStr;
/// Parameters used to config runtime.
// NOTE: the `///` docs on the fields below double as clap's `--help` output.
#[derive(Debug, Clone, Args)]
pub struct RuntimeParams {
	/// The size of the instances cache for each runtime [max: 32].
	///
	/// Values higher than 32 are illegal.
	// Upper bound is enforced at argument-parse time by
	// `parse_max_runtime_instances` below.
	#[arg(long, default_value_t = 8, value_parser = parse_max_runtime_instances)]
	pub max_runtime_instances: usize,

	/// Maximum number of different runtimes that can be cached.
	#[arg(long, default_value_t = 2)]
	pub runtime_cache_size: u8,
}
/// Value parser for `--max-runtime-instances`: accepts integers in `0..=32`,
/// rejecting non-numeric input and anything above 32 with a descriptive error.
fn parse_max_runtime_instances(s: &str) -> Result<usize, String> {
	match usize::from_str(s) {
		Ok(n) if n <= 32 => Ok(n),
		Ok(n) => Err(format!("Illegal `--max-runtime-instances` value: {n} is more than the allowed maximum of `32` ")),
		Err(_err) => Err(format!("Illegal `--max-runtime-instances` value: {s}")),
	}
}
@@ -0,0 +1,148 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use crate::arg_enums::TracingReceiver;
use clap::Args;
use sc_service::config::BasePath;
use std::path::PathBuf;
/// Shared parameters used by all `CoreParams`.
// NOTE: the `///` docs on the fields below double as clap's `--help` output;
// edit them with user-facing wording in mind.
#[derive(Debug, Clone, Args)]
pub struct SharedParams {
	/// Specify the chain specification.
	///
	/// It can be one of the predefined ones (dev, local, or staging) or it can be a path to
	/// a file with the chainspec (such as one exported by the `build-spec` subcommand).
	#[arg(long, value_name = "CHAIN_SPEC")]
	pub chain: Option<String>,

	/// Specify the development chain.
	///
	/// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp`
	/// flags, unless explicitly overridden. It also disables local peer discovery (see `--no-mdns`
	/// and `--discover-local`). With this flag some nodes might start with manual seal, producing
	/// blocks at certain events (e.g. `pezkuwi-omni-node`, which produces blocks at certain
	/// intervals dictated by `--dev-block-time`).
	#[arg(long)]
	pub dev: bool,

	/// Specify custom base path.
	// When unset and `--dev` is given, `base_path()` below falls back to a
	// temporary directory.
	#[arg(long, short = 'd', value_name = "PATH")]
	pub base_path: Option<PathBuf>,

	/// Sets a custom logging filter (syntax: `<target>=<level>`).
	///
	/// Log levels (least to most verbose) are `error`, `warn`, `info`, `debug`, and `trace`.
	///
	/// By default, all targets log `info`. The global log level can be set with `-l<level>`.
	///
	/// Multiple `<target>=<level>` entries can be specified and separated by a comma.
	///
	/// *Example*: `--log error,sync=debug,grandpa=warn`.
	/// Sets Global log level to `error`, sets `sync` target to debug and grandpa target to `warn`.
	#[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)]
	pub log: Vec<String>,

	/// Enable detailed log output.
	///
	/// Includes displaying the log target, log level and thread name.
	///
	/// This is automatically enabled when something is logged with any higher level than `info`.
	#[arg(long)]
	pub detailed_log_output: bool,

	/// Disable log color output.
	#[arg(long)]
	pub disable_log_color: bool,

	/// Enable feature to dynamically update and reload the log filter.
	///
	/// Be aware that enabling this feature can lead to a performance decrease up to factor six or
	/// more. Depending on the global logging level the performance decrease changes.
	///
	/// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this
	/// option not being set.
	#[arg(long)]
	pub enable_log_reloading: bool,

	/// Sets a custom profiling filter.
	///
	/// Syntax is the same as for logging (`--log`).
	#[arg(long, value_name = "TARGETS")]
	pub tracing_targets: Option<String>,

	/// Receiver to process tracing messages.
	#[arg(long, value_name = "RECEIVER", value_enum, ignore_case = true, default_value_t = TracingReceiver::Log)]
	pub tracing_receiver: TracingReceiver,
}
impl SharedParams {
/// Specify custom base path.
pub fn base_path(&self) -> Result<Option<BasePath>, crate::Error> {
match &self.base_path {
Some(r) => Ok(Some(r.clone().into())),
// If `dev` is enabled, we use the temp base path.
None if self.is_dev() => Ok(Some(BasePath::new_temp_dir()?)),
None => Ok(None),
}
}
/// Specify the development chain.
pub fn is_dev(&self) -> bool {
self.dev
}
/// Get the chain spec for the parameters provided
pub fn chain_id(&self, is_dev: bool) -> String {
match self.chain {
Some(ref chain) => chain.clone(),
None if is_dev => "dev".into(),
_ => "".into(),
}
}
/// Get the filters for the logging
pub fn log_filters(&self) -> &[String] {
&self.log
}
/// Should the detailed log output be enabled.
pub fn detailed_log_output(&self) -> bool {
self.detailed_log_output
}
/// Should the log color output be disabled?
pub fn disable_log_color(&self) -> bool {
self.disable_log_color
}
/// Is log reloading enabled
pub fn enable_log_reloading(&self) -> bool {
self.enable_log_reloading
}
/// Receiver to process tracing messages.
pub fn tracing_receiver(&self) -> sc_service::TracingReceiver {
self.tracing_receiver.into()
}
/// Comma separated list of targets for tracing.
pub fn tracing_targets(&self) -> Option<String> {
self.tracing_targets.clone()
}
}
@@ -0,0 +1,69 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::Args;
/// Parameters used to config telemetry.
// NOTE: the `///` docs on the fields below double as clap's `--help` output.
#[derive(Debug, Clone, Args)]
pub struct TelemetryParams {
	/// Disable connecting to the Substrate telemetry server.
	///
	/// Telemetry is on by default on global chains.
	#[arg(long)]
	pub no_telemetry: bool,

	/// The URL of the telemetry server to connect to.
	///
	/// This flag can be passed multiple times as a means to specify multiple
	/// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting
	/// the least verbosity.
	///
	/// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`.
	// Each occurrence is parsed into a `(url, verbosity)` pair by
	// `parse_telemetry_endpoints` below.
	#[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)]
	pub telemetry_endpoints: Vec<(String, u8)>,
}
/// Failure modes of [`parse_telemetry_endpoints`].
#[derive(Debug)]
enum TelemetryParsingError {
	/// The input contained no space, so no verbosity was given.
	MissingVerbosity,
	/// The text after the first space was not a valid `u8`.
	VerbosityParsingError(std::num::ParseIntError),
}

impl std::error::Error for TelemetryParsingError {}

impl std::fmt::Display for TelemetryParsingError {
	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
		match self {
			Self::MissingVerbosity => write!(f, "Verbosity level missing"),
			Self::VerbosityParsingError(e) => write!(f, "{}", e),
		}
	}
}

/// Splits a `'URL VERBOSITY'` argument at the first space into the URL and a
/// `u8` verbosity level.
fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), TelemetryParsingError> {
	match s.find(' ') {
		Some(space) => {
			let url = s[..space].to_string();
			let verbosity =
				s[space + 1..].parse().map_err(TelemetryParsingError::VerbosityParsingError)?;
			Ok((url, verbosity))
		},
		None => Err(TelemetryParsingError::MissingVerbosity),
	}
}
@@ -0,0 +1,75 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use clap::{Args, ValueEnum};
use sc_transaction_pool::TransactionPoolOptions;
/// Type of transaction pool to be used
// Variants map to CLI values via kebab-case: `single-state` and `fork-aware`.
#[derive(Debug, Clone, Copy, ValueEnum)]
#[value(rename_all = "kebab-case")]
pub enum TransactionPoolType {
	/// Uses a legacy, single-state transaction pool.
	SingleState,
	/// Uses a fork-aware transaction pool.
	ForkAware,
}
// Implement `From` rather than `Into` (clippy `from_over_into`): the blanket
// impl keeps every existing `.into()` call working while also enabling
// `From`-based generic bounds.
impl From<TransactionPoolType> for sc_transaction_pool::TransactionPoolType {
	/// Map the CLI enum onto the transaction-pool crate's equivalent variant.
	fn from(pool_type: TransactionPoolType) -> Self {
		match pool_type {
			TransactionPoolType::SingleState => Self::SingleState,
			TransactionPoolType::ForkAware => Self::ForkAware,
		}
	}
}
/// Parameters used to create the pool configuration.
// NOTE: the `///` docs on the fields below double as clap's `--help` output.
#[derive(Debug, Clone, Args)]
pub struct TransactionPoolParams {
	/// Maximum number of transactions in the transaction pool.
	#[arg(long, value_name = "COUNT", default_value_t = 8192)]
	pub pool_limit: usize,

	/// Maximum number of kilobytes of all transactions stored in the pool.
	// Converted to bytes (x 1024) when building the pool options in
	// `transaction_pool` below.
	#[arg(long, value_name = "COUNT", default_value_t = 20480)]
	pub pool_kbytes: usize,

	/// How long a transaction is banned for.
	///
	/// If it is considered invalid. Defaults to 1800s.
	#[arg(long, value_name = "SECONDS")]
	pub tx_ban_seconds: Option<u64>,

	/// The type of transaction pool to be instantiated.
	#[arg(long, value_enum, default_value_t = TransactionPoolType::ForkAware)]
	pub pool_type: TransactionPoolType,
}
impl TransactionPoolParams {
	/// Fill the given `PoolConfiguration` by looking at the cli parameters.
	pub fn transaction_pool(&self, is_dev: bool) -> TransactionPoolOptions {
		// The CLI takes the size limit in KiB; the pool expects bytes.
		let pool_bytes = self.pool_kbytes * 1024;
		TransactionPoolOptions::new_with_params(
			self.pool_limit,
			pool_bytes,
			self.tx_ban_seconds,
			self.pool_type.into(),
			is_dev,
		)
	}
}