feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
+64
View File
@@ -0,0 +1,64 @@
[package]
name = "node-bench"
version = "0.9.0-dev"
authors.workspace = true
description = "Substrate node integration benchmarks."
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
homepage.workspace = true
repository.workspace = true
publish = false
[lints]
workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
array-bytes = { workspace = true, default-features = true }
async-trait = { workspace = true }
clap = { features = ["derive"], workspace = true }
derive_more = { features = ["display"], workspace = true }
fs_extra = { workspace = true }
futures = { features = ["thread-pool"], workspace = true }
hash-db = { workspace = true, default-features = true }
kitchensink-runtime = { workspace = true }
kvdb = { workspace = true }
kvdb-rocksdb = { workspace = true }
log = { workspace = true, default-features = true }
node-primitives = { workspace = true, default-features = true }
node-testing = { workspace = true }
parity-db = { workspace = true }
rand = { features = ["small_rng"], workspace = true, default-features = true }
sc-basic-authorship = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
sc-transaction-pool = { workspace = true, default-features = true }
sc-transaction-pool-api = { workspace = true, default-features = true }
serde = { workspace = true, default-features = true }
serde_json = { workspace = true, default-features = true }
sp-consensus = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-inherents = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-state-machine = { workspace = true, default-features = true }
sp-timestamp = { workspace = true }
sp-tracing = { workspace = true, default-features = true }
sp-trie = { workspace = true, default-features = true }
tempfile = { workspace = true }
[features]
runtime-benchmarks = [
"kitchensink-runtime/runtime-benchmarks",
"node-primitives/runtime-benchmarks",
"node-testing/runtime-benchmarks",
"sc-basic-authorship/runtime-benchmarks",
"sc-client-api/runtime-benchmarks",
"sc-transaction-pool-api/runtime-benchmarks",
"sc-transaction-pool/runtime-benchmarks",
"sp-consensus/runtime-benchmarks",
"sp-inherents/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-state-machine/runtime-benchmarks",
"sp-timestamp/runtime-benchmarks",
"sp-trie/runtime-benchmarks",
]
+47
View File
@@ -0,0 +1,47 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
/// Logical size of a generated block, used to parametrize the benchmarks.
///
/// The `Display` strings (e.g. "small", "full") become path segments in the
/// benchmark names, so they must stay stable.
#[derive(Clone, Copy, Debug, derive_more::Display)]
pub enum SizeType {
	/// Block without any transactions.
	#[display(fmt = "empty")]
	Empty,
	/// A handful of transactions.
	#[display(fmt = "small")]
	Small,
	/// A moderately filled block.
	#[display(fmt = "medium")]
	Medium,
	/// A heavily filled block.
	#[display(fmt = "large")]
	Large,
	/// As many transactions as fit into a block (no fixed count).
	#[display(fmt = "full")]
	Full,
	/// User-provided transaction count (via `--transactions`).
	#[display(fmt = "custom")]
	Custom(usize),
}
impl SizeType {
pub fn transactions(&self) -> Option<usize> {
match self {
SizeType::Empty => Some(0),
SizeType::Small => Some(10),
SizeType::Medium => Some(100),
SizeType::Large => Some(500),
SizeType::Full => None,
// Custom SizeType will use the `--transactions` input parameter
SizeType::Custom(val) => Some(*val),
}
}
}
+314
View File
@@ -0,0 +1,314 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Block construction benchmark.
//!
//! This benchmark is expected to measure block construction.
//! We want to protect against cold-cache attacks, and so this
//! benchmark should not rely on any caching (except those entries that
//! DO NOT depend on user input). Thus transaction generation should be
//! based on randomized data.
use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc};
use async_trait::async_trait;
use node_primitives::Block;
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
use sc_transaction_pool_api::{
ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource,
TransactionStatusStreamFor, TxHash, TxInvalidityReportMap,
};
use sp_consensus::{Environment, Proposer};
use sp_inherents::InherentDataProvider;
use sp_runtime::OpaqueExtrinsic;
use crate::{
common::SizeType,
core::{self, Mode, Path},
};
/// Parameters describing one block-construction benchmark instance.
pub struct ConstructionBenchmarkDescription {
	/// Signature scheme used for the generated accounts.
	pub key_types: KeyTypes,
	/// Kind of extrinsics the block is filled with.
	pub block_type: BlockType,
	/// How many transactions to generate (see `SizeType`).
	pub size: SizeType,
	/// Database backend to benchmark against.
	pub database_type: DatabaseType,
}

/// A prepared construction benchmark: a seeded database plus the
/// pre-generated transactions to feed into the proposer.
pub struct ConstructionBenchmark {
	database: BenchDb,
	transactions: Transactions,
}
impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
	/// Benchmark path of the form `::node::proposer::<keys>::<block>::<db>::<size>`.
	fn path(&self) -> Path {
		let mut p = Path::new(&["node", "proposer"]);
		p.push(match self.key_types {
			KeyTypes::Sr25519 => "sr25519",
			KeyTypes::Ed25519 => "ed25519",
		});
		p.push(match self.block_type {
			BlockType::RandomTransfersKeepAlive => "transfer",
			BlockType::RandomTransfersReaping => "transfer_reaping",
			BlockType::Noop => "noop",
		});
		p.push(match self.database_type {
			DatabaseType::RocksDb => "rocksdb",
			DatabaseType::ParityDb => "paritydb",
		});
		p.push(&self.size.to_string());
		p
	}

	/// Seed a fresh 50k-key database and pre-generate the transaction set
	/// before any measurement starts.
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
		let client = bench_db.client();
		let content = self.block_type.to_content(self.size.transactions());

		let mut pool_transactions: Vec<Arc<PoolTransaction>> = Vec::new();
		for tx in bench_db.block_content(content, &client) {
			pool_transactions.push(Arc::new(tx.into()));
		}

		Box::new(ConstructionBenchmark {
			database: bench_db,
			transactions: Transactions(pool_transactions),
		})
	}

	fn name(&self) -> Cow<'static, str> {
		format!(
			"Block construction ({:?}/{}, {:?} backend)",
			self.block_type, self.size, self.database_type,
		)
		.into()
	}
}
impl core::Benchmark for ConstructionBenchmark {
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let context = self.database.create_context();

		// Query the runtime version once before the timer starts so that this
		// one-off lookup (and whatever caching it triggers) is not part of
		// the measured section.
		let _ = context
			.client
			.runtime_version_at(context.client.chain_info().genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;

		if mode == Mode::Profile {
			// Quiet window so an attached profiler can delimit the run.
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		let mut proposer_factory = sc_basic_authorship::ProposerFactory::new(
			context.spawn_handle.clone(),
			context.client.clone(),
			self.transactions.clone().into(),
			None,
			None,
		);
		let timestamp_provider = sp_timestamp::InherentDataProvider::from_system_time();

		// Measured section: proposer initialization at genesis, inherent
		// data creation, and the block proposal itself.
		let start = std::time::Instant::now();

		let proposer = futures::executor::block_on(
			proposer_factory.init(
				&context
					.client
					.header(context.client.chain_info().genesis_hash)
					.expect("Database error querying block #0")
					.expect("Block #0 should exist"),
			),
		)
		.expect("Proposer initialization failed");

		let inherent_data = futures::executor::block_on(timestamp_provider.create_inherent_data())
			.expect("Create inherent data failed");
		let _block = futures::executor::block_on(Proposer::propose(
			proposer,
			inherent_data,
			Default::default(),
			// Generous deadline: we want the proposer to pack everything,
			// not to race the clock.
			std::time::Duration::from_secs(20),
			None,
		))
		.map(|r| r.block)
		.expect("Proposing failed");

		let elapsed = start.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		elapsed
	}
}
/// Minimal transaction wrapper satisfying the pool traits for the benchmark.
#[derive(Clone, Debug)]
pub struct PoolTransaction {
	// The opaque extrinsic payload handed to the proposer.
	data: Arc<OpaqueExtrinsic>,
	// Placeholder hash — every transaction gets the zero hash (the benchmark
	// never looks transactions up by hash).
	hash: node_primitives::Hash,
}

impl From<OpaqueExtrinsic> for PoolTransaction {
	fn from(e: OpaqueExtrinsic) -> Self {
		PoolTransaction { data: Arc::from(e), hash: node_primitives::Hash::zero() }
	}
}
impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction {
	type Transaction = Arc<OpaqueExtrinsic>;
	type Hash = node_primitives::Hash;

	fn data(&self) -> &Self::Transaction {
		&self.data
	}

	fn hash(&self) -> &Self::Hash {
		&self.hash
	}

	// The remaining accessors are deliberately left unimplemented: they are
	// not needed for this benchmark, and calling them panics so any
	// accidental use is caught immediately.
	fn priority(&self) -> &u64 {
		unimplemented!()
	}

	fn longevity(&self) -> &u64 {
		unimplemented!()
	}

	fn requires(&self) -> &[Vec<u8>] {
		unimplemented!()
	}

	fn provides(&self) -> &[Vec<u8>] {
		unimplemented!()
	}

	fn is_propagable(&self) -> bool {
		unimplemented!()
	}
}
/// Fixed, pre-generated set of transactions served to the proposer.
#[derive(Clone, Debug)]
pub struct Transactions(Vec<Arc<PoolTransaction>>);

/// Owning iterator over a cloned transaction set.
pub struct TransactionsIterator(std::vec::IntoIter<Arc<PoolTransaction>>);

impl Iterator for TransactionsIterator {
	type Item = Arc<PoolTransaction>;

	fn next(&mut self) -> Option<Self::Item> {
		self.0.next()
	}
}

impl ReadyTransactions for TransactionsIterator {
	// Invalidity reports are irrelevant for a static benchmark set.
	fn report_invalid(&mut self, _tx: &Self::Item) {}
}
// Stub transaction pool: only `ready_at` and `report_invalid` carry a real
// implementation; every other method panics if reached, which keeps the
// benchmark honest about which parts of the pool API it exercises.
#[async_trait]
impl sc_transaction_pool_api::TransactionPool for Transactions {
	type Block = Block;
	type Hash = node_primitives::Hash;
	type InPoolTransaction = PoolTransaction;
	type Error = sc_transaction_pool_api::error::Error;

	/// Asynchronously imports a bunch of unverified transactions to the pool.
	async fn submit_at(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xts: Vec<TransactionFor<Self>>,
	) -> Result<Vec<Result<node_primitives::Hash, Self::Error>>, Self::Error> {
		unimplemented!()
	}

	/// Asynchronously imports one unverified transaction to the pool.
	async fn submit_one(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xt: TransactionFor<Self>,
	) -> Result<TxHash<Self>, Self::Error> {
		unimplemented!()
	}

	async fn submit_and_watch(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xt: TransactionFor<Self>,
	) -> Result<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> {
		unimplemented!()
	}

	// Serve the entire pre-generated set as "ready", regardless of the
	// requested block hash.
	async fn ready_at(
		&self,
		_at: Self::Hash,
	) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		Box::new(TransactionsIterator(self.0.clone().into_iter()))
	}

	fn ready(&self) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		unimplemented!()
	}

	// Accept and ignore invalidity reports — the set is static.
	async fn report_invalid(
		&self,
		_at: Option<Self::Hash>,
		_invalid_tx_errors: TxInvalidityReportMap<TxHash<Self>>,
	) -> Vec<Arc<Self::InPoolTransaction>> {
		Default::default()
	}

	fn futures(&self) -> Vec<Self::InPoolTransaction> {
		unimplemented!()
	}

	fn status(&self) -> PoolStatus {
		unimplemented!()
	}

	fn import_notification_stream(&self) -> ImportNotificationStream<TxHash<Self>> {
		unimplemented!()
	}

	fn on_broadcasted(&self, _propagations: HashMap<TxHash<Self>, Vec<String>>) {
		unimplemented!()
	}

	fn hash_of(&self, _xt: &TransactionFor<Self>) -> TxHash<Self> {
		unimplemented!()
	}

	fn ready_transaction(&self, _hash: &TxHash<Self>) -> Option<Arc<Self::InPoolTransaction>> {
		unimplemented!()
	}

	async fn ready_at_with_timeout(
		&self,
		_at: Self::Hash,
		_timeout: std::time::Duration,
	) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		unimplemented!()
	}
}
+151
View File
@@ -0,0 +1,151 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use serde::Serialize;
use std::{
borrow::{Cow, ToOwned},
fmt,
};
/// Hierarchical benchmark path, rendered as `::seg1::seg2::…` and used for
/// `--filter` matching.
pub struct Path(Vec<String>);

// Merged the two previously separate `impl Path` blocks into one — they were
// an accidental split with no semantic difference.
impl Path {
	/// Create a path from the initial static segments.
	pub fn new(initial: &'static [&'static str]) -> Self {
		Path(initial.iter().map(|x| x.to_string()).collect())
	}

	/// Append one segment to the path.
	pub fn push(&mut self, item: &str) {
		self.0.push(item.to_string());
	}

	/// Render the full path; every segment is prefixed with `::`,
	/// e.g. `::node::import::rocksdb`.
	pub fn full(&self) -> String {
		self.0.iter().fold(String::new(), |mut val, next| {
			val.push_str("::");
			val.push_str(next);
			val
		})
	}

	/// Substring match against the rendered path (this is what `--filter`
	/// uses, so partial segments also match).
	pub fn has(&self, path: &str) -> bool {
		self.full().contains(path)
	}
}
/// Factory for one benchmark: knows its name and path, and how to build the
/// runnable instance.
pub trait BenchmarkDescription {
	/// Hierarchical path used for listing and `--filter` matching.
	fn path(&self) -> Path;

	/// Consume the description, performing all expensive setup up front so
	/// it is excluded from the measurement.
	fn setup(self: Box<Self>) -> Box<dyn Benchmark>;

	/// Human-readable benchmark name.
	fn name(&self) -> Cow<'static, str>;
}

/// A runnable benchmark measuring a single operation.
pub trait Benchmark {
	/// Execute one measured run and return its wall-clock duration.
	fn run(&mut self, mode: Mode) -> std::time::Duration;
}
/// Aggregated result of one benchmark, serialized as-is for `--json` output.
#[derive(Debug, Clone, Serialize)]
pub struct BenchmarkOutput {
	// Human-readable benchmark name.
	name: String,
	// Mean over all runs, in nanoseconds.
	raw_average: u64,
	// Trimmed mean (top and bottom outlier runs discarded), in nanoseconds.
	average: u64,
}
/// Pretty-printer for a nanosecond quantity, choosing a human-friendly unit
/// (ns / µs / ms / s) based on magnitude.
pub struct NsFormatter(pub u64);

impl fmt::Display for NsFormatter {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		let ns = self.0;
		if ns < 100 {
			write!(f, "{} ns", ns)
		} else if ns < 100_000 {
			write!(f, "{:.1} µs", ns as f64 / 1000.0)
		} else if ns < 1_000_000 {
			// Sub-millisecond values get extra precision.
			write!(f, "{:.4} ms", ns as f64 / 1_000_000.0)
		} else if ns < 100_000_000 {
			write!(f, "{:.1} ms", ns as f64 / 1_000_000.0)
		} else {
			write!(f, "{:.4} s", ns as f64 / 1_000_000_000.0)
		}
	}
}
/// Benchmark execution mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Mode {
	/// Plain timing run.
	Regular,
	/// Adds pauses around the measured section so the relevant interval can
	/// be selected in a profiler.
	Profile,
}

impl std::str::FromStr for Mode {
	type Err = &'static str;

	// Fix: the parameter was named `day` — a copy-paste leftover from a
	// weekday-parsing example; renamed to `mode`.
	fn from_str(mode: &str) -> Result<Self, Self::Err> {
		match mode {
			"regular" => Ok(Mode::Regular),
			"profile" => Ok(Mode::Profile),
			_ => Err("Could not parse mode"),
		}
	}
}
impl fmt::Display for BenchmarkOutput {
	/// Renders `name: avg <raw>, w_avg <trimmed>` — the raw mean followed by
	/// the trimmed mean.
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		let raw = NsFormatter(self.raw_average);
		let trimmed = NsFormatter(self.average);
		write!(f, "{}: avg {}, w_avg {}", self.name, raw, trimmed)
	}
}
/// Run `benchmark` 50 times and aggregate the timings.
///
/// `raw_average` is the mean over all 50 runs; `average` is the mean over the
/// middle 30 of the sorted timings (the 10 fastest and 10 slowest runs are
/// discarded as outliers).
pub fn run_benchmark(benchmark: Box<dyn BenchmarkDescription>, mode: Mode) -> BenchmarkOutput {
	let name = benchmark.name().to_owned();
	let mut benchmark = benchmark.setup();

	let mut durations: Vec<u128> = (0..50).map(|_| benchmark.run(mode).as_nanos()).collect();
	durations.sort();

	let total: u128 = durations.iter().sum();
	let raw_average = (total / durations.len() as u128) as u64;
	let trimmed_total: u128 = durations.iter().skip(10).take(30).sum();
	let average = (trimmed_total / 30) as u64;

	BenchmarkOutput { name: name.into(), raw_average, average }
}
/// Build a `Vec<Box<dyn BenchmarkDescription>>` from a comma-separated list
/// of entries. Each entry is either
/// `pattern in iterator => expression,` (one benchmark per iterator item)
/// or a plain expression (a single benchmark).
macro_rules! matrix(
	// `$var in $over => $tt` — expand `$tt` once per element of `$over`,
	// then recurse on the remaining entries.
	( $var:tt in $over:expr => $tt:expr, $( $rest:tt )* ) => {
		{
			let mut res = Vec::<Box<dyn crate::core::BenchmarkDescription>>::new();
			for $var in $over {
				res.push(Box::new($tt));
			}
			res.extend(matrix!( $($rest)* ));
			res
		}
	};
	// Single benchmark description expression.
	( $var:expr, $( $rest:tt )*) => {
		{
			let mut res = vec![Box::new($var) as Box<dyn crate::core::BenchmarkDescription>];
			res.extend(matrix!( $($rest)* ));
			res
		}
	};
	// Base case: no entries left.
	() => { vec![] }
);
+69
View File
@@ -0,0 +1,69 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use std::{collections::HashMap, sync::Arc};
use kvdb::KeyValueDB;
use node_primitives::Hash;
use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut};
use crate::simple_trie::SimpleTrie;
/// Generate trie from given `key_values`.
///
/// Will fill your database `db` with trie data from `key_values` and
/// return root.
pub fn generate_trie(
	db: Arc<dyn KeyValueDB>,
	key_values: impl IntoIterator<Item = (Vec<u8>, Vec<u8>)>,
) -> Hash {
	let mut root = Hash::default();
	let (db, overlay) = {
		let mut overlay = HashMap::new();
		// Pre-seed the overlay with one fixed entry mapping this hex key to
		// `[0]` — presumably the prefixed key of the empty/null trie node.
		// NOTE(review): confirm against sp-trie internals.
		overlay.insert(
			array_bytes::hex2bytes(
				"03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314",
			)
			.expect("null key is valid"),
			Some(vec![0]),
		);
		let mut trie = SimpleTrie { db, overlay: &mut overlay };
		// Inner scope so the trie builder's mutable borrows of `trie` and
		// `root` end before `trie.db` is moved back out.
		{
			let mut trie_db =
				TrieDBMutBuilderV1::<crate::simple_trie::Hasher>::new(&mut trie, &mut root).build();
			for (key, value) in key_values {
				trie_db.insert(&key, &value).expect("trie insertion failed");
			}
			trie_db.commit();
		}
		(trie.db, overlay)
	};

	// Flush the accumulated overlay (both inserts and tombstones) into
	// column 0 of the backing database in a single transaction.
	let mut transaction = db.transaction();
	for (key, value) in overlay.into_iter() {
		match value {
			Some(value) => transaction.put(0, &key[..], &value[..]),
			None => transaction.delete(0, &key[..]),
		}
	}
	db.write(transaction).expect("Failed to write transaction");

	root
}
+134
View File
@@ -0,0 +1,134 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Block import benchmark.
//!
//! This benchmark is expected to measure block import operation of
//! some more or less full block.
//!
//! As we also want to protect against cold-cache attacks, this
//! benchmark should not rely on any caching (except those that
//! DO NOT depend on user input). Thus block generation should be
//! based on randomized operation.
//!
//! This is supposed to be very simple benchmark and is not subject
//! to much configuring - just block full of randomized transactions.
//! It is not supposed to measure runtime modules weight correctness
use std::borrow::Cow;
use node_primitives::Block;
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
use sc_client_api::backend::Backend;
use crate::{
common::SizeType,
core::{self, Mode, Path},
};
/// Parameters describing one block-import benchmark instance.
pub struct ImportBenchmarkDescription {
	/// Signature scheme used for the generated accounts.
	pub key_types: KeyTypes,
	/// Kind of extrinsics the imported block is filled with.
	pub block_type: BlockType,
	/// How many transactions the block contains (see `SizeType`).
	pub size: SizeType,
	/// Database backend to benchmark against.
	pub database_type: DatabaseType,
}

/// A prepared import benchmark: a seeded database plus the pre-generated
/// block that will be imported during the measured run.
pub struct ImportBenchmark {
	database: BenchDb,
	block: Block,
}
impl core::BenchmarkDescription for ImportBenchmarkDescription {
	/// Benchmark path of the form `::node::import::<keys>::<block>::<db>::<size>`.
	fn path(&self) -> Path {
		let mut p = Path::new(&["node", "import"]);
		p.push(match self.key_types {
			KeyTypes::Sr25519 => "sr25519",
			KeyTypes::Ed25519 => "ed25519",
		});
		p.push(match self.block_type {
			BlockType::RandomTransfersKeepAlive => "transfer_keep_alive",
			BlockType::RandomTransfersReaping => "transfer_reaping",
			BlockType::Noop => "noop",
		});
		p.push(match self.database_type {
			DatabaseType::RocksDb => "rocksdb",
			DatabaseType::ParityDb => "paritydb",
		});
		p.push(&self.size.to_string());
		p
	}

	/// Seed a fresh 50k-key database and pre-generate the block to import,
	/// keeping all of this out of the measured section.
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut seed_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
		let content = self.block_type.to_content(self.size.transactions());
		let block = seed_db.generate_block(content);
		Box::new(ImportBenchmark { database: seed_db, block })
	}

	fn name(&self) -> Cow<'static, str> {
		format!(
			"Block import ({:?}/{}, {:?} backend)",
			self.block_type, self.size, self.database_type,
		)
		.into()
	}
}
impl core::Benchmark for ImportBenchmark {
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let mut context = self.database.create_context();

		// Query the runtime version once before the timer starts so that this
		// one-off lookup (and whatever caching it triggers) is not part of
		// the measured section.
		let _ = context
			.client
			.runtime_version_at(context.client.chain_info().genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;

		if mode == Mode::Profile {
			// Quiet window so an attached profiler can delimit the run.
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		// Measured section: importing the pre-generated block.
		let start = std::time::Instant::now();
		context.import_block(self.block.clone());
		let elapsed = start.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		log::info!(
			target: "bench-logistics",
			"imported block with {} tx, took: {:#?}",
			self.block.extrinsics.len(),
			elapsed,
		);

		// NOTE(review): the expect message assumes RocksDB, but this
		// benchmark also runs with the ParityDb backend — confirm that
		// `usage_info()` is available there too, otherwise this panics.
		log::info!(
			target: "bench-logistics",
			"usage info: {}",
			context.backend.usage_info()
				.expect("RocksDB backend always provides usage info!"),
		);

		elapsed
	}
}
+186
View File
@@ -0,0 +1,186 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
mod common;
mod construct;
#[macro_use]
mod core;
mod generator;
mod import;
mod simple_trie;
mod state_sizes;
mod tempdb;
mod trie;
mod txpool;
use clap::Parser;
use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes};
use crate::{
common::SizeType,
construct::ConstructionBenchmarkDescription,
core::{run_benchmark, Mode as BenchmarkMode},
import::ImportBenchmarkDescription,
tempdb::DatabaseType,
trie::{DatabaseSize, TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription},
txpool::PoolBenchmarkDescription,
};
// Command-line options for `node-bench`.
//
// NB: the `///` doc comments on the fields below are rendered by clap as the
// `--help` output, i.e. they are user-visible strings — do not edit them
// casually. (Regular `//` comments like this one are invisible to clap.)
#[derive(Debug, Parser)]
#[command(name = "node-bench", about = "Node integration benchmarks")]
struct Opt {
	/// Show list of all available benchmarks.
	///
	/// Will output ("name", "path"). Benchmarks can then be filtered by path.
	#[arg(short, long)]
	list: bool,

	/// Machine readable json output.
	///
	/// This also suppresses all regular output (except to stderr)
	#[arg(short, long)]
	json: bool,

	// Positional argument (no #[arg] attribute): substring-matched against
	// benchmark paths.
	/// Filter benchmarks.
	///
	/// Run with `--list` for the hint of what to filter.
	filter: Option<String>,

	/// Number of transactions for block import with `custom` size.
	#[arg(long)]
	transactions: Option<usize>,

	// Parsed via `impl FromStr for Mode`.
	/// Mode
	///
	/// "regular" for regular benchmark
	///
	/// "profile" mode adds pauses between measurable runs,
	/// so that actual interval can be selected in the profiler of choice.
	#[arg(short, long, default_value = "regular")]
	mode: BenchmarkMode,
}
fn main() {
	let opt = Opt::parse();

	// JSON mode keeps stdout machine-readable, so skip logger init there.
	if !opt.json {
		sp_tracing::try_init_simple();
	}

	// Pre-compute the (size x block-type x backend) cross product for the
	// import benchmarks; `Custom` falls back to 0 transactions when
	// `--transactions` was not given.
	let mut import_benchmarks = Vec::new();
	for size in [
		SizeType::Empty,
		SizeType::Small,
		SizeType::Medium,
		SizeType::Large,
		SizeType::Full,
		SizeType::Custom(opt.transactions.unwrap_or(0)),
	] {
		for block_type in [
			BlockType::RandomTransfersKeepAlive,
			BlockType::RandomTransfersReaping,
			BlockType::Noop,
		] {
			for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] {
				import_benchmarks.push((size, block_type, database_type));
			}
		}
	}

	// Full benchmark list: block imports, trie reads/writes over the
	// (database-size x backend) matrix, two block-construction benches and
	// one tx-pool bench. See the `matrix!` macro in `core`.
	let benchmarks = matrix!(
		(size, block_type, database_type) in import_benchmarks.into_iter() =>
		ImportBenchmarkDescription {
			key_types: KeyTypes::Sr25519,
			size,
			block_type,
			database_type,
		},
		(size, db_type) in
		[
			DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
			DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
		]
		.iter().flat_map(|size|
		[
			DatabaseType::RocksDb, DatabaseType::ParityDb
		]
		.iter().map(move |db_type| (size, db_type)))
		=> TrieReadBenchmarkDescription { database_size: *size, database_type: *db_type },
		(size, db_type) in
		[
			DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
			DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
		]
		.iter().flat_map(|size|
		[
			DatabaseType::RocksDb, DatabaseType::ParityDb
		]
		.iter().map(move |db_type| (size, db_type)))
		=> TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type },
		ConstructionBenchmarkDescription {
			key_types: KeyTypes::Sr25519,
			block_type: BlockType::RandomTransfersKeepAlive,
			size: SizeType::Medium,
			database_type: BenchDataBaseType::RocksDb,
		},
		ConstructionBenchmarkDescription {
			key_types: KeyTypes::Sr25519,
			block_type: BlockType::RandomTransfersKeepAlive,
			size: SizeType::Large,
			database_type: BenchDataBaseType::RocksDb,
		},
		PoolBenchmarkDescription { database_type: BenchDataBaseType::RocksDb },
	);

	// `--list`: print matching benchmarks and exit without running anything.
	if opt.list {
		println!("Available benchmarks:");
		if let Some(filter) = opt.filter.as_ref() {
			println!("\t(filtered by \"{}\")", filter);
		}
		for benchmark in benchmarks.iter() {
			if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) {
				println!("{}: {}", benchmark.name(), benchmark.path().full())
			}
		}
		return;
	}

	// Run every benchmark whose path matches the optional filter.
	let mut results = Vec::new();
	for benchmark in benchmarks {
		if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) {
			log::info!("Starting {}", benchmark.name());
			let result = run_benchmark(benchmark, opt.mode);
			log::info!("{}", result);

			results.push(result);
		}
	}

	// A filter that matches nothing is treated as a usage error.
	if results.is_empty() {
		eprintln!("No benchmark was found for query");
		std::process::exit(1);
	}

	if opt.json {
		let json_result: String =
			serde_json::to_string(&results).expect("Failed to construct json");
		println!("{}", json_result);
	}
}
@@ -0,0 +1,72 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use std::{collections::HashMap, sync::Arc};
use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix};
use kvdb::KeyValueDB;
use node_primitives::Hash;
use sp_trie::DBValue;
/// Hash function used by the benchmark tries (Blake2).
pub type Hasher = sp_core::Blake2Hasher;

/// Trie node storage: a key-value database combined with a mutable
/// in-memory overlay that collects pending writes and deletions.
/// (The previous doc called this "immutable", but the overlay is written
/// through the `HashDB` impl below.)
pub struct SimpleTrie<'a> {
	// Backing database; only read from here, never written directly.
	pub db: Arc<dyn KeyValueDB>,
	// Pending changes: `Some(value)` = insert/update, `None` = deletion.
	pub overlay: &'a mut HashMap<Vec<u8>, Option<Vec<u8>>>,
}
// Boilerplate up-casts required by the trie crate.
impl<'a> AsHashDB<Hasher, DBValue> for SimpleTrie<'a> {
	fn as_hash_db(&self) -> &dyn hash_db::HashDB<Hasher, DBValue> {
		self
	}

	fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB<Hasher, DBValue> + 'b) {
		&mut *self
	}
}
impl<'a> HashDB<Hasher, DBValue> for SimpleTrie<'a> {
	/// Look up a node: the overlay takes precedence (including deletion
	/// markers), with the backing database (column 0) as the fallback.
	fn get(&self, key: &Hash, prefix: Prefix) -> Option<DBValue> {
		let prefixed = sp_trie::prefixed_key::<Hasher>(key, prefix);
		match self.overlay.get(&prefixed) {
			Some(pending) => pending.clone(),
			None => self.db.get(0, &prefixed).expect("Database backend error"),
		}
	}

	fn contains(&self, hash: &Hash, prefix: Prefix) -> bool {
		self.get(hash, prefix).is_some()
	}

	/// Hash the value and record it in the overlay.
	fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash {
		let hash = Hasher::hash(value);
		self.emplace(hash, prefix, value.to_vec());
		hash
	}

	fn emplace(&mut self, key: Hash, prefix: Prefix, value: DBValue) {
		let prefixed = sp_trie::prefixed_key::<Hasher>(&key, prefix);
		self.overlay.insert(prefixed, Some(value));
	}

	/// Deletions are recorded as `None` tombstones in the overlay.
	fn remove(&mut self, key: &Hash, prefix: Prefix) {
		let prefixed = sp_trie::prefixed_key::<Hasher>(key, prefix);
		self.overlay.insert(prefixed, None);
	}
}
File diff suppressed because it is too large Load Diff
+123
View File
@@ -0,0 +1,123 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use kvdb::{DBKeyValue, DBTransaction, KeyValueDB};
use kvdb_rocksdb::{Database, DatabaseConfig};
use std::{io, path::PathBuf, sync::Arc};
/// Database backend to benchmark against.
#[derive(Clone, Copy, Debug)]
pub enum DatabaseType {
	RocksDb,
	ParityDb,
}

/// Handle to a temporary directory holding a benchmark database; the
/// directory is deleted when this value is dropped.
pub struct TempDatabase(tempfile::TempDir);

/// Adapter exposing `parity_db::Db` through the `KeyValueDB` interface.
struct ParityDbWrapper(parity_db::Db);
// Only the methods the benchmarks actually hit are implemented; iteration
// and prefix queries panic if reached.
//
// NOTE(review): all key accesses use only the trailing 32 bytes
// (`key[key.len() - 32..]`) — presumably the keys are prefixed hashes and
// the prefix is redundant for parity-db. Panics on keys shorter than
// 32 bytes; confirm callers never pass such keys.
impl KeyValueDB for ParityDbWrapper {
	/// Get a value by key.
	fn get(&self, col: u32, key: &[u8]) -> io::Result<Option<Vec<u8>>> {
		Ok(self.0.get(col as u8, &key[key.len() - 32..]).expect("db error"))
	}

	/// Get a value by partial key. Only works for flushed data.
	fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result<Option<Vec<u8>>> {
		unimplemented!()
	}

	/// Write a transaction of changes to the buffer.
	fn write(&self, transaction: DBTransaction) -> io::Result<()> {
		self.0
			.commit(transaction.ops.iter().map(|op| match op {
				kvdb::DBOp::Insert { col, key, value } =>
					(*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
				kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None),
				kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(),
			}))
			.expect("db error");
		Ok(())
	}

	/// Iterate over flushed data for a given column.
	fn iter<'a>(&'a self, _col: u32) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a> {
		unimplemented!()
	}

	/// Iterate over flushed data for a given column, starting from a given prefix.
	fn iter_with_prefix<'a>(
		&'a self,
		_col: u32,
		_prefix: &'a [u8],
	) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a> {
		unimplemented!()
	}
}
impl TempDatabase {
pub fn new() -> Self {
let dir = tempfile::tempdir().expect("temp dir creation failed");
log::trace!(
target: "bench-logistics",
"Created temp db at {}",
dir.path().to_string_lossy(),
);
TempDatabase(dir)
}
pub fn open(&mut self, db_type: DatabaseType) -> Arc<dyn KeyValueDB> {
match db_type {
DatabaseType::RocksDb => {
let db_cfg = DatabaseConfig::with_columns(1);
let db = Database::open(&db_cfg, &self.0.path()).expect("Database backend error");
Arc::new(db)
},
DatabaseType::ParityDb => Arc::new(ParityDbWrapper({
let mut options = parity_db::Options::with_columns(self.0.path(), 1);
let column_options = &mut options.columns[0];
column_options.ref_counted = true;
column_options.preimage = true;
column_options.uniform = true;
parity_db::Db::open_or_create(&options).expect("db open error")
})),
}
}
}
// Cloning physically copies the seed database files into a brand-new temp
// directory, so each benchmark run gets its own independent database.
impl Clone for TempDatabase {
	fn clone(&self) -> Self {
		let new_dir = tempfile::tempdir().expect("temp dir creation failed");
		let self_dir = self.0.path();
		log::trace!(
			target: "bench-logistics",
			"Cloning db ({}) to {}",
			self_dir.to_string_lossy(),
			new_dir.path().to_string_lossy(),
		);
		// Copy top-level entries of the seed dir into the new one.
		let self_db_files = std::fs::read_dir(self_dir)
			.expect("failed to list file in seed dir")
			.map(|f_result| f_result.expect("failed to read file in seed db").path())
			.collect::<Vec<PathBuf>>();
		fs_extra::copy_items(&self_db_files, new_dir.path(), &fs_extra::dir::CopyOptions::new())
			.expect("Copy of seed database is ok");
		TempDatabase(new_dir)
	}
}
+371
View File
@@ -0,0 +1,371 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Trie benchmark (integrated).
use hash_db::Prefix;
use kvdb::KeyValueDB;
use rand::Rng;
use sp_state_machine::Backend as _;
use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _};
use std::{
borrow::Cow,
collections::HashMap,
sync::{Arc, LazyLock},
};
use node_primitives::Hash;
use crate::{
core::{self, Mode, Path},
generator::generate_trie,
simple_trie::SimpleTrie,
tempdb::{DatabaseType, TempDatabase},
};
/// Number of warmup and query keys sampled per run; every `DatabaseSize::keys`
/// value must be an exact multiple of this.
pub const SAMPLE_SIZE: usize = 100;
/// Byte length of the value inserted by the trie write benchmark.
pub const TEST_WRITE_SIZE: usize = 128;
/// A single raw (key, value) byte pair.
pub type KeyValue = (Vec<u8>, Vec<u8>);
/// An ordered list of key/value pairs.
pub type KeyValues = Vec<KeyValue>;
/// Logical size class of the benchmark database; see `keys` for the concrete
/// key counts. The `display` strings feed into benchmark paths/names.
#[derive(Clone, Copy, Debug, derive_more::Display)]
pub enum DatabaseSize {
	/// Minimal population (still non-zero so reads have something to query).
	#[display(fmt = "empty")]
	Empty,
	#[display(fmt = "smallest")]
	Smallest,
	#[display(fmt = "small")]
	Small,
	#[display(fmt = "medium")]
	Medium,
	#[display(fmt = "large")]
	Large,
	#[display(fmt = "huge")]
	Huge,
}
// Lazily-built sampler over the recorded Kusama state value-size histogram,
// used to generate realistic key/value workloads.
static KUSAMA_STATE_DISTRIBUTION: LazyLock<SizePool> =
	LazyLock::new(|| SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION));
impl DatabaseSize {
	/// Number of keys to populate for this size class.
	///
	/// Guaranteed (asserted) to be an exact multiple of `SAMPLE_SIZE`, since
	/// the benchmarks pick every `keys / SAMPLE_SIZE`-th entry as a sample.
	fn keys(&self) -> usize {
		let count = match self {
			Self::Empty => 200, // still need some keys to query
			Self::Smallest => 1_000,
			Self::Small => 10_000,
			Self::Medium => 100_000,
			Self::Large => 200_000,
			Self::Huge => 1_000_000,
		};
		assert_eq!(count % SAMPLE_SIZE, 0);
		count
	}
}
/// Render `v` with thousands separators, e.g. `1234567` -> `"1,234,567"`.
fn pretty_print(v: usize) -> String {
	let digits: Vec<char> = v.to_string().chars().collect();
	let mut out = String::with_capacity(digits.len() + digits.len() / 3);
	for (i, ch) in digits.iter().enumerate() {
		// A comma goes before every group of three digits counted from the end.
		if i > 0 && (digits.len() - i) % 3 == 0 {
			out.push(',');
		}
		out.push(*ch);
	}
	out
}
/// Parameters of a trie read benchmark: how big the backing database is and
/// which storage backend (RocksDb / ParityDb) to use.
pub struct TrieReadBenchmarkDescription {
	pub database_size: DatabaseSize,
	pub database_type: DatabaseType,
}
/// Prepared state of a trie read benchmark (built by `setup`).
pub struct TrieReadBenchmark {
	// Seed database; file-copied via `Clone` for every run.
	database: TempDatabase,
	// State root of the generated trie.
	root: Hash,
	// Keys read (and value-checked) before timing starts.
	warmup_keys: KeyValues,
	// Keys whose lookups are actually timed.
	query_keys: KeyValues,
	database_type: DatabaseType,
}
impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
	/// Benchmark path: `trie::read::<database size>`.
	fn path(&self) -> Path {
		let mut path = Path::new(&["trie", "read"]);
		path.push(&format!("{}", self.database_size));
		path
	}
	/// Populate a fresh temp database with keys/values drawn from the Kusama
	/// state-size distribution, then pick two disjoint `SAMPLE_SIZE` samples:
	/// warmup keys (sharing a 16-byte prefix, so they cluster in one subtree
	/// with high probability) and query keys (the ones that get timed).
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut database = TempDatabase::new();
		let mut rng = rand::thread_rng();
		let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng);
		let mut key_values = KeyValues::new();
		let mut warmup_keys = KeyValues::new();
		let mut query_keys = KeyValues::new();
		let every_x_key = self.database_size.keys() / SAMPLE_SIZE;
		for idx in 0..self.database_size.keys() {
			let kv = (
				KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(),
				KUSAMA_STATE_DISTRIBUTION.value(&mut rng),
			);
			if idx % every_x_key == 0 {
				// warmup keys go to separate tree with high prob
				let mut actual_warmup_key = warmup_prefix.clone();
				// Keys are 32 bytes (see `SizePool::key`): keep the shared
				// 16-byte prefix, randomize the tail.
				actual_warmup_key[16..].copy_from_slice(&kv.0[16..]);
				warmup_keys.push((actual_warmup_key.clone(), kv.1.clone()));
				key_values.push((actual_warmup_key.clone(), kv.1.clone()));
			} else if idx % every_x_key == 1 {
				query_keys.push(kv.clone());
			}
			// NOTE: the sampled kv is inserted unconditionally, so on warmup
			// iterations BOTH the warmup key and the original key land in the
			// trie — the database ends up with keys() + SAMPLE_SIZE entries.
			key_values.push(kv)
		}
		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);
		assert_eq!(query_keys.len(), SAMPLE_SIZE);
		let root = generate_trie(database.open(self.database_type), key_values);
		Box::new(TrieReadBenchmark {
			database,
			root,
			warmup_keys,
			query_keys,
			database_type: self.database_type,
		})
	}
	fn name(&self) -> Cow<'static, str> {
		format!(
			"Trie read benchmark({:?} database ({} keys), db_type: {:?})",
			self.database_size,
			pretty_print(self.database_size.keys()),
			self.database_type,
		)
		.into()
	}
}
/// Adapter exposing a `KeyValueDB` (column 0) as trie-node storage.
struct Storage(Arc<dyn KeyValueDB>);
impl sp_state_machine::Storage<sp_core::Blake2Hasher> for Storage {
	/// Look up a trie node by its (prefixed) hash in column 0 of the wrapped DB.
	fn get(&self, key: &Hash, prefix: Prefix) -> Result<Option<Vec<u8>>, String> {
		let db_key = sp_trie::prefixed_key::<sp_core::Blake2Hasher>(key, prefix);
		match self.0.get(0, &db_key) {
			Ok(value) => Ok(value),
			Err(e) => Err(format!("Database backend error: {:?}", e)),
		}
	}
}
impl core::Benchmark for TrieReadBenchmark {
	/// Time `SAMPLE_SIZE` single-key reads against a trie backend and return
	/// the average duration of one read.
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		// Work on a file-level copy so repeated runs don't disturb each other.
		let mut db = self.database.clone();
		let storage: Arc<dyn sp_state_machine::Storage<sp_core::Blake2Hasher>> =
			Arc::new(Storage(db.open(self.database_type)));
		let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build();
		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
			let value = trie_backend
				.storage(&warmup_key[..])
				.expect("Failed to get key: db error")
				.expect("Warmup key should exist");
			// sanity for warmup keys
			assert_eq!(&value, warmup_value);
		}
		if mode == Mode::Profile {
			// Window for attaching an external profiler before the hot loop.
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}
		let started = std::time::Instant::now();
		for (key, _) in self.query_keys.iter() {
			// Result is deliberately discarded — only lookup time is measured.
			let _ = trie_backend.storage(&key[..]);
		}
		let elapsed = started.elapsed();
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}
		// Average per-read duration.
		elapsed / (SAMPLE_SIZE as u32)
	}
}
/// Parameters of a trie write benchmark: database size class and backend.
pub struct TrieWriteBenchmarkDescription {
	pub database_size: DatabaseSize,
	pub database_type: DatabaseType,
}
impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
	/// Benchmark path: `trie::write::<database size>`.
	fn path(&self) -> Path {
		let mut path = Path::new(&["trie", "write"]);
		path.push(&format!("{}", self.database_size));
		path
	}
	/// Populate a fresh temp database like the read benchmark does, but keep
	/// only the warmup sample (writes are timed against a fresh random key,
	/// so no query sample is needed).
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut database = TempDatabase::new();
		let mut rng = rand::thread_rng();
		let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng);
		let mut key_values = KeyValues::new();
		let mut warmup_keys = KeyValues::new();
		let every_x_key = self.database_size.keys() / SAMPLE_SIZE;
		for idx in 0..self.database_size.keys() {
			let kv = (
				KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(),
				KUSAMA_STATE_DISTRIBUTION.value(&mut rng),
			);
			if idx % every_x_key == 0 {
				// warmup keys go to separate tree with high prob
				let mut actual_warmup_key = warmup_prefix.clone();
				// Keys are 32 bytes; keep the shared prefix, randomize the tail.
				actual_warmup_key[16..].copy_from_slice(&kv.0[16..]);
				warmup_keys.push((actual_warmup_key.clone(), kv.1.clone()));
				key_values.push((actual_warmup_key.clone(), kv.1.clone()));
			}
			key_values.push(kv)
		}
		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);
		let root = generate_trie(database.open(self.database_type), key_values);
		Box::new(TrieWriteBenchmark {
			database,
			root,
			warmup_keys,
			database_type: self.database_type,
		})
	}
	fn name(&self) -> Cow<'static, str> {
		format!(
			"Trie write benchmark({:?} database ({} keys), db_type = {:?})",
			self.database_size,
			pretty_print(self.database_size.keys()),
			self.database_type,
		)
		.into()
	}
}
/// Prepared state of a trie write benchmark (built by `setup`).
struct TrieWriteBenchmark {
	// Seed database; file-copied via `Clone` for every run.
	database: TempDatabase,
	// State root of the generated trie before the timed insert.
	root: Hash,
	// Keys read (and value-checked) before timing starts.
	warmup_keys: KeyValues,
	database_type: DatabaseType,
}
impl core::Benchmark for TrieWriteBenchmark {
	/// Time a single-key insert: trie mutation + commit + flushing the overlay
	/// to the key-value backend, all inside the measured window.
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let mut rng = rand::thread_rng();
		// Work on a file-level copy so repeated runs don't disturb each other.
		let mut db = self.database.clone();
		let kvdb = db.open(self.database_type);
		let mut new_root = self.root;
		let mut overlay = HashMap::new();
		let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay };
		let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build();
		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
			let value = trie_db_mut
				.get(&warmup_key[..])
				.expect("Failed to get key: db error")
				.expect("Warmup key should exist");
			// sanity for warmup keys
			assert_eq!(&value, warmup_value);
		}
		let test_key = random_vec(&mut rng, 32);
		let test_val = random_vec(&mut rng, TEST_WRITE_SIZE);
		if mode == Mode::Profile {
			// Window for attaching an external profiler.
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}
		let started = std::time::Instant::now();
		trie_db_mut.insert(&test_key, &test_val).expect("Should be inserted ok");
		// Commit recomputes `new_root`; dropping releases the borrow of
		// `overlay` so it can be drained below.
		trie_db_mut.commit();
		drop(trie_db_mut);
		let mut transaction = kvdb.transaction();
		for (key, value) in overlay.into_iter() {
			match value {
				Some(value) => transaction.put(0, &key[..], &value[..]),
				None => transaction.delete(0, &key[..]),
			}
		}
		kvdb.write(transaction).expect("Failed to write transaction");
		let elapsed = started.elapsed();
		// sanity check
		assert!(new_root != self.root);
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}
		elapsed
	}
}
/// Produce `len` uniformly random bytes from `rng`.
fn random_vec<R: Rng>(rng: &mut R, len: usize) -> Vec<u8> {
	let mut buf = Vec::new();
	buf.resize(len, 0u8);
	rng.fill_bytes(&mut buf);
	buf
}
/// Samples value sizes proportionally to an empirical histogram.
struct SizePool {
	// Running cumulative count -> value size (built in `from_histogram`).
	distribution: std::collections::BTreeMap<u32, u32>,
	// Sum of all histogram counts (upper bound for sampling).
	total: u32,
}
impl SizePool {
	/// Build a pool from `(size, count)` histogram entries.
	///
	/// The map is keyed by the running cumulative count, so a uniform draw in
	/// `[0, total)` selects each size with probability `count / total`.
	fn from_histogram(h: &[(u32, u32)]) -> SizePool {
		let mut distribution = std::collections::BTreeMap::default();
		let mut total = 0;
		for (size, count) in h {
			total += count;
			distribution.insert(total, *size);
		}
		SizePool { distribution, total }
	}
	/// Draw a random byte vector whose length follows the histogram.
	/// NOTE(review): `%` introduces slight modulo bias — presumably acceptable
	/// for benchmark data generation. Panics if the pool is empty (total == 0).
	fn value<R: Rng>(&self, rng: &mut R) -> Vec<u8> {
		let sr = (rng.next_u64() % self.total as u64) as u32;
		// First cumulative bucket at-or-above `sr` determines the size.
		let mut range = self
			.distribution
			.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded));
		let size = *range.next().unwrap().1 as usize;
		random_vec(rng, size)
	}
	/// Random fixed-width 32-byte key (the distribution is not used for keys).
	fn key<R: Rng>(&self, rng: &mut R) -> Vec<u8> {
		random_vec(rng, 32)
	}
}
+102
View File
@@ -0,0 +1,102 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Transaction pool integrated benchmarks.
//!
//! The goal of this benchmark is to figure out time needed to fill
//! the transaction pool for the next block.
use std::borrow::Cow;
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
use sc_transaction_pool::BasicPool;
use sc_transaction_pool_api::{TransactionPool, TransactionSource};
use crate::core::{self, Mode, Path};
/// Parameters of the transaction-pool benchmark: which backend to use.
pub struct PoolBenchmarkDescription {
	pub database_type: DatabaseType,
}
/// Prepared state of the transaction-pool benchmark.
pub struct PoolBenchmark {
	// Bench database pre-populated with 50k sr25519 keys (see `setup`).
	database: BenchDb,
}
impl core::BenchmarkDescription for PoolBenchmarkDescription {
	/// Benchmark path: `node::txpool`.
	fn path(&self) -> Path {
		Path::new(&["node", "txpool"])
	}

	/// Prepare a bench database with 50k sr25519 accounts to draw
	/// transactions from.
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let database = BenchDb::with_key_types(self.database_type, 50_000, KeyTypes::Sr25519);
		Box::new(PoolBenchmark { database })
	}

	fn name(&self) -> Cow<'static, str> {
		Cow::Borrowed("Transaction pool benchmark")
	}
}
impl core::Benchmark for PoolBenchmark {
	/// Measure how long it takes to submit 100 generated transfer extrinsics
	/// into a freshly created basic transaction pool.
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let context = self.database.create_context();
		let genesis_hash = context.client.chain_info().genesis_hash;
		// Touch the runtime version once so the runtime cache is warm before
		// the timed section; the value itself is discarded.
		let _ = context
			.client
			.runtime_version_at(genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;
		if mode == Mode::Profile {
			// Window for attaching an external profiler.
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}
		let executor = sp_core::testing::TaskExecutor::new();
		let txpool = BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			executor,
			context.client.clone(),
		);
		let generated_transactions = self
			.database
			.block_content(
				BlockType::RandomTransfersKeepAlive.to_content(Some(100)),
				&context.client,
			)
			.into_iter()
			.collect::<Vec<_>>();
		// Only the submission of all transactions is timed.
		let start = std::time::Instant::now();
		let submissions = generated_transactions
			.into_iter()
			.map(|tx| txpool.submit_one(genesis_hash, TransactionSource::External, tx));
		futures::executor::block_on(futures::future::join_all(submissions));
		let elapsed = start.elapsed();
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}
		elapsed
	}
}
+213
View File
@@ -0,0 +1,213 @@
[package]
name = "staging-node-cli"
version = "3.0.0-dev"
authors.workspace = true
description = "Generic Substrate node implementation in Rust."
build = "build.rs"
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
default-run = "substrate-node"
homepage.workspace = true
repository.workspace = true
publish = false
[lints]
workspace = true
[package.metadata.wasm-pack.profile.release]
# `wasm-opt` has some problems on linux, see
# https://github.com/rustwasm/wasm-pack/issues/781 etc.
wasm-opt = false
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[badges]
maintenance = { status = "actively-developed" }
is-it-maintained-issue-resolution = { repository = "pezkuwichain/pezkuwi-sdk" }
is-it-maintained-open-issues = { repository = "pezkuwichain/pezkuwi-sdk" }
[lib]
crate-type = ["cdylib", "rlib"]
[[bin]]
name = "substrate-node"
path = "bin/main.rs"
required-features = ["cli"]
[[bench]]
name = "transaction_pool"
harness = false
[[bench]]
name = "block_production"
harness = false
[[bench]]
name = "executor"
harness = false
[dependencies]
# third-party dependencies
array-bytes = { workspace = true, default-features = true }
clap = { features = ["derive"], optional = true, workspace = true }
codec = { workspace = true, default-features = true }
futures = { workspace = true }
jsonrpsee = { features = ["server"], workspace = true }
log = { workspace = true, default-features = true }
rand = { workspace = true, default-features = true }
serde = { features = ["derive"], workspace = true, default-features = true }
serde_json = { workspace = true, default-features = true }
subxt-signer = { workspace = true, features = ["unstable-eth"] }
# The Pezkuwi-SDK:
pezkuwi-sdk = { features = [
"fork-tree",
"frame-benchmarking-cli",
"frame-remote-externalities",
"frame-support-procedural-tools",
"generate-bags",
"mmr-gadget",
"mmr-rpc",
"pallet-transaction-payment-rpc",
"sc-allocator",
"sc-authority-discovery",
"sc-basic-authorship",
"sc-block-builder",
"sc-chain-spec",
"sc-cli",
"sc-client-api",
"sc-client-db",
"sc-consensus",
"sc-consensus-aura",
"sc-consensus-babe",
"sc-consensus-babe-rpc",
"sc-consensus-beefy",
"sc-consensus-beefy-rpc",
"sc-consensus-epochs",
"sc-consensus-grandpa",
"sc-consensus-grandpa-rpc",
"sc-consensus-manual-seal",
"sc-consensus-pow",
"sc-consensus-slots",
"sc-executor",
"sc-executor-common",
"sc-executor-polkavm",
"sc-executor-wasmtime",
"sc-informant",
"sc-keystore",
"sc-mixnet",
"sc-network",
"sc-network-common",
"sc-network-gossip",
"sc-network-light",
"sc-network-statement",
"sc-network-sync",
"sc-network-transactions",
"sc-network-types",
"sc-offchain",
"sc-proposer-metrics",
"sc-rpc",
"sc-rpc-api",
"sc-rpc-server",
"sc-rpc-spec-v2",
"sc-service",
"sc-state-db",
"sc-statement-store",
"sc-storage-monitor",
"sc-sync-state-rpc",
"sc-sysinfo",
"sc-telemetry",
"sc-tracing",
"sc-transaction-pool",
"sc-transaction-pool-api",
"sc-utils",
"sp-blockchain",
"sp-consensus",
"sp-core-hashing",
"sp-core-hashing-proc-macro",
"sp-database",
"sp-maybe-compressed-blob",
"sp-panic-handler",
"sp-rpc",
"staging-chain-spec-builder",
"staging-node-inspect",
"staging-tracking-allocator",
"std",
"subkey",
"substrate-build-script-utils",
"substrate-frame-rpc-support",
"substrate-frame-rpc-system",
"substrate-prometheus-endpoint",
"substrate-rpc-client",
"substrate-state-trie-migration-rpc",
"substrate-wasm-builder",
"tracing-gum",
], workspace = true, default-features = true }
# Shared code between the staging node and kitchensink runtime:
kitchensink-runtime = { workspace = true }
node-inspect = { optional = true, workspace = true, default-features = true }
node-primitives = { workspace = true, default-features = true }
node-rpc = { workspace = true }
[dev-dependencies]
assert_cmd = { workspace = true }
criterion = { features = [
"async_tokio",
], workspace = true, default-features = true }
nix = { features = ["signal"], workspace = true }
pretty_assertions.workspace = true
regex = { workspace = true }
scale-info = { features = [
"derive",
"serde",
], workspace = true, default-features = true }
soketto = { workspace = true }
sp-keyring = { workspace = true }
tempfile = { workspace = true }
tokio = { features = [
"macros",
"parking_lot",
"time",
], workspace = true, default-features = true }
tokio-util = { features = ["compat"], workspace = true }
wat = { workspace = true }
# These testing-only dependencies are not exported by the Pezkuwi-SDK crate:
node-testing = { workspace = true }
sc-service-test = { workspace = true }
substrate-cli-test-utils = { workspace = true }
[build-dependencies]
clap = { optional = true, workspace = true }
clap_complete = { optional = true, workspace = true }
node-inspect = { optional = true, workspace = true, default-features = true }
pezkuwi-sdk = { features = [
"frame-benchmarking-cli",
"sc-cli",
"sc-storage-monitor",
"substrate-build-script-utils",
], optional = true, workspace = true, default-features = true }
[features]
default = ["cli"]
cli = ["clap", "clap_complete", "node-inspect", "pezkuwi-sdk"]
runtime-benchmarks = [
"kitchensink-runtime/runtime-benchmarks",
"node-inspect?/runtime-benchmarks",
"node-primitives/runtime-benchmarks",
"node-rpc/runtime-benchmarks",
"node-testing/runtime-benchmarks",
"pezkuwi-sdk/runtime-benchmarks",
"sc-service-test/runtime-benchmarks",
"sp-keyring/runtime-benchmarks",
"substrate-cli-test-utils/runtime-benchmarks",
]
try-runtime = [
"kitchensink-runtime/try-runtime",
"pezkuwi-sdk/try-runtime",
"substrate-cli-test-utils/try-runtime",
]
@@ -0,0 +1,256 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use pezkuwi_sdk::*;
use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput};
use kitchensink_runtime::{constants::currency::*, BalancesCall};
use node_cli::service::{create_extrinsic, FullClient};
use pezkuwi_sdk::sc_service::config::{ExecutorConfiguration, RpcConfiguration};
use sc_block_builder::{BlockBuilderBuilder, BuiltBlock};
use sc_consensus::{
block_import::{BlockImportParams, ForkChoiceStrategy},
BlockImport, StateAction,
};
use sc_service::{
config::{
BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig,
PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, WasmtimeInstantiationStrategy,
},
BasePath, Configuration, Role,
};
use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed};
use sp_consensus::BlockOrigin;
use sp_keyring::Sr25519Keyring;
use sp_runtime::{
generic,
transaction_validity::{InvalidTransaction, TransactionValidityError},
AccountId32, MultiAddress, OpaqueExtrinsic,
};
use staging_node_cli as node_cli;
use tokio::runtime::Handle;
/// Spin up a full (non-authority) kitchensink node on a fresh temp directory,
/// suitable for driving block production by hand from the benchmark.
fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
	let base_path = BasePath::new_temp_dir()
		.expect("getting the base path of a temporary path doesn't fail; qed");
	let root = base_path.path().to_path_buf();
	let network_config = NetworkConfiguration::new(
		Sr25519Keyring::Alice.to_seed(),
		"network/test/0.1",
		Default::default(),
		None,
	);
	let spec = Box::new(node_cli::chain_spec::development_config());
	let config = Configuration {
		impl_name: "BenchmarkImpl".into(),
		impl_version: "1.0".into(),
		// We don't use the authority role since that would start producing blocks
		// in the background which would mess with our benchmark.
		role: Role::Full,
		tokio_handle,
		transaction_pool: Default::default(),
		network: network_config,
		keystore: KeystoreConfig::InMemory,
		database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
		trie_cache_maximum_size: Some(64 * 1024 * 1024),
		warm_up_trie_cache: None,
		// Archive pruning keeps all state so any parent block can be built on.
		state_pruning: Some(PruningMode::ArchiveAll),
		blocks_pruning: BlocksPruning::KeepAll,
		chain_spec: spec,
		executor: ExecutorConfiguration {
			// Compiled wasm with pooling/CoW instantiation: the fastest
			// execution setup, so executor overhead doesn't dominate.
			wasm_method: WasmExecutionMethod::Compiled {
				instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
			},
			..ExecutorConfiguration::default()
		},
		rpc: RpcConfiguration {
			addr: None,
			max_connections: Default::default(),
			cors: None,
			methods: Default::default(),
			max_request_size: Default::default(),
			max_response_size: Default::default(),
			id_provider: Default::default(),
			max_subs_per_conn: Default::default(),
			port: 9944,
			message_buffer_capacity: Default::default(),
			batch_config: RpcBatchRequestConfig::Unlimited,
			rate_limit: None,
			rate_limit_whitelisted_ips: Default::default(),
			rate_limit_trust_proxy_headers: Default::default(),
			request_logger_limit: 1024,
		},
		prometheus_config: None,
		telemetry_endpoints: None,
		offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false },
		force_authoring: false,
		disable_grandpa: false,
		dev_key_seed: Some(Sr25519Keyring::Alice.to_seed()),
		tracing_targets: None,
		tracing_receiver: Default::default(),
		announce_block: true,
		data_path: base_path.path().into(),
		base_path,
		wasm_runtime_overrides: None,
	};
	node_cli::service::new_full_base::<sc_network::NetworkWorker<_, _>>(
		config,
		None,
		false,
		|_, _| (),
	)
	.expect("creating a full node doesn't fail")
}
/// Build the (bare, unsigned) `Timestamp::set` inherent for time `now`,
/// encoded as an opaque extrinsic.
fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic {
	let call = kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now });
	let utx: kitchensink_runtime::UncheckedExtrinsic =
		generic::UncheckedExtrinsic::new_bare(call).into();
	utx.into()
}
/// Import a freshly built block into the client, applying its already-computed
/// storage changes (no re-execution) and extending the longest chain.
fn import_block(client: &FullClient, built: BuiltBlock<node_primitives::Block>) {
	let mut import_params = BlockImportParams::new(BlockOrigin::File, built.block.header);
	import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
	import_params.state_action =
		StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes));
	futures::executor::block_on(client.import_block(import_params))
		.expect("importing a block doesn't fail");
}
/// Determine how many Alice->Bob transfers fit in one block and pre-build that
/// list of extrinsics (timestamp inherent first) for reuse by the benchmark.
fn prepare_benchmark(client: &FullClient) -> (usize, Vec<OpaqueExtrinsic>) {
	const MINIMUM_PERIOD_FOR_BLOCKS: u64 = 1500;
	let mut max_transfer_count = 0;
	let mut extrinsics = Vec::new();
	let mut block_builder = BlockBuilderBuilder::new(client)
		.on_parent_block(client.chain_info().best_hash)
		.with_parent_block_number(client.chain_info().best_number)
		.build()
		.unwrap();
	// Every block needs one timestamp extrinsic.
	let extrinsic_set_time = extrinsic_set_time(1 + MINIMUM_PERIOD_FOR_BLOCKS);
	block_builder.push(extrinsic_set_time.clone()).unwrap();
	extrinsics.push(extrinsic_set_time);
	// Creating those is surprisingly costly, so let's only do it once and later just `clone` them.
	let src = Sr25519Keyring::Alice.pair();
	let dst: MultiAddress<AccountId32, u32> = Sr25519Keyring::Bob.to_account_id().into();
	// Add as many transfer extrinsics as possible into a single block.
	for nonce in 0.. {
		let extrinsic: OpaqueExtrinsic = create_extrinsic(
			client,
			src.clone(),
			BalancesCall::transfer_allow_death { dest: dst.clone(), value: 1 * DOLLARS },
			Some(nonce),
		)
		.into();
		match block_builder.push(extrinsic.clone()) {
			Ok(_) => {},
			// The block is full — this is the expected loop exit.
			Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid(
				InvalidTransaction::ExhaustsResources,
			)))) => break,
			Err(error) => panic!("{}", error),
		}
		extrinsics.push(extrinsic);
		max_transfer_count += 1;
	}
	(max_transfer_count, extrinsics)
}
/// Benchmark building a maximally-full block of transfers, once without and
/// once with storage-proof recording.
fn block_production(c: &mut Criterion) {
	sp_tracing::try_init_simple();
	let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed");
	let tokio_handle = runtime.handle().clone();
	let node = new_node(tokio_handle.clone());
	let client = &*node.client;
	// Building the very first block is around ~30x slower than any subsequent one,
	// so let's make sure it's built and imported before we benchmark anything.
	let mut block_builder = BlockBuilderBuilder::new(client)
		.on_parent_block(client.chain_info().best_hash)
		.with_parent_block_number(client.chain_info().best_number)
		.build()
		.unwrap();
	block_builder.push(extrinsic_set_time(1)).unwrap();
	import_block(client, block_builder.build().unwrap());
	let (max_transfer_count, extrinsics) = prepare_benchmark(&client);
	log::info!("Maximum transfer count: {}", max_transfer_count);
	let mut group = c.benchmark_group("Block production");
	group.sample_size(10);
	group.throughput(Throughput::Elements(max_transfer_count as u64));
	let chain = client.chain_info();
	let best_hash = chain.best_hash;
	let best_number = chain.best_number;
	group.bench_function(format!("{} transfers (no proof)", max_transfer_count), |b| {
		b.iter_batched(
			|| extrinsics.clone(),
			|extrinsics| {
				let mut block_builder = BlockBuilderBuilder::new(client)
					.on_parent_block(best_hash)
					.with_parent_block_number(best_number)
					.build()
					.unwrap();
				for extrinsic in extrinsics {
					block_builder.push(extrinsic).unwrap();
				}
				block_builder.build().unwrap()
			},
			BatchSize::SmallInput,
		)
	});
	group.bench_function(format!("{} transfers (with proof)", max_transfer_count), |b| {
		b.iter_batched(
			|| extrinsics.clone(),
			|extrinsics| {
				let mut block_builder = BlockBuilderBuilder::new(client)
					.on_parent_block(best_hash)
					.with_parent_block_number(best_number)
					// Fix: this variant previously built an identical builder
					// to the "(no proof)" case, so both benchmarks measured
					// the same thing. Enable storage-proof recording so the
					// "(with proof)" numbers actually include the proof cost.
					.enable_proof_recording()
					.build()
					.unwrap();
				for extrinsic in extrinsics {
					block_builder.push(extrinsic).unwrap();
				}
				block_builder.build().unwrap()
			},
			BatchSize::SmallInput,
		)
	});
}
// Register the block production benchmark with criterion's harness.
criterion_group!(benches, block_production);
criterion_main!(benches);
+206
View File
@@ -0,0 +1,206 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use pezkuwi_sdk::*;
use codec::{Decode, Encode};
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use frame_support::Hashable;
use kitchensink_runtime::{
constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, RuntimeCall,
RuntimeGenesisConfig, UncheckedExtrinsic,
};
use node_primitives::{BlockNumber, Hash};
use node_testing::keyring::*;
use sc_executor::{Externalities, RuntimeVersionOf};
use sp_core::{
storage::well_known_keys,
traits::{CallContext, CodeExecutor, RuntimeCode},
};
use sp_runtime::{generic::ExtrinsicFormat, traits::BlakeTwo256};
use sp_state_machine::TestExternalities as CoreTestExternalities;
use staging_node_cli::service::RuntimeExecutor;
// Register the block execution benchmark with criterion's harness.
criterion_group!(benches, bench_execute_block);
criterion_main!(benches);
/// The compact wasm runtime code blob of the kitchensink runtime.
///
/// Panics when the node was built without the development wasm binary
/// (e.g. with wasm building skipped).
pub fn compact_code_unwrap() -> &'static [u8] {
	kitchensink_runtime::WASM_BINARY.expect(
		"Development wasm binary is not available. Testing is only supported with the flag \
		disabled.",
	)
}
// Dummy genesis hash used when signing the test extrinsics.
const GENESIS_HASH: [u8; 32] = [69u8; 32];
// Versions taken from the kitchensink runtime so signatures validate.
const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version;
const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version;
// Wasm heap pages written into well-known storage for the executor.
const HEAP_PAGES: u64 = 20;
type TestExternalities<H> = CoreTestExternalities<H>;
/// Sign a checked extrinsic using the benchmark's spec/tx versions and the
/// dummy genesis hash above.
fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic {
	node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None)
}
/// Fresh externalities seeded with genesis storage, the runtime wasm blob, and
/// the `HEAP_PAGES` well-known key.
fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities<BlakeTwo256> {
	let storage = genesis_config.build_storage().unwrap();
	let mut ext = TestExternalities::new_with_code(compact_code_unwrap(), storage);
	ext.ext()
		.place_storage(well_known_keys::HEAP_PAGES.to_vec(), Some(HEAP_PAGES.encode()));
	ext
}
/// Build and encode a block by actually executing it through the runtime
/// executor: initialize, apply each signed extrinsic, finalize, then take the
/// resulting header. Returns the SCALE-encoded block and its blake2-256 hash.
fn construct_block<E: Externalities>(
	executor: &RuntimeExecutor,
	ext: &mut E,
	number: BlockNumber,
	parent_hash: Hash,
	extrinsics: Vec<CheckedExtrinsic>,
) -> (Vec<u8>, Hash) {
	use sp_trie::{LayoutV0, TrieConfiguration};
	// sign extrinsics.
	let extrinsics = extrinsics.into_iter().map(sign).collect::<Vec<_>>();
	// calculate the header fields that we can.
	let extrinsics_root =
		LayoutV0::<BlakeTwo256>::ordered_trie_root(extrinsics.iter().map(Encode::encode))
			.to_fixed_bytes()
			.into();
	// Provisional header: the real state root is only known after execution.
	let header = Header {
		parent_hash,
		number,
		extrinsics_root,
		state_root: Default::default(),
		digest: Default::default(),
	};
	let runtime_code = RuntimeCode {
		code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()),
		// Arbitrary cache key — only needs to be stable within this process.
		hash: vec![1, 2, 3],
		heap_pages: None,
	};
	// execute the block to get the real header.
	executor
		.call(ext, &runtime_code, "Core_initialize_block", &header.encode(), CallContext::Offchain)
		.0
		.unwrap();
	for i in extrinsics.iter() {
		executor
			.call(
				ext,
				&runtime_code,
				"BlockBuilder_apply_extrinsic",
				&i.encode(),
				CallContext::Offchain,
			)
			.0
			.unwrap();
	}
	// Finalization returns the completed header (with the real state root).
	let header = Header::decode(
		&mut &executor
			.call(
				ext,
				&runtime_code,
				"BlockBuilder_finalize_block",
				&[0u8; 0],
				CallContext::Offchain,
			)
			.0
			.unwrap()[..],
	)
	.unwrap();
	let hash = header.blake2_256();
	(Block { header, extrinsics }.encode(), hash.into())
}
/// Build the block(s) executed by the benchmark: block 1 holds the timestamp
/// inherent plus 20 Alice->Bob transfers with consecutive nonces.
fn test_blocks(
	genesis_config: &RuntimeGenesisConfig,
	executor: &RuntimeExecutor,
) -> Vec<(Vec<u8>, Hash)> {
	let mut test_ext = new_test_ext(genesis_config);
	let mut block1_extrinsics = vec![CheckedExtrinsic {
		format: ExtrinsicFormat::Bare,
		function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }),
	}];
	block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic {
		format: ExtrinsicFormat::Signed(alice(), tx_ext(i, 0)),
		function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
			dest: bob().into(),
			value: 1 * DOLLARS,
		}),
	}));
	let block1 =
		construct_block(executor, &mut test_ext.ext(), 1, GENESIS_HASH.into(), block1_extrinsics);
	vec![block1]
}
/// Criterion benchmark: execute the pre-built test block(s) through the wasm
/// executor, starting each iteration from fresh genesis externalities.
fn bench_execute_block(c: &mut Criterion) {
	let mut group = c.benchmark_group("execute blocks");
	group.bench_function("wasm", |b| {
		let genesis_config = node_testing::genesis::config();
		let executor = RuntimeExecutor::builder().build();
		let runtime_code = RuntimeCode {
			code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()),
			// Arbitrary cache key — only needs to be stable within this process.
			hash: vec![1, 2, 3],
			heap_pages: None,
		};
		// Get the runtime version to initialize the runtimes cache.
		{
			let mut test_ext = new_test_ext(&genesis_config);
			executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap();
		}
		let blocks = test_blocks(&genesis_config, &executor);
		b.iter_batched_ref(
			// Fresh externalities per iteration so every run starts at genesis.
			|| new_test_ext(&genesis_config),
			|test_ext| {
				for block in blocks.iter() {
					executor
						.call(
							&mut test_ext.ext(),
							&runtime_code,
							"Core_execute_block",
							&block.0,
							CallContext::Offchain,
						)
						.0
						.unwrap();
				}
			},
			BatchSize::LargeInput,
		);
	});
}
@@ -0,0 +1,275 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput};
use futures::{future, StreamExt};
use kitchensink_runtime::{constants::currency::*, BalancesCall, SudoCall};
use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool};
use node_primitives::AccountId;
use pezkuwi_sdk::{
sc_service::config::{ExecutorConfiguration, RpcConfiguration},
sc_transaction_pool_api::TransactionPool as _,
*,
};
use sc_service::{
config::{
BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig,
PruningMode, RpcBatchRequestConfig, TransactionPoolOptions,
},
BasePath, Configuration, Role,
};
use sc_transaction_pool_api::{TransactionSource, TransactionStatus};
use sp_core::{crypto::Pair, sr25519};
use sp_keyring::Sr25519Keyring;
use sp_runtime::OpaqueExtrinsic;
use staging_node_cli as node_cli;
use tokio::runtime::Handle;
/// Spins up a full development node (authority role, dev chain spec, Alice's
/// keys) inside a fresh temporary base path, driven by the given tokio handle.
fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
	let base_path = BasePath::new_temp_dir().expect("Creates base path");
	let root = base_path.path().to_path_buf();
	let network_config = NetworkConfiguration::new(
		Sr25519Keyring::Alice.to_seed(),
		"network/test/0.1",
		Default::default(),
		None,
	);
	let spec = Box::new(node_cli::chain_spec::development_config());
	let config = Configuration {
		impl_name: "BenchmarkImpl".into(),
		impl_version: "1.0".into(),
		role: Role::Authority,
		tokio_handle: tokio_handle.clone(),
		// Pool options intended specifically for benchmark runs.
		transaction_pool: TransactionPoolOptions::new_for_benchmarks(),
		network: network_config,
		keystore: KeystoreConfig::InMemory,
		database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
		trie_cache_maximum_size: Some(64 * 1024 * 1024),
		warm_up_trie_cache: None,
		// Keep all states and blocks so nothing is pruned mid-benchmark.
		state_pruning: Some(PruningMode::ArchiveAll),
		blocks_pruning: BlocksPruning::KeepAll,
		chain_spec: spec,
		executor: ExecutorConfiguration::default(),
		rpc: RpcConfiguration {
			// No RPC listen address: the node is driven programmatically.
			addr: None,
			max_connections: Default::default(),
			cors: None,
			methods: Default::default(),
			max_request_size: Default::default(),
			max_response_size: Default::default(),
			id_provider: Default::default(),
			max_subs_per_conn: Default::default(),
			port: 9944,
			message_buffer_capacity: Default::default(),
			batch_config: RpcBatchRequestConfig::Unlimited,
			rate_limit: None,
			rate_limit_whitelisted_ips: Default::default(),
			rate_limit_trust_proxy_headers: Default::default(),
			request_logger_limit: 1024,
		},
		prometheus_config: None,
		telemetry_endpoints: None,
		offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false },
		force_authoring: false,
		disable_grandpa: false,
		// Injects Alice's dev keys so the node can author blocks immediately.
		dev_key_seed: Some(Sr25519Keyring::Alice.to_seed()),
		tracing_targets: None,
		tracing_receiver: Default::default(),
		announce_block: true,
		data_path: base_path.path().into(),
		base_path,
		wasm_runtime_overrides: None,
	};
	tokio_handle.block_on(async move {
		node_cli::service::new_full_base::<sc_network::NetworkWorker<_, _>>(
			config,
			None,
			false,
			|_, _| (),
		)
		.expect("Creates node")
	})
}
/// Derives `num` deterministic sr25519 keypairs from Alice's seed by appending
/// `/0`, `/1`, … derivation junctions.
fn create_accounts(num: usize) -> Vec<sr25519::Pair> {
	let base_seed = Sr25519Keyring::Alice.to_seed();
	let mut pairs = Vec::with_capacity(num);
	for index in 0..num {
		let uri = format!("{}/{}", base_seed, index);
		pairs.push(Pair::from_string(&uri, None).expect("Creates account pair"));
	}
	pairs
}
/// Create the extrinsics that will initialize the accounts from the sudo account (Alice).
///
/// `start_nonce` is the current nonce of Alice.
///
/// Emits two sudo `force_set_balance` extrinsics per account: one zeroing the
/// balance (which resets the account), one re-funding it.
fn create_account_extrinsics(
	client: &FullClient,
	accounts: &[sr25519::Pair],
) -> Vec<OpaqueExtrinsic> {
	let start_nonce = fetch_nonce(client, Sr25519Keyring::Alice.pair());
	accounts
		.iter()
		.enumerate()
		.flat_map(|(i, a)| {
			vec![
				// Reset the nonce by removing any funds
				create_extrinsic(
					client,
					Sr25519Keyring::Alice.pair(),
					SudoCall::sudo {
						call: Box::new(
							BalancesCall::force_set_balance {
								who: AccountId::from(a.public()).into(),
								new_free: 0,
							}
							.into(),
						),
					},
					// Two extrinsics per account, so Alice's nonce advances in steps of two.
					Some(start_nonce + (i as u32) * 2),
				),
				// Give back funds
				create_extrinsic(
					client,
					Sr25519Keyring::Alice.pair(),
					SudoCall::sudo {
						call: Box::new(
							BalancesCall::force_set_balance {
								who: AccountId::from(a.public()).into(),
								new_free: 1_000_000 * DOLLARS,
							}
							.into(),
						),
					},
					Some(start_nonce + (i as u32) * 2 + 1),
				),
			]
		})
		.map(OpaqueExtrinsic::from)
		.collect()
}
/// Builds the benchmark workload: for every account, `extrinsics_per_account`
/// one-dollar transfers to Bob with consecutive nonces starting at zero.
fn create_benchmark_extrinsics(
	client: &FullClient,
	accounts: &[sr25519::Pair],
	extrinsics_per_account: usize,
) -> Vec<OpaqueExtrinsic> {
	let mut extrinsics = Vec::with_capacity(accounts.len() * extrinsics_per_account);
	for account in accounts {
		for nonce in 0..extrinsics_per_account {
			let tx = create_extrinsic(
				client,
				account.clone(),
				BalancesCall::transfer_allow_death {
					dest: Sr25519Keyring::Bob.to_account_id().into(),
					value: 1 * DOLLARS,
				},
				Some(nonce as u32),
			);
			extrinsics.push(OpaqueExtrinsic::from(tx));
		}
	}
	extrinsics
}
/// Submits `tx` to the pool and waits until it lands in a block.
///
/// Returns once the transaction is `InBlock` when `wait_for_finalized` is
/// `false`, otherwise only once it is `Finalized`.
async fn submit_tx_and_wait_for_inclusion(
	tx_pool: &TransactionPool,
	tx: OpaqueExtrinsic,
	client: &FullClient,
	wait_for_finalized: bool,
) {
	let best_hash = client.chain_info().best_hash;

	// `tx` is owned and used exactly once, so it can be moved directly into the
	// pool; the previous `tx.clone()` was a needless allocation per submission.
	let mut watch = tx_pool
		.submit_and_watch(best_hash, TransactionSource::External, tx)
		.await
		.expect("Submits tx to pool")
		.fuse();

	loop {
		match watch.select_next_some().await {
			// Finalization implies inclusion, so this arm terminates both modes.
			TransactionStatus::Finalized(_) => break,
			TransactionStatus::InBlock(_) if !wait_for_finalized => break,
			_ => {},
		}
	}
}
/// Criterion benchmark: measures transaction-pool throughput by submitting
/// 10 accounts × 2000 transfers against a live dev node and waiting for
/// block inclusion of every transaction.
fn transaction_pool_benchmarks(c: &mut Criterion) {
	sp_tracing::try_init_simple();
	let runtime = tokio::runtime::Runtime::new().expect("Creates tokio runtime");
	let tokio_handle = runtime.handle().clone();
	let node = new_node(tokio_handle.clone());
	let account_num = 10;
	let extrinsics_per_account = 2000;
	let accounts = create_accounts(account_num);
	let mut group = c.benchmark_group("Transaction pool");
	group.sample_size(10);
	// Throughput is reported per submitted extrinsic.
	group.throughput(Throughput::Elements(account_num as u64 * extrinsics_per_account as u64));
	let mut counter = 1;
	group.bench_function(
		format!("{} transfers from {} accounts", account_num * extrinsics_per_account, account_num),
		move |b| {
			b.iter_batched(
				|| {
					// Setup (not timed): fund/reset the accounts on-chain and wait
					// for finalization so every sample starts from the same state.
					let prepare_extrinsics = create_account_extrinsics(&node.client, &accounts);
					runtime.block_on(future::join_all(prepare_extrinsics.into_iter().map(|tx| {
						submit_tx_and_wait_for_inclusion(
							&node.transaction_pool,
							tx,
							&node.client,
							true,
						)
					})));
					create_benchmark_extrinsics(&node.client, &accounts, extrinsics_per_account)
				},
				|extrinsics| {
					// Timed section: submit everything and wait only for `InBlock`.
					runtime.block_on(future::join_all(extrinsics.into_iter().map(|tx| {
						submit_tx_and_wait_for_inclusion(
							&node.transaction_pool,
							tx,
							&node.client,
							false,
						)
					})));
					println!("Finished {}", counter);
					counter += 1;
				},
				BatchSize::SmallInput,
			)
		},
	);
}
// Register the benchmark group and generate the criterion `main` entry point.
criterion_group!(benches, transaction_pool_benchmarks);
criterion_main!(benches);
+28
View File
@@ -0,0 +1,28 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate Node CLI
#![warn(missing_docs)]
use pezkuwi_sdk::*;
use staging_node_cli as node_cli;
/// Binary entry point: delegates to the node CLI runner and surfaces its result.
fn main() -> sc_cli::Result<()> {
	node_cli::run()
}
+68
View File
@@ -0,0 +1,68 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
/// Build-script entry point; all work happens only with the `cli` feature on.
fn main() {
	#[cfg(feature = "cli")]
	cli::main();
}
#[cfg(feature = "cli")]
mod cli {
	// Pull in the binary's `Cli` definition so `Cli::command()` is available below.
	include!("src/cli.rs");
	use clap::{CommandFactory, ValueEnum};
	use clap_complete::{generate_to, Shell};
	use pezkuwi_sdk::substrate_build_script_utils::{
		generate_cargo_keys, rerun_if_git_head_changed,
	};
	use std::{env, fs, path::Path};
	/// Generates shell completion scripts and cargo build keys, and re-runs the
	/// build script whenever the git HEAD changes.
	pub fn main() {
		build_shell_completion();
		generate_cargo_keys();
		rerun_if_git_head_changed();
	}
	/// Build shell completion scripts for all known shells.
	fn build_shell_completion() {
		for shell in Shell::value_variants() {
			build_completion(shell);
		}
	}
	/// Build the shell auto-completion for a given Shell.
	fn build_completion(shell: &Shell) {
		// Silently skip when cargo did not provide an output directory.
		let outdir = match env::var_os("OUT_DIR") {
			None => return,
			Some(dir) => dir,
		};
		// OUT_DIR points deep inside the target directory; walk three levels up
		// to place scripts next to the build artifacts — assumes the standard
		// cargo layout, TODO confirm if the layout ever changes.
		let path = Path::new(&outdir)
			.parent()
			.unwrap()
			.parent()
			.unwrap()
			.parent()
			.unwrap()
			.join("completion-scripts");
		// Ignore "already exists" and treat generation as best-effort.
		fs::create_dir(&path).ok();
		let _ = generate_to(*shell, &mut Cli::command(), "substrate-node", &path);
	}
}
@@ -0,0 +1,34 @@
# Shell completion
The Substrate CLI supports shell auto-completion. For this to work, you will need to source the
completion script that matches your build and shell.
Assuming you built a release version using `cargo build --release` and use `bash` run the following:
`source target/release/completion-scripts/substrate.bash`
You can find completion scripts for:
- bash
- fish
- zsh
- elvish
- powershell
To make this change persistent, you can proceed as follows:
```shell
COMPL_DIR=$HOME/.completion
mkdir -p $COMPL_DIR
cp -f target/release/completion-scripts/substrate.bash $COMPL_DIR/
echo "source $COMPL_DIR/substrate.bash" >> $HOME/.bash_profile
source $HOME/.bash_profile
```
When you build a new version of Substrate, the following will ensure your auto-completion script matches the current binary:
```shell
COMPL_DIR=$HOME/.completion
mkdir -p $COMPL_DIR
cp -f target/release/completion-scripts/substrate.bash $COMPL_DIR/
source $HOME/.bash_profile
```
File diff suppressed because one or more lines are too long
+124
View File
@@ -0,0 +1,124 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Setup code for [`super::command`] which would otherwise bloat that module.
//!
//! Should only be used for benchmarking as it may break in other contexts.
use crate::service::{create_extrinsic, FullClient};
use pezkuwi_sdk::*;
use kitchensink_runtime::{BalancesCall, SystemCall};
use node_primitives::{AccountId, Balance};
use sc_cli::Result;
use sp_inherents::{InherentData, InherentDataProvider};
use sp_keyring::Sr25519Keyring;
use sp_runtime::OpaqueExtrinsic;
use std::{sync::Arc, time::Duration};
/// Generates `System::Remark` extrinsics for the benchmarks.
///
/// Note: Should only be used for benchmarking.
pub struct RemarkBuilder {
	// Client handle used by `create_extrinsic` to build signed extrinsics.
	client: Arc<FullClient>,
}
impl RemarkBuilder {
	/// Creates a new [`Self`] from the given client.
	pub fn new(client: Arc<FullClient>) -> Self {
		Self { client }
	}
}
impl frame_benchmarking_cli::ExtrinsicBuilder for RemarkBuilder {
	// Pallet/extrinsic names reported to the benchmarking CLI.
	fn pallet(&self) -> &str {
		"system"
	}
	fn extrinsic(&self) -> &str {
		"remark"
	}
	/// Builds a signed `System::remark` extrinsic with an empty payload,
	/// signed by Bob's dev key at the given `nonce`.
	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
		let acc = Sr25519Keyring::Bob.pair();
		let extrinsic: OpaqueExtrinsic = create_extrinsic(
			self.client.as_ref(),
			acc,
			SystemCall::remark { remark: vec![] },
			Some(nonce),
		)
		.into();
		Ok(extrinsic)
	}
}
/// Generates `Balances::TransferKeepAlive` extrinsics for the benchmarks.
///
/// Note: Should only be used for benchmarking.
pub struct TransferKeepAliveBuilder {
	// Client handle used by `create_extrinsic` to build signed extrinsics.
	client: Arc<FullClient>,
	// Beneficiary of every generated transfer.
	dest: AccountId,
	// Amount transferred per extrinsic.
	value: Balance,
}
impl TransferKeepAliveBuilder {
	/// Creates a new [`Self`] from the given client.
	pub fn new(client: Arc<FullClient>, dest: AccountId, value: Balance) -> Self {
		Self { client, dest, value }
	}
}
impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder {
	// Pallet/extrinsic names reported to the benchmarking CLI.
	fn pallet(&self) -> &str {
		"balances"
	}
	fn extrinsic(&self) -> &str {
		"transfer_keep_alive"
	}
	/// Builds a signed `transfer_keep_alive` of `self.value` to `self.dest`,
	/// signed by Bob's dev key at the given `nonce`.
	fn build(&self, nonce: u32) -> std::result::Result<OpaqueExtrinsic, &'static str> {
		let acc = Sr25519Keyring::Bob.pair();
		let extrinsic: OpaqueExtrinsic = create_extrinsic(
			self.client.as_ref(),
			acc,
			BalancesCall::transfer_keep_alive {
				dest: self.dest.clone().into(),
				value: self.value.into(),
			},
			Some(nonce),
		)
		.into();
		Ok(extrinsic)
	}
}
/// Generates inherent data for the `benchmark overhead` command.
///
/// Only the timestamp inherent is provided, fixed at zero milliseconds.
pub fn inherent_benchmark_data() -> Result<InherentData> {
	let mut data = InherentData::new();
	let provider = sp_timestamp::InherentDataProvider::new(Duration::from_millis(0).into());
	let fill = provider.provide_inherent_data(&mut data);
	futures::executor::block_on(fill).map_err(|e| format!("creating inherent data: {:?}", e))?;
	Ok(data)
}
+507
View File
@@ -0,0 +1,507 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate chain configurations.
use pezkuwi_sdk::*;
use crate::chain_spec::sc_service::Properties;
use kitchensink_runtime::{
genesis_config_presets::{Staker, ENDOWMENT, STASH},
wasm_binary_unwrap, Block, MaxNominations, StakerStatus,
};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use pallet_revive::is_eth_derived;
use sc_chain_spec::ChainSpecExtension;
use sc_service::ChainType;
use sc_telemetry::TelemetryEndpoints;
use serde::{Deserialize, Serialize};
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId;
use sp_consensus_grandpa::AuthorityId as GrandpaId;
use sp_core::crypto::UncheckedInto;
use sp_mixnet::types::AuthorityId as MixnetId;
pub use kitchensink_runtime::RuntimeGenesisConfig;
pub use node_primitives::{AccountId, Balance, Signature};
// Telemetry endpoint used by the staging testnet config below.
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.pezkuwichain.io/submit/";
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
// Serialized with camelCase field names (e.g. `forkBlocks`) in the spec JSON.
#[serde(rename_all = "camelCase")]
pub struct Extensions {
	/// Block numbers with known hashes.
	pub fork_blocks: sc_client_api::ForkBlocks<Block>,
	/// Known bad block hashes.
	pub bad_blocks: sc_client_api::BadBlocks<Block>,
	/// The light sync state extension used by the sync-state rpc.
	pub light_sync_state: sc_sync_state_rpc::LightSyncStateExtension,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::GenericChainSpec<Extensions>;
/// Flaming Fir testnet generator
///
/// Parses the bundled `flaming-fir.json` chain spec; returns an error string
/// if the embedded JSON is invalid.
pub fn flaming_fir_config() -> Result<ChainSpec, String> {
	ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..])
}
/// Returns the hard-coded staging-testnet accounts: the four initial
/// authorities (stash, controller and session keys), the sudo/root key, and
/// the endowed accounts (only the root key).
fn configure_accounts_for_staging_testnet() -> (
	Vec<(
		AccountId,
		AccountId,
		GrandpaId,
		BabeId,
		ImOnlineId,
		AuthorityDiscoveryId,
		MixnetId,
		BeefyId,
	)>,
	AccountId,
	Vec<AccountId>,
) {
	#[rustfmt::skip]
	// stash, controller, session-key, beefy id
	// generated with secret:
	// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
	//
	// and
	//
	// for i in 1 2 3 4 ; do for j in session; do subkey inspect --scheme ed25519 "$secret"//fir//$j//$i; done; done
	//
	// and
	//
	// for i in 1 2 3 4 ; do for j in session; do subkey inspect --scheme ecdsa "$secret"//fir//$j//$i; done; done
	let initial_authorities: Vec<(
		AccountId,
		AccountId,
		GrandpaId,
		BabeId,
		ImOnlineId,
		AuthorityDiscoveryId,
		MixnetId,
		BeefyId,
	)> = vec![
		(
			// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
			array_bytes::hex_n_into_unchecked("9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"),
			// 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq
			array_bytes::hex_n_into_unchecked("781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"),
			// 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC
			array_bytes::hex2array_unchecked("9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332")
				.unchecked_into(),
			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
				.unchecked_into(),
			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
				.unchecked_into(),
			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
				.unchecked_into(),
			// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
			array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106")
				.unchecked_into(),
			// 5DMLFcDdLLQbw696YfHaWBpQR99HwR456ycSCfr6L7KXGYK8
			array_bytes::hex2array_unchecked("035560fafa241739869360aa4b32bc98953172ceb41a19c6cc1a27962fb3d1ecec")
				.unchecked_into(),
		),
		(
			// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
			array_bytes::hex_n_into_unchecked("68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"),
			// 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF
			array_bytes::hex_n_into_unchecked("c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"),
			// 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE
			array_bytes::hex2array_unchecked("7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f")
				.unchecked_into(),
			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
				.unchecked_into(),
			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
				.unchecked_into(),
			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
				.unchecked_into(),
			// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
			array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e")
				.unchecked_into(),
			// 5FYk11kNtB4178wLKJ2RNoUzzcjgRUciFe3SJDVZXhqX4dzG
			array_bytes::hex2array_unchecked("02da1ab255ed888ee3e19b73d335fc13160b3eb10456c2d17c6a8ea7de403d2445")
				.unchecked_into(),
		),
		(
			// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
			array_bytes::hex_n_into_unchecked("547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"),
			// 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9
			array_bytes::hex_n_into_unchecked("9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"),
			// 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d
			array_bytes::hex2array_unchecked("5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440")
				.unchecked_into(),
			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
				.unchecked_into(),
			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
				.unchecked_into(),
			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
				.unchecked_into(),
			// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
			array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a")
				.unchecked_into(),
			// 5GQx4FToRBPqfani6o7owFJE1UstiviqbPP7HPWyvtXWWukn
			array_bytes::hex2array_unchecked("036a818b3f59579c5fbbe4fede64f49dbf090ba883eb2a175d5ca90e5adb5f0b3e")
				.unchecked_into(),
		),
		(
			// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
			array_bytes::hex_n_into_unchecked("f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"),
			// 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn
			array_bytes::hex_n_into_unchecked("66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"),
			// 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4
			array_bytes::hex2array_unchecked("3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef")
				.unchecked_into(),
			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
				.unchecked_into(),
			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
				.unchecked_into(),
			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
				.unchecked_into(),
			// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
			array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378")
				.unchecked_into(),
			// 5FCu2pY928VVHPgnNVJssvxFJZECyNe1CyH3WTG79Wisx58B
			array_bytes::hex2array_unchecked("020ce02b963548f9f8ade8765f7a4a06638c17819c78422a1cc35b647873583eef")
				.unchecked_into(),
		),
	];
	// generated with secret: subkey inspect "$secret"/fir
	let root_key: AccountId = array_bytes::hex_n_into_unchecked(
		// 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo
		"9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809",
	);
	// Only the root key is endowed at genesis on the staging testnet.
	let endowed_accounts: Vec<AccountId> = vec![root_key.clone()];
	(initial_authorities, root_key, endowed_accounts)
}
/// Genesis JSON patch for the staging testnet: the well-known authorities,
/// no nominators, and the root key as the only endowed account.
fn staging_testnet_genesis_patch() -> serde_json::Value {
	let (authorities, root, endowed) = configure_accounts_for_staging_testnet();
	let nominators = Vec::new();
	testnet_genesis_patch(authorities, nominators, root, endowed)
}
/// Staging testnet config.
///
/// Live chain type with telemetry enabled; the genesis is the local-testnet
/// preset overridden by the staging patch (authorities + root key).
pub fn staging_testnet_config() -> ChainSpec {
	ChainSpec::builder(wasm_binary_unwrap(), Default::default())
		.with_name("Staging Testnet")
		.with_id("staging_testnet")
		.with_chain_type(ChainType::Live)
		.with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET)
		.with_genesis_config_patch(staging_testnet_genesis_patch())
		.with_telemetry_endpoints(
			TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])
				.expect("Staging telemetry url is valid; qed"),
		)
		.build()
}
/// Configure the accounts for the testnet.
///
/// * Adds `initial_authorities` and `initial_nominators` to endowed accounts if missing.
/// * Sets up the stakers consisting of the `initial_authorities` and `initial_nominators`,
///   each nominator nominating a random subset of the authorities.
fn configure_accounts(
	initial_authorities: Vec<(
		AccountId,
		AccountId,
		GrandpaId,
		BabeId,
		ImOnlineId,
		AuthorityDiscoveryId,
		MixnetId,
		BeefyId,
	)>,
	initial_nominators: Vec<AccountId>,
	endowed_accounts: Vec<AccountId>,
	stash: Balance,
) -> (
	Vec<(
		AccountId,
		AccountId,
		GrandpaId,
		BabeId,
		ImOnlineId,
		AuthorityDiscoveryId,
		MixnetId,
		BeefyId,
	)>,
	Vec<AccountId>,
	Vec<Staker>,
) {
	let mut endowed_accounts = endowed_accounts;
	// endow all authorities and nominators.
	initial_authorities
		.iter()
		.map(|x| &x.0)
		.chain(initial_nominators.iter())
		.for_each(|x| {
			if !endowed_accounts.contains(x) {
				endowed_accounts.push(x.clone())
			}
		});
	// stakers: all validators and nominators.
	let mut rng = rand::thread_rng();
	let stakers = initial_authorities
		.iter()
		.map(|x| (x.0.clone(), x.0.clone(), stash, StakerStatus::Validator))
		.chain(initial_nominators.iter().map(|x| {
			use rand::{seq::SliceRandom, Rng};
			let limit = (MaxNominations::get() as usize).min(initial_authorities.len());
			// `% 0` panics: when there are no authorities to nominate
			// (`limit == 0`), fall back to an empty nomination set instead of
			// crashing. Behavior for `limit > 0` is unchanged.
			let count = if limit == 0 { 0 } else { rng.gen::<usize>() % limit };
			let nominations = initial_authorities
				.as_slice()
				.choose_multiple(&mut rng, count)
				.into_iter()
				.map(|choice| choice.0.clone())
				.collect::<Vec<_>>();
			(x.clone(), x.clone(), stash, StakerStatus::Nominator(nominations))
		}))
		.collect::<Vec<_>>();
	(initial_authorities, endowed_accounts, stakers)
}
/// Helper function to create RuntimeGenesisConfig json patch for testing.
///
/// Endows every account, installs the session keys for all authorities, seeds
/// the elections/technical-committee collective, sets up staking, sudo, and
/// the revive account mapping.
pub fn testnet_genesis_patch(
	initial_authorities: Vec<(
		AccountId,
		AccountId,
		GrandpaId,
		BabeId,
		ImOnlineId,
		AuthorityDiscoveryId,
		MixnetId,
		BeefyId,
	)>,
	initial_nominators: Vec<AccountId>,
	root_key: AccountId,
	endowed_accounts: Vec<AccountId>,
) -> serde_json::Value {
	let (initial_authorities, endowed_accounts, stakers) =
		configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH);
	// Require all configured authorities to be present from the start.
	let validator_count = initial_authorities.len();
	let minimum_validator_count = validator_count;
	let collective = collective(&endowed_accounts);
	serde_json::json!({
		"balances": {
			// Every endowed account starts with `ENDOWMENT`.
			"balances": endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect::<Vec<_>>()
		},
		"session": {
			"keys": initial_authorities
				.iter()
				.map(|x| {
					(
						x.0.clone(),
						// stash account is controller
						x.0.clone(),
						session_keys_json(
							x.2.clone(),
							x.3.clone(),
							x.4.clone(),
							x.5.clone(),
							x.6.clone(),
							x.7.clone(),
						)
					)
				})
				.collect::<Vec<_>>()
		},
		"elections": {
			"members": collective.iter().cloned().map(|member| (member, STASH)).collect::<Vec<_>>(),
		},
		"technicalCommittee": {
			"members": collective,
		},
		"staking": {
			"validatorCount": validator_count,
			"minimumValidatorCount": minimum_validator_count,
			"invulnerables": initial_authorities
				.iter()
				.map(|x| x.0.clone())
				.collect::<Vec<_>>(),
			"stakers": stakers,
		},
		"sudo": {
			"key": root_key,
		},
		"revive": {
			// Only non-eth-derived accounts get an account mapping here.
			"mappedAccounts": endowed_accounts.iter().filter(|x| ! is_eth_derived(x)).cloned().collect::<Vec<_>>()
		}
	})
}
/// Creates the session keys as defined by the runtime.
///
/// NOTE(review): the JSON field names presumably must match the runtime's
/// session-keys definition — verify against the runtime when changing them.
fn session_keys_json(
	grandpa: GrandpaId,
	babe: BabeId,
	im_online: ImOnlineId,
	authority_discovery: AuthorityDiscoveryId,
	mixnet: MixnetId,
	beefy: BeefyId,
) -> serde_json::Value {
	serde_json::json!({
		"authority_discovery": authority_discovery,
		"babe": babe,
		"beefy": beefy,
		"grandpa": grandpa,
		"im_online": im_online,
		"mixnet": mixnet
	})
}
/// Extract some accounts from endowed to be put into the collective.
///
/// Takes the first half of the endowed accounts (rounded up), capped at 50.
fn collective(endowed: &[AccountId]) -> Vec<AccountId> {
	const MAX_COLLECTIVE_SIZE: usize = 50;
	let half_rounded_up = endowed.len().div_ceil(2);
	let size = half_rounded_up.min(MAX_COLLECTIVE_SIZE);
	endowed[..size].to_vec()
}
/// Chain properties shared by the generated specs: 12 token decimals.
fn props() -> Properties {
	let mut chain_properties = Properties::new();
	chain_properties.insert(String::from("tokenDecimals"), 12.into());
	chain_properties
}
/// Development config (single validator Alice).
pub fn development_config() -> ChainSpec {
	ChainSpec::builder(wasm_binary_unwrap(), Default::default())
		.with_name("Development")
		.with_id("dev")
		.with_chain_type(ChainType::Development)
		// Only the dev spec sets token properties (12 decimals); see `props`.
		.with_properties(props())
		.with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET)
		.build()
}
/// Local testnet config (multivalidator Alice + Bob).
///
/// Uses the runtime's local-testnet genesis preset without any patch.
pub fn local_testnet_config() -> ChainSpec {
	ChainSpec::builder(wasm_binary_unwrap(), Default::default())
		.with_name("Local Testnet")
		.with_id("local_testnet")
		.with_chain_type(ChainType::Local)
		.with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET)
		.build()
}
#[cfg(test)]
pub(crate) mod tests {
	use super::*;
	use crate::service::{new_full_base, NewFullBase};
	use kitchensink_runtime::genesis_config_presets::well_known_including_eth_accounts;
	use sc_service_test;
	use sp_runtime::{AccountId32, BuildStorage};
	/// Local testnet config (single validator - Alice).
	pub fn integration_test_config_with_single_authority() -> ChainSpec {
		ChainSpec::builder(wasm_binary_unwrap(), Default::default())
			.with_name("Integration Test")
			.with_id("test")
			.with_chain_type(ChainType::Development)
			.with_genesis_config_preset_name(sp_genesis_builder::DEV_RUNTIME_PRESET)
			.build()
	}
	/// Local testnet config (multivalidator Alice + Bob).
	pub fn integration_test_config_with_two_authorities() -> ChainSpec {
		ChainSpec::builder(wasm_binary_unwrap(), Default::default())
			.with_name("Integration Test")
			.with_id("test")
			.with_chain_type(ChainType::Local)
			.with_genesis_config_preset_name(sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET)
			.build()
	}
	/// Maps a dev Ethereum keypair to an `AccountId32`: the 20-byte address
	/// fills the first 20 bytes; the remaining 12 bytes stay `0xEE`.
	fn eth_account(from: subxt_signer::eth::Keypair) -> AccountId32 {
		let mut account_id = AccountId32::new([0xEE; 32]);
		<AccountId32 as AsMut<[u8; 32]>>::as_mut(&mut account_id)[..20]
			.copy_from_slice(&from.public_key().to_account_id().as_ref());
		account_id
	}
	// Spins up a real two-node network; ignored by default as it is expensive.
	#[test]
	#[ignore]
	fn test_connectivity() {
		sp_tracing::try_init_simple();
		sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| {
			let NewFullBase { task_manager, client, network, sync, transaction_pool, .. } =
				new_full_base::<sc_network::NetworkWorker<_, _>>(config, None, false, |_, _| ())?;
			Ok(sc_service_test::TestNetComponents::new(
				task_manager,
				client,
				network,
				sync,
				transaction_pool,
			))
		});
	}
	// The next three tests only check the specs produce valid genesis storage.
	#[test]
	fn test_create_development_chain_spec() {
		development_config().build_storage().unwrap();
	}
	#[test]
	fn test_create_local_testnet_chain_spec() {
		local_testnet_config().build_storage().unwrap();
	}
	#[test]
	fn test_staging_test_net_chain_spec() {
		staging_testnet_config().build_storage().unwrap();
	}
	#[test]
	fn ensure_eth_accounts_are_in_endowed() {
		let alith = eth_account(subxt_signer::eth::dev::alith());
		let baltathar = eth_account(subxt_signer::eth::dev::baltathar());
		let endowed = well_known_including_eth_accounts();
		assert!(endowed.contains(&alith), "Alith must be in endowed for integration tests");
		assert!(endowed.contains(&baltathar), "Baltathar must be in endowed for integration tests");
	}
}
+111
View File
@@ -0,0 +1,111 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use pezkuwi_sdk::*;
/// An overarching CLI command definition.
// NOTE(review): the `///` comments below are rendered by clap into `--help`
// output, i.e. they are user-facing strings; plain `//` comments are used for
// reviewer notes so the help text stays untouched.
#[derive(Debug, clap::Parser)]
pub struct Cli {
    /// Possible subcommand with parameters.
    #[command(subcommand)]
    pub subcommand: Option<Subcommand>,
    // Standard node-run options (chain selection, ports, pruning, ...),
    // flattened into the top-level command by clap.
    #[allow(missing_docs)]
    #[clap(flatten)]
    pub run: sc_cli::RunCmd,
    // Mixnet-related CLI parameters, flattened into the top-level command.
    #[allow(missing_docs)]
    #[clap(flatten)]
    pub mixnet_params: sc_cli::MixnetParams,
    /// Disable automatic hardware benchmarks.
    ///
    /// By default these benchmarks are automatically ran at startup and measure
    /// the CPU speed, the memory bandwidth and the disk speed.
    ///
    /// The results are then printed out in the logs, and also sent as part of
    /// telemetry, if telemetry is enabled.
    #[arg(long)]
    pub no_hardware_benchmarks: bool,
    // Storage-monitor parameters (e.g. free-space thresholds), flattened.
    #[allow(missing_docs)]
    #[clap(flatten)]
    pub storage_monitor: sc_storage_monitor::StorageMonitorParams,
}
/// Possible subcommands of the main binary.
// NOTE(review): variant doc comments double as clap `--help` text, so they are
// user-facing strings — edit with care.
#[derive(Debug, clap::Subcommand)]
pub enum Subcommand {
    /// The custom inspect subcommand for decoding blocks and extrinsics.
    #[command(
        name = "inspect",
        about = "Decode given block or extrinsic using current native runtime."
    )]
    Inspect(node_inspect::cli::InspectCmd),
    /// Sub-commands concerned with benchmarking.
    ///
    /// The pallet benchmarking moved to the `pallet` sub-command.
    #[command(subcommand)]
    Benchmark(frame_benchmarking_cli::BenchmarkCmd),
    /// Key management cli utilities
    #[command(subcommand)]
    Key(sc_cli::KeySubcommand),
    /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key.
    Verify(sc_cli::VerifyCmd),
    /// Generate a seed that provides a vanity address.
    Vanity(sc_cli::VanityCmd),
    /// Sign a message, with a given (secret) key.
    Sign(sc_cli::SignCmd),
    /// Build a chain specification.
    /// DEPRECATED: `build-spec` command will be removed after 1/04/2026. Use `export-chain-spec`
    /// command instead.
    // Kept (with a deprecation warning) for backwards compatibility until the
    // removal date above.
    #[deprecated(
        note = "build-spec command will be removed after 1/04/2026. Use export-chain-spec command instead"
    )]
    BuildSpec(sc_cli::BuildSpecCmd),
    /// Export the chain specification.
    ExportChainSpec(sc_cli::ExportChainSpecCmd),
    /// Validate blocks.
    CheckBlock(sc_cli::CheckBlockCmd),
    /// Export blocks.
    ExportBlocks(sc_cli::ExportBlocksCmd),
    /// Export the state of a given block into a chain spec.
    ExportState(sc_cli::ExportStateCmd),
    /// Import blocks.
    ImportBlocks(sc_cli::ImportBlocksCmd),
    /// Remove the whole chain.
    PurgeChain(sc_cli::PurgeChainCmd),
    /// Revert the chain to a previous state.
    Revert(sc_cli::RevertCmd),
    /// Db meta columns information.
    ChainInfo(sc_cli::ChainInfoCmd),
}
+238
View File
@@ -0,0 +1,238 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use pezkuwi_sdk::*;
use super::benchmarking::{inherent_benchmark_data, RemarkBuilder, TransferKeepAliveBuilder};
use crate::{
chain_spec, service,
service::{new_partial, FullClient},
Cli, Subcommand,
};
use frame_benchmarking_cli::*;
use kitchensink_runtime::{ExistentialDeposit, RuntimeApi};
use node_primitives::Block;
use sc_cli::{Result, SubstrateCli};
use sc_service::PartialComponents;
use sp_keyring::Sr25519Keyring;
use sp_runtime::traits::HashingFor;
use std::sync::Arc;
// CLI metadata and `--chain` resolution for the node binary.
impl SubstrateCli for Cli {
    fn impl_name() -> String {
        "Substrate Node".into()
    }
    fn impl_version() -> String {
        // Compile-time env var; presumably injected by the build script —
        // TODO confirm where SUBSTRATE_CLI_IMPL_VERSION is set.
        env!("SUBSTRATE_CLI_IMPL_VERSION").into()
    }
    fn description() -> String {
        env!("CARGO_PKG_DESCRIPTION").into()
    }
    fn author() -> String {
        env!("CARGO_PKG_AUTHORS").into()
    }
    fn support_url() -> String {
        "https://github.com/pezkuwichain/pezkuwi-sdk/issues/new".into()
    }
    fn copyright_start_year() -> i32 {
        2017
    }
    /// Resolves a `--chain` identifier to a concrete chain specification.
    fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
        let spec = match id {
            // An empty id is ambiguous — force the user to pick a chain.
            "" =>
                return Err(
                    "Please specify which chain you want to run, e.g. --dev or --chain=local"
                        .into(),
                ),
            "dev" => Box::new(chain_spec::development_config()),
            "local" => Box::new(chain_spec::local_testnet_config()),
            "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?),
            "staging" => Box::new(chain_spec::staging_testnet_config()),
            // Anything else is treated as a path to a chain-spec JSON file.
            path =>
                Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
        };
        Ok(spec)
    }
}
/// Parse command line arguments into service configuration.
///
/// Dispatches on the parsed [`Subcommand`]: with no subcommand the full node
/// is started and run until exit; otherwise the matching utility command is
/// executed (sync or async, as each command requires).
pub fn run() -> Result<()> {
    let cli = Cli::from_args();
    match &cli.subcommand {
        // No subcommand: run the node itself until shutdown.
        None => {
            let runner = cli.create_runner(&cli.run)?;
            runner.run_node_until_exit(|config| async move {
                service::new_full(config, cli).map_err(sc_cli::Error::Service)
            })
        },
        Some(Subcommand::Inspect(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run::<Block, RuntimeApi>(config))
        },
        // Exporting a chain spec needs no full runner, only the spec itself.
        Some(Subcommand::ExportChainSpec(cmd)) => {
            let chain_spec = cli.load_spec(&cmd.chain)?;
            cmd.run(chain_spec)
        },
        Some(Subcommand::Benchmark(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| {
                // This switch needs to be in the client, since the client decides
                // which sub-commands it wants to support.
                match cmd {
                    BenchmarkCmd::Pallet(cmd) => {
                        // Pallet benchmarks need runtime support compiled in.
                        if !cfg!(feature = "runtime-benchmarks") {
                            return Err(
                                "Runtime benchmarking wasn't enabled when building the node. \
                                You can enable it with `--features runtime-benchmarks`."
                                    .into(),
                            )
                        }
                        cmd.run_with_spec::<HashingFor<Block>, sp_statement_store::runtime_api::HostFunctions>(Some(config.chain_spec))
                    },
                    BenchmarkCmd::Block(cmd) => {
                        // ensure that we keep the task manager alive
                        let partial = new_partial(&config, None)?;
                        cmd.run(partial.client)
                    },
                    #[cfg(not(feature = "runtime-benchmarks"))]
                    BenchmarkCmd::Storage(_) => Err(
                        "Storage benchmarking can be enabled with `--features runtime-benchmarks`."
                            .into(),
                    ),
                    #[cfg(feature = "runtime-benchmarks")]
                    BenchmarkCmd::Storage(cmd) => {
                        // ensure that we keep the task manager alive
                        let partial = new_partial(&config, None)?;
                        let db = partial.backend.expose_db();
                        let storage = partial.backend.expose_storage();
                        let shared_trie_cache = partial.backend.expose_shared_trie_cache();
                        cmd.run(config, partial.client, db, storage, shared_trie_cache)
                    },
                    BenchmarkCmd::Overhead(cmd) => {
                        // ensure that we keep the task manager alive
                        let partial = new_partial(&config, None)?;
                        // A `remark` call is used as the representative no-op extrinsic.
                        let ext_builder = RemarkBuilder::new(partial.client.clone());
                        cmd.run(
                            config.chain_spec.name().into(),
                            partial.client,
                            inherent_benchmark_data()?,
                            Vec::new(),
                            &ext_builder,
                            false,
                        )
                    },
                    BenchmarkCmd::Extrinsic(cmd) => {
                        // ensure that we keep the task manager alive
                        let partial = service::new_partial(&config, None)?;
                        // Register the *Remark* and *TKA* builders.
                        let ext_factory = ExtrinsicFactory(vec![
                            Box::new(RemarkBuilder::new(partial.client.clone())),
                            Box::new(TransferKeepAliveBuilder::new(
                                partial.client.clone(),
                                Sr25519Keyring::Alice.to_account_id(),
                                ExistentialDeposit::get(),
                            )),
                        ]);
                        cmd.run(
                            partial.client,
                            inherent_benchmark_data()?,
                            Vec::new(),
                            &ext_factory,
                        )
                    },
                    BenchmarkCmd::Machine(cmd) =>
                        cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()),
                }
            })
        },
        // Key/signature utilities run standalone, without a node config.
        Some(Subcommand::Key(cmd)) => cmd.run(&cli),
        Some(Subcommand::Sign(cmd)) => cmd.run(),
        Some(Subcommand::Verify(cmd)) => cmd.run(),
        Some(Subcommand::Vanity(cmd)) => cmd.run(),
        // `build-spec` is deprecated but still dispatched until removal.
        #[allow(deprecated)]
        Some(Subcommand::BuildSpec(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
        },
        Some(Subcommand::CheckBlock(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                // The task manager is returned so it outlives the async command.
                let PartialComponents { client, task_manager, import_queue, .. } =
                    new_partial(&config, None)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        },
        Some(Subcommand::ExportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, .. } = new_partial(&config, None)?;
                Ok((cmd.run(client, config.database), task_manager))
            })
        },
        Some(Subcommand::ExportState(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, .. } = new_partial(&config, None)?;
                Ok((cmd.run(client, config.chain_spec), task_manager))
            })
        },
        Some(Subcommand::ImportBlocks(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, import_queue, .. } =
                    new_partial(&config, None)?;
                Ok((cmd.run(client, import_queue), task_manager))
            })
        },
        Some(Subcommand::PurgeChain(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run(config.database))
        },
        Some(Subcommand::Revert(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.async_run(|config| {
                let PartialComponents { client, task_manager, backend, .. } =
                    new_partial(&config, None)?;
                // Revert consensus-specific auxiliary data (BABE, then GRANDPA)
                // alongside the chain state.
                let aux_revert = Box::new(|client: Arc<FullClient>, backend, blocks| {
                    sc_consensus_babe::revert(client.clone(), backend, blocks)?;
                    sc_consensus_grandpa::revert(client, blocks)?;
                    Ok(())
                });
                Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
            })
        },
        Some(Subcommand::ChainInfo(cmd)) => {
            let runner = cli.create_runner(cmd)?;
            runner.sync_run(|config| cmd.run::<Block>(&config))
        },
    }
}
+45
View File
@@ -0,0 +1,45 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate CLI library.
//!
//! This package has two Cargo features:
//!
//! - `cli` (default): exposes functions that parse command-line options, then start and run the
//! node as a CLI application.
//!
//! - `browser`: exposes the content of the `browser` module, which consists of exported symbols
//! that are meant to be passed through the `wasm-bindgen` utility and called from JavaScript.
//! Despite its name the produced WASM can theoretically also be used from NodeJS, although this
//! hasn't been tested.
#![warn(missing_docs)]
// Extrinsic builders used by the benchmarking subcommands (CLI-only).
#[cfg(feature = "cli")]
mod benchmarking;
// Chain specifications (dev, local, staging, ...); always compiled.
pub mod chain_spec;
// clap-based CLI definition (CLI-only).
#[cfg(feature = "cli")]
mod cli;
// Subcommand dispatch, i.e. `run()` (CLI-only).
#[cfg(feature = "cli")]
mod command;
// Node service construction; always compiled.
pub mod service;
// Re-export the CLI surface at the crate root for binary crates.
#[cfg(feature = "cli")]
pub use cli::*;
#[cfg(feature = "cli")]
pub use command::*;
File diff suppressed because it is too large Load Diff
+892
View File
@@ -0,0 +1,892 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use codec::{Decode, Encode, Joiner};
use frame_support::{
dispatch::{DispatchClass, GetDispatchInfo},
traits::Currency,
weights::Weight,
};
use frame_system::{self, AccountInfo, DispatchEventInfo, EventRecord, Phase};
use pezkuwi_sdk::*;
use sp_core::{storage::well_known_keys, traits::Externalities};
use sp_runtime::{
traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult,
};
use kitchensink_runtime::{
constants::{currency::*, time::SLOT_DURATION},
Balances, CheckedExtrinsic, Header, Runtime, RuntimeCall, RuntimeEvent, System,
TransactionPayment, Treasury, UncheckedExtrinsic,
};
use node_primitives::{Balance, Hash};
use node_testing::keyring::*;
use pretty_assertions::assert_eq;
use wat;
pub mod common;
use self::common::{sign, *};
/// Returns the uncompacted ("bloaty") kitchensink wasm runtime binary.
///
/// Passing this blob as the current runtime code forces the executor to run
/// the provided wasm instead of the native equivalent — a trick used to test
/// code paths that differ between native and wasm versions.
///
/// # Panics
///
/// Panics when the development wasm binary was not built.
pub fn bloaty_code_unwrap() -> &'static [u8] {
    match kitchensink_runtime::WASM_BINARY_BLOATY {
        Some(code) => code,
        None => panic!(
            "Development wasm binary is not available. \
            Testing is only supported with the flag disabled."
        ),
    }
}
/// Default transfer fee. This will use the same logic that is implemented in transaction-payment
/// module.
///
/// Note that this reads the multiplier from storage directly, hence to get the fee of `extrinsic`
/// at block `n`, it must be called prior to executing block `n` to do the calculation with the
/// correct multiplier.
fn transfer_fee(extrinsic: &UncheckedExtrinsic) -> Balance {
    let mut info = default_transfer_call().get_dispatch_info();
    // Include the weight of the transaction extensions attached to this
    // specific extrinsic (`.0` is the inner generic extrinsic).
    info.extension_weight = extrinsic.0.extension_weight();
    // Fee for the encoded length + dispatch info, with zero tip.
    TransactionPayment::compute_fee(extrinsic.encode().len() as u32, &info, 0)
}
/// Default transfer fee, same as `transfer_fee`, but with a weight refund factored in.
fn transfer_fee_with_refund(extrinsic: &UncheckedExtrinsic, weight_refund: Weight) -> Balance {
    let mut info = default_transfer_call().get_dispatch_info();
    info.extension_weight = extrinsic.0.extension_weight();
    // Post-dispatch info: actual weight is the pre-dispatch weight minus the
    // refunded portion; `pays_fee` is carried over unchanged.
    let post_info = (Some(info.total_weight().saturating_sub(weight_refund)), info.pays_fee).into();
    TransactionPayment::compute_actual_fee(extrinsic.encode().len() as u32, &info, &post_info, 0)
}
/// A signed default transfer extrinsic from Alice with nonce 0 and zero tip.
fn xt() -> UncheckedExtrinsic {
    sign(CheckedExtrinsic {
        format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)),
        function: RuntimeCall::Balances(default_transfer_call()),
    })
}
/// Overrides the executor's wasm heap-page allocation by writing the
/// well-known `HEAP_PAGES` storage key directly into the externalities.
fn set_heap_pages<E: Externalities>(ext: &mut E, heap_pages: u64) {
    ext.place_storage(well_known_keys::HEAP_PAGES.to_vec(), Some(heap_pages.encode()));
}
/// Builds block 1 on top of genesis containing the timestamp inherent and a
/// 69 DOLLARS transfer from Alice to Bob; returns the encoded block and its hash.
fn changes_trie_block() -> (Vec<u8>, Hash) {
    // 42 seconds, expressed in milliseconds.
    let time = 42 * 1000;
    construct_block(
        &mut new_test_ext(compact_code_unwrap()),
        1,
        GENESIS_HASH.into(),
        vec![
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Bare,
                function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }),
            },
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)),
                function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
                    dest: bob().into(),
                    value: 69 * DOLLARS,
                }),
            },
        ],
        // Slot number derived from the timestamp.
        (time / SLOT_DURATION).into(),
    )
}
/// block 1 and 2 must be created together to ensure transactions are only signed once (since they
/// are not guaranteed to be deterministic) and to ensure that the correct state is propagated
/// from block1's execution to block2 to derive the correct storage_root.
///
/// Block 1: Alice -> Bob, 69 DOLLARS. Block 2: Bob -> Alice 5 DOLLARS, then
/// Alice -> Bob 15 DOLLARS (Alice's second tx, nonce 1).
fn blocks() -> ((Vec<u8>, Hash), (Vec<u8>, Hash)) {
    let mut t = new_test_ext(compact_code_unwrap());
    let time1 = 42 * 1000;
    let block1 = construct_block(
        &mut t,
        1,
        GENESIS_HASH.into(),
        vec![
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Bare,
                function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }),
            },
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)),
                function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
                    dest: bob().into(),
                    value: 69 * DOLLARS,
                }),
            },
        ],
        (time1 / SLOT_DURATION).into(),
    );
    let time2 = 52 * 1000;
    let block2 = construct_block(
        &mut t,
        2,
        // Chained onto block 1's hash.
        block1.1,
        vec![
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Bare,
                function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }),
            },
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(bob(), tx_ext(0, 0)),
                function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
                    dest: alice().into(),
                    value: 5 * DOLLARS,
                }),
            },
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(1, 0)),
                function: RuntimeCall::Balances(pallet_balances::Call::transfer_allow_death {
                    dest: bob().into(),
                    value: 15 * DOLLARS,
                }),
            },
        ],
        (time2 / SLOT_DURATION).into(),
    );
    // session change => consensus authorities change => authorities change digest item appears
    let digest = Header::decode(&mut &block2.0[..]).unwrap().digest;
    assert_eq!(digest.logs().len(), 2 /* Just babe and BEEFY slots */);
    (block1, block2)
}
/// Builds block 1 with the timestamp inherent and a `remark` extrinsic from
/// Alice carrying a zero-filled payload of `size` bytes.
///
/// `time` is in seconds; `nonce` is Alice's account nonce for the remark.
fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec<u8>, Hash) {
    construct_block(
        &mut new_test_ext(compact_code_unwrap()),
        1,
        GENESIS_HASH.into(),
        vec![
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Bare,
                function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }),
            },
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(nonce, 0)),
                function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }),
            },
        ],
        (time * 1000 / SLOT_DURATION).into(),
    )
}
/// Applying a transfer from an underfunded Alice under the bloaty wasm code
/// must be rejected with `InvalidTransaction::Payment`.
#[test]
fn panic_execution_with_foreign_code_gives_error() {
    let mut t = new_test_ext(bloaty_code_unwrap());
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            providers: 1,
            // (free, reserved, frozen, flags) — 69 units free; the top flag bit
            // presumably marks the account as using new balances logic — TODO
            // confirm against pallet_balances::AccountData.
            data: (69u128, 0u128, 0u128, 1u128 << 127),
            ..Default::default()
        }
        .encode(),
    );
    t.insert(<pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(), 69_u128.encode());
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    // 69 units cannot cover a 69 DOLLARS transfer plus fees.
    let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()))
        .0
        .unwrap();
    let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap();
    assert_eq!(r, Err(InvalidTransaction::Payment.into()));
}
/// Same as `panic_execution_with_foreign_code_gives_error`, but executed with
/// the compact (native-equivalent) wasm code.
#[test]
fn bad_extrinsic_with_native_equivalent_code_gives_error() {
    let mut t = new_test_ext(compact_code_unwrap());
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            providers: 1,
            // (free, reserved, frozen, flags); 69 units is far too little for
            // the transfer + fee.
            data: (69u128, 0u128, 0u128, 1u128 << 127),
            ..Default::default()
        }
        .encode(),
    );
    t.insert(<pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(), 69u128.encode());
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()))
        .0
        .unwrap();
    let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap();
    assert_eq!(r, Err(InvalidTransaction::Payment.into()));
}
/// With Alice funded (111 DOLLARS) the default transfer applies cleanly under
/// the compact code, and the resulting balances match fee expectations.
#[test]
fn successful_execution_with_native_equivalent_code_gives_ok() {
    let mut t = new_test_ext(compact_code_unwrap());
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            providers: 1,
            data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127),
            ..Default::default()
        }
        .encode(),
    );
    // Bob starts with an empty default account.
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(bob()),
        AccountInfo::<
            <Runtime as frame_system::Config>::Nonce,
            <Runtime as frame_system::Config>::AccountData,
        >::default()
        .encode(),
    );
    t.insert(
        <pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(),
        (111 * DOLLARS).encode(),
    );
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    // Compute the expected fee before applying the extrinsic so the pre-block
    // fee multiplier is used.
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0;
    assert!(r.is_ok());
    t.execute_with(|| {
        // 111 - 69 transferred = 42 DOLLARS, minus the fee.
        assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund);
        assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS);
    });
}
/// Same as `successful_execution_with_native_equivalent_code_gives_ok`, but
/// executed with the bloaty (wasm-only) runtime code.
#[test]
fn successful_execution_with_foreign_code_gives_ok() {
    let mut t = new_test_ext(bloaty_code_unwrap());
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            providers: 1,
            data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127),
            ..Default::default()
        }
        .encode(),
    );
    // Bob starts with an empty default account.
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(bob()),
        AccountInfo::<
            <Runtime as frame_system::Config>::Nonce,
            <Runtime as frame_system::Config>::AccountData,
        >::default()
        .encode(),
    );
    t.insert(
        <pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(),
        (111 * DOLLARS).encode(),
    );
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    // Fee computed before applying the extrinsic (pre-block multiplier).
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0;
    assert!(r.is_ok());
    t.execute_with(|| {
        assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund);
        assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS);
    });
}
/// Imports blocks 1 and 2 (from `blocks`) via `Core_execute_block` with the
/// compact code and verifies both the resulting balances and the emitted
/// event records (withdraw → transfer → treasury deposit → fee-paid →
/// extrinsic-success per transfer).
#[test]
fn full_native_block_import_works() {
    let mut t = new_test_ext(compact_code_unwrap());
    let (block1, block2) = blocks();
    let mut alice_last_known_balance: Balance = Default::default();
    // Fees must be computed before importing each block so the pre-block fee
    // multiplier is used (see `transfer_fee`).
    let mut fees = t.execute_with(|| transfer_fee(&xt()));
    let extension_weight = xt().0.extension_weight();
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    // Expected dispatch weight of a transfer: call weight + per-extrinsic base.
    let transfer_weight = default_transfer_call().get_dispatch_info().call_weight.saturating_add(
        <Runtime as frame_system::Config>::BlockWeights::get()
            .get(DispatchClass::Normal)
            .base_extrinsic,
    );
    // Expected dispatch weight of the (mandatory) timestamp inherent.
    let timestamp_weight = pallet_timestamp::Call::set::<Runtime> { now: Default::default() }
        .get_dispatch_info()
        .call_weight
        .saturating_add(
            <Runtime as frame_system::Config>::BlockWeights::get()
                .get(DispatchClass::Mandatory)
                .base_extrinsic,
        );
    executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap();
    t.execute_with(|| {
        assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund);
        assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS);
        alice_last_known_balance = Balances::total_balance(&alice());
        // Exact event stream expected for block 1.
        let events = vec![
            EventRecord {
                phase: Phase::ApplyExtrinsic(0),
                event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
                    dispatch_info: DispatchEventInfo {
                        weight: timestamp_weight,
                        class: DispatchClass::Mandatory,
                        pays_fee: Default::default(),
                    },
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw {
                    who: alice().into(),
                    amount: fees,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
                    from: alice().into(),
                    to: bob().into(),
                    amount: 69 * DOLLARS,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
                    who: pallet_treasury::Pallet::<Runtime>::account_id(),
                    amount: fees_after_refund,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::TransactionPayment(
                    pallet_transaction_payment::Event::TransactionFeePaid {
                        who: alice().into(),
                        actual_fee: fees_after_refund,
                        tip: 0,
                    },
                ),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
                    dispatch_info: DispatchEventInfo {
                        weight: transfer_weight
                            .saturating_add(extension_weight.saturating_sub(weight_refund)),
                        ..Default::default()
                    },
                }),
                topics: vec![],
            },
        ];
        // Drop nondeterministic bags-list rebagging events before comparing.
        let filtered_events: Vec<_> = System::events()
            .into_iter()
            .filter(|ev| {
                !matches!(
                    ev.event,
                    RuntimeEvent::VoterList(
                        pallet_bags_list::Event::<Runtime, _>::ScoreUpdated { .. }
                    )
                )
            })
            .collect();
        assert_eq!(filtered_events, events);
    });
    // Recompute expectations for block 2 (multiplier may have changed).
    fees = t.execute_with(|| transfer_fee(&xt()));
    let pot = t.execute_with(|| Treasury::pot());
    let extension_weight = xt().0.extension_weight();
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap();
    t.execute_with(|| {
        // Alice: -15 sent, +5 received = -10 net, minus her own fee.
        assert_eq!(
            Balances::total_balance(&alice()),
            alice_last_known_balance - 10 * DOLLARS - fees_after_refund,
        );
        assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees_after_refund);
        let events = vec![
            EventRecord {
                phase: Phase::Initialization,
                event: RuntimeEvent::Treasury(pallet_treasury::Event::UpdatedInactive {
                    reactivated: 0,
                    deactivated: pot,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(0),
                event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
                    dispatch_info: DispatchEventInfo {
                        weight: timestamp_weight,
                        class: DispatchClass::Mandatory,
                        pays_fee: Default::default(),
                    },
                }),
                topics: vec![],
            },
            // Extrinsic 1: Bob -> Alice, 5 DOLLARS.
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw {
                    who: bob().into(),
                    amount: fees,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
                    from: bob().into(),
                    to: alice().into(),
                    amount: 5 * DOLLARS,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
                    who: pallet_treasury::Pallet::<Runtime>::account_id(),
                    amount: fees_after_refund,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::TransactionPayment(
                    pallet_transaction_payment::Event::TransactionFeePaid {
                        who: bob().into(),
                        actual_fee: fees_after_refund,
                        tip: 0,
                    },
                ),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(1),
                event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
                    dispatch_info: DispatchEventInfo {
                        weight: transfer_weight
                            .saturating_add(extension_weight.saturating_sub(weight_refund)),
                        ..Default::default()
                    },
                }),
                topics: vec![],
            },
            // Extrinsic 2: Alice -> Bob, 15 DOLLARS.
            EventRecord {
                phase: Phase::ApplyExtrinsic(2),
                event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw {
                    who: alice().into(),
                    amount: fees,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(2),
                event: RuntimeEvent::Balances(pallet_balances::Event::Transfer {
                    from: alice().into(),
                    to: bob().into(),
                    amount: 15 * DOLLARS,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(2),
                event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
                    who: pallet_treasury::Pallet::<Runtime>::account_id(),
                    amount: fees_after_refund,
                }),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(2),
                event: RuntimeEvent::TransactionPayment(
                    pallet_transaction_payment::Event::TransactionFeePaid {
                        who: alice().into(),
                        actual_fee: fees_after_refund,
                        tip: 0,
                    },
                ),
                topics: vec![],
            },
            EventRecord {
                phase: Phase::ApplyExtrinsic(2),
                event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
                    dispatch_info: DispatchEventInfo {
                        weight: transfer_weight
                            .saturating_add(extension_weight.saturating_sub(weight_refund)),
                        ..Default::default()
                    },
                }),
                topics: vec![],
            },
        ];
        let all_events = System::events();
        // Ensure that all expected events (`events`) are present in the full event log
        // (`all_events`). We use this instead of strict equality since some events (like
        // VoterList::ScoreUpdated) may be emitted non-deterministically depending on runtime
        // internals or auto-rebagging logic.
        for expected_event in &events {
            assert!(
                all_events.contains(expected_event),
                "Expected event {:?} not found in actual events",
                expected_event
            );
        }
    });
}
/// Imports blocks 1 and 2 (from `blocks`) via `Core_execute_block` through the
/// wasm executor and checks the resulting balances of Alice and Bob.
#[test]
fn full_wasm_block_import_works() {
    let mut t = new_test_ext(compact_code_unwrap());
    let (block1, block2) = blocks();
    let mut alice_last_known_balance: Balance = Default::default();
    // Fees must be computed before importing each block so the pre-block fee
    // multiplier is used (see `transfer_fee_with_refund`).
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap();
    t.execute_with(|| {
        assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund);
        assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS);
        alice_last_known_balance = Balances::total_balance(&alice());
    });
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap();
    t.execute_with(|| {
        // Alice: -15 sent, +5 received = -10 net, minus her own fee.
        assert_eq!(
            Balances::total_balance(&alice()),
            alice_last_known_balance - 10 * DOLLARS - fees_after_refund,
        );
        // Bob pays exactly one transfer fee in block 2 (the redundant `1 *`
        // factor was removed — clippy `identity_op`).
        assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees_after_refund);
    });
}
/// Hand-written wat contract used by `deploying_wasm_contract_should_work`.
///
/// On `call` it verifies the 4-byte input is exactly `[0, 1, 2, 3]` and then
/// performs a `seal_call` transferring value to a hard-coded destination
/// account; any check failure falls through to `unreachable` and traps.
const CODE_TRANSFER: &str = r#"
(module
;; seal_call(
;;    callee_ptr: u32,
;;    callee_len: u32,
;;    gas: u64,
;;    value_ptr: u32,
;;    value_len: u32,
;;    input_data_ptr: u32,
;;    input_data_len: u32,
;;    output_ptr: u32,
;;    output_len_ptr: u32
;; ) -> u32
(import "seal0" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32 i32) (result i32)))
(import "seal0" "seal_input" (func $seal_input (param i32 i32)))
(import "env" "memory" (memory 1 1))
(func (export "deploy")
)
(func (export "call")
(block $fail
;; Load input data to contract memory
(call $seal_input
(i32.const 0)
(i32.const 52)
)
;; fail if the input size is not != 4
(br_if $fail
(i32.ne
(i32.const 4)
(i32.load (i32.const 52))
)
)
(br_if $fail
(i32.ne
(i32.load8_u (i32.const 0))
(i32.const 0)
)
)
(br_if $fail
(i32.ne
(i32.load8_u (i32.const 1))
(i32.const 1)
)
)
(br_if $fail
(i32.ne
(i32.load8_u (i32.const 2))
(i32.const 2)
)
)
(br_if $fail
(i32.ne
(i32.load8_u (i32.const 3))
(i32.const 3)
)
)
(drop
(call $seal_call
(i32.const 4)  ;; Pointer to "callee" address.
(i32.const 32)  ;; Length of "callee" address.
(i64.const 0)  ;; How much gas to devote for the execution. 0 = all.
(i32.const 36)  ;; Pointer to the buffer with value to transfer
(i32.const 16)   ;; Length of the buffer with value to transfer.
(i32.const 0)   ;; Pointer to input data buffer address
(i32.const 0)   ;; Length of input data buffer
(i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output
(i32.const 0) ;; Length is ignored in this case
)
)
(return)
)
unreachable
)
;; Destination AccountId to transfer the funds.
;; Represented by H256 (32 bytes long) in little endian.
(data (i32.const 4)
"\09\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
"\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
"\00\00\00\00"
)
;; Amount of value to transfer.
;; Represented by u128 (16 bytes long) in little endian.
(data (i32.const 36)
"\06\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00"
"\00\00"
)
;; Length of the input buffer
(data (i32.const 52) "\04")
)
"#;
/// Deploys the `CODE_TRANSFER` contract via `instantiate_with_code`, calls it
/// with the expected `[0, 1, 2, 3]` input, and checks the contract account
/// exists afterwards.
#[test]
fn deploying_wasm_contract_should_work() {
    let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap();
    let transfer_ch = <Runtime as frame_system::Config>::Hashing::hash(&transfer_code);
    // Predict the deterministic contract address for Charlie's deployment.
    let addr =
        pallet_contracts::Pallet::<Runtime>::contract_address(&charlie(), &transfer_ch, &[], &[]);
    let time = 42 * 1000;
    let b = construct_block(
        &mut new_test_ext(compact_code_unwrap()),
        1,
        GENESIS_HASH.into(),
        vec![
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Bare,
                function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }),
            },
            // Charlie deploys the contract (nonce 0)...
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)),
                function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::<
                    Runtime,
                > {
                    value: 0,
                    gas_limit: Weight::from_parts(500_000_000, 0),
                    storage_deposit_limit: None,
                    code: transfer_code,
                    data: Vec::new(),
                    salt: Vec::new(),
                }),
            },
            // ...and then calls it with the input the contract expects (nonce 1).
            CheckedExtrinsic {
                format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)),
                function: RuntimeCall::Contracts(pallet_contracts::Call::call::<Runtime> {
                    dest: sp_runtime::MultiAddress::Id(addr.clone()),
                    value: 10,
                    gas_limit: Weight::from_parts(500_000_000, 0),
                    storage_deposit_limit: None,
                    data: vec![0x00, 0x01, 0x02, 0x03],
                }),
            },
        ],
        (time / SLOT_DURATION).into(),
    );
    let mut t = new_test_ext(compact_code_unwrap());
    executor_call(&mut t, "Core_execute_block", &b.0).0.unwrap();
    t.execute_with(|| {
        // Verify that the contract does exist by querying some of its storage items
        // It does not matter that the storage item itself does not exist.
        assert!(&pallet_contracts::Pallet::<Runtime>::get_storage(addr, vec![]).is_ok());
    });
}
#[test]
fn wasm_big_block_import_fails() {
    // With the wasm heap capped at 4 pages, importing an oversized block must
    // fail inside the allocator.
    let mut ext = new_test_ext(compact_code_unwrap());
    set_heap_pages(&mut ext.ext(), 4);
    let encoded_block = block_with_size(42, 0, 120_000).0;
    let outcome = executor_call(&mut ext, "Core_execute_block", &encoded_block).0;
    // Expected shape: Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) })))
    assert!(outcome.is_err());
}
#[test]
fn native_big_block_import_succeeds() {
    // The same large block imports cleanly when no heap-page limit is imposed.
    let mut ext = new_test_ext(compact_code_unwrap());
    let encoded_block = block_with_size(42, 0, 120_000).0;
    executor_call(&mut ext, "Core_execute_block", &encoded_block).0.unwrap();
}
#[test]
fn native_big_block_import_fails_on_fallback() {
    let mut ext = new_test_ext(compact_code_unwrap());
    // We set the heap pages to 8 because we know that should give an OOM in
    // WASM with the given block.
    set_heap_pages(&mut ext.ext(), 8);
    let outcome = executor_call(&mut ext, "Core_execute_block", &block_with_size(42, 0, 120_000).0).0;
    assert!(outcome.is_err());
}
#[test]
fn panic_execution_gives_error() {
    let mut t = new_test_ext(bloaty_code_unwrap());
    // Seed Alice's account with zero balance so the transfer's fee cannot be
    // paid; the extrinsic must then be rejected as unpayable.
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            data: (0 * DOLLARS, 0u128, 0u128, 0u128),
            ..Default::default()
        }
        .encode(),
    );
    t.insert(<pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(), 0_u128.encode());
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    // Apply the default transfer extrinsic; the call itself succeeds at the
    // executor level, but the dispatch outcome must be a `Payment` error.
    let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()))
        .0
        .unwrap();
    let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap();
    assert_eq!(r, Err(InvalidTransaction::Payment.into()));
}
#[test]
fn successful_execution_gives_ok() {
    let mut t = new_test_ext(compact_code_unwrap());
    // Fund Alice with 111 DOLLARS so she can afford the 69 DOLLARS transfer
    // plus transaction fees.
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(alice()),
        AccountInfo::<<Runtime as frame_system::Config>::Nonce, _> {
            providers: 1,
            data: (111 * DOLLARS, 0u128, 0u128, 1u128 << 127),
            ..Default::default()
        }
        .encode(),
    );
    // Bob starts from a default (empty) account record.
    t.insert(
        <frame_system::Account<Runtime>>::hashed_key_for(bob()),
        AccountInfo::<
            <Runtime as frame_system::Config>::Nonce,
            <Runtime as frame_system::Config>::AccountData,
        >::default()
        .encode(),
    );
    t.insert(
        <pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(),
        (111 * DOLLARS).encode(),
    );
    t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
    let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
    assert!(r.is_ok());
    t.execute_with(|| {
        assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS);
    });
    // Compute the fee (net of any weight refund) the transfer will cost.
    let weight_refund = Weight::zero();
    let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
    let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()))
        .0
        .unwrap();
    // Both layers must succeed: the extrinsic was applied AND dispatched ok.
    ApplyExtrinsicResult::decode(&mut &r[..])
        .unwrap()
        .expect("Extrinsic could not be applied")
        .expect("Extrinsic failed");
    // 69 DOLLARS moved to Bob; Alice additionally paid the fee.
    t.execute_with(|| {
        assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund);
        assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS);
    });
}
#[test]
fn should_import_block_with_test_client() {
    use node_testing::client::{
        sp_consensus::BlockOrigin, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt,
    };
    // Decode the pre-built block and hand it to a freshly built test client.
    let client = TestClientBuilder::new().build();
    let encoded = changes_trie_block().0;
    let decoded_block = node_primitives::Block::decode(&mut &encoded[..]).unwrap();
    futures::executor::block_on(client.import(BlockOrigin::Own, decoded_block)).unwrap();
}
/// The runtime's default genesis preset must match the JSON fixture on disk.
#[test]
fn default_config_as_json_works() {
    let mut t = new_test_ext(compact_code_unwrap());
    // Ask the runtime for its default (unnamed) genesis preset.
    let r = executor_call(
        &mut t,
        "GenesisBuilder_get_preset",
        &None::<&sp_genesis_builder::PresetId>.encode(),
    )
    .0
    .unwrap();
    let r = Option::<Vec<u8>>::decode(&mut &r[..])
        .unwrap()
        .expect("default config is there");
    // `r` is already a `Vec<u8>`; the previous `r.into()` was an identity
    // conversion and has been dropped.
    let json = String::from_utf8(r).expect("returned value is json. qed.");
    // `include_str!` yields a `&'static str`; no owned copy is needed.
    let expected = include_str!("res/default_genesis_config.json");
    // Compare parsed JSON values so formatting differences do not matter.
    assert_eq!(
        serde_json::from_str::<serde_json::Value>(expected).unwrap(),
        serde_json::from_str::<serde_json::Value>(&json).unwrap()
    );
}
@@ -0,0 +1,47 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
// Unix only since it uses signals from [`common::run_node_for_a_while`].
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
use substrate_cli_test_utils as common;
/// `benchmark block` works for the dev runtime using the wasm executor.
#[tokio::test]
async fn benchmark_block_works() {
    let base_dir = tempdir().expect("could not create a temp dir");
    // Run a dev node briefly so there is at least one block to benchmark.
    common::run_node_for_a_while(base_dir.path(), &["--dev", "--no-hardware-benchmarks"]).await;
    // Invoke `benchmark block` with all options to make sure that they are valid.
    let mut cmd = Command::new(cargo_bin("substrate-node"));
    cmd.args(["benchmark", "block", "--dev"])
        .arg("-d")
        .arg(base_dir.path())
        .args(["--from", "1", "--to", "1", "--repeat", "1", "--wasm-execution=compiled"]);
    let status = cmd.status().unwrap();
    assert!(status.success())
}
@@ -0,0 +1,47 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
/// Tests that the `benchmark extrinsic` command works for
/// remark and transfer_keep_alive within the substrate dev runtime.
#[test]
fn benchmark_extrinsic_works() {
    // Exercise both supported pallet/extrinsic combinations.
    for (pallet, extrinsic) in [("system", "remark"), ("balances", "transfer_keep_alive")] {
        benchmark_extrinsic(pallet, extrinsic);
    }
}
/// Checks that the `benchmark extrinsic` command works for the given pallet and extrinsic.
///
/// Spawns the node binary and asserts a successful exit status.
fn benchmark_extrinsic(pallet: &str, extrinsic: &str) {
    let base_dir = tempdir().expect("could not create a temp dir");
    let status = Command::new(cargo_bin("substrate-node"))
        // Array literals (no `&`) throughout, consistent with the other
        // `.args(..)` calls in this function.
        .args(["benchmark", "extrinsic", "--dev"])
        .arg("-d")
        .arg(base_dir.path())
        .args(["--pallet", pallet, "--extrinsic", extrinsic])
        // Run with low repeats for faster execution.
        .args(["--warmup=10", "--repeat=10", "--max-ext-per-block=10"])
        .args(["--wasm-execution=compiled"])
        .status()
        .unwrap();
    assert!(status.success());
}
@@ -0,0 +1,73 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
/// Tests that the `benchmark machine` command works for the substrate dev runtime.
#[test]
fn benchmark_machine_works() {
    // Keep every measurement short; `--allow-fail` guarantees a zero exit
    // code even on machines below the reference hardware.
    let short = "0.1";
    let status = Command::new(cargo_bin("substrate-node"))
        .args(["benchmark", "machine", "--dev"])
        .args(["--verify-duration", short])
        .args(["--disk-duration", short])
        .args(["--memory-duration", short])
        .args(["--hash-duration", short])
        // Make it succeed.
        .args(["--allow-fail"])
        .status()
        .unwrap();
    assert!(status.success());
}
/// Test that the hardware does not meet the requirements.
///
/// This is most likely to succeed since it uses a test profile.
#[test]
#[cfg(debug_assertions)]
fn benchmark_machine_fails_with_slow_hardware() {
    // Demand long benchmark durations with zero tolerance so the requirement
    // check cannot pass on a debug build.
    let result = Command::new(cargo_bin("substrate-node"))
        .args(["benchmark", "machine", "--dev"])
        .args(["--verify-duration", "1.0"])
        .args(["--disk-duration", "2"])
        .args(["--hash-duration", "1.0"])
        .args(["--memory-duration", "1.0"])
        .args(["--tolerance", "0"])
        .output()
        .unwrap();
    // Command should have failed.
    assert!(!result.status.success());
    // An `UnmetRequirement` error should have been printed to stderr.
    let log = String::from_utf8_lossy(&result.stderr).to_string();
    assert!(log.contains("UnmetRequirement"));
}
@@ -0,0 +1,47 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
/// Tests that the `benchmark overhead` command works for the substrate dev runtime.
#[test]
fn benchmark_overhead_works() {
    let dir = tempdir().expect("could not create a temp dir");
    let base_path = dir.path();
    // Only put 10 extrinsics into the block otherwise it takes forever to build it
    // especially for a non-release build.
    let status = Command::new(cargo_bin("substrate-node"))
        .args(["benchmark", "overhead", "--dev", "-d"])
        .arg(base_path)
        .arg("--weight-path")
        .arg(base_path)
        .args(["--warmup", "10", "--repeat", "10"])
        .args(["--add", "100", "--mul", "1.2", "--metric", "p75"])
        .args(["--max-ext-per-block", "10", "--wasm-execution=compiled"])
        .status()
        .unwrap();
    assert!(status.success());
    // Both weight files must have been written into the weight path.
    for weight_file in ["block_weights.rs", "extrinsic_weights.rs"] {
        assert!(base_path.join(weight_file).exists());
    }
}
@@ -0,0 +1,86 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(feature = "runtime-benchmarks")]
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
/// `benchmark pallet` works for the different combinations of `steps` and `repeat`.
#[test]
fn benchmark_pallet_works() {
    // (steps, repeat, expected success)
    let cases = [
        // Some invalid combinations:
        (0, 10, false),
        (1, 10, false),
        // ... and some valid:
        (2, 1, true),
        (50, 20, true),
        (20, 50, true),
    ];
    for (steps, repeat, should_work) in cases {
        benchmark_pallet(steps, repeat, should_work);
    }
}
/// Listing pallet benchmarks works with the default and explicit genesis builders.
#[test]
fn benchmark_pallet_args_work() {
    // NOTE(review): the original invoked this identical case twice in a row;
    // once suffices.
    benchmark_pallet_args(&["--list", "--pallet=pallet_balances"], true);
    benchmark_pallet_args(
        &["--list", "--pallet=pallet_balances", "--genesis-builder=spec-genesis"],
        true,
    );
    benchmark_pallet_args(
        &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=spec-genesis"],
        true,
    );
    benchmark_pallet_args(
        &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=spec-runtime"],
        true,
    );
    // Error because no runtime is provided:
    benchmark_pallet_args(
        &["--list", "--pallet=pallet_balances", "--chain=dev", "--genesis-builder=runtime"],
        false,
    );
}
/// Runs `benchmark pallet` with the given `steps`/`repeat` values and asserts
/// that the exit status matches `should_work`.
fn benchmark_pallet(steps: u32, repeat: u32, should_work: bool) {
    let steps_arg = steps.to_string();
    let repeat_arg = repeat.to_string();
    let status = Command::new(cargo_bin("substrate-node"))
        .args(["benchmark", "pallet", "--dev"])
        // Use the `addition` benchmark since is the fastest.
        .args(["--pallet", "frame-benchmarking", "--extrinsic", "addition"])
        .args(["--steps", &steps_arg, "--repeat", &repeat_arg])
        .args([
            "--wasm-execution=compiled",
            "--no-storage-info",
            "--no-median-slopes",
            "--no-min-squares",
            "--heap-pages=4096",
        ])
        .status()
        .unwrap();
    assert_eq!(status.success(), should_work);
}
/// Invokes `benchmark pallet` with the given extra `args` and asserts that the
/// exit status matches `should_work`.
fn benchmark_pallet_args(args: &[&str], should_work: bool) {
    let mut command = Command::new(cargo_bin("substrate-node"));
    command.args(["benchmark", "pallet"]).args(args);
    let status = command.status().unwrap();
    assert_eq!(status.success(), should_work);
}
@@ -0,0 +1,56 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(feature = "runtime-benchmarks")]
use assert_cmd::cargo::cargo_bin;
use std::{
path::Path,
process::{Command, ExitStatus},
};
use tempfile::tempdir;
/// Tests that the `benchmark storage` command works for the dev runtime.
#[test]
fn benchmark_storage_works() {
    let tmp_dir = tempdir().expect("could not create a temp dir");
    let base_path = tmp_dir.path();
    // Benchmarking the storage works and creates the correct weight file,
    // for both database backends.
    let backends = [("rocksdb", "rocksdb_weights.rs"), ("paritydb", "paritydb_weights.rs")];
    for (db, weight_file) in backends {
        assert!(benchmark_storage(db, base_path).success());
        assert!(base_path.join(weight_file).exists());
    }
}
/// Runs `benchmark storage` against database backend `db`, writing weights to
/// `base_path`, and returns the command's exit status.
fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus {
    Command::new(cargo_bin("substrate-node"))
        .args(["benchmark", "storage", "--dev"])
        .args(["--db", db])
        .arg("--weight-path")
        .arg(base_path)
        .args(["--state-version", "1", "--batch-size", "1", "--warmups", "0"])
        .args(["--add", "100", "--mul", "1.2", "--metric", "p75"])
        .arg("--include-child-trees")
        .status()
        .unwrap()
}
@@ -0,0 +1,39 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
/// `build-spec --dev` must succeed, create the chain folder without a database,
/// and print a valid JSON chain spec to stdout.
#[test]
fn build_spec_works() {
    let base_path = tempdir().expect("could not create a temp dir");
    let output = Command::new(cargo_bin("substrate-node"))
        .args(["build-spec", "--dev", "-d"])
        .arg(base_path.path())
        .output()
        .unwrap();
    assert!(output.status.success());
    // Make sure that the `dev` chain folder exists, but the `db` doesn't
    let chain_dir = base_path.path().join("chains/dev/");
    assert!(chain_dir.exists());
    assert!(!chain_dir.join("db").exists());
    let _value: serde_json::Value = serde_json::from_slice(&output.stdout).unwrap();
}
@@ -0,0 +1,40 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
use substrate_cli_test_utils as common;
#[tokio::test]
async fn check_block_works() {
    let node_dir = tempdir().expect("could not create a temp dir");
    // Run a dev node briefly so the database contains at least one block.
    common::run_node_for_a_while(node_dir.path(), &["--dev", "--no-hardware-benchmarks"]).await;
    // `check-block 1` must succeed against the produced chain database.
    let status = Command::new(cargo_bin("substrate-node"))
        .args(["check-block", "--dev", "-d"])
        .arg(node_dir.path())
        .arg("1")
        .status()
        .unwrap();
    assert!(status.success());
}
+197
View File
@@ -0,0 +1,197 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use codec::{Decode, Encode};
use frame_support::Hashable;
use frame_system::offchain::AppCrypto;
use pezkuwi_sdk::*;
use sc_executor::error::Result;
use sp_consensus_babe::{
digests::{PreDigest, SecondaryPlainPreDigest},
Slot, BABE_ENGINE_ID,
};
use sp_core::{
crypto::KeyTypeId,
sr25519::Signature,
traits::{CallContext, CodeExecutor, RuntimeCode},
};
use sp_runtime::{
traits::{BlakeTwo256, Header as HeaderT},
ApplyExtrinsicResult, Digest, DigestItem, MultiSignature, MultiSigner,
};
use sp_state_machine::TestExternalities as CoreTestExternalities;
use kitchensink_runtime::{
constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime,
UncheckedExtrinsic,
};
use node_primitives::{BlockNumber, Hash};
use node_testing::keyring::*;
use sp_externalities::Externalities;
use staging_node_cli::service::RuntimeExecutor;
/// Key type id under which the test-only application crypto below is registered.
pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test");
/// Test sr25519 application crypto bound to [`TEST_KEY_TYPE_ID`].
pub mod sr25519 {
    mod app_sr25519 {
        use super::super::TEST_KEY_TYPE_ID;
        use pezkuwi_sdk::sp_application_crypto::{app_crypto, sr25519};
        // Generates the application-crypto wrapper types for sr25519 keys.
        app_crypto!(sr25519, TEST_KEY_TYPE_ID);
    }
    /// Public key type exposed to the rest of the tests.
    pub type AuthorityId = app_sr25519::Public;
}
/// Test [`AppCrypto`] implementation wiring the sr25519 test key type into
/// `MultiSigner`/`MultiSignature`.
pub struct TestAuthorityId;
impl AppCrypto<MultiSigner, MultiSignature> for TestAuthorityId {
    type RuntimeAppPublic = sr25519::AuthorityId;
    type GenericSignature = Signature;
    type GenericPublic = sp_core::sr25519::Public;
}
/// The wasm runtime code.
///
/// `compact` since it is after post-processing with wasm-gc which performs tree-shaking thus
/// making the binary slimmer. There is a convention to use compact version of the runtime
/// as canonical.
///
/// # Panics
///
/// Panics when the development wasm binary was not compiled into this build.
pub fn compact_code_unwrap() -> &'static [u8] {
    kitchensink_runtime::WASM_BINARY.expect(
        "Development wasm binary is not available. Testing is only supported with the flag \
        disabled.",
    )
}
/// Well-known genesis hash used for all test blocks constructed here.
pub const GENESIS_HASH: [u8; 32] = [69u8; 32];
/// Spec version taken from the kitchensink runtime, used when signing extrinsics.
pub const SPEC_VERSION: u32 = kitchensink_runtime::VERSION.spec_version;
/// Transaction version taken from the kitchensink runtime, used when signing extrinsics.
pub const TRANSACTION_VERSION: u32 = kitchensink_runtime::VERSION.transaction_version;
/// Externalities type shared by all executor tests.
pub type TestExternalities<H> = CoreTestExternalities<H>;
/// Signs the given checked extrinsic with the test keyring, using the
/// canonical spec/transaction versions and [`GENESIS_HASH`].
pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic {
    node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH, None)
}
/// Balances `transfer_allow_death` call sending 69 DOLLARS to Bob; used as the
/// default extrinsic by several tests.
pub fn default_transfer_call() -> pallet_balances::Call<Runtime> {
    pallet_balances::Call::<Runtime>::transfer_allow_death {
        dest: bob().into(),
        value: 69 * DOLLARS,
    }
}
/// Builds a header for block number `n`; the `[69; 32]` entry matches
/// [`GENESIS_HASH`], all other fields are defaulted.
pub fn from_block_number(n: u32) -> Header {
    Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default())
}
/// Constructs the runtime executor used by [`executor_call`].
pub fn executor() -> RuntimeExecutor {
    RuntimeExecutor::builder().build()
}
/// Calls runtime API `method` with SCALE-encoded `data` against the wasm blob
/// stored under the `:code` key of `t`, returning the executor's result tuple.
pub fn executor_call(
    t: &mut TestExternalities<BlakeTwo256>,
    method: &str,
    data: &[u8],
) -> (Result<Vec<u8>>, bool) {
    let mut t = t.ext();
    // Fetch the runtime blob and the optional heap-page override from the
    // well-known storage keys.
    let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap();
    let heap_pages = t.storage(sp_core::storage::well_known_keys::HEAP_PAGES);
    let runtime_code = RuntimeCode {
        code_fetcher: &sp_core::traits::WrappedRuntimeCode(code.as_slice().into()),
        hash: sp_crypto_hashing::blake2_256(&code).to_vec(),
        heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()),
    };
    sp_tracing::try_init_simple();
    executor().call(&mut t, &runtime_code, method, data, CallContext::Onchain)
}
/// Creates fresh test externalities with the given runtime `code` installed
/// and the default `node_testing` genesis storage.
pub fn new_test_ext(code: &[u8]) -> TestExternalities<BlakeTwo256> {
    sp_tracing::try_init_simple();
    // Return the externalities directly; the previous intermediate `let ext`
    // binding was redundant.
    TestExternalities::new_with_code(
        code,
        node_testing::genesis::config().build_storage().unwrap(),
    )
}
/// Construct a fake block.
///
/// `extrinsics` must be a list of valid extrinsics, i.e. none of the extrinsics for example
/// can report `ExhaustResources`. Otherwise, this function panics.
///
/// Returns the SCALE-encoded block together with its blake2-256 hash. The
/// block carries a secondary-plain BABE pre-digest for `babe_slot`.
pub fn construct_block(
    env: &mut TestExternalities<BlakeTwo256>,
    number: BlockNumber,
    parent_hash: Hash,
    extrinsics: Vec<CheckedExtrinsic>,
    babe_slot: Slot,
) -> (Vec<u8>, Hash) {
    use sp_trie::{LayoutV1 as Layout, TrieConfiguration};
    // sign extrinsics.
    let extrinsics = extrinsics.into_iter().map(sign).collect::<Vec<_>>();
    // calculate the header fields that we can.
    let extrinsics_root =
        Layout::<BlakeTwo256>::ordered_trie_root(extrinsics.iter().map(Encode::encode))
            .to_fixed_bytes()
            .into();
    // The state root is left defaulted here; the real one comes back from
    // `BlockBuilder_finalize_block` below.
    let header = Header {
        parent_hash,
        number,
        extrinsics_root,
        state_root: Default::default(),
        digest: Digest {
            logs: vec![DigestItem::PreRuntime(
                BABE_ENGINE_ID,
                PreDigest::SecondaryPlain(SecondaryPlainPreDigest {
                    slot: babe_slot,
                    authority_index: 42,
                })
                .encode(),
            )],
        },
    };
    // execute the block to get the real header.
    executor_call(env, "Core_initialize_block", &header.encode()).0.unwrap();
    for extrinsic in extrinsics.iter() {
        // Try to apply the `extrinsic`. It should be valid, in the sense that it passes
        // all pre-inclusion checks.
        let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode())
            .0
            .expect("application of an extrinsic failed");
        match ApplyExtrinsicResult::decode(&mut &r[..])
            .expect("apply result deserialization failed")
        {
            Ok(_) => {},
            Err(e) => panic!("Applying extrinsic failed: {:?}", e),
        }
    }
    // Finalize to obtain the completed header (with the real state root).
    let header = Header::decode(
        &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0]).0.unwrap()[..],
    )
    .unwrap();
    let hash = header.blake2_256();
    (Block { header, extrinsics }.encode(), hash.into())
}
@@ -0,0 +1,206 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use regex::Regex;
use std::{fs, path::PathBuf, process::Command};
use tempfile::{tempdir, TempDir};
use substrate_cli_test_utils as common;
/// Returns `true` when the captured log output contains the literal
/// (case-sensitive) string `"Error"`.
fn contains_error(logged_output: &str) -> bool {
    logged_output.find("Error").is_some()
}
/// Helper struct to execute the export/import/revert tests.
/// The fields are paths to a temporary directory
struct ExportImportRevertExecutor<'a> {
    /// Base directory of the node whose blocks are exported.
    base_path: &'a TempDir,
    /// File written by `export-blocks` and read back by `import-blocks`.
    exported_blocks_file: &'a PathBuf,
    /// Database directory; removed after a successful export.
    db_path: &'a PathBuf,
    /// Number of blocks exported by the last `run_export`, if any.
    num_exported_blocks: Option<u64>,
}
/// Format options for export / import commands.
enum FormatOpt {
    /// Human-readable JSON block format.
    Json,
    /// Raw binary block format (passes `--binary` to the node).
    Binary,
}
/// Command corresponding to the different commands we would like to run.
enum SubCommand {
    /// The node's `export-blocks` sub-command.
    ExportBlocks,
    /// The node's `import-blocks` sub-command.
    ImportBlocks,
}
// Implementing `Display` (rather than `ToString` directly) still provides
// `to_string()` through the blanket impl and is the idiomatic form; clippy
// warns on direct `ToString` impls.
impl std::fmt::Display for SubCommand {
    /// Writes the CLI name that is passed to the node binary.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            SubCommand::ExportBlocks => "export-blocks",
            SubCommand::ImportBlocks => "import-blocks",
        };
        f.write_str(name)
    }
}
impl<'a> ExportImportRevertExecutor<'a> {
    /// Creates a new executor; `num_exported_blocks` starts out unset.
    fn new(
        base_path: &'a TempDir,
        exported_blocks_file: &'a PathBuf,
        db_path: &'a PathBuf,
    ) -> Self {
        Self { base_path, exported_blocks_file, db_path, num_exported_blocks: None }
    }
    /// Helper method to run a command. Returns a string corresponding to what has been logged.
    fn run_block_command(
        &self,
        sub_command: SubCommand,
        format_opt: FormatOpt,
        expected_to_fail: bool,
    ) -> String {
        let sub_command_str = sub_command.to_string();
        // Adding "--binary" if need be.
        let arguments: Vec<&str> = match format_opt {
            FormatOpt::Binary => {
                vec![&sub_command_str, "--dev", "--binary", "-d"]
            },
            FormatOpt::Json => vec![&sub_command_str, "--dev", "-d"],
        };
        // Declared here so a temporary import directory outlives the match below.
        let tmp: TempDir;
        // Setting base_path to be a temporary folder if we are importing blocks.
        // This allows us to make sure we are importing from scratch.
        let base_path = match sub_command {
            SubCommand::ExportBlocks => &self.base_path.path(),
            SubCommand::ImportBlocks => {
                tmp = tempdir().unwrap();
                tmp.path()
            },
        };
        // Running the command and capturing the output.
        let output = Command::new(cargo_bin("substrate-node"))
            .args(&arguments)
            .arg(&base_path)
            .arg(&self.exported_blocks_file)
            .output()
            .unwrap();
        // The node logs to stderr; that is what is inspected for errors.
        let logged_output = String::from_utf8_lossy(&output.stderr).to_string();
        if expected_to_fail {
            // Checking that we did indeed find an error.
            assert!(contains_error(&logged_output), "expected to error but did not error!");
            assert!(!output.status.success());
        } else {
            // Making sure no error were logged.
            assert!(
                !contains_error(&logged_output),
                "expected not to error but error'd: \n{logged_output}"
            );
            assert!(output.status.success());
        }
        logged_output
    }
    /// Runs the `export-blocks` command.
    fn run_export(&mut self, fmt_opt: FormatOpt) {
        let log = self.run_block_command(SubCommand::ExportBlocks, fmt_opt, false);
        // Using regex to find out how many block we exported.
        let re = Regex::new(r"Exporting blocks from #\d* to #(?P<exported_blocks>\d*)").unwrap();
        let caps = re.captures(&log).unwrap();
        // Saving the number of blocks we've exported for further use.
        self.num_exported_blocks = Some(caps["exported_blocks"].parse::<u64>().unwrap());
        let metadata = fs::metadata(&self.exported_blocks_file).unwrap();
        assert!(metadata.len() > 0, "file exported_blocks should not be empty");
        // Drop the database so a subsequent import starts from a clean state.
        let _ = fs::remove_dir_all(&self.db_path);
    }
    /// Runs the `import-blocks` command, asserting that an error was found or
    /// not depending on `expected_to_fail`.
    fn run_import(&mut self, fmt_opt: FormatOpt, expected_to_fail: bool) {
        let log = self.run_block_command(SubCommand::ImportBlocks, fmt_opt, expected_to_fail);
        if !expected_to_fail {
            // Using regex to find out how much block we imported,
            // and what's the best current block.
            let re =
                Regex::new(r"Imported (?P<imported>\d*) blocks. Best: #(?P<best>\d*)").unwrap();
            let caps = re.captures(&log).expect("capture should have succeeded");
            let imported = caps["imported"].parse::<u64>().unwrap();
            let best = caps["best"].parse::<u64>().unwrap();
            assert_eq!(imported, best, "numbers of blocks imported and best number differs");
            assert_eq!(
                best,
                self.num_exported_blocks.expect("number of exported blocks cannot be None; qed"),
                "best block number and number of expected blocks should not differ"
            );
        }
        self.num_exported_blocks = None;
    }
    /// Runs the `revert` command.
    fn run_revert(&self) {
        let output = Command::new(cargo_bin("substrate-node"))
            .args(&["revert", "--dev", "-d"])
            .arg(&self.base_path.path())
            .output()
            .unwrap();
        let logged_output = String::from_utf8_lossy(&output.stderr).to_string();
        // Reverting should not log any error.
        assert!(!contains_error(&logged_output));
        // Command should never fail.
        assert!(output.status.success());
    }
    /// Helper function that runs the whole export / import / revert flow and checks for errors.
    fn run(&mut self, export_fmt: FormatOpt, import_fmt: FormatOpt, expected_to_fail: bool) {
        self.run_export(export_fmt);
        self.run_import(import_fmt, expected_to_fail);
        self.run_revert();
    }
}
#[tokio::test]
async fn export_import_revert() {
    let base_path = tempdir().expect("could not create a temp dir");
    let exported_blocks_file = base_path.path().join("exported_blocks");
    let db_path = base_path.path().join("db");
    // Produce some blocks with a dev node so there is something to export.
    common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await;
    let mut exec = ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path);
    // Exercise every export/import format combination: matching formats must
    // succeed, mismatched ones must fail.
    exec.run(FormatOpt::Binary, FormatOpt::Binary, false);
    exec.run(FormatOpt::Binary, FormatOpt::Json, true);
    exec.run(FormatOpt::Json, FormatOpt::Json, false);
    exec.run(FormatOpt::Json, FormatOpt::Binary, true);
}
+193
View File
@@ -0,0 +1,193 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use codec::{Encode, Joiner};
use frame_support::{
dispatch::GetDispatchInfo,
traits::Currency,
weights::{constants::ExtrinsicBaseWeight, IdentityFee, WeightToFee},
};
use kitchensink_runtime::{
constants::{currency::*, time::SLOT_DURATION},
Balances, CheckedExtrinsic, Multiplier, Runtime, RuntimeCall, TransactionByteFee,
TransactionPayment,
};
use node_primitives::Balance;
use node_testing::keyring::*;
use pezkuwi_sdk::*;
use sp_runtime::{traits::One, Perbill};
pub mod common;
use self::common::{sign, *};
// Executes one over-target ("big") block followed by one under-target ("small")
// block and checks that the transaction-payment fee multiplier rises after the
// first and falls after the second.
#[test]
fn fee_multiplier_increases_and_decreases_on_big_weight() {
let mut t = new_test_ext(compact_code_unwrap());
// initial fee multiplier must be one.
let mut prev_multiplier = Multiplier::one();
t.execute_with(|| {
assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier);
});
// Separate externalities used only for block construction, so that `t` stays
// at genesis until the blocks are executed against it below.
let mut tt = new_test_ext(compact_code_unwrap());
let time1 = 42 * 1000;
// big one in terms of weight: fills 60% of the block via `fill_block` (root
// call, dispatched through sudo by charlie).
let block1 = construct_block(
&mut tt,
1,
GENESIS_HASH.into(),
vec![
CheckedExtrinsic {
format: sp_runtime::generic::ExtrinsicFormat::Bare,
function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }),
},
CheckedExtrinsic {
format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(0, 0)),
function: RuntimeCall::Sudo(pallet_sudo::Call::sudo {
call: Box::new(RuntimeCall::RootTesting(
pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) },
)),
}),
},
],
(time1 / SLOT_DURATION).into(),
);
let time2 = 52 * 1000;
// small one in terms of weight: only a timestamp set plus a 1-byte remark.
let block2 = construct_block(
&mut tt,
2,
block1.1,
vec![
CheckedExtrinsic {
format: sp_runtime::generic::ExtrinsicFormat::Bare,
function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }),
},
CheckedExtrinsic {
format: sp_runtime::generic::ExtrinsicFormat::Signed(charlie(), tx_ext(1, 0)),
function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }),
},
],
(time2 / SLOT_DURATION).into(),
);
println!(
"++ Block 1 size: {} / Block 2 size {}",
block1.0.encode().len(),
block2.0.encode().len(),
);
// execute the big block.
executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap();
// weight multiplier is increased for next block.
t.execute_with(|| {
let fm = TransactionPayment::next_fee_multiplier();
println!("After a big block: {:?} -> {:?}", prev_multiplier, fm);
assert!(fm > prev_multiplier);
prev_multiplier = fm;
});
// execute the small block.
executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap();
// weight multiplier is decreased for the next block, since the small block
// was below the target weight.
t.execute_with(|| {
let fm = TransactionPayment::next_fee_multiplier();
println!("After a small block: {:?} -> {:?}", prev_multiplier, fm);
assert!(fm < prev_multiplier);
});
}
/// Builds a SCALE-encoded `frame_system::AccountInfo` record for an account
/// holding `free_dollars * DOLLARS` of free balance, with one provider
/// reference and a fresh (zero) nonce.
///
/// NOTE(review): the `data` tuple is assumed to mirror the runtime's
/// `AccountData` layout (free balance first, then two zeroed balance fields
/// and a flags word) — confirm against the runtime definition.
fn new_account_info(free_dollars: u128) -> Vec<u8> {
    let info = frame_system::AccountInfo {
        nonce: 0u32,
        consumers: 0,
        providers: 1,
        sufficients: 0,
        data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 1u128 << 127),
    };
    info.encode()
}
// Applies a single tipped transfer and verifies that the exact fee
// (base + weight + length + tip) is deducted from the sender.
#[test]
fn transaction_fee_is_correct() {
// This uses the exact values of substrate-node.
//
// weight of transfer call as of now: 1_000_000
// if weight of the cheapest weight would be 10^7, this would be 10^9, which is:
// - 1 MILLICENTS in substrate node.
// - 1 milli-dot based on current pezkuwi runtime.
// (this based on assigning 0.1 CENT to the cheapest tx with `weight = 100`)
let mut t = new_test_ext(compact_code_unwrap());
// Seed alice with 100 DOLLARS and bob with 10 DOLLARS directly in storage.
t.insert(<frame_system::Account<Runtime>>::hashed_key_for(alice()), new_account_info(100));
t.insert(<frame_system::Account<Runtime>>::hashed_key_for(bob()), new_account_info(10));
// Keep total issuance consistent with the two seeded accounts (100 + 10).
t.insert(
<pallet_balances::TotalIssuance<Runtime>>::hashed_key().to_vec(),
(110 * DOLLARS).encode(),
);
// Provide a genesis block hash entry so block construction can reference it.
t.insert(<frame_system::BlockHash<Runtime>>::hashed_key_for(0), vec![0u8; 32]);
let tip = 1_000_000;
let xt = sign(CheckedExtrinsic {
format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, tip)),
function: RuntimeCall::Balances(default_transfer_call()),
});
let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
assert!(r.is_ok());
let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone())).0;
assert!(r.is_ok());
t.execute_with(|| {
// The default transfer moves 69 DOLLARS (see the 10 + 69 / 100 - 69 math).
assert_eq!(Balances::total_balance(&bob()), (10 + 69) * DOLLARS);
// Components deducted from alice's balances:
// - Base fee
// - Weight fee
// - Length fee
// - Tip
// - Creation-fee of bob's account.
let mut balance_alice = (100 - 69) * DOLLARS;
let base_weight = ExtrinsicBaseWeight::get();
let base_fee = IdentityFee::<Balance>::weight_to_fee(&base_weight);
// Length fee is charged per encoded byte of the extrinsic.
let length_fee = TransactionByteFee::get() * (xt.clone().encode().len() as Balance);
balance_alice -= length_fee;
// Total weight = call dispatch weight + the transaction-extension weight.
let mut info = default_transfer_call().get_dispatch_info();
info.extension_weight = xt.0.extension_weight();
let weight = info.total_weight();
let weight_fee = IdentityFee::<Balance>::weight_to_fee(&weight);
// we know that weight to fee multiplier is effect-less in block 1.
// current weight of transfer = 200_000_000
// Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance)
assert_eq!(weight_fee, weight.ref_time() as Balance);
balance_alice -= base_fee;
balance_alice -= weight_fee;
balance_alice -= tip;
assert_eq!(Balances::total_balance(&alice()), balance_alice);
});
}
@@ -0,0 +1,40 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
use substrate_cli_test_utils as common;
#[tokio::test]
async fn inspect_works() {
    let base_path = tempdir().expect("could not create a temp dir");
    // Run a dev node briefly so the database contains at least one block.
    common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await;
    // `inspect block 1` against the produced chain must exit successfully.
    let mut inspect_cmd = Command::new(cargo_bin("substrate-node"));
    inspect_cmd
        .args(&["inspect", "--dev", "-d"])
        .arg(base_path.path())
        .args(&["block", "1"]);
    let status = inspect_cmd.status().unwrap();
    assert!(status.success());
}
@@ -0,0 +1,43 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
use substrate_cli_test_utils as common;
#[tokio::test]
#[cfg(unix)]
async fn purge_chain_works() {
    let base_path = tempdir().expect("could not create a temp dir");
    // Run a dev node briefly so there is a database to purge.
    common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await;
    // `purge-chain -y` skips the interactive confirmation prompt.
    let mut purge_cmd = Command::new(cargo_bin("substrate-node"));
    purge_cmd
        .args(&["purge-chain", "--dev", "-d"])
        .arg(base_path.path())
        .arg("-y");
    let status = purge_cmd.status().unwrap();
    assert!(status.success());
    // Make sure that the `dev` chain folder exists, but the `db` is deleted.
    assert!(base_path.path().join("chains/dev/").exists());
    assert!(!base_path.path().join("chains/dev/db/full").exists());
}
@@ -0,0 +1,38 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use tempfile::tempdir;
use substrate_cli_test_utils as common;
#[tokio::test]
#[cfg(unix)]
async fn remember_state_pruning_works() {
    let base_path = tempdir().expect("could not create a temp dir");
    // First run with an explicit `--state-pruning=archive`; the setting is
    // persisted into the database.
    let archive_args = ["--dev", "--state-pruning=archive", "--no-hardware-benchmarks"];
    common::run_node_for_a_while(base_path.path(), &archive_args).await;
    // A second run without the flag must pick the persisted setting up from
    // the db instead of conflicting with it.
    common::run_node_for_a_while(base_path.path(), &["--dev", "--no-hardware-benchmarks"]).await;
}
@@ -0,0 +1,129 @@
{
"system": {},
"babe": {
"authorities": [],
"epochConfig": {
"allowed_slots": "PrimaryAndSecondaryVRFSlots",
"c": [
1,
4
]
}
},
"indices": {
"indices": []
},
"balances": {
"balances": [],
"devAccounts": null
},
"broker": {},
"transactionPayment": {
"multiplier": "1000000000000000000"
},
"staking": {
"validatorCount": 0,
"minimumValidatorCount": 0,
"invulnerables": [],
"forceEra": "NotForcing",
"slashRewardFraction": 0,
"canceledPayout": 0,
"stakers": [],
"minNominatorBond": 0,
"minValidatorBond": 0,
"maxValidatorCount": null,
"maxNominatorCount": null
},
"session": {
"keys": [],
"nonAuthorityKeys": []
},
"revive": {
},
"democracy": {},
"council": {
"members": []
},
"technicalCommittee": {
"members": []
},
"elections": {
"members": []
},
"technicalMembership": {
"members": []
},
"grandpa": {
"authorities": []
},
"beefy": {
"authorities": [],
"genesisBlock": 1
},
"treasury": {},
"sudo": {
"key": null
},
"imOnline": {
"keys": []
},
"authorityDiscovery": {
"keys": []
},
"society": {
"pot": 0
},
"vesting": {
"vesting": []
},
"glutton": {
"compute": "0",
"storage": "0",
"blockLength": "0",
"trashDataCount": 0
},
"assets": {
"assets": [],
"metadata": [],
"accounts": [],
"nextAssetId": null,
"reserves": []
},
"poolAssets": {
"assets": [],
"metadata": [],
"accounts": [],
"nextAssetId": null,
"reserves": []
},
"transactionStorage": {
"byteFee": 10,
"entryFee": 1000,
"storagePeriod": 100800
},
"allianceMotion": {
"members": []
},
"alliance": {
"fellows": [],
"allies": []
},
"mixnet": {
"mixnodes": []
},
"nominationPools": {
"minJoinBond": 0,
"minCreateBond": 0,
"maxPools": 16,
"maxMembersPerPool": 32,
"maxMembers": 512,
"globalMaxCommission": null
},
"txPause": {
"paused": []
},
"safeMode": {
"enteredUntil": null
}
}
@@ -0,0 +1,89 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use nix::sys::signal::Signal::{self, SIGINT, SIGTERM};
use std::{
process::{self, Command},
time::Duration,
};
use tempfile::tempdir;
use substrate_cli_test_utils as common;
#[tokio::test]
async fn running_the_node_works_and_can_be_interrupted() {
    common::run_with_timeout(Duration::from_secs(60 * 10), async move {
        /// Spawns a dev node, waits for a few finalized blocks, then stops it
        /// with `signal` and asserts the database shut down cleanly.
        async fn spawn_and_signal(signal: Signal) {
            let base_path = tempdir().expect("could not create a temp dir");
            let mut node_cmd = Command::new(cargo_bin("substrate-node"));
            node_cmd
                .stdout(process::Stdio::piped())
                .stderr(process::Stdio::piped())
                .args(&["--dev", "-d"])
                .arg(base_path.path())
                .arg("--db=paritydb")
                .arg("--no-hardware-benchmarks");
            let mut node = common::KillChildOnDrop(node_cmd.spawn().unwrap());
            let stderr = node.stderr.take().unwrap();
            let ws_url = common::extract_info_from_output(stderr).0.ws_url;
            common::wait_n_finalized_blocks(3, &ws_url).await;
            node.assert_still_running();
            node.stop_with_signal(signal);
            // Check if the database was closed gracefully. If it was not,
            // there may exist a ref cycle that prevents the Client from being dropped properly.
            //
            // parity-db only writes the stats file on clean shutdown.
            let stats_file = base_path.path().join("chains/dev/paritydb/full/stats.txt");
            assert!(std::path::Path::exists(&stats_file));
        }
        // Both SIGINT (ctrl-c) and SIGTERM must trigger a clean shutdown.
        spawn_and_signal(SIGINT).await;
        spawn_and_signal(SIGTERM).await;
    })
    .await;
}
#[tokio::test]
async fn running_two_nodes_with_the_same_ws_port_should_work() {
    common::run_with_timeout(Duration::from_secs(60 * 10), async move {
        // Spawn two nodes with identical default settings.
        let mut node_a = common::KillChildOnDrop(common::start_node());
        let mut node_b = common::KillChildOnDrop(common::start_node());
        // Wait until the first node has finalized a few blocks.
        let stderr = node_a.stderr.take().unwrap();
        let ws_url = common::extract_info_from_output(stderr).0.ws_url;
        common::wait_n_finalized_blocks(3, &ws_url).await;
        // Both nodes must still be alive despite using the same ws port.
        node_a.assert_still_running();
        node_b.assert_still_running();
        node_a.stop();
        node_b.stop();
    })
    .await;
}
@@ -0,0 +1,264 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use codec::Decode;
use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction};
use kitchensink_runtime::{Executive, ExistentialDeposit, Indices, Runtime, UncheckedExtrinsic};
use pezkuwi_sdk::*;
use sp_application_crypto::AppCrypto;
use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt};
use sp_keyring::sr25519::Keyring::Alice;
use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt};
use sp_runtime::generic;
pub mod common;
use self::common::*;
#[test]
fn should_submit_unsigned_transaction() {
    let mut ext = new_test_ext(compact_code_unwrap());
    let (pool, state) = TestTransactionPoolExt::new();
    ext.register_extension(TransactionPoolExt::new(pool));
    ext.execute_with(|| {
        // An all-zero signature is enough here: only submission into the
        // pool is exercised, not signature validity.
        let signature =
            pallet_im_online::sr25519::AuthoritySignature::try_from(vec![0; 64]).unwrap();
        let heartbeat = pallet_im_online::Heartbeat {
            block_number: 1,
            session_index: 1,
            authority_index: 0,
            validators_len: 0,
        };
        let call = pallet_im_online::Call::heartbeat { heartbeat, signature };
        // Bare (unsigned) extrinsic wrapping the heartbeat call.
        let xt = generic::UncheckedExtrinsic::new_bare(call.into()).into();
        SubmitTransaction::<Runtime, pallet_im_online::Call<Runtime>>::submit_transaction(xt)
            .unwrap();
        // Exactly one transaction must have landed in the pool.
        assert_eq!(state.read().transactions.len(), 1)
    });
}
/// Mnemonic seed phrase used to derive the `hunter1`/`hunter2`/`hunter3`
/// keystore accounts in the signed-transaction tests below.
const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten";
#[test]
fn should_submit_signed_transaction() {
    let mut ext = new_test_ext(compact_code_unwrap());
    let (pool, state) = TestTransactionPoolExt::new();
    ext.register_extension(TransactionPoolExt::new(pool));
    // Populate the keystore with three derived test accounts.
    let keystore = MemoryKeystore::new();
    for seed in [
        format!("{}/hunter1", PHRASE),
        format!("{}/hunter2", PHRASE),
        format!("{}/hunter3", PHRASE),
    ] {
        keystore.sr25519_generate_new(sr25519::AuthorityId::ID, Some(&seed)).unwrap();
    }
    ext.register_extension(KeystoreExt::new(keystore));
    ext.execute_with(|| {
        let results =
            Signer::<Runtime, TestAuthorityId>::all_accounts().send_signed_transaction(|_| {
                pallet_balances::Call::transfer_allow_death {
                    dest: Alice.to_account_id().into(),
                    value: Default::default(),
                }
            });
        // One successfully-submitted transaction per keystore account.
        let len = results.len();
        assert_eq!(len, 3);
        assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len);
        assert_eq!(state.read().transactions.len(), len);
    });
}
// Submits two signed transactions through `any_account` and verifies that the
// second one carries an incremented nonce.
#[test]
fn should_submit_signed_twice_from_the_same_account() {
let mut t = new_test_ext(compact_code_unwrap());
let (pool, state) = TestTransactionPoolExt::new();
t.register_extension(TransactionPoolExt::new(pool));
// Two derived accounts in the keystore; `any_account` picks one of them.
let keystore = MemoryKeystore::new();
keystore
.sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE)))
.unwrap();
keystore
.sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE)))
.unwrap();
t.register_extension(KeystoreExt::new(keystore));
t.execute_with(|| {
let result =
Signer::<Runtime, TestAuthorityId>::any_account().send_signed_transaction(|_| {
pallet_balances::Call::transfer_allow_death {
dest: Alice.to_account_id().into(),
value: Default::default(),
}
});
assert!(result.is_some());
assert_eq!(state.read().transactions.len(), 1);
// submit another one from the same account. The nonce should be incremented.
let result =
Signer::<Runtime, TestAuthorityId>::any_account().send_signed_transaction(|_| {
pallet_balances::Call::transfer_allow_death {
dest: Alice.to_account_id().into(),
value: Default::default(),
}
});
assert!(result.is_some());
assert_eq!(state.read().transactions.len(), 2);
// now check that the transaction nonces are not equal
let s = state.read();
// Extracts the `CheckNonce` extension from a decoded extrinsic.
// NOTE(review): index 6 of the extensions tuple is assumed to be
// `CheckNonce` — confirm against the runtime's TxExtension ordering.
fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce<Runtime> {
let extra = tx.0.preamble.to_signed().unwrap().2;
extra.6
}
let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap());
let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap());
assert!(nonce1 != nonce2, "Transactions should have different nonces. Got: {:?}", nonce1);
});
}
// Submits two batches of signed transactions from all keystore accounts and
// verifies that each account's second transaction carries a higher nonce.
#[test]
fn should_submit_signed_twice_from_all_accounts() {
let mut t = new_test_ext(compact_code_unwrap());
let (pool, state) = TestTransactionPoolExt::new();
t.register_extension(TransactionPoolExt::new(pool));
// Two derived accounts; `all_accounts` signs with both per batch.
let keystore = MemoryKeystore::new();
keystore
.sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE)))
.unwrap();
keystore
.sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE)))
.unwrap();
t.register_extension(KeystoreExt::new(keystore));
t.execute_with(|| {
let results = Signer::<Runtime, TestAuthorityId>::all_accounts()
.send_signed_transaction(|_| {
pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default() }
});
let len = results.len();
assert_eq!(len, 2);
assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len);
assert_eq!(state.read().transactions.len(), 2);
// Submit a second batch from all accounts. Each account's nonce should be
// incremented relative to its first transaction.
let results = Signer::<Runtime, TestAuthorityId>::all_accounts()
.send_signed_transaction(|_| {
pallet_balances::Call::transfer_allow_death { dest: Alice.to_account_id().into(), value: Default::default() }
});
let len = results.len();
assert_eq!(len, 2);
assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len);
assert_eq!(state.read().transactions.len(), 4);
// now check that the transaction nonces are not equal
let s = state.read();
// Extracts the `CheckNonce` extension from a decoded extrinsic.
// NOTE(review): index 6 of the extensions tuple is assumed to be
// `CheckNonce` — confirm against the runtime's TxExtension ordering.
fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce<Runtime> {
let extra = tx.0.preamble.to_signed().unwrap().2;
extra.6
}
// Transactions 0/2 and 1/3 are the per-account pairs across the batches.
let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap());
let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap());
let nonce3 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[2]).unwrap());
let nonce4 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[3]).unwrap());
assert!(
nonce1 != nonce3,
"Transactions should have different nonces. Got: 1st tx nonce: {:?}, 2nd nonce: {:?}", nonce1, nonce3
);
assert!(
nonce2 != nonce4,
"Transactions should have different nonces. Got: 1st tx nonce: {:?}, 2nd tx nonce: {:?}", nonce2, nonce4
);
});
}
// Submits a signed transfer via the offchain `Signer`, then re-validates the
// produced extrinsic through `Executive::validate_transaction` in a fresh
// environment with the signer's account funded.
#[test]
fn submitted_transaction_should_be_valid() {
    use codec::Encode;
    use sp_runtime::{
        traits::StaticLookup,
        transaction_validity::{TransactionSource, TransactionTag},
    };
    let mut t = new_test_ext(compact_code_unwrap());
    let (pool, state) = TestTransactionPoolExt::new();
    t.register_extension(TransactionPoolExt::new(pool));
    let keystore = MemoryKeystore::new();
    keystore
        .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE)))
        .unwrap();
    t.register_extension(KeystoreExt::new(keystore));
    // Submit exactly one signed transfer into the test transaction pool.
    t.execute_with(|| {
        let results =
            Signer::<Runtime, TestAuthorityId>::all_accounts().send_signed_transaction(|_| {
                pallet_balances::Call::transfer_allow_death {
                    dest: Alice.to_account_id().into(),
                    value: Default::default(),
                }
            });
        let len = results.len();
        assert_eq!(len, 1);
        assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len);
    });
    // check that transaction is valid, but reset environment storage,
    // since CreateTransaction increments the nonce
    let tx0 = state.read().transactions[0].clone();
    let mut t = new_test_ext(compact_code_unwrap());
    t.execute_with(|| {
        let source = TransactionSource::External;
        let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap();
        // `to_signed()` consumes the cloned preamble and yields the
        // `(address, signature, extensions)` triple; only the address is
        // needed here. (The previous extra `.clone()` on the returned
        // `Option` was redundant — the value is already owned.)
        let author = extrinsic.0.preamble.clone().to_signed().unwrap().0;
        let address = Indices::lookup(author).unwrap();
        // Fund the signer's account so the payment check passes.
        let data = pallet_balances::AccountData {
            free: ExistentialDeposit::get() * 10,
            ..Default::default()
        };
        let account = frame_system::AccountInfo { providers: 1, data, ..Default::default() };
        <frame_system::Account<Runtime>>::insert(&address, account);
        // check validity
        let res = Executive::validate_transaction(
            source,
            extrinsic,
            frame_system::BlockHash::<Runtime>::get(0),
        )
        .unwrap();
        // We ignore res.priority since this number can change based on updates to weights and such.
        assert_eq!(res.requires, Vec::<TransactionTag>::new());
        // A signed tx at nonce 0 provides the `(address, 0)` tag.
        assert_eq!(res.provides, vec![(address, 0).encode()]);
        assert_eq!(res.longevity, 2047);
        assert_eq!(res.propagate, true);
    });
}
+92
View File
@@ -0,0 +1,92 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use std::{process, time::Duration};
use crate::common::KillChildOnDrop;
use substrate_cli_test_utils as common;
pub mod websocket_server;
// Starts a local websocket server acting as a telemetry endpoint, points a dev
// node at it and waits until the node reports a best block over telemetry.
#[tokio::test]
async fn telemetry_works() {
common::run_with_timeout(Duration::from_secs(60 * 10), async move {
// Bind to port 0 so the OS assigns a free port; the actual address is
// read back below via `local_addr`.
let config = websocket_server::Config {
capacity: 1,
max_frame_size: 1048 * 1024,
send_buffer_len: 32,
bind_address: "127.0.0.1:0".parse().unwrap(),
};
let mut server = websocket_server::WsServer::new(config).await.unwrap();
let addr = server.local_addr().unwrap();
// Server loop: runs until a telemetry payload containing a string `best`
// field arrives, then the task finishes (success condition).
let server_task = tokio::spawn(async move {
loop {
use websocket_server::Event;
match server.next_event().await {
// New connection on the listener.
Event::ConnectionOpen { address } => {
println!("New connection from {:?}", address);
server.accept();
},
// Received a message from a connection.
Event::BinaryFrame { message, .. } => {
let json: serde_json::Value = serde_json::from_slice(&message).unwrap();
let object =
json.as_object().unwrap().get("payload").unwrap().as_object().unwrap();
if matches!(object.get("best"), Some(serde_json::Value::String(_))) {
break;
}
},
Event::TextFrame { .. } => {
panic!("Got a TextFrame over the socket, this is a bug")
},
// Connection has been closed.
Event::ConnectionError { .. } => {},
}
}
});
// Spawn the node with its telemetry pointed at our server. The URL is
// passed as "<url> <verbosity>" — presumably 10 is the verbosity level;
// verify against the CLI's `--telemetry-url` docs.
let mut substrate = process::Command::new(cargo_bin("substrate-node"));
let mut substrate = KillChildOnDrop(
substrate
.args(&["--dev", "--tmp", "--telemetry-url"])
.arg(format!("ws://{} 10", addr))
.arg("--no-hardware-benchmarks")
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.stdin(process::Stdio::null())
.spawn()
.unwrap(),
);
// Completes only once the best-block telemetry message was observed.
server_task.await.expect("server task panicked");
substrate.assert_still_running();
// Stop the process
substrate.stop();
})
.await;
}
@@ -0,0 +1,62 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
use assert_cmd::cargo::cargo_bin;
use std::{
process::{Command, Stdio},
time::Duration,
};
use substrate_cli_test_utils as common;
#[allow(dead_code)]
// Apparently `#[ignore]` doesn't actually work to disable this one.
//#[tokio::test]
async fn temp_base_path_works() {
common::run_with_timeout(Duration::from_secs(60 * 10), async move {
let mut cmd = Command::new(cargo_bin("substrate-node"));
let mut child = common::KillChildOnDrop(
cmd.args(&["--dev", "--tmp", "--no-hardware-benchmarks"])
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap(),
);
let mut stderr = child.stderr.take().unwrap();
let node_info = common::extract_info_from_output(&mut stderr).0;
// Let it produce some blocks.
common::wait_n_finalized_blocks(3, &node_info.ws_url).await;
// Ensure the db path exists while the node is running
assert!(node_info.db_path.exists());
child.assert_still_running();
// Stop the process
child.stop();
if node_info.db_path.exists() {
panic!("Database path `{}` wasn't deleted!", node_info.db_path.display());
}
})
.await;
}
+51
View File
@@ -0,0 +1,51 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use assert_cmd::cargo::cargo_bin;
use regex::Regex;
use std::process::Command;
/// Regex the `--version` output must match:
/// `substrate-node <version>-<hex commit hash>`.
fn expected_regex() -> Regex {
    let pattern = r"^substrate-node (.+)-([a-f\d]+)$";
    Regex::new(pattern).unwrap()
}
#[test]
fn version_is_full() {
    let version_re = expected_regex();
    let cmd_output =
        Command::new(cargo_bin("substrate-node")).args(&["--version"]).output().unwrap();
    assert!(cmd_output.status.success(), "command returned with non-success exit code");
    // `dbg!` echoes the raw version string to stderr to ease debugging failures.
    let version = dbg!(String::from_utf8_lossy(&cmd_output.stdout).trim().to_owned());
    let captures =
        version_re.captures(version.as_str()).expect("could not parse version in output");
    // The leading component must be exactly the crate version.
    assert_eq!(&captures[1], env!("CARGO_PKG_VERSION"));
}
#[test]
fn test_regex_matches_properly() {
    let expected = expected_regex();
    // (input, expected version capture, expected commit-hash capture)
    let cases = [
        ("substrate-node 2.0.0-da487d19d", "2.0.0", "da487d19d"),
        ("substrate-node 2.0.0-alpha.5-da487d19d", "2.0.0-alpha.5", "da487d19d"),
    ];
    for (input, version, hash) in cases {
        let captures = expected.captures(input).unwrap();
        assert_eq!(&captures[1], version);
        assert_eq!(&captures[2], hash);
    }
}
@@ -0,0 +1,278 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use core::pin::Pin;
use futures::prelude::*;
use soketto::handshake::{server::Response, Server};
use std::{io, net::SocketAddr};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::compat::{Compat, TokioAsyncReadCompatExt};
/// Configuration for a [`WsServer`].
pub struct Config {
	/// IP address to try to bind to.
	pub bind_address: SocketAddr,
	/// Maximum size, in bytes, of a frame sent by the remote.
	///
	/// Since the messages are entirely buffered before being returned, a maximum value is
	/// necessary in order to prevent malicious clients from sending huge frames that would
	/// occupy a lot of memory.
	pub max_frame_size: usize,
	/// Number of pending messages to buffer up for sending before the socket is considered
	/// unresponsive.
	// NOTE(review): not read anywhere in this file — presumably consumed by a sending side
	// elsewhere; confirm before relying on it.
	pub send_buffer_len: usize,
	/// Pre-allocated capacity for the list of connections.
	// NOTE(review): also not read in this file ([`WsServer::new`] builds empty collections);
	// verify it is still needed.
	pub capacity: usize,
}
/// Identifier for a connection with regard to a [`WsServer`].
///
/// After a connection has been closed, its [`ConnectionId`] might be reused.
// NOTE(review): no code in this file constructs or returns a `ConnectionId`; confirm it is
// still referenced by callers elsewhere before keeping it public.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct ConnectionId(u64);
/// A WebSocket message.
pub enum Message {
	/// A text frame; guaranteed valid UTF-8 (built via `String::from_utf8` on receipt).
	Text(String),
	/// A binary frame carrying arbitrary payload bytes.
	Binary(Vec<u8>),
}
/// WebSockets listening socket and list of open connections.
pub struct WsServer {
	/// Value passed through [`Config::max_frame_size`].
	max_frame_size: usize,
	/// Endpoint for incoming TCP sockets.
	listener: TcpListener,
	/// Pending incoming connection to accept. Accepted by calling [`WsServer::accept`].
	pending_incoming: Option<TcpStream>,
	/// List of TCP connections that are currently negotiating the WebSocket handshake.
	///
	/// The output can be an error if the handshake fails.
	negotiating: stream::FuturesUnordered<
		Pin<
			Box<
				dyn Future<
						Output = Result<
							Server<'static, Compat<TcpStream>>,
							Box<dyn std::error::Error>,
						>,
					> + Send,
			>,
		>,
	>,
	/// List of streams of incoming messages for all connections.
	incoming_messages: stream::SelectAll<
		Pin<Box<dyn Stream<Item = Result<Message, Box<dyn std::error::Error>>> + Send>>,
	>,
	/// Tasks dedicated to closing sockets that have been rejected.
	// NOTE(review): nothing in this file pushes into this collection — [`WsServer::reject`]
	// simply drops the socket. Confirm whether this field is still required.
	rejected_sockets: stream::FuturesUnordered<Pin<Box<dyn Future<Output = ()> + Send>>>,
}
impl WsServer {
	/// Try opening a TCP listening socket.
	///
	/// Returns an error if the listening socket fails to open.
	pub async fn new(config: Config) -> Result<Self, io::Error> {
		let listener = TcpListener::bind(config.bind_address).await?;
		// Only `bind_address` and `max_frame_size` are consumed here; the server starts
		// with no pending or open connections.
		Ok(WsServer {
			max_frame_size: config.max_frame_size,
			listener,
			pending_incoming: None,
			negotiating: stream::FuturesUnordered::new(),
			incoming_messages: stream::SelectAll::new(),
			rejected_sockets: stream::FuturesUnordered::new(),
		})
	}
	/// Address of the local TCP listening socket, as provided by the operating system.
	pub fn local_addr(&self) -> Result<SocketAddr, io::Error> {
		self.listener.local_addr()
	}
	/// Accepts the pending connection.
	///
	/// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a
	/// [`Event::ConnectionOpen`] event is returned.
	///
	/// # Panic
	///
	/// Panics if no connection is pending.
	pub fn accept(&mut self) {
		let pending_incoming = self.pending_incoming.take().expect("no pending socket");
		// Queue a future performing the WebSocket handshake; its outcome surfaces through
		// the `negotiating` branch of `next_event`.
		self.negotiating.push(Box::pin(async move {
			let mut server = Server::new(pending_incoming.compat());
			// Read the client's opening request and extract the `Sec-WebSocket-Key`.
			let websocket_key = match server.receive_request().await {
				Ok(req) => req.key(),
				Err(err) => return Err(Box::new(err) as Box<_>),
			};
			// Unconditionally accept the handshake; no sub-protocol is negotiated.
			match server
				.send_response(&{ Response::Accept { key: websocket_key, protocol: None } })
				.await
			{
				Ok(()) => {},
				Err(err) => return Err(Box::new(err) as Box<_>),
			};
			Ok(server)
		}));
	}
	/// Reject the pending connection.
	///
	/// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a
	/// [`Event::ConnectionOpen`] event is returned.
	///
	/// # Panic
	///
	/// Panics if no connection is pending.
	pub fn reject(&mut self) {
		// Dropping the `TcpStream` closes the socket.
		let _ = self.pending_incoming.take().expect("no pending socket");
	}
	/// Returns the next event happening on the server.
	pub async fn next_event(&mut self) -> Event {
		loop {
			futures::select! {
				// Only try to fetch a new incoming connection if none is pending.
				socket = {
					let listener = &self.listener;
					let has_pending = self.pending_incoming.is_some();
					async move {
						if !has_pending {
							listener.accept().await
						} else {
							// A connection is already awaiting accept/reject: park this
							// branch so `select!` never resolves it.
							loop { futures::pending!() }
						}
					}
				}.fuse() => {
					let (socket, address) = match socket {
						Ok(s) => s,
						// Transient accept errors are silently retried.
						Err(_) => continue,
					};
					debug_assert!(self.pending_incoming.is_none());
					self.pending_incoming = Some(socket);
					return Event::ConnectionOpen { address };
				},
				// A WebSocket handshake completed, successfully or not.
				result = self.negotiating.select_next_some() => {
					let server = match result {
						Ok(s) => s,
						Err(error) => return Event::ConnectionError {
							error,
						},
					};
					// NOTE(review): the sending half is bound to `_sender` and dropped at the
					// end of this arm — outbound messages appear unsupported here; confirm.
					let (mut _sender, receiver) = {
						let mut builder = server.into_builder();
						builder.set_max_frame_size(self.max_frame_size);
						builder.set_max_message_size(self.max_frame_size);
						builder.finish()
					};
					// Spawn a task dedicated to receiving messages from the socket.
					self.incoming_messages.push({
						// Turn `receiver` into a stream of received packets.
						// NOTE(review): on `Err` the unfold still returns `Some`, and the
						// stream is never removed from `incoming_messages`, so a failed
						// connection may keep yielding `ConnectionError` events — confirm
						// this is intended.
						let socket_packets = stream::unfold((receiver, Vec::new()), move |(mut receiver, mut buf)| async {
							buf.clear();
							let ret = match receiver.receive_data(&mut buf).await {
								Ok(soketto::Data::Text(len)) => String::from_utf8(buf[..len].to_vec())
									.map(Message::Text)
									.map_err(|err| Box::new(err) as Box<_>),
								Ok(soketto::Data::Binary(len)) => Ok(Message::Binary(buf[..len].to_vec())),
								Err(err) => Err(Box::new(err) as Box<_>),
							};
							Some((ret, (receiver, buf)))
						});
						Box::pin(socket_packets.map(move |msg| (msg)))
					});
				},
				// A message (or error) arrived on one of the open connections.
				result = self.incoming_messages.select_next_some() => {
					let message = match result {
						Ok(m) => m,
						Err(error) => return Event::ConnectionError {
							error,
						},
					};
					match message {
						Message::Text(message) => {
							return Event::TextFrame {
								message,
							}
						}
						Message::Binary(message) => {
							return Event::BinaryFrame {
								message,
							}
						}
					}
				},
				// A rejected socket finished closing; nothing to report.
				_ = self.rejected_sockets.select_next_some() => {
				}
			}
		}
	}
}
/// Event that has happened on a [`WsServer`].
#[derive(Debug)]
pub enum Event {
	/// A new TCP connection has arrived on the listening socket.
	///
	/// The connection *must* be accepted or rejected using [`WsServer::accept`] or
	/// [`WsServer::reject`].
	/// No other [`Event::ConnectionOpen`] event will be generated until the current pending
	/// connection has been either accepted or rejected.
	ConnectionOpen {
		/// Address of the remote, as provided by the operating system.
		address: SocketAddr,
	},
	/// An error has happened on a connection. The connection is now closed and its
	/// [`ConnectionId`] is now invalid.
	ConnectionError { error: Box<dyn std::error::Error> },
	/// A text frame has been received on a connection.
	TextFrame {
		/// Message sent by the remote. Its content is entirely decided by the client, and
		/// nothing must be assumed about the validity of this message.
		message: String,
	},
	/// A binary frame has been received on a connection.
	BinaryFrame {
		/// Message sent by the remote. Its content is entirely decided by the client, and
		/// nothing must be assumed about the validity of this message.
		message: Vec<u8>,
	},
}
+39
View File
@@ -0,0 +1,39 @@
[package]
name = "staging-node-inspect"
version = "0.12.0"
authors.workspace = true
description = "Substrate node block inspection tool."
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
homepage.workspace = true
repository.workspace = true
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
clap = { features = ["derive"], workspace = true }
codec = { workspace = true, default-features = true }
sc-cli = { workspace = true }
sc-client-api = { workspace = true, default-features = true }
sc-service = { workspace = true }
sp-blockchain = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-io = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-statement-store = { workspace = true, default-features = true }
thiserror = { workspace = true }
[features]
runtime-benchmarks = [
"sc-cli/runtime-benchmarks",
"sc-client-api/runtime-benchmarks",
"sc-service/runtime-benchmarks",
"sp-blockchain/runtime-benchmarks",
"sp-io/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-statement-store/runtime-benchmarks",
]
+62
View File
@@ -0,0 +1,62 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Structs used to compose the `inspect` sub-command for the CLI.
use sc_cli::{ImportParams, SharedParams};
/// The `inspect` command used to print decoded chain data.
// Plain `//` comments are used on fields below so clap's derive does not
// pick them up as CLI help text.
#[derive(Debug, clap::Parser)]
pub struct InspectCmd {
	// The sub-command selecting what to decode (block or extrinsic).
	#[allow(missing_docs)]
	#[clap(subcommand)]
	pub command: InspectSubCmd,
	// CLI parameters shared by all node commands; see `sc_cli::SharedParams`.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub shared_params: SharedParams,
	// Parameters governing how the existing chain database is opened;
	// see `sc_cli::ImportParams`.
	#[allow(missing_docs)]
	#[clap(flatten)]
	pub import_params: ImportParams,
}
/// The possible `inspect` sub-commands.
#[derive(Debug, clap::Subcommand)]
pub enum InspectSubCmd {
	/// Decode block with native version of runtime and print out the details.
	Block {
		/// Address of the block to print out.
		///
		/// Can be either a block hash (no 0x prefix) or a number to retrieve existing block,
		/// or a 0x-prefixed bytes hex string, representing SCALE encoding of
		/// a block.
		#[arg(value_name = "HASH or NUMBER or BYTES")]
		input: String,
	},
	/// Decode extrinsic with native version of runtime and print out the details.
	Extrinsic {
		/// Address of an extrinsic to print out.
		///
		/// Can be either a block hash (no 0x prefix) or number and the index, in the form
		/// of `{block}:{index}` or a 0x-prefixed bytes hex string,
		/// representing SCALE encoding of an extrinsic.
		#[arg(value_name = "BLOCK:INDEX or BYTES")]
		input: String,
	},
}
+68
View File
@@ -0,0 +1,68 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Command run by the CLI
use crate::{
cli::{InspectCmd, InspectSubCmd},
Inspector,
};
use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams};
use sc_service::Configuration;
use sp_runtime::traits::Block;
/// Host functions the wasm executor is instantiated with: the standard Substrate set
/// plus the statement-store runtime-interface extensions.
type HostFunctions =
	(sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions);
impl InspectCmd {
	/// Run the inspect command, passing the inspector.
	///
	/// Builds a full client from `config`, wraps it in an [`Inspector`], and prints the
	/// block or extrinsic selected on the command line to stdout.
	///
	/// # Errors
	///
	/// Fails if the client cannot be created, the address string cannot be parsed, or the
	/// requested data cannot be found or decoded.
	pub fn run<B, RA>(&self, config: Configuration) -> Result<()>
	where
		B: Block,
		RA: Send + Sync + 'static,
	{
		let executor = sc_service::new_wasm_executor::<HostFunctions>(&config.executor);
		let client = sc_service::new_full_client::<B, RA, _>(&config, None, executor)?;
		let inspect = Inspector::<B>::new(client);
		match &self.command {
			InspectSubCmd::Block { input } => {
				// `parse` target type is inferred from `Inspector::block`'s parameter.
				let input = input.parse()?;
				let res = inspect.block(input).map_err(|e| e.to_string())?;
				println!("{res}");
				Ok(())
			},
			InspectSubCmd::Extrinsic { input } => {
				// Likewise inferred from `Inspector::extrinsic`'s parameter.
				let input = input.parse()?;
				let res = inspect.extrinsic(input).map_err(|e| e.to_string())?;
				println!("{res}");
				Ok(())
			},
		}
	}
}
// Wires `InspectCmd` into sc-cli's configuration machinery so a `Configuration`
// can be built from the parsed command-line parameters.
impl CliConfiguration for InspectCmd {
	/// Shared CLI parameters; see [`SharedParams`].
	fn shared_params(&self) -> &SharedParams {
		&self.shared_params
	}
	/// Database/import parameters, enabling the chain database to be opened.
	fn import_params(&self) -> Option<&ImportParams> {
		Some(&self.import_params)
	}
}
+311
View File
@@ -0,0 +1,311 @@
// This file is part of Substrate.
//
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! A CLI extension for substrate node, adding sub-command to pretty print debug info
//! about blocks and extrinsics.
//!
//! The blocks and extrinsics can either be retrieved from the database (on-chain),
//! or a raw SCALE-encoding can be provided.
#![warn(missing_docs)]
pub mod cli;
pub mod command;
use codec::{Decode, Encode};
use sc_client_api::BlockBackend;
use sp_blockchain::HeaderBackend;
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::{
generic::BlockId,
traits::{Block, Hash, HashingFor, NumberFor},
};
use std::{fmt, fmt::Debug, marker::PhantomData, str::FromStr};
/// A helper type for a generic block input.
///
/// Resolves to [`BlockAddress`] instantiated with the given block's hash and number types.
pub type BlockAddressFor<TBlock> =
	BlockAddress<<HashingFor<TBlock> as Hash>::Output, NumberFor<TBlock>>;
/// A Pretty formatter implementation.
///
/// Implemented by printers that render blocks and extrinsics for human consumption;
/// [`DebugPrinter`] is the default used by [`Inspector`].
pub trait PrettyPrinter<TBlock: Block> {
	/// Nicely format block.
	fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result;
	/// Nicely format extrinsic.
	fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic)
		-> fmt::Result;
}
/// Default dummy debug printer.
///
/// Renders blocks and extrinsics via their `Debug` implementations, alongside their
/// SCALE-encoded bytes in hex.
#[derive(Default)]
pub struct DebugPrinter;
impl<TBlock: Block> PrettyPrinter<TBlock> for DebugPrinter {
	fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result {
		writeln!(fmt, "Header:")?;
		writeln!(fmt, "{:?}", block.header())?;
		// Full SCALE encoding of the block, rendered as hex.
		writeln!(fmt, "Block bytes: {:?}", HexDisplay::from(&block.encode()))?;
		writeln!(fmt, "Extrinsics ({})", block.extrinsics().len())?;
		for (idx, ex) in block.extrinsics().iter().enumerate() {
			writeln!(fmt, "- {}:", idx)?;
			// Fully-qualified call pins the `TBlock` instantiation of the trait method.
			<DebugPrinter as PrettyPrinter<TBlock>>::fmt_extrinsic(self, fmt, ex)?;
		}
		Ok(())
	}
	fn fmt_extrinsic(
		&self,
		fmt: &mut fmt::Formatter,
		extrinsic: &TBlock::Extrinsic,
	) -> fmt::Result {
		// `{:#?}` pretty-prints the `Debug` representation over multiple lines.
		writeln!(fmt, " {:#?}", extrinsic)?;
		writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?;
		Ok(())
	}
}
/// Aggregated error for `Inspector` operations.
#[derive(Debug, thiserror::Error)]
pub enum Error {
	/// Could not decode Block or Extrinsic.
	// `#[from]` lets `?` convert codec errors into this variant automatically.
	#[error(transparent)]
	Codec(#[from] codec::Error),
	/// Error accessing blockchain DB.
	#[error(transparent)]
	Blockchain(#[from] sp_blockchain::Error),
	/// Given block has not been found.
	#[error("{0}")]
	NotFound(String),
}
/// A helper trait to access block headers and bodies.
pub trait ChainAccess<TBlock: Block>: HeaderBackend<TBlock> + BlockBackend<TBlock> {}
// Blanket impl: any type providing both a header and a block backend automatically
// implements `ChainAccess`.
impl<T, TBlock> ChainAccess<TBlock> for T
where
	TBlock: Block,
	T: sp_blockchain::HeaderBackend<TBlock> + sc_client_api::BlockBackend<TBlock>,
{
}
/// Blockchain inspector.
pub struct Inspector<TBlock: Block, TPrinter: PrettyPrinter<TBlock> = DebugPrinter> {
	/// Formatter used to render blocks and extrinsics.
	printer: TPrinter,
	/// Backend access to headers and block bodies.
	chain: Box<dyn ChainAccess<TBlock>>,
	// NOTE(review): `TBlock` already appears in `chain`'s type, so this marker looks
	// redundant; removal would touch the constructors, so left as-is for review.
	_block: PhantomData<TBlock>,
}
impl<TBlock: Block, TPrinter: PrettyPrinter<TBlock>> Inspector<TBlock, TPrinter> {
	/// Create new instance of the inspector with default printer.
	pub fn new(chain: impl ChainAccess<TBlock> + 'static) -> Self
	where
		TPrinter: Default,
	{
		Self::with_printer(chain, Default::default())
	}
	/// Customize pretty-printing of the data.
	pub fn with_printer(chain: impl ChainAccess<TBlock> + 'static, printer: TPrinter) -> Self {
		Inspector { chain: Box::new(chain) as _, printer, _block: Default::default() }
	}
	/// Get a pretty-printed block.
	pub fn block(&self, input: BlockAddressFor<TBlock>) -> Result<String, Error> {
		// Local adapter giving the block a `Display` impl that defers to the printer.
		struct BlockPrinter<'a, A, B>(A, &'a B);
		impl<'a, A: Block, B: PrettyPrinter<A>> fmt::Display for BlockPrinter<'a, A, B> {
			fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
				self.1.fmt_block(fmt, &self.0)
			}
		}
		let block = self.get_block(input)?;
		Ok(format!("{}", BlockPrinter(block, &self.printer)))
	}
	/// Resolve a [`BlockAddressFor`] into a full block: decode raw bytes directly, or look
	/// up header and body in the chain database by number or hash.
	fn get_block(&self, input: BlockAddressFor<TBlock>) -> Result<TBlock, Error> {
		Ok(match input {
			BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?,
			BlockAddress::Number(number) => {
				// Translate the number into a canonical hash first.
				let id = BlockId::number(number);
				let hash = self.chain.expect_block_hash_from_id(&id)?;
				let not_found = format!("Could not find block {:?}", id);
				let body = self
					.chain
					.block_body(hash)?
					.ok_or_else(|| Error::NotFound(not_found.clone()))?;
				let header =
					self.chain.header(hash)?.ok_or_else(|| Error::NotFound(not_found.clone()))?;
				TBlock::new(header, body)
			},
			BlockAddress::Hash(hash) => {
				let not_found = format!("Could not find block {:?}", BlockId::<TBlock>::Hash(hash));
				let body = self
					.chain
					.block_body(hash)?
					.ok_or_else(|| Error::NotFound(not_found.clone()))?;
				let header =
					self.chain.header(hash)?.ok_or_else(|| Error::NotFound(not_found.clone()))?;
				TBlock::new(header, body)
			},
		})
	}
	/// Get a pretty-printed extrinsic.
	pub fn extrinsic(
		&self,
		input: ExtrinsicAddress<<HashingFor<TBlock> as Hash>::Output, NumberFor<TBlock>>,
	) -> Result<String, Error> {
		// Local adapter giving the extrinsic a `Display` impl that defers to the printer.
		struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B);
		impl<'a, A: Block, B: PrettyPrinter<A>> fmt::Display for ExtrinsicPrinter<'a, A, B> {
			fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
				self.1.fmt_extrinsic(fmt, &self.0)
			}
		}
		let ext = match input {
			ExtrinsicAddress::Block(block, index) => {
				// Fetch the whole block, then pick the extrinsic out by index.
				let block = self.get_block(block)?;
				block.extrinsics().get(index).cloned().ok_or_else(|| {
					Error::NotFound(format!(
						"Could not find extrinsic {} in block {:?}",
						index, block
					))
				})?
			},
			ExtrinsicAddress::Bytes(bytes) => TBlock::Extrinsic::decode(&mut &*bytes)?,
		};
		Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer)))
	}
}
/// A block to retrieve.
///
/// Parsed from strings by the [`FromStr`] impl below, which tries hash first, then block
/// number, then hex-encoded raw bytes.
#[derive(Debug, Clone, PartialEq)]
pub enum BlockAddress<Hash, Number> {
	/// Get block by hash.
	Hash(Hash),
	/// Get block by number.
	Number(Number),
	/// Raw SCALE-encoded bytes.
	Bytes(Vec<u8>),
}
impl<Hash: FromStr, Number: FromStr> FromStr for BlockAddress<Hash, Number> {
	type Err = String;

	/// Parses a block address, trying interpretations in order: hash, then block
	/// number, then hex-encoded raw bytes.
	fn from_str(s: &str) -> Result<Self, Self::Err> {
		if let Ok(hash) = s.parse() {
			Ok(Self::Hash(hash))
		} else if let Ok(number) = s.parse() {
			Ok(Self::Number(number))
		} else {
			// Last resort: interpret the input as hex-encoded bytes.
			match sp_core::bytes::from_hex(s) {
				Ok(bytes) => Ok(Self::Bytes(bytes)),
				Err(e) => Err(format!(
					"Given string does not look like hash or number. It could not be parsed as bytes either: {}",
					e
				)),
			}
		}
	}
}
/// An extrinsic address to decode and print out.
///
/// Parsed from strings by the [`FromStr`] impl below: raw hex bytes take precedence,
/// otherwise the input is split into a block address and an index.
#[derive(Debug, Clone, PartialEq)]
pub enum ExtrinsicAddress<Hash, Number> {
	/// Extrinsic as part of existing block.
	Block(BlockAddress<Hash, Number>, usize),
	/// Raw SCALE-encoded extrinsic bytes.
	Bytes(Vec<u8>),
}
impl<Hash: FromStr + Debug, Number: FromStr + Debug> FromStr for ExtrinsicAddress<Hash, Number> {
	type Err = String;

	/// Parses an extrinsic address: either hex-encoded raw bytes, or a
	/// `{block}{sep}{index}` pair where the separator is `.`, `:` or a space.
	fn from_str(s: &str) -> Result<Self, Self::Err> {
		// Hex-encoded raw bytes take precedence over the `{block}:{index}` form.
		if let Ok(bytes) = sp_core::bytes::from_hex(s) {
			return Ok(Self::Bytes(bytes));
		}
		// Otherwise split into a block part and an index part.
		let mut parts = s.split(|c: char| matches!(c, '.' | ':' | ' '));
		let block_part =
			parts.next().expect("First element of split iterator is never empty; qed");
		let index_part = parts.next().ok_or("Extrinsic index missing: example \"5:0\"")?;
		let block = block_part.parse()?;
		let index = index_part.parse().map_err(|e| format!("Invalid index format: {}", e))?;
		Ok(Self::Block(block, index))
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	// `H160` gives a concrete 20-byte hash type, so 40-hex-digit strings parse as hashes.
	use sp_core::hash::H160 as Hash;
	#[test]
	fn should_parse_block_strings() {
		type BlockAddress = super::BlockAddress<Hash, u64>;
		// 40 hex digits → hash; decimal → number; `0x`-prefixed → raw bytes.
		let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258");
		let b1 = BlockAddress::from_str("1234");
		let b2 = BlockAddress::from_str("0");
		let b3 = BlockAddress::from_str("0x0012345f");
		assert_eq!(
			b0,
			Ok(BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()))
		);
		assert_eq!(b1, Ok(BlockAddress::Number(1234)));
		assert_eq!(b2, Ok(BlockAddress::Number(0)));
		assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f])));
	}
	#[test]
	fn should_parse_extrinsic_address() {
		type BlockAddress = super::BlockAddress<Hash, u64>;
		type ExtrinsicAddress = super::ExtrinsicAddress<Hash, u64>;
		// "1234" decodes as hex bytes (the bytes interpretation takes precedence),
		// while "{block}:{index}" strings produce `Block` variants.
		let e0 = ExtrinsicAddress::from_str("1234");
		let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5");
		let b1 = ExtrinsicAddress::from_str("1234:0");
		let b2 = ExtrinsicAddress::from_str("0 0");
		let b3 = ExtrinsicAddress::from_str("0x0012345f");
		assert_eq!(e0, Ok(ExtrinsicAddress::Bytes(vec![0x12, 0x34])));
		assert_eq!(
			b0,
			Ok(ExtrinsicAddress::Block(
				BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()),
				5
			))
		);
		assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0)));
		assert_eq!(b2, Ok(ExtrinsicAddress::Bytes(vec![0, 0])));
		assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f])));
	}
}
+25
View File
@@ -0,0 +1,25 @@
[package]
name = "node-primitives"
version = "2.0.0"
authors.workspace = true
description = "Substrate node low-level primitives."
edition.workspace = true
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
publish = false
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
sp-core = { workspace = true }
sp-runtime = { workspace = true }
[features]
default = ["std"]
std = ["sp-core/std", "sp-runtime/std"]
runtime-benchmarks = ["sp-runtime/runtime-benchmarks"]
+66
View File
@@ -0,0 +1,66 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Low-level types used throughout the Substrate code.
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
use sp_runtime::{
generic,
traits::{BlakeTwo256, IdentifyAccount, Verify},
MultiSignature, OpaqueExtrinsic,
};
/// An index to a block.
pub type BlockNumber = u32;
/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
/// The type for looking up accounts. We don't expect more than 4 billion of them.
pub type AccountIndex = u32;
/// Balance of an account.
pub type Balance = u128;
/// Type used for expressing timestamp.
// NOTE(review): `Moment` and `Timestamp` below are both `u64`; confirm both aliases are
// still required or whether one can be deprecated.
pub type Moment = u64;
/// Index of a transaction in the chain.
pub type Nonce = u32;
/// A hash of some data used by the chain.
pub type Hash = sp_core::H256;
/// A timestamp: milliseconds since the unix epoch.
/// `u64` is enough to represent a duration of half a billion years, when the
/// time scale is milliseconds.
pub type Timestamp = u64;
/// Digest item type.
pub type DigestItem = generic::DigestItem;
/// Header type.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type.
pub type Block = generic::Block<Header, OpaqueExtrinsic>;
/// Block ID.
pub type BlockId = generic::BlockId<Block>;
+75
View File
@@ -0,0 +1,75 @@
[package]
name = "node-rpc"
version = "3.0.0-dev"
authors.workspace = true
description = "Substrate node rpc methods."
edition.workspace = true
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
publish = false
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
jsonrpsee = { features = ["server"], workspace = true }
mmr-rpc = { workspace = true, default-features = true }
node-primitives = { workspace = true, default-features = true }
pallet-transaction-payment-rpc = { workspace = true, default-features = true }
sc-chain-spec = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
sc-consensus-babe = { workspace = true, default-features = true }
sc-consensus-babe-rpc = { workspace = true, default-features = true }
sc-consensus-beefy = { workspace = true, default-features = true }
sc-consensus-beefy-rpc = { workspace = true, default-features = true }
sc-consensus-grandpa = { workspace = true, default-features = true }
sc-consensus-grandpa-rpc = { workspace = true, default-features = true }
sc-mixnet = { workspace = true, default-features = true }
sc-rpc = { workspace = true, default-features = true }
sc-sync-state-rpc = { workspace = true, default-features = true }
sc-transaction-pool-api = { workspace = true, default-features = true }
sp-api = { workspace = true, default-features = true }
sp-application-crypto = { workspace = true, default-features = true }
sp-block-builder = { workspace = true, default-features = true }
sp-blockchain = { workspace = true, default-features = true }
sp-consensus = { workspace = true, default-features = true }
sp-consensus-babe = { workspace = true, default-features = true }
sp-consensus-beefy = { workspace = true, default-features = true }
sp-keystore = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-statement-store = { workspace = true, default-features = true }
substrate-frame-rpc-system = { workspace = true, default-features = true }
substrate-state-trie-migration-rpc = { workspace = true, default-features = true }
[features]
runtime-benchmarks = [
"mmr-rpc/runtime-benchmarks",
"node-primitives/runtime-benchmarks",
"pallet-transaction-payment-rpc/runtime-benchmarks",
"sc-chain-spec/runtime-benchmarks",
"sc-client-api/runtime-benchmarks",
"sc-consensus-babe-rpc/runtime-benchmarks",
"sc-consensus-babe/runtime-benchmarks",
"sc-consensus-beefy-rpc/runtime-benchmarks",
"sc-consensus-beefy/runtime-benchmarks",
"sc-consensus-grandpa-rpc/runtime-benchmarks",
"sc-consensus-grandpa/runtime-benchmarks",
"sc-mixnet/runtime-benchmarks",
"sc-rpc/runtime-benchmarks",
"sc-sync-state-rpc/runtime-benchmarks",
"sc-transaction-pool-api/runtime-benchmarks",
"sp-api/runtime-benchmarks",
"sp-block-builder/runtime-benchmarks",
"sp-blockchain/runtime-benchmarks",
"sp-consensus-babe/runtime-benchmarks",
"sp-consensus-beefy/runtime-benchmarks",
"sp-consensus/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-statement-store/runtime-benchmarks",
"substrate-frame-rpc-system/runtime-benchmarks",
"substrate-state-trie-migration-rpc/runtime-benchmarks",
]
+227
View File
@@ -0,0 +1,227 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A collection of node-specific RPC methods.
//!
//! Since `substrate` core functionality makes no assumptions
//! about the modules used inside the runtime, neither do the
//! RPC methods defined in the `sc-rpc` crate.
//! This means that `client/rpc` can't have any methods that
//! need strong assumptions about the particular runtime.
//!
//! The RPCs available in this crate however can make some assumptions
//! about how the runtime is constructed and what FRAME pallets
//! are part of it. Therefore all node-runtime-specific RPCs can
//! be placed here or imported from corresponding FRAME RPC definitions.
#![warn(missing_docs)]
#![warn(unused_crate_dependencies)]
use std::sync::Arc;
use jsonrpsee::RpcModule;
use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Nonce};
use sc_client_api::AuxStore;
use sc_consensus_babe::BabeWorkerHandle;
use sc_consensus_beefy::communication::notification::{
BeefyBestBlockStream, BeefyVersionedFinalityProofStream,
};
use sc_consensus_grandpa::{
FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState,
};
pub use sc_rpc::SubscriptionTaskExecutor;
use sc_transaction_pool_api::TransactionPool;
use sp_api::ProvideRuntimeApi;
use sp_application_crypto::RuntimeAppPublic;
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
use sp_consensus::SelectChain;
use sp_consensus_babe::BabeApi;
use sp_consensus_beefy::AuthorityIdBound;
use sp_keystore::KeystorePtr;
/// Extra dependencies for BABE.
///
/// Consumed by [`create_full`] when building the RPC module.
pub struct BabeDeps {
	/// A handle to the BABE worker for issuing requests.
	pub babe_worker_handle: BabeWorkerHandle<Block>,
	/// The keystore that manages the keys of the node.
	pub keystore: KeystorePtr,
}
/// Extra dependencies for GRANDPA
///
/// Consumed by [`create_full`] when building the RPC module.
pub struct GrandpaDeps<B> {
	/// Voting round info.
	pub shared_voter_state: SharedVoterState,
	/// Authority set info.
	pub shared_authority_set: SharedAuthoritySet<Hash, BlockNumber>,
	/// Receives notifications about justification events from Grandpa.
	pub justification_stream: GrandpaJustificationStream<Block>,
	/// Executor to drive the subscription manager in the Grandpa RPC handler.
	pub subscription_executor: SubscriptionTaskExecutor,
	/// Finality proof provider.
	pub finality_provider: Arc<FinalityProofProvider<B, Block>>,
}
/// Dependencies for BEEFY
///
/// Consumed by [`create_full`] when building the RPC module.
pub struct BeefyDeps<AuthorityId: AuthorityIdBound> {
	/// Receives notifications about finality proof events from BEEFY.
	pub beefy_finality_proof_stream: BeefyVersionedFinalityProofStream<Block, AuthorityId>,
	/// Receives notifications about best block events from BEEFY.
	pub beefy_best_block_stream: BeefyBestBlockStream<Block>,
	/// Executor to drive the subscription manager in the BEEFY RPC handler.
	pub subscription_executor: SubscriptionTaskExecutor,
}
/// Full client dependencies.
///
/// All fields are consumed by [`create_full`], which destructures this struct.
pub struct FullDeps<C, P, SC, B, AuthorityId: AuthorityIdBound> {
	/// The client instance to use.
	pub client: Arc<C>,
	/// Transaction pool instance.
	pub pool: Arc<P>,
	/// The SelectChain Strategy
	pub select_chain: SC,
	/// A copy of the chain spec.
	pub chain_spec: Box<dyn sc_chain_spec::ChainSpec>,
	/// BABE specific dependencies.
	pub babe: BabeDeps,
	/// GRANDPA specific dependencies.
	pub grandpa: GrandpaDeps<B>,
	/// BEEFY specific dependencies.
	pub beefy: BeefyDeps<AuthorityId>,
	/// Shared statement store reference.
	pub statement_store: Arc<dyn sp_statement_store::StatementStore>,
	/// The backend used by the node.
	pub backend: Arc<B>,
	/// Mixnet API.
	pub mixnet_api: Option<sc_mixnet::Api>,
}
/// Instantiate all Full RPC extensions.
///
/// Merges the System, MMR, transaction-payment, BABE, GRANDPA, sync-state,
/// state-trie-migration, Dev, statement-store, (optional) mixnet and BEEFY
/// RPC APIs into a single [`RpcModule`].
///
/// Errors if the backend provides no offchain storage (required by the MMR
/// RPC), if `SyncState::new` fails, or if two APIs register the same method
/// name during `merge`.
pub fn create_full<C, P, SC, B, AuthorityId>(
	FullDeps {
		client,
		pool,
		select_chain,
		chain_spec,
		babe,
		grandpa,
		beefy,
		statement_store,
		backend,
		mixnet_api,
	}: FullDeps<C, P, SC, B, AuthorityId>,
) -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>>
where
	C: ProvideRuntimeApi<Block>
		+ sc_client_api::BlockBackend<Block>
		+ HeaderBackend<Block>
		+ AuxStore
		+ HeaderMetadata<Block, Error = BlockChainError>
		+ Sync
		+ Send
		+ 'static,
	C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
	C::Api: mmr_rpc::MmrRuntimeApi<Block, <Block as sp_runtime::traits::Block>::Hash, BlockNumber>,
	C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
	C::Api: BabeApi<Block>,
	C::Api: BlockBuilder<Block>,
	P: TransactionPool + 'static,
	SC: SelectChain<Block> + 'static,
	B: sc_client_api::Backend<Block> + Send + Sync + 'static,
	B::State: sc_client_api::backend::StateBackend<sp_runtime::traits::HashingFor<Block>>,
	AuthorityId: AuthorityIdBound,
	<AuthorityId as RuntimeAppPublic>::Signature: Send + Sync,
{
	use mmr_rpc::{Mmr, MmrApiServer};
	use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
	use sc_consensus_babe_rpc::{Babe, BabeApiServer};
	use sc_consensus_beefy_rpc::{Beefy, BeefyApiServer};
	use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer};
	use sc_rpc::{
		dev::{Dev, DevApiServer},
		mixnet::MixnetApiServer,
		statement::StatementApiServer,
	};
	use sc_sync_state_rpc::{SyncState, SyncStateApiServer};
	use substrate_frame_rpc_system::{System, SystemApiServer};
	use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};

	let mut io = RpcModule::new(());
	// Destructure the consensus dependency bundles so individual pieces can be
	// handed to the respective RPC constructors below.
	let BabeDeps { keystore, babe_worker_handle } = babe;
	let GrandpaDeps {
		shared_voter_state,
		shared_authority_set,
		justification_stream,
		subscription_executor,
		finality_provider,
	} = grandpa;

	io.merge(System::new(client.clone(), pool).into_rpc())?;
	// Making synchronous calls in light client freezes the browser currently,
	// more context: https://github.com/paritytech/substrate/pull/3480
	// These RPCs should use an asynchronous caller instead.
	io.merge(
		Mmr::new(
			client.clone(),
			// The MMR RPC needs offchain storage; fail early if the backend
			// doesn't have one.
			backend
				.offchain_storage()
				.ok_or_else(|| "Backend doesn't provide an offchain storage")?,
		)
		.into_rpc(),
	)?;
	io.merge(TransactionPayment::new(client.clone()).into_rpc())?;
	io.merge(
		Babe::new(client.clone(), babe_worker_handle.clone(), keystore, select_chain).into_rpc(),
	)?;
	io.merge(
		Grandpa::new(
			subscription_executor,
			shared_authority_set.clone(),
			shared_voter_state,
			justification_stream,
			finality_provider,
		)
		.into_rpc(),
	)?;
	io.merge(
		SyncState::new(chain_spec, client.clone(), shared_authority_set, babe_worker_handle)?
			.into_rpc(),
	)?;
	io.merge(StateMigration::new(client.clone(), backend).into_rpc())?;
	io.merge(Dev::new(client).into_rpc())?;
	let statement_store = sc_rpc::statement::StatementStore::new(statement_store).into_rpc();
	io.merge(statement_store)?;

	// The mixnet RPC is only available when the node runs with a mixnet.
	if let Some(mixnet_api) = mixnet_api {
		let mixnet = sc_rpc::mixnet::Mixnet::new(mixnet_api).into_rpc();
		io.merge(mixnet)?;
	}

	io.merge(
		Beefy::<Block, AuthorityId>::new(
			beefy.beefy_finality_proof_stream,
			beefy.beefy_best_block_stream,
			beefy.subscription_executor,
		)?
		.into_rpc(),
	)?;
	Ok(io)
}
+82
View File
@@ -0,0 +1,82 @@
[package]
name = "kitchensink-runtime"
version = "3.0.0-dev"
authors.workspace = true
description = "Substrate node kitchensink runtime."
edition.workspace = true
build = "build.rs"
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
publish = false

[lints]
workspace = true

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
# third-party dependencies
array-bytes = { workspace = true }
codec = { features = ["derive", "max-encoded-len"], workspace = true }
log = { workspace = true }
# rand/rand_pcg are only needed when generating benchmark inputs, hence optional
# and pulled in by the `runtime-benchmarks` feature below.
rand = { workspace = true, optional = true }
rand_pcg = { workspace = true, optional = true }
scale-info = { features = ["derive", "serde"], workspace = true }
serde_json = { features = ["alloc", "arbitrary_precision"], workspace = true }
sp-debug-derive = { workspace = true, features = ["force-debug"] }
static_assertions = { workspace = true, default-features = true }

# pallet-asset-conversion: turn on "num-traits" feature
primitive-types = { features = [
	"codec",
	"num-traits",
	"scale-info",
], workspace = true }

# Umbrella crate providing the full runtime surface.
pezkuwi-sdk = { features = ["runtime-full", "tuples-96"], workspace = true }

# shared code between runtime and node
node-primitives = { workspace = true }

# Example pallets that are not published:
pallet-example-mbm = { workspace = true }
pallet-example-tasks = { workspace = true }

[build-dependencies]
# Optional so that `no_std`-only builds (see build.rs) can skip the Wasm build.
substrate-wasm-builder = { optional = true, workspace = true, default-features = true }

[features]
default = ["std"]
with-tracing = ["pezkuwi-sdk/with-tracing"]
std = [
	"codec/std",
	"log/std",
	"node-primitives/std",
	"pallet-example-mbm/std",
	"pallet-example-tasks/std",
	"pezkuwi-sdk/std",
	"primitive-types/std",
	"rand?/std",
	"scale-info/std",
	"serde_json/std",
	"sp-debug-derive/std",
	"substrate-wasm-builder",
]
runtime-benchmarks = [
	"node-primitives/runtime-benchmarks",
	"pallet-example-mbm/runtime-benchmarks",
	"pallet-example-tasks/runtime-benchmarks",
	"pezkuwi-sdk/runtime-benchmarks",
	"rand",
	"rand_pcg",
	"substrate-wasm-builder?/runtime-benchmarks",
]
try-runtime = [
	"pallet-example-mbm/try-runtime",
	"pallet-example-tasks/try-runtime",
	"pezkuwi-sdk/try-runtime",
]
experimental = ["pallet-example-tasks/experimental"]
metadata-hash = ["substrate-wasm-builder/metadata-hash"]
+31
View File
@@ -0,0 +1,31 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Build the runtime Wasm blob with default settings when the `metadata-hash`
// feature is disabled.
#[cfg(all(feature = "std", not(feature = "metadata-hash")))]
fn main() {
	substrate_wasm_builder::WasmBuilder::build_using_defaults()
}

// With `metadata-hash` enabled, additionally embed the metadata hash into the
// Wasm blob (token symbol "Test", 14 decimals).
#[cfg(all(feature = "std", feature = "metadata-hash"))]
fn main() {
	substrate_wasm_builder::WasmBuilder::init_with_defaults()
		.enable_metadata_hash("Test", 14)
		.build()
}

// When compiling the runtime itself (`no_std`), the build script has nothing
// to do.
#[cfg(not(feature = "std"))]
fn main() {}
@@ -0,0 +1,35 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Runtime API definition for assets.

use pezkuwi_sdk::*;

use alloc::vec::Vec;
use codec::Codec;

sp_api::decl_runtime_apis! {
	/// Runtime API for querying the asset holdings of an account.
	pub trait AssetsApi<AccountId, AssetBalance, AssetId>
	where
		AccountId: Codec,
		AssetBalance: Codec,
		AssetId: Codec,
	{
		/// Returns the list of `AssetId`s and corresponding balance that an `AccountId` has.
		fn account_balances(account: AccountId) -> Vec<(AssetId, AssetBalance)>;
	}
}
@@ -0,0 +1,77 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A set of constant values used in substrate runtime.
/// Money matters.
pub mod currency {
	use node_primitives::Balance;

	pub const MILLICENTS: Balance = 1_000_000_000;
	pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent.
	pub const DOLLARS: Balance = 100 * CENTS;

	/// Deposit for storing data on-chain: 15 CENTS per item plus 6 CENTS per
	/// byte.
	pub const fn deposit(items: u32, bytes: u32) -> Balance {
		items as Balance * 15 * CENTS + (bytes as Balance) * 6 * CENTS
	}
}
/// Time.
pub mod time {
	use node_primitives::{BlockNumber, Moment};

	/// Since BABE is probabilistic this is the average expected block time that
	/// we are targeting. Blocks will be produced at a minimum duration defined
	/// by `SLOT_DURATION`, but some slots will not be allocated to any
	/// authority and hence no block will be produced. We expect to have this
	/// block time on average following the defined slot duration and the value
	/// of `c` configured for BABE (where `1 - c` represents the probability of
	/// a slot being empty).
	/// This value is only used indirectly to define the unit constants below
	/// that are expressed in blocks. The rest of the code should use
	/// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the
	/// minimum period).
	///
	/// If using BABE with secondary slots (default) then all of the slots will
	/// always be assigned, in which case `MILLISECS_PER_BLOCK` and
	/// `SLOT_DURATION` should have the same value.
	///
	/// <https://research.web3.foundation/Polkadot/protocols/block-production/Babe#6-practical-results>
	pub const MILLISECS_PER_BLOCK: Moment = 3000;
	/// Target block time in whole seconds (integer division of the above).
	pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000;

	// NOTE: Currently it is not possible to change the slot duration after the chain has started.
	// Attempting to do so will brick block production.
	pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK;

	// 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks.
	pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);

	// NOTE: Currently it is not possible to change the epoch duration after the chain has started.
	// Attempting to do so will brick block production.
	pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES;
	/// Epoch length in slots, scaled by the expected block-to-slot fill rate.
	pub const EPOCH_DURATION_IN_SLOTS: u64 = {
		const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64;

		(EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64
	};

	// These time units are defined in number of blocks.
	pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber);
	pub const HOURS: BlockNumber = MINUTES * 60;
	pub const DAYS: BlockNumber = HOURS * 24;
}
@@ -0,0 +1,222 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Genesis Presets for the Kitchensink Runtime
use pezkuwi_sdk::*;
use crate::{
constants::currency::*, frame_support::build_struct_json_patch, AccountId, AssetsConfig,
BabeConfig, Balance, BalancesConfig, ElectionsConfig, NominationPoolsConfig, ReviveConfig,
RuntimeGenesisConfig, SessionConfig, SessionKeys, SocietyConfig, StakerStatus, StakingConfig,
SudoConfig, TechnicalCommitteeConfig, BABE_GENESIS_EPOCH_CONFIG,
};
use alloc::{vec, vec::Vec};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use pallet_revive::is_eth_derived;
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId;
use sp_consensus_grandpa::AuthorityId as GrandpaId;
use sp_core::{crypto::get_public_from_string_or_panic, sr25519};
use sp_genesis_builder::PresetId;
use sp_keyring::Sr25519Keyring;
use sp_mixnet::types::AuthorityId as MixnetId;
use sp_runtime::Perbill;
/// Balance given to each endowed genesis account.
pub const ENDOWMENT: Balance = 10_000_000 * DOLLARS;
/// Stake bonded by each initial validator (a fraction of the endowment).
pub const STASH: Balance = ENDOWMENT / 1000;

/// The staker type as supplied to the Staking config.
pub type Staker = (AccountId, AccountId, Balance, StakerStatus<AccountId>);
/// Helper function to create RuntimeGenesisConfig json patch for testing.
///
/// * `initial_authorities` — (stash, controller, session keys) for each
///   genesis authority; also becomes the staking invulnerables set.
/// * `root_key` — the sudo key.
/// * `endowed_accounts` — accounts funded with [`ENDOWMENT`]; a prefix of them
///   seeds the elected members and technical committee.
/// * `stakers` — entries for the staking pallet's genesis config.
pub fn kitchensink_genesis(
	initial_authorities: Vec<(AccountId, AccountId, SessionKeys)>,
	root_key: AccountId,
	endowed_accounts: Vec<AccountId>,
	stakers: Vec<Staker>,
) -> serde_json::Value {
	// All initial authorities are counted as (minimum) validators.
	let validator_count = initial_authorities.len() as u32;
	let minimum_validator_count = validator_count;
	let collective = collective(&endowed_accounts);

	build_struct_json_patch!(RuntimeGenesisConfig {
		balances: BalancesConfig {
			balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(),
			..Default::default()
		},
		session: SessionConfig {
			keys: initial_authorities
				.iter()
				.map(|x| { (x.0.clone(), x.1.clone(), x.2.clone()) })
				.collect(),
		},
		staking: StakingConfig {
			validator_count,
			minimum_validator_count,
			invulnerables: initial_authorities
				.iter()
				.map(|x| x.0.clone())
				.collect::<Vec<_>>()
				.try_into()
				.expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"),
			slash_reward_fraction: Perbill::from_percent(10),
			stakers,
		},
		elections: ElectionsConfig {
			members: collective.iter().cloned().map(|member| (member, STASH)).collect(),
		},
		technical_committee: TechnicalCommitteeConfig { members: collective },
		sudo: SudoConfig { key: Some(root_key) },
		babe: BabeConfig { epoch_config: BABE_GENESIS_EPOCH_CONFIG },
		society: SocietyConfig { pot: 0 },
		assets: AssetsConfig {
			// This asset is used by the NIS pallet as counterpart currency.
			assets: vec![(9, Sr25519Keyring::Alice.to_account_id(), true, 1)],
			..Default::default()
		},
		nomination_pools: NominationPoolsConfig {
			min_create_bond: 10 * DOLLARS,
			min_join_bond: 1 * DOLLARS,
		},
		revive: ReviveConfig {
			// Only map accounts that are not already ETH-derived.
			mapped_accounts: endowed_accounts.iter().filter(|x| ! is_eth_derived(x)).cloned().collect(),
		},
	})
}
/// Provides the JSON representation of predefined genesis config for given `id`.
///
/// Returns `None` for unrecognised preset ids.
pub fn get_preset(id: &PresetId) -> Option<Vec<u8>> {
	// Note: Can't use `Sr25519Keyring::Alice.to_seed()` because the seed comes with `//`.
	let (alice_stash, alice, alice_session_keys) = authority_keys_from_seed("Alice");
	let (bob_stash, _bob, bob_session_keys) = authority_keys_from_seed("Bob");
	let endowed = well_known_including_eth_accounts();

	let genesis = if id.as_ref() == sp_genesis_builder::DEV_RUNTIME_PRESET {
		kitchensink_genesis(
			// Use stash as controller account, otherwise grandpa can't load the authority set at
			// genesis.
			vec![(alice_stash.clone(), alice_stash.clone(), alice_session_keys)],
			alice.clone(),
			endowed,
			vec![validator(alice_stash.clone())],
		)
	} else if id.as_ref() == sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET {
		kitchensink_genesis(
			// Use stash as controller account, otherwise grandpa can't load the authority set
			// at genesis.
			vec![
				(alice_stash.clone(), alice_stash.clone(), alice_session_keys),
				(bob_stash.clone(), bob_stash.clone(), bob_session_keys),
			],
			alice,
			endowed,
			vec![validator(alice_stash), validator(bob_stash)],
		)
	} else {
		return None;
	};

	let bytes = serde_json::to_string(&genesis)
		.expect("serialization to json is expected to work. qed.")
		.into_bytes();
	Some(bytes)
}
/// List of supported presets.
pub fn preset_names() -> Vec<PresetId> {
	[sp_genesis_builder::DEV_RUNTIME_PRESET, sp_genesis_builder::LOCAL_TESTNET_RUNTIME_PRESET]
		.into_iter()
		.map(PresetId::from)
		.collect()
}
/// Sets up the `account` to be a staker of validator variant as supplied to the
/// staking config.
pub fn validator(account: AccountId) -> Staker {
	// The same account serves as both stash and controller; it stakes `STASH`
	// with validator status.
	let stash = account.clone();
	(stash, account, STASH, StakerStatus::Validator)
}
/// Extract some accounts from endowed to be put into the collective.
fn collective(endowed: &[AccountId]) -> Vec<AccountId> {
	const MAX_COLLECTIVE_SIZE: usize = 50;

	// Half of the endowed accounts (rounded up), capped at
	// `MAX_COLLECTIVE_SIZE`. `limit` never exceeds `endowed.len()`, so the
	// slice below is in bounds.
	let limit = endowed.len().div_ceil(2).min(MAX_COLLECTIVE_SIZE);
	endowed[..limit].to_vec()
}
/// The Keyring's wellknown accounts + Alith and Baltathar.
///
/// Some integration tests require these ETH accounts.
pub fn well_known_including_eth_accounts() -> Vec<AccountId> {
	let eth_accounts: [AccountId; 2] = [
		// subxt_signer::eth::dev::alith()
		array_bytes::hex_n_into_unchecked(
			"f24ff3a9cf04c71dbc94d0b566f7a27b94566caceeeeeeeeeeeeeeeeeeeeeeee",
		),
		// subxt_signer::eth::dev::baltathar()
		array_bytes::hex_n_into_unchecked(
			"3cd0a705a2dc65e5b1e1205896baa2be8a07c6e0eeeeeeeeeeeeeeeeeeeeeeee",
		),
	];

	let mut accounts: Vec<AccountId> =
		Sr25519Keyring::well_known().map(|k| k.to_account_id()).collect();
	accounts.extend(eth_accounts);
	accounts
}
/// Helper function to generate stash, controller and session key from seed.
///
/// Note: `//` is prepended internally.
pub fn authority_keys_from_seed(seed: &str) -> (AccountId, AccountId, SessionKeys) {
	// The stash account is derived from `<seed>//stash`, the controller from
	// the bare seed.
	let stash_seed = alloc::format!("{seed}//stash");
	let stash: AccountId = get_public_from_string_or_panic::<sr25519::Public>(&stash_seed).into();
	let controller: AccountId = get_public_from_string_or_panic::<sr25519::Public>(seed).into();
	(stash, controller, session_keys_from_seed(seed))
}
/// Assembles the given public keys into a [`SessionKeys`] struct.
pub fn session_keys(
	grandpa: GrandpaId,
	babe: BabeId,
	im_online: ImOnlineId,
	authority_discovery: AuthorityDiscoveryId,
	mixnet: MixnetId,
	beefy: BeefyId,
) -> SessionKeys {
	SessionKeys { grandpa, babe, im_online, authority_discovery, mixnet, beefy }
}
/// We have this method as there is no straight forward way to convert the
/// account keyring into these ids.
///
/// Derives one public key per session-key type from the same seed.
///
/// Note: `//` is prepended internally.
pub fn session_keys_from_seed(seed: &str) -> SessionKeys {
	session_keys(
		get_public_from_string_or_panic::<GrandpaId>(seed),
		get_public_from_string_or_panic::<BabeId>(seed),
		get_public_from_string_or_panic::<ImOnlineId>(seed),
		get_public_from_string_or_panic::<AuthorityDiscoveryId>(seed),
		get_public_from_string_or_panic::<MixnetId>(seed),
		get_public_from_string_or_panic::<BeefyId>(seed),
	)
}
+493
View File
@@ -0,0 +1,493 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Some configurable implementations as associated type for the substrate runtime.
use alloc::boxed::Box;
use frame_support::{
pallet_prelude::*,
traits::{
fungibles::{Balanced, Credit},
Currency, OnUnbalanced,
},
};
use pallet_alliance::{IdentityVerifier, ProposalIndex, ProposalProvider};
use pallet_asset_tx_payment::HandleCredit;
use pallet_identity::legacy::IdentityField;
use pezkuwi_sdk::*;
use crate::{
AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Hash,
NegativeImbalance, Runtime, RuntimeCall,
};
/// Handles fee/tip imbalances by crediting them to the block author.
pub struct Author;
impl OnUnbalanced<NegativeImbalance> for Author {
	fn on_nonzero_unbalanced(amount: NegativeImbalance) {
		// Credit the author if one is set; otherwise `amount` is dropped and
		// resolved by its `Drop` impl.
		if let Some(author) = Authorship::author() {
			Balances::resolve_creating(&author, amount);
		}
	}
}
/// A `HandleCredit` implementation that naively transfers the fees to the block author.
/// Will drop and burn the assets in case the transfer fails.
pub struct CreditToBlockAuthor;
impl HandleCredit<AccountId, Assets> for CreditToBlockAuthor {
	fn handle_credit(credit: Credit<AccountId, Assets>) {
		// Without an author (e.g. in tests) the credit is simply dropped.
		if let Some(author) = pallet_authorship::Pallet::<Runtime>::author() {
			// Drop the result which will trigger the `OnDrop` of the imbalance in case of error.
			let _ = Assets::resolve(&author, credit);
		}
	}
}
/// Verifies on-chain identity requirements for alliance membership via the
/// identity pallet.
pub struct AllianceIdentityVerifier;
impl IdentityVerifier<AccountId> for AllianceIdentityVerifier {
	fn has_required_identities(who: &AccountId) -> bool {
		// Both the Display and Web identity fields must be set.
		let required_fields = (IdentityField::Display | IdentityField::Web).bits();
		crate::Identity::has_identity(who, required_fields)
	}

	fn has_good_judgement(who: &AccountId) -> bool {
		use pallet_identity::{IdentityOf, Judgement};
		// At least one registrar judgement of `KnownGood` or `Reasonable`
		// counts as "good".
		match IdentityOf::<Runtime>::get(who) {
			Some(registration) => registration
				.judgements
				.iter()
				.any(|(_, judgement)| {
					matches!(judgement, Judgement::KnownGood | Judgement::Reasonable)
				}),
			None => false,
		}
	}

	fn super_account_id(who: &AccountId) -> Option<AccountId> {
		use pallet_identity::SuperOf;
		// The stored value is `(parent_account, data)`; only the parent is of
		// interest here.
		SuperOf::<Runtime>::get(who).map(|(parent, _)| parent)
	}
}
/// Routes alliance proposal operations to the alliance motion (collective)
/// pallet.
pub struct AllianceProposalProvider;
impl ProposalProvider<AccountId, Hash, RuntimeCall> for AllianceProposalProvider {
	// Delegates to the collective's propose logic.
	fn propose_proposal(
		who: AccountId,
		threshold: u32,
		proposal: Box<RuntimeCall>,
		length_bound: u32,
	) -> Result<(u32, u32), DispatchError> {
		AllianceMotion::do_propose_proposed(who, threshold, proposal, length_bound)
	}

	// Delegates to the collective's vote logic.
	fn vote_proposal(
		who: AccountId,
		proposal: Hash,
		index: ProposalIndex,
		approve: bool,
	) -> Result<bool, DispatchError> {
		AllianceMotion::do_vote(who, proposal, index, approve)
	}

	// Delegates to the collective's close logic.
	fn close_proposal(
		proposal_hash: Hash,
		proposal_index: ProposalIndex,
		proposal_weight_bound: Weight,
		length_bound: u32,
	) -> DispatchResultWithPostInfo {
		AllianceMotion::do_close(proposal_hash, proposal_index, proposal_weight_bound, length_bound)
	}

	// Reads the proposal call directly from the collective's storage.
	fn proposal_of(proposal_hash: Hash) -> Option<RuntimeCall> {
		pallet_collective::ProposalOf::<Runtime, AllianceCollective>::get(proposal_hash)
	}
}
// Tests for the `TargetedFeeAdjustment` fee-multiplier configuration: the
// runtime implementation is compared against a floating-point reference
// implementation, and its growth/decay behavior is checked.
#[cfg(test)]
mod multiplier_tests {
	use frame_support::{
		dispatch::DispatchClass,
		weights::{Weight, WeightToFee},
	};
	use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment};
	use pezkuwi_sdk::*;
	use sp_runtime::{
		assert_eq_error_rate,
		traits::{Convert, One, Zero},
		BuildStorage, FixedPointNumber,
	};

	use crate::{
		constants::{currency::*, time::*},
		AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime,
		RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment,
	};

	/// Maximum total weight available to `Normal` dispatchables.
	fn max_normal() -> Weight {
		BlockWeights::get()
			.get(DispatchClass::Normal)
			.max_total
			.unwrap_or_else(|| BlockWeights::get().max_block)
	}

	/// Lower bound the multiplier can never fall below.
	fn min_multiplier() -> Multiplier {
		MinimumMultiplier::get()
	}

	/// The block fullness the fee adjustment targets.
	fn target() -> Weight {
		TargetBlockFullness::get() * max_normal()
	}

	// update based on runtime impl.
	fn runtime_multiplier_update(fm: Multiplier) -> Multiplier {
		TargetedFeeAdjustment::<
			Runtime,
			TargetBlockFullness,
			AdjustmentVariable,
			MinimumMultiplier,
			MaximumMultiplier,
		>::convert(fm)
	}

	// update based on reference impl.
	fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier {
		let accuracy = Multiplier::accuracy() as f64;
		let previous_float = previous.into_inner() as f64 / accuracy;
		// bump if it is zero.
		let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy);

		let max_normal = max_normal();
		let target_weight = target();
		let normalized_weight_dimensions = (
			block_weight.ref_time() as f64 / max_normal.ref_time() as f64,
			block_weight.proof_size() as f64 / max_normal.proof_size() as f64,
		);

		// Pick whichever weight dimension (ref time or proof size) is more
		// saturated, mirroring the runtime's limiting-dimension logic.
		let (normal, max, target) =
			if normalized_weight_dimensions.0 < normalized_weight_dimensions.1 {
				(block_weight.proof_size(), max_normal.proof_size(), target_weight.proof_size())
			} else {
				(block_weight.ref_time(), max_normal.ref_time(), target_weight.ref_time())
			};

		// maximum tx weight
		let m = max as f64;
		// block weight always truncated to max weight
		let block_weight = (normal as f64).min(m);
		let v: f64 = AdjustmentVariable::get().to_float();

		// Ideal saturation in terms of weight
		let ss = target as f64;
		// Current saturation in terms of weight
		let s = block_weight;

		// Second-order Taylor expansion of the multiplier update formula.
		let t1 = v * (s / m - ss / m);
		let t2 = v.powi(2) * (s / m - ss / m).powi(2) / 2.0;
		let next_float = previous_float * (1.0 + t1 + t2);
		Multiplier::from_float(next_float)
	}

	/// Runs `assertions` in fresh test externalities with the block's consumed
	/// normal weight set to `w`.
	fn run_with_system_weight<F>(w: Weight, assertions: F)
	where
		F: Fn() -> (),
	{
		let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::<Runtime>::default()
			.build_storage()
			.unwrap()
			.into();
		t.execute_with(|| {
			System::set_block_consumed_resources(w, 0);
			assertions()
		});
	}

	#[test]
	fn truth_value_update_poc_works() {
		let fm = Multiplier::saturating_from_rational(1, 2);
		let test_set = vec![
			(Weight::zero(), fm),
			(Weight::from_parts(100, 0), fm),
			(Weight::from_parts(1000, 0), fm),
			(target(), fm),
			(max_normal() / 2, fm),
			(max_normal(), fm),
		];
		test_set.into_iter().for_each(|(w, fm)| {
			run_with_system_weight(w, || {
				assert_eq_error_rate!(
					truth_value_update(w, fm),
					runtime_multiplier_update(fm),
					// Error is only 1 in 100^18
					Multiplier::from_inner(100),
				);
			})
		})
	}

	#[test]
	fn multiplier_can_grow_from_zero() {
		// if the min is too small, then this will not change, and we are doomed forever.
		// the block ref time is 1/100th bigger than target.
		run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || {
			let next = runtime_multiplier_update(min_multiplier());
			assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier());
		});

		// the block proof size is 1/100th bigger than target.
		run_with_system_weight(target().set_proof_size((target().proof_size() / 100) * 101), || {
			let next = runtime_multiplier_update(min_multiplier());
			assert!(next > min_multiplier(), "{:?} !> {:?}", next, min_multiplier());
		})
	}

	#[test]
	fn multiplier_cannot_go_below_limit() {
		// will not go any further below even if block is empty.
		run_with_system_weight(Weight::zero(), || {
			let next = runtime_multiplier_update(min_multiplier());
			assert_eq!(next, min_multiplier());
		})
	}

	#[test]
	fn time_to_reach_zero() {
		// blocks per 24h in substrate-node: 28,800 (k)
		// s* = 0.1875
		// The bound from the research in an empty chain is:
		// v <~ (p / k(0 - s*))
		// p > v * k * -0.1875
		// to get p == -1 we'd need
		// -1 > 0.00001 * k * -0.1875
		// 1 < 0.00001 * k * 0.1875
		// 10^9 / 1875 < k
		// k > 533_333 ~ 18,5 days.
		run_with_system_weight(Weight::zero(), || {
			// start from 1, the default.
			let mut fm = Multiplier::one();
			let mut iterations: u64 = 0;
			loop {
				let next = runtime_multiplier_update(fm);
				fm = next;
				if fm == min_multiplier() {
					break;
				}
				iterations += 1;
			}
			assert!(iterations > 533_333);
		})
	}

	#[test]
	fn min_change_per_day() {
		run_with_system_weight(max_normal(), || {
			let mut fm = Multiplier::one();
			// See the example in the doc of `TargetedFeeAdjustment`: with fully
			// congested blocks the daily increase is at least 0.234, hence
			// `fm > 1.234` after one day.
			for _ in 0..DAYS {
				let next = runtime_multiplier_update(fm);
				fm = next;
			}
			assert!(fm > Multiplier::saturating_from_rational(1234, 1000));
		})
	}

	#[test]
	#[ignore]
	fn congested_chain_simulation() {
		// `cargo test congested_chain_simulation -- --nocapture` to get some insight.

		// almost full. The entire quota of normal transactions is taken.
		let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() -
			Weight::from_parts(100, 0);

		// Default substrate weight.
		let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get();

		run_with_system_weight(block_weight, || {
			// initial value configured on module
			let mut fm = Multiplier::one();
			assert_eq!(fm, TransactionPayment::next_fee_multiplier());

			let mut iterations: u64 = 0;
			loop {
				let next = runtime_multiplier_update(fm);
				// if no change, panic. This should never happen in this case.
				if fm == next {
					panic!("The fee should always increase");
				}
				fm = next;
				iterations += 1;
				let fee =
					<Runtime as pallet_transaction_payment::Config>::WeightToFee::weight_to_fee(
						&tx_weight,
					);
				let adjusted_fee = fm.saturating_mul_acc_int(fee);
				println!(
					"iteration {}, new fm = {:?}. Fee at this point is: {} units / {} millicents, \
					{} cents, {} dollars",
					iterations,
					fm,
					adjusted_fee,
					adjusted_fee / MILLICENTS,
					adjusted_fee / CENTS,
					adjusted_fee / DOLLARS,
				);
			}
		});
	}

	#[test]
	fn stateless_weight_mul() {
		let fm = Multiplier::saturating_from_rational(1, 2);
		run_with_system_weight(target() / 4, || {
			let next = runtime_multiplier_update(fm);
			assert_eq_error_rate!(
				next,
				truth_value_update(target() / 4, fm),
				Multiplier::from_inner(100),
			);

			// Light block. Multiplier is reduced a little.
			assert!(next < fm);
		});

		run_with_system_weight(target() / 2, || {
			let next = runtime_multiplier_update(fm);
			assert_eq_error_rate!(
				next,
				truth_value_update(target() / 2, fm),
				Multiplier::from_inner(100),
			);
			// Light block. Multiplier is reduced a little.
			assert!(next < fm);
		});
		run_with_system_weight(target(), || {
			let next = runtime_multiplier_update(fm);
			assert_eq_error_rate!(
				next,
				truth_value_update(target(), fm),
				Multiplier::from_inner(100),
			);
			// ideal. No changes.
			assert_eq!(next, fm)
		});
		run_with_system_weight(target() * 2, || {
			// More than ideal. Fee is increased.
			let next = runtime_multiplier_update(fm);
			assert_eq_error_rate!(
				next,
				truth_value_update(target() * 2, fm),
				Multiplier::from_inner(100),
			);

			// Heavy block. Fee is increased a little.
			assert!(next > fm);
		});
	}

	#[test]
	fn weight_mul_grow_on_big_block() {
		run_with_system_weight(target() * 2, || {
			let mut original = Multiplier::zero();
			let mut next = Multiplier::default();

			(0..1_000).for_each(|_| {
				next = runtime_multiplier_update(original);
				assert_eq_error_rate!(
					next,
					truth_value_update(target() * 2, original),
					Multiplier::from_inner(100),
				);
				// must always increase
				assert!(next > original, "{:?} !> {:?}", next, original);
				original = next;
			});
		});
	}

	#[test]
	fn weight_mul_decrease_on_small_block() {
		run_with_system_weight(target() / 2, || {
			let mut original = Multiplier::saturating_from_rational(1, 2);
			let mut next;

			for _ in 0..100 {
				// decreases
				next = runtime_multiplier_update(original);
				assert!(next < original, "{:?} !< {:?}", next, original);
				original = next;
			}
		})
	}

	#[test]
	fn weight_to_fee_should_not_overflow_on_large_weights() {
		let kb_time = Weight::from_parts(1024, 0);
		let kb_size = Weight::from_parts(0, 1024);
		let mb_time = 1024u64 * kb_time;
		let max_fm = Multiplier::saturating_from_integer(i128::MAX);

		// check that for all values it can compute, correctly.
		vec![
			Weight::zero(),
			// testcases ignoring proof size part of the weight.
			Weight::from_parts(1, 0),
			Weight::from_parts(10, 0),
			Weight::from_parts(1000, 0),
			kb_time,
			10u64 * kb_time,
			100u64 * kb_time,
			mb_time,
			10u64 * mb_time,
			Weight::from_parts(2147483647, 0),
			Weight::from_parts(4294967295, 0),
			// testcases ignoring ref time part of the weight.
			Weight::from_parts(0, 100000000000),
			1000000u64 * kb_size,
			1000000000u64 * kb_size,
			Weight::from_parts(0, 18014398509481983),
			Weight::from_parts(0, 9223372036854775807),
			// test cases with both parts of the weight.
			BlockWeights::get().max_block / 1024,
			BlockWeights::get().max_block / 2,
			BlockWeights::get().max_block,
			Weight::MAX / 2,
			Weight::MAX,
		]
		.into_iter()
		.for_each(|i| {
			run_with_system_weight(i, || {
				let next = runtime_multiplier_update(Multiplier::one());
				let truth = truth_value_update(i, Multiplier::one());
				assert_eq_error_rate!(truth, next, Multiplier::from_inner(50_000_000));
			});
		});

		// Some values that are all above the target and will cause an increase.
		let t = target();
		vec![
			t + Weight::from_parts(100, 0),
			t + Weight::from_parts(0, t.proof_size() * 2),
			t * 2,
			t * 4,
		]
		.into_iter()
		.for_each(|i| {
			run_with_system_weight(i, || {
				let fm = runtime_multiplier_update(max_fm);
				// won't grow. The convert saturates everything.
				assert_eq!(fm, max_fm);
			})
		});
	}
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,238 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Autogenerated bag thresholds.
//!
//! Generated on 2022-08-15T19:26:59.939787+00:00
//! Arguments
//! Total issuance: 100000000000000
//! Minimum balance: 100000000000000
//! for the node runtime.
/// Existential weight for this runtime.
///
/// Matches the minimum balance the thresholds below were generated with (see the
/// generation arguments in the module docs).
#[cfg(any(test, feature = "std"))]
#[allow(unused)] // consumed by tooling/tests rather than the runtime itself
pub const EXISTENTIAL_WEIGHT: u64 = 100_000_000_000_000;

/// Constant ratio between bags for this runtime.
///
/// Each entry of `THRESHOLDS` is (approximately) the previous one scaled by this
/// ratio.
#[cfg(any(test, feature = "std"))]
#[allow(unused)] // consumed by tooling/tests rather than the runtime itself
pub const CONSTANT_RATIO: f64 = 1.0628253590743408;
/// Upper thresholds delimiting the bag list.
///
/// Monotonically increasing; the first entry equals `EXISTENTIAL_WEIGHT` and the
/// last entry is `u64::MAX`, so every possible weight falls into some bag.
/// Autogenerated — do not edit by hand; regenerate with the arguments recorded in
/// the module docs instead.
pub const THRESHOLDS: [u64; 200] = [
	100_000_000_000_000,
	106_282_535_907_434,
	112_959_774_389_150,
	120_056_512_776_105,
	127_599_106_300_477,
	135_615_565_971_369,
	144_135_662_599_590,
	153_191_037_357_827,
	162_815_319_286_803,
	173_044_250_183_800,
	183_915_817_337_347,
	195_470_394_601_017,
	207_750_892_330_229,
	220_802_916_738_890,
	234_674_939_267_673,
	249_418_476_592_914,
	265_088_281_944_639,
	281_742_548_444_211,
	299_443_125_216_738,
	318_255_747_080_822,
	338_250_278_668_647,
	359_500_973_883_001,
	382_086_751_654_776,
	406_091_489_025_036,
	431_604_332_640_068,
	458_720_029_816_222,
	487_539_280_404_019,
	518_169_110_758_247,
	550_723_271_202_866,
	585_322_658_466_782,
	622_095_764_659_305,
	661_179_154_452_653,
	702_717_972_243_610,
	746_866_481_177_808,
	793_788_636_038_393,
	843_658_692_126_636,
	896_661_852_395_681,
	952_994_955_240_703,
	1_012_867_205_499_736,
	1_076_500_951_379_881,
	1_144_132_510_194_192,
	1_216_013_045_975_769,
	1_292_409_502_228_280,
	1_373_605_593_276_862,
	1_459_902_857_901_004,
	1_551_621_779_162_291,
	1_649_102_974_585_730,
	1_752_708_461_114_642,
	1_862_822_999_536_805,
	1_979_855_523_374_646,
	2_104_240_657_545_975,
	2_236_440_332_435_128,
	2_376_945_499_368_703,
	2_526_277_953_866_680,
	2_684_992_273_439_945,
	2_853_677_877_130_641,
	3_032_961_214_443_876,
	3_223_508_091_799_862,
	3_426_026_145_146_232,
	3_641_267_467_913_124,
	3_870_031_404_070_482,
	4_113_167_516_660_186,
	4_371_578_742_827_277,
	4_646_224_747_067_156,
	4_938_125_485_141_739,
	5_248_364_991_899_922,
	5_578_095_407_069_235,
	5_928_541_253_969_291,
	6_301_003_987_036_955,
	6_696_866_825_051_405,
	7_117_599_888_008_300,
	7_564_765_656_719_910,
	8_040_024_775_416_580,
	8_545_142_218_898_723,
	9_081_993_847_142_344,
	9_652_573_371_700_016,
	10_258_999_759_768_490,
	10_903_525_103_419_522,
	11_588_542_983_217_942,
	12_316_597_357_287_042,
	13_090_392_008_832_678,
	13_912_800_587_211_472,
	14_786_877_279_832_732,
	15_715_868_154_526_436,
	16_703_223_214_499_558,
	17_752_609_210_649_358,
	18_867_923_258_814_856,
	20_053_307_312_537_008,
	21_313_163_545_075_252,
	22_652_170_697_804_756,
	24_075_301_455_707_600,
	25_587_840_914_485_432,
	27_195_406_207_875_088,
	28_903_967_368_057_400,
	30_719_869_496_628_636,
	32_649_856_328_471_220,
	34_701_095_276_033_064,
	36_881_204_047_022_752,
	39_198_278_934_370_992,
	41_660_924_883_519_016,
	44_278_287_448_695_240,
	47_060_086_756_856_400,
	50_016_653_605_425_536,
	53_158_967_827_883_320,
	56_498_699_069_691_424,
	60_048_250_125_977_912,
	63_820_803_001_928_304,
	67_830_367_866_937_216,
	72_091_835_084_322_176,
	76_621_030_509_822_880,
	81_434_774_264_248_528,
	86_550_943_198_537_824,
	91_988_537_283_208_848,
	97_767_750_168_749_840,
	103_910_044_178_992_000,
	110_438_230_015_967_792,
	117_376_551_472_255_616,
	124_750_775_465_407_920,
	132_588_287_728_824_640,
	140_918_194_514_440_064,
	149_771_430_684_917_568,
	159_180_874_596_775_264,
	169_181_470_201_085_280,
	179_810_356_815_193_344,
	191_107_007_047_393_216,
	203_113_373_386_768_288,
	215_874_044_002_592_672,
	229_436_408_331_885_600,
	243_850_833_070_063_392,
	259_170_849_218_267_264,
	275_453_350_882_006_752,
	292_758_806_559_399_232,
	311_151_483_703_668_992,
	330_699_687_393_865_920,
	351_476_014_000_157_824,
	373_557_620_785_735_808,
	397_026_512_446_556_096,
	421_969_845_653_044_224,
	448_480_252_724_740_928,
	476_656_185_639_923_904,
	506_602_281_657_757_760,
	538_429_751_910_786_752,
	572_256_794_410_890_176,
	608_209_033_002_485_632,
	646_419_983_893_124_352,
	687_031_551_494_039_552,
	730_194_555_412_054_016,
	776_069_290_549_944_960,
	824_826_122_395_314_176,
	876_646_119_708_695_936,
	931_721_726_960_522_368,
	990_257_479_014_182_144,
	1_052_470_760_709_299_712,
	1_118_592_614_166_106_112,
	1_188_868_596_808_997_376,
	1_263_559_693_295_730_432,
	1_342_943_284_738_898_688,
	1_427_314_178_819_094_784,
	1_516_985_704_615_302_400,
	1_612_290_876_218_400_768,
	1_713_583_629_449_105_408,
	1_821_240_136_273_157_632,
	1_935_660_201_795_120_128,
	2_057_268_749_018_809_600,
	2_186_517_396_888_336_384,
	2_323_886_137_470_138_880,
	2_469_885_118_504_583_168,
	2_625_056_537_947_004_416,
	2_789_976_657_533_970_944,
	2_965_257_942_852_572_160,
	3_151_551_337_860_326_400,
	3_349_548_682_302_620_672,
	3_559_985_281_005_267_968,
	3_783_642_634_583_792_128,
	4_021_351_341_710_503_936,
	4_273_994_183_717_548_544,
	4_542_509_402_991_247_872,
	4_827_894_187_332_742_144,
	5_131_208_373_224_844_288,
	5_453_578_381_757_959_168,
	5_796_201_401_831_965_696,
	6_160_349_836_169_256_960,
	6_547_376_026_650_146_816,
	6_958_717_276_519_173_120,
	7_395_901_188_113_309_696,
	7_860_551_335_934_872_576,
	8_354_393_296_137_270_272,
	8_879_261_054_815_360_000,
	9_437_103_818_898_946_048,
	10_029_993_254_943_105_024,
	10_660_131_182_698_121_216,
	11_329_857_752_030_707_712,
	12_041_660_133_563_240_448,
	12_798_181_755_305_525_248,
	13_602_232_119_581_272_064,
	14_456_797_236_706_498_560,
	15_365_050_714_167_523_328,
	16_330_365_542_480_556_032,
	17_356_326_621_502_140_416,
	// u64::MAX — the final catch-all bag.
	18_446_744_073_709_551_615,
];
+81
View File
@@ -0,0 +1,81 @@
[package]
name = "node-testing"
version = "3.0.0-dev"
authors.workspace = true
description = "Test utilities for Substrate node."
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
homepage.workspace = true
repository.workspace = true
publish = false
[lints]
workspace = true
[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { workspace = true, default-features = true }
frame-metadata-hash-extension = { workspace = true, default-features = true }
frame-system = { workspace = true, default-features = true }
fs_extra = { workspace = true }
futures = { workspace = true }
kitchensink-runtime = { workspace = true }
log = { workspace = true, default-features = true }
node-cli = { workspace = true }
node-primitives = { workspace = true, default-features = true }
pallet-asset-conversion = { workspace = true, default-features = true }
pallet-asset-conversion-tx-payment = { workspace = true, default-features = true }
pallet-revive = { workspace = true, default-features = true }
pallet-skip-feeless-payment = { workspace = true, default-features = true }
sc-block-builder = { workspace = true, default-features = true }
sc-client-api = { workspace = true, default-features = true }
sc-client-db = { features = [
"rocksdb",
], workspace = true, default-features = true }
sc-consensus = { workspace = true, default-features = true }
sc-executor = { workspace = true, default-features = true }
sc-service = { features = [
"rocksdb",
], workspace = true, default-features = true }
sp-api = { workspace = true, default-features = true }
sp-block-builder = { workspace = true, default-features = true }
sp-blockchain = { workspace = true, default-features = true }
sp-consensus = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-crypto-hashing = { workspace = true, default-features = true }
sp-inherents = { workspace = true, default-features = true }
sp-keyring = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-timestamp = { workspace = true }
substrate-test-client = { workspace = true }
tempfile = { workspace = true }
[features]
runtime-benchmarks = [
"frame-metadata-hash-extension/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"kitchensink-runtime/runtime-benchmarks",
"node-cli/runtime-benchmarks",
"node-primitives/runtime-benchmarks",
"pallet-asset-conversion-tx-payment/runtime-benchmarks",
"pallet-asset-conversion/runtime-benchmarks",
"pallet-revive/runtime-benchmarks",
"pallet-skip-feeless-payment/runtime-benchmarks",
"sc-block-builder/runtime-benchmarks",
"sc-client-api/runtime-benchmarks",
"sc-client-db/runtime-benchmarks",
"sc-consensus/runtime-benchmarks",
"sc-executor/runtime-benchmarks",
"sc-service/runtime-benchmarks",
"sp-api/runtime-benchmarks",
"sp-block-builder/runtime-benchmarks",
"sp-blockchain/runtime-benchmarks",
"sp-consensus/runtime-benchmarks",
"sp-inherents/runtime-benchmarks",
"sp-keyring/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
"sp-timestamp/runtime-benchmarks",
"substrate-test-client/runtime-benchmarks",
]
+680
View File
@@ -0,0 +1,680 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Benchmarking module.
//!
//! Utilities to do full-scale benchmarks involving database. With `BenchDb` you
//! can pregenerate seed database and `clone` it for every iteration of your benchmarks
//! or tests to get consistent, smooth benchmark experience!
use std::{
collections::BTreeMap,
path::{Path, PathBuf},
sync::Arc,
};
use crate::{
client::{Backend, Client},
keyring::*,
};
use codec::{Decode, Encode};
use futures::executor;
use kitchensink_runtime::{
constants::currency::DOLLARS, AccountId, BalancesCall, CheckedExtrinsic, MinimumPeriod,
RuntimeCall, Signature, SystemCall, UncheckedExtrinsic,
};
use node_primitives::Block;
use sc_block_builder::BlockBuilderBuilder;
use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider};
use sc_client_db::PruningMode;
use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux};
use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy};
use sp_api::ProvideRuntimeApi;
use sp_block_builder::BlockBuilder;
use sp_consensus::BlockOrigin;
use sp_core::{
crypto::get_public_from_string_or_panic, ed25519, sr25519, traits::SpawnNamed, Pair,
};
use sp_crypto_hashing::blake2_256;
use sp_inherents::InherentData;
use sp_runtime::{
generic::{self, ExtrinsicFormat, Preamble},
traits::{Block as BlockT, IdentifyAccount, Verify},
OpaqueExtrinsic,
};
/// Keyring full of accounts for benching.
///
/// Accounts are derived from the seeds
/// `//endowed-user/0`, `//endowed-user/1`, ..., `//endowed-user/N`
/// (see [`BenchKeyring::new`]).
#[derive(Clone)]
pub struct BenchKeyring {
	// Account id -> signing pair. A `BTreeMap` keeps iteration deterministic,
	// which `at(index)` relies on.
	accounts: BTreeMap<AccountId, BenchPair>,
}
/// A signing key pair in one of the two supported schemes.
#[derive(Clone)]
enum BenchPair {
	/// sr25519 (Schnorrkel) pair.
	Sr25519(sr25519::Pair),
	/// ed25519 pair.
	Ed25519(ed25519::Pair),
}

impl BenchPair {
	/// Sign `payload` with the wrapped pair, converting the scheme-specific
	/// signature into the runtime's `Signature` type.
	fn sign(&self, payload: &[u8]) -> Signature {
		match self {
			Self::Sr25519(pair) => pair.sign(payload).into(),
			Self::Ed25519(pair) => pair.sign(payload).into(),
		}
	}
}
/// Drop system cache.
///
/// Best-effort: flushes dirty pages with `sync` and then attempts to drop the OS
/// caches so each benchmark iteration starts cold. On Windows this is
/// unsupported and only logs a warning.
///
/// Will panic if one of the helper processes cannot be spawned (and on macOS
/// additionally if `purge` fails, which typically means missing privileges).
pub fn drop_system_cache() {
	#[cfg(target_os = "windows")]
	{
		log::warn!(
			target: "bench-logistics",
			"Clearing system cache on windows is not supported. Benchmark might totally be wrong.",
		);
		return;
	}

	std::process::Command::new("sync")
		.output()
		.expect("Failed to execute system cache clear");

	#[cfg(target_os = "linux")]
	{
		log::trace!(target: "bench-logistics", "Clearing system cache...");
		// NOTE: previously this spawned `echo` with `">"` and the file name as plain
		// arguments; without a shell no redirection happens, so nothing was ever
		// written to `/proc/sys/vm/drop_caches`. Go through `sh -c` so the redirect
		// is real. Errors (e.g. not running as root) are silenced on purpose to keep
		// this best-effort — only failure to spawn the shell panics.
		std::process::Command::new("sh")
			.args(&["-c", "echo 3 > /proc/sys/vm/drop_caches 2> /dev/null"])
			.output()
			.expect("Failed to execute system cache clear");

		let temp = tempfile::tempdir().expect("Failed to spawn tempdir");
		let temp_file_path = format!("of={}/buf", temp.path().to_string_lossy());

		// this should refill write cache with 2GB of garbage (32 x 64M)
		std::process::Command::new("dd")
			.args(&["if=/dev/urandom", &temp_file_path, "bs=64M", "count=32"])
			.output()
			.expect("Failed to execute dd for cache clear");

		// remove tempfile of previous command
		std::process::Command::new("rm")
			.arg(&temp_file_path)
			.output()
			.expect("Failed to remove temp file");

		std::process::Command::new("sync")
			.output()
			.expect("Failed to execute system cache clear");

		log::trace!(target: "bench-logistics", "Clearing system cache done!");
	}

	#[cfg(target_os = "macos")]
	{
		log::trace!(target: "bench-logistics", "Clearing system cache...");
		if let Err(err) = std::process::Command::new("purge").output() {
			log::error!("purge error {:?}: ", err);
			panic!("Could not clear system cache. Run under sudo?");
		}
		log::trace!(target: "bench-logistics", "Clearing system cache done!");
	}
}
/// Pre-initialized benchmarking database.
///
/// This is prepared database with genesis and keyring
/// that can be cloned and then used for any benchmarking.
pub struct BenchDb {
	// Accounts endowed at genesis; used to sign generated transactions.
	keyring: BenchKeyring,
	// Owns the temporary directory holding the database files; dropping the
	// guard removes the directory.
	directory_guard: Guard,
	// Which backend (RocksDb/ParityDb) the files in `directory_guard` belong to.
	database_type: DatabaseType,
}
impl Clone for BenchDb {
	/// Clone the benchmarking database by copying the seed database files into a
	/// fresh temporary directory, then dropping the system cache so the clone
	/// starts cold.
	fn clone(&self) -> Self {
		let keyring = self.keyring.clone();
		let database_type = self.database_type;
		// New tempdir for the copy; it lives as long as the returned `Guard`.
		let dir = tempfile::tempdir().expect("temp dir creation failed");
		let seed_dir = self.directory_guard.0.path();

		log::trace!(
			target: "bench-logistics",
			"Copying seed db from {} to {}",
			seed_dir.to_string_lossy(),
			dir.path().to_string_lossy(),
		);

		// Collect the top-level entries of the seed directory and copy them over.
		let seed_db_files = std::fs::read_dir(seed_dir)
			.expect("failed to list file in seed dir")
			.map(|f_result| f_result.expect("failed to read file in seed db").path())
			.collect::<Vec<PathBuf>>();
		fs_extra::copy_items(&seed_db_files, dir.path(), &fs_extra::dir::CopyOptions::new())
			.expect("Copy of seed database is ok");

		// We clear system cache after db clone but before any warmups.
		// This populates system cache with some data unrelated to actual
		// data we will be querying further under benchmark (like what
		// would have happened in real system that queries random entries
		// from database).
		drop_system_cache();

		BenchDb { keyring, directory_guard: Guard(dir), database_type }
	}
}
/// Type of block for generation.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockType {
	/// Bunch of random transfers.
	RandomTransfersKeepAlive,
	/// Bunch of random transfers that drain all of the source balance.
	RandomTransfersReaping,
	/// Bunch of "no-op" calls (`System::remark` with an empty payload).
	Noop,
}

impl BlockType {
	/// Create block content description with specified number of transactions.
	///
	/// `size: None` means "no limit" — see `BlockContentIterator::next`.
	pub fn to_content(self, size: Option<usize>) -> BlockContent {
		BlockContent { block_type: self, size }
	}
}
/// Content of the generated block.
#[derive(Clone, Debug)]
pub struct BlockContent {
	// Kind of transactions to fill the block with.
	block_type: BlockType,
	// Maximum number of transactions; `None` yields transactions indefinitely
	// (the block builder stops on resource exhaustion instead).
	size: Option<usize>,
}
/// Type of backend database.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum DatabaseType {
	/// RocksDb backend.
	RocksDb,
	/// Parity DB backend.
	ParityDb,
}

impl DatabaseType {
	/// Turn this backend selection into concrete database settings rooted at `path`.
	fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSource {
		match self {
			Self::ParityDb => sc_client_db::DatabaseSource::ParityDb { path },
			// Fixed cache size of 512 (units interpreted by `sc_client_db`,
			// presumably MiB — confirm there).
			Self::RocksDb => sc_client_db::DatabaseSource::RocksDb { path, cache_size: 512 },
		}
	}
}
/// Benchmarking task executor.
///
/// Uses multiple threads as the regular executable.
#[derive(Debug, Clone)]
pub struct TaskExecutor {
	// Shared futures thread pool; cloning the executor shares the pool.
	pool: executor::ThreadPool,
}

impl TaskExecutor {
	/// Create an executor backed by a default-sized thread pool.
	fn new() -> Self {
		Self { pool: executor::ThreadPool::new().expect("Failed to create task executor") }
	}
}

impl SpawnNamed for TaskExecutor {
	/// Spawn a future on the pool; the name/group labels are ignored.
	fn spawn(
		&self,
		_: &'static str,
		_: Option<&'static str>,
		future: futures::future::BoxFuture<'static, ()>,
	) {
		self.pool.spawn_ok(future);
	}

	/// Spawn a "blocking" future. NOTE(review): this uses the same pool as
	/// `spawn` — there is no dedicated blocking pool, which is acceptable for
	/// benchmarks but would starve async tasks in a real node.
	fn spawn_blocking(
		&self,
		_: &'static str,
		_: Option<&'static str>,
		future: futures::future::BoxFuture<'static, ()>,
	) {
		self.pool.spawn_ok(future);
	}
}
/// Iterator for block content.
pub struct BlockContentIterator<'a> {
	// Number of transactions produced so far; doubles as the sender index.
	iteration: usize,
	// What to generate and (optionally) how many.
	content: BlockContent,
	// Cached runtime version (spec/transaction version go into the signed payload).
	runtime_version: sc_executor::RuntimeVersion,
	// Genesis hash, used both as era anchor and genesis in the signed payload.
	genesis_hash: node_primitives::Hash,
	// Keyring providing the sender accounts and signing keys.
	keyring: &'a BenchKeyring,
}

impl<'a> BlockContentIterator<'a> {
	/// Create an iterator over `content`, reading the runtime version and genesis
	/// hash from `client`.
	fn new(content: BlockContent, keyring: &'a BenchKeyring, client: &Client) -> Self {
		let genesis_hash = client.chain_info().genesis_hash;
		let runtime_version = client
			.runtime_version_at(genesis_hash)
			.expect("There should be runtime version at 0");

		BlockContentIterator { iteration: 0, content, keyring, runtime_version, genesis_hash }
	}
}
impl<'a> Iterator for BlockContentIterator<'a> {
	type Item = OpaqueExtrinsic;

	/// Produce the next signed transaction, or `None` once `content.size`
	/// transactions have been generated. With `size: None` the iterator never
	/// ends on its own.
	fn next(&mut self) -> Option<Self::Item> {
		if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) {
			return None;
		}

		// Sender: the `iteration`-th keyring account. Receiver: a fresh
		// deterministic account derived from the iteration number.
		let sender = self.keyring.at(self.iteration);
		let receiver = get_public_from_string_or_panic::<sr25519::Public>(&format!(
			"random-user//{}",
			self.iteration
		))
		.into();

		let signed = self.keyring.sign(
			CheckedExtrinsic {
				format: ExtrinsicFormat::Signed(
					sender,
					// Nonce 0 is correct since every sender signs exactly one tx.
					tx_ext(0, kitchensink_runtime::ExistentialDeposit::get() + 1),
				),
				function: match self.content.block_type {
					BlockType::RandomTransfersKeepAlive =>
						RuntimeCall::Balances(BalancesCall::transfer_keep_alive {
							dest: sp_runtime::MultiAddress::Id(receiver),
							value: kitchensink_runtime::ExistentialDeposit::get() + 1,
						}),
					BlockType::RandomTransfersReaping => {
						RuntimeCall::Balances(BalancesCall::transfer_allow_death {
							dest: sp_runtime::MultiAddress::Id(receiver),
							// Transfer so that ending balance would be 1 less than existential
							// deposit so that we kill the sender account.
							value: 100 * DOLLARS -
								(kitchensink_runtime::ExistentialDeposit::get() - 1),
						})
					},
					BlockType::Noop =>
						RuntimeCall::System(SystemCall::remark { remark: Vec::new() }),
				},
			},
			self.runtime_version.spec_version,
			self.runtime_version.transaction_version,
			self.genesis_hash.into(),
		);

		// Round-trip through SCALE to obtain the opaque form the block builder expects.
		let encoded = Encode::encode(&signed);
		let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]).expect("Failed to decode opaque");
		self.iteration += 1;
		Some(opaque)
	}
}
impl BenchDb {
	/// New immutable benchmarking database.
	///
	/// See [`BenchDb::new`] method documentation for more information about the purpose
	/// of this structure.
	pub fn with_key_types(
		database_type: DatabaseType,
		keyring_length: usize,
		key_types: KeyTypes,
	) -> Self {
		let keyring = BenchKeyring::new(keyring_length, key_types);

		let dir = tempfile::tempdir().expect("temp dir creation failed");
		log::trace!(
			target: "bench-logistics",
			"Created seed db at {}",
			dir.path().to_string_lossy(),
		);
		// Instantiating the client once materializes the genesis database inside
		// `dir`; the handles themselves are not needed afterwards.
		let (_client, _backend, _task_executor) =
			Self::bench_client(database_type, dir.path(), &keyring);
		let directory_guard = Guard(dir);

		BenchDb { keyring, directory_guard, database_type }
	}

	/// New immutable benchmarking database.
	///
	/// This will generate database files in random temporary directory
	/// and keep it there until struct is dropped.
	///
	/// You can `clone` this database or you can `create_context` from it
	/// (which also does `clone`) to run actual operation against new database
	/// which will be identical to the original.
	pub fn new(database_type: DatabaseType, keyring_length: usize) -> Self {
		Self::with_key_types(database_type, keyring_length, KeyTypes::Sr25519)
	}

	// This should return client that is doing everything that full node
	// is doing.
	//
	// - This client should use best wasm execution method.
	// - This client should work with real database only.
	fn bench_client(
		database_type: DatabaseType,
		dir: &std::path::Path,
		keyring: &BenchKeyring,
	) -> (Client, std::sync::Arc<Backend>, TaskExecutor) {
		let db_config = sc_client_db::DatabaseSettings {
			trie_cache_maximum_size: Some(16 * 1024 * 1024),
			// Archive everything so benchmark results are not skewed by pruning.
			state_pruning: Some(PruningMode::ArchiveAll),
			source: database_type.into_settings(dir.into()),
			blocks_pruning: sc_client_db::BlocksPruning::KeepAll,
			metrics_registry: None,
		};
		let task_executor = TaskExecutor::new();

		let backend = sc_service::new_db_backend(db_config).expect("Should not fail");
		let executor = sc_executor::WasmExecutor::builder()
			.with_execution_method(WasmExecutionMethod::Compiled {
				instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
			})
			.build();

		let client_config = sc_service::ClientConfig::default();
		// The keyring acts as the genesis storage builder (endowed accounts + code).
		let genesis_block_builder = sc_service::GenesisBlockBuilder::new(
			keyring.as_storage_builder(),
			!client_config.no_genesis,
			backend.clone(),
			executor.clone(),
		)
		.expect("Failed to create genesis block builder");

		let client = sc_service::new_client(
			backend.clone(),
			executor.clone(),
			genesis_block_builder,
			None,
			None,
			ExecutionExtensions::new(None, Arc::new(executor)),
			Box::new(task_executor.clone()),
			None,
			None,
			client_config,
		)
		.expect("Should not fail");

		(client, backend, task_executor)
	}

	/// Generate list of required inherents.
	///
	/// Uses already instantiated Client.
	pub fn generate_inherents(&mut self, client: &Client) -> Vec<OpaqueExtrinsic> {
		let mut inherent_data = InherentData::new();
		// One minimum period past genesis. (Previously spelled
		// `1 * MinimumPeriod::get()` — the `1 *` was a no-op.)
		let timestamp = MinimumPeriod::get();

		inherent_data
			.put_data(sp_timestamp::INHERENT_IDENTIFIER, &timestamp)
			.expect("Put timestamp failed");

		client
			.runtime_api()
			.inherent_extrinsics(client.chain_info().genesis_hash, inherent_data)
			.expect("Get inherents failed")
	}

	/// Iterate over some block content with transaction signed using this database keyring.
	pub fn block_content(
		&self,
		content: BlockContent,
		client: &Client,
	) -> BlockContentIterator<'_> {
		BlockContentIterator::new(content, &self.keyring, client)
	}

	/// Get client for this database operations.
	pub fn client(&mut self) -> Client {
		let (client, _backend, _task_executor) =
			Self::bench_client(self.database_type, self.directory_guard.path(), &self.keyring);

		client
	}

	/// Generate new block using this database.
	pub fn generate_block(&mut self, content: BlockContent) -> Block {
		let client = self.client();

		let chain = client.usage_info().chain;
		let mut block = BlockBuilderBuilder::new(&client)
			.on_parent_block(chain.best_hash)
			.with_parent_block_number(chain.best_number)
			.build()
			.expect("Failed to create block builder.");

		// Inherents always go in first.
		for extrinsic in self.generate_inherents(&client) {
			block.push(extrinsic).expect("Push inherent failed");
		}

		let start = std::time::Instant::now();
		for opaque in self.block_content(content, &client) {
			match block.push(opaque) {
				// A "block full" validity error just means we are done filling.
				Err(sp_blockchain::Error::ApplyExtrinsicFailed(
					sp_blockchain::ApplyExtrinsicFailed::Validity(e),
				)) if e.exhausted_resources() => break,
				Err(err) => panic!("Error pushing transaction: {:?}", err),
				Ok(_) => {},
			}
		}

		let block = block.build().expect("Block build failed").block;

		log::info!(
			target: "bench-logistics",
			"Block construction: {:#?} ({} tx)",
			start.elapsed(), block.extrinsics.len()
		);

		block
	}

	/// Database path.
	pub fn path(&self) -> &Path {
		self.directory_guard.path()
	}

	/// Clone this database and create context for testing/benchmarking.
	pub fn create_context(&self) -> BenchContext {
		// `clone()` copies the seed database into a fresh tempdir.
		let BenchDb { directory_guard, keyring, database_type } = self.clone();
		let (client, backend, task_executor) =
			Self::bench_client(database_type, directory_guard.path(), &keyring);

		BenchContext {
			client: Arc::new(client),
			db_guard: directory_guard,
			backend,
			spawn_handle: Box::new(task_executor),
		}
	}
}
/// Key types to be used in benching keyring.
///
/// Selects the signature scheme for all generated accounts.
pub enum KeyTypes {
	/// sr25519 signing keys
	Sr25519,
	/// ed25519 signing keys
	Ed25519,
}
impl BenchKeyring {
	/// New keyring.
	///
	/// `length` is the number of accounts generated.
	pub fn new(length: usize, key_types: KeyTypes) -> Self {
		let mut accounts = BTreeMap::new();
		for n in 0..length {
			let seed = format!("//endowed-user/{}", n);
			let (account_id, pair) = match key_types {
				KeyTypes::Sr25519 => {
					// Derived via SURI parsing of the seed string.
					let pair =
						sr25519::Pair::from_string(&seed, None).expect("failed to generate pair");
					let account_id = AccountPublic::from(pair.public()).into_account();
					(account_id, BenchPair::Sr25519(pair))
				},
				KeyTypes::Ed25519 => {
					// Derived from the blake2 hash of the seed bytes, unlike the
					// sr25519 branch above which parses the seed as a SURI.
					let pair = ed25519::Pair::from_seed(&blake2_256(seed.as_bytes()));
					let account_id = AccountPublic::from(pair.public()).into_account();
					(account_id, BenchPair::Ed25519(pair))
				},
			};
			accounts.insert(account_id, pair);
		}
		Self { accounts }
	}

	/// Generated account id-s from keyring keypairs.
	pub fn collect_account_ids(&self) -> Vec<AccountId> {
		self.accounts.keys().cloned().collect()
	}

	/// Get account id at position `index`.
	///
	/// Panics if `index` is out of range. Order is the `BTreeMap` key order,
	/// which is deterministic across runs.
	pub fn at(&self, index: usize) -> AccountId {
		self.accounts.keys().nth(index).expect("Failed to get account").clone()
	}

	/// Sign transaction with keypair from this keyring.
	///
	/// Panics if the signer of a `Signed` extrinsic is not in this keyring.
	pub fn sign(
		&self,
		xt: CheckedExtrinsic,
		spec_version: u32,
		tx_version: u32,
		genesis_hash: [u8; 32],
	) -> UncheckedExtrinsic {
		match xt.format {
			ExtrinsicFormat::Signed(signed, tx_ext) => {
				// Payload layout (call, extensions, then the implicit data:
				// versions, genesis hash twice — as genesis and as era anchor —
				// and no metadata hash). NOTE(review): must stay in sync with the
				// runtime's signed-payload construction; confirm against
				// kitchensink's transaction extension pipeline.
				let payload = (
					xt.function,
					tx_ext.clone(),
					spec_version,
					tx_version,
					genesis_hash,
					genesis_hash,
					// metadata_hash
					None::<()>,
				);
				let key = self.accounts.get(&signed).expect("Account id not found in keyring");
				let signature = payload.using_encoded(|b| {
					// Payloads longer than 256 bytes are signed over their blake2
					// hash instead of the raw bytes.
					if b.len() > 256 {
						key.sign(&blake2_256(b))
					} else {
						key.sign(b)
					}
				});
				generic::UncheckedExtrinsic::new_signed(
					payload.0,
					sp_runtime::MultiAddress::Id(signed),
					signature,
					tx_ext,
				)
				.into()
			},
			ExtrinsicFormat::Bare => generic::UncheckedExtrinsic::new_bare(xt.function).into(),
			ExtrinsicFormat::General(ext_version, tx_ext) =>
				generic::UncheckedExtrinsic::from_parts(
					xt.function,
					Preamble::General(ext_version, tx_ext),
				)
				.into(),
		}
	}

	/// Generate genesis with accounts from this keyring endowed with some balance and
	/// kitchensink_runtime code blob.
	pub fn as_storage_builder(&self) -> &dyn sp_runtime::BuildStorage {
		self
	}
}
impl sp_runtime::BuildStorage for BenchKeyring {
	/// Assemble genesis storage: the runtime code blob under the well-known
	/// `:code` key, plus the standard endowed genesis config extended with every
	/// keyring account.
	fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> {
		let code_key = sp_core::storage::well_known_keys::CODE.to_vec();
		let code_blob = kitchensink_runtime::wasm_binary_unwrap().into();
		storage.top.insert(code_key, code_blob);

		let extra_endowed = self.collect_account_ids();
		crate::genesis::config_endowed(extra_endowed).assimilate_storage(storage)
	}
}
// RAII guard for the database directory: holds the tempdir alive; the directory
// is removed when the guard is dropped (`tempfile::TempDir` semantics).
struct Guard(tempfile::TempDir);

impl Guard {
	// Path of the guarded temporary directory.
	fn path(&self) -> &Path {
		self.0.path()
	}
}
/// Benchmarking/test context holding instantiated client and backend references.
pub struct BenchContext {
	/// Node client.
	pub client: Arc<Client>,
	/// Node backend.
	pub backend: Arc<Backend>,
	/// Spawn handle.
	pub spawn_handle: Box<dyn SpawnNamed>,
	// Keeps the cloned database directory alive for the lifetime of the context.
	db_guard: Guard,
}
// Public key type from which `AccountId`s are derived (`Verify::Signer` of the
// runtime `Signature`).
type AccountPublic = <Signature as Verify>::Signer;
impl BenchContext {
	/// Import some block.
	///
	/// Expects the context's chain to still be at genesis (block 0) and asserts
	/// the import succeeds, becomes the new best block, and advances the chain
	/// to block 1.
	pub fn import_block(&mut self, block: Block) {
		let mut import_params =
			BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone());
		import_params.body = Some(block.extrinsics().to_vec());
		import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain);

		// Import is only valid against a fresh (genesis-only) chain.
		assert_eq!(self.client.chain_info().best_number, 0);

		assert_eq!(
			futures::executor::block_on(self.client.import_block(import_params))
				.expect("Failed to import block"),
			ImportResult::Imported(ImportedAux {
				header_only: false,
				clear_justification_requests: false,
				needs_justification: false,
				bad_justification: false,
				is_new_best: true,
			})
		);

		assert_eq!(self.client.chain_info().best_number, 1);
	}

	/// Database path for the current context.
	pub fn path(&self) -> &Path {
		self.db_guard.path()
	}
}
+87
View File
@@ -0,0 +1,87 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Utilities to build a `TestClient` for `kitchensink-runtime`.
use sp_runtime::BuildStorage;
/// Re-export test-client utilities.
pub use substrate_test_client::*;
/// Call executor for `kitchensink-runtime` `TestClient`.
use node_cli::service::RuntimeExecutor;
/// Default backend type.
pub type Backend = sc_client_db::Backend<node_primitives::Block>;

/// Test client type.
///
/// A full client over the default database backend, executing the kitchensink
/// runtime through the node's `RuntimeExecutor`.
pub type Client = client::Client<
	Backend,
	client::LocalCallExecutor<node_primitives::Block, Backend, RuntimeExecutor>,
	node_primitives::Block,
	kitchensink_runtime::RuntimeApi,
>;
/// Genesis configuration parameters for `TestClient`.
///
/// Currently carries no configuration; genesis is fully determined by
/// `crate::genesis::config()`.
#[derive(Default)]
pub struct GenesisParameters;

impl substrate_test_client::GenesisInit for GenesisParameters {
	/// Build the genesis storage for tests: the standard test genesis config plus
	/// the kitchensink runtime code blob under the well-known `:code` key.
	fn genesis_storage(&self) -> Storage {
		let mut storage = crate::genesis::config().build_storage().unwrap();
		storage.top.insert(
			sp_core::storage::well_known_keys::CODE.to_vec(),
			kitchensink_runtime::wasm_binary_unwrap().into(),
		);
		storage
	}
}
/// A `test-runtime` extension to `TestClientBuilder`.
pub trait TestClientBuilderExt: Sized {
	/// Create test client builder.
	fn new() -> Self;

	/// Build the test client.
	fn build(self) -> Client;
}
impl TestClientBuilderExt
	for substrate_test_client::TestClientBuilder<
		node_primitives::Block,
		client::LocalCallExecutor<node_primitives::Block, Backend, RuntimeExecutor>,
		Backend,
		GenesisParameters,
	>
{
	fn new() -> Self {
		Self::default()
	}

	fn build(self) -> Client {
		// Wasm executor for the kitchensink runtime.
		let executor = RuntimeExecutor::builder().build();
		use sc_service::client::LocalCallExecutor;
		use std::sync::Arc;

		// Wrap it in a local call executor over this builder's backend.
		// NOTE(review): `ExecutionExtensions` appears to come from the
		// `substrate_test_client` glob re-export above — confirm.
		let executor = LocalCallExecutor::new(
			self.backend().clone(),
			executor.clone(),
			Default::default(),
			ExecutionExtensions::new(None, Arc::new(executor)),
		)
		.expect("Creates LocalCallExecutor");

		// Discard the longest-chain selector returned alongside the client.
		self.build_with_executor(executor).0
	}
}
+75
View File
@@ -0,0 +1,75 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Genesis Configuration.
use crate::keyring::*;
use kitchensink_runtime::{
constants::currency::*, AccountId, AssetsConfig, BalancesConfig, IndicesConfig,
RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig,
};
use sp_keyring::Ed25519Keyring;
use sp_runtime::Perbill;
/// Create genesis runtime configuration for tests.
///
/// Equivalent to [`config_endowed`] with no extra endowed accounts.
pub fn config() -> RuntimeGenesisConfig {
	config_endowed(Vec::new())
}
/// Create genesis runtime configuration for tests with some extra
/// endowed accounts.
///
/// Every account in `extra_endowed` receives 100 DOLLARS on top of the fixed
/// allocations to the six well-known dev accounts below.
pub fn config_endowed(extra_endowed: Vec<AccountId>) -> RuntimeGenesisConfig {
	let mut endowed = vec![
		(alice(), 111 * DOLLARS),
		(bob(), 100 * DOLLARS),
		(charlie(), 100_000_000 * DOLLARS),
		(dave(), 112 * DOLLARS),
		(eve(), 101 * DOLLARS),
		(ferdie(), 101 * DOLLARS),
	];

	endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS)));

	RuntimeGenesisConfig {
		indices: IndicesConfig { indices: vec![] },
		balances: BalancesConfig { balances: endowed, ..Default::default() },
		// Session keys: (stash, controller-style account, session keys from a
		// well-known ed25519 dev seed).
		session: SessionConfig {
			keys: vec![
				(alice(), dave(), session_keys_from_seed(Ed25519Keyring::Alice.into())),
				(bob(), eve(), session_keys_from_seed(Ed25519Keyring::Bob.into())),
				(charlie(), ferdie(), session_keys_from_seed(Ed25519Keyring::Charlie.into())),
			],
			..Default::default()
		},
		// Three validators staking from dave/eve/ferdie; alice/bob/charlie are
		// invulnerable (never slashed).
		staking: StakingConfig {
			stakers: vec![
				(dave(), dave(), 111 * DOLLARS, StakerStatus::Validator),
				(eve(), eve(), 100 * DOLLARS, StakerStatus::Validator),
				(ferdie(), ferdie(), 100 * DOLLARS, StakerStatus::Validator),
			],
			validator_count: 3,
			// Allow the validator set to shrink arbitrarily in tests.
			minimum_validator_count: 0,
			slash_reward_fraction: Perbill::from_percent(10),
			invulnerables: vec![alice(), bob(), charlie()],
			..Default::default()
		},
		society: SocietyConfig { pot: 0 },
		// One pre-created sufficient asset (id 9) owned by alice, min balance 1.
		assets: AssetsConfig { assets: vec![(9, alice(), true, 1)], ..Default::default() },
		..Default::default()
	}
}
+140
View File
@@ -0,0 +1,140 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Test accounts.
use codec::Encode;
use kitchensink_runtime::{CheckedExtrinsic, SessionKeys, TxExtension, UncheckedExtrinsic};
use node_primitives::{AccountId, Balance, Nonce};
use sp_core::{crypto::get_public_from_string_or_panic, ecdsa, ed25519, sr25519};
use sp_crypto_hashing::blake2_256;
use sp_keyring::Sr25519Keyring;
use sp_runtime::generic::{self, Era, ExtrinsicFormat};
/// Alice's account id.
pub fn alice() -> AccountId {
Sr25519Keyring::Alice.into()
}
/// Bob's account id.
pub fn bob() -> AccountId {
Sr25519Keyring::Bob.into()
}
/// Charlie's account id.
pub fn charlie() -> AccountId {
Sr25519Keyring::Charlie.into()
}
/// Dave's account id.
pub fn dave() -> AccountId {
Sr25519Keyring::Dave.into()
}
/// Eve's account id.
pub fn eve() -> AccountId {
Sr25519Keyring::Eve.into()
}
/// Ferdie's account id.
pub fn ferdie() -> AccountId {
Sr25519Keyring::Ferdie.into()
}
/// Derive a full `SessionKeys` set from a seed string.
///
/// Grandpa uses an ed25519 key, BEEFY an ecdsa key, and all remaining
/// session keys are derived as sr25519.
///
/// # Panics
///
/// Function will panic when invalid string is provided.
pub fn session_keys_from_seed(seed: &str) -> SessionKeys {
	let grandpa = get_public_from_string_or_panic::<ed25519::Public>(seed).into();
	let babe = get_public_from_string_or_panic::<sr25519::Public>(seed).into();
	let im_online = get_public_from_string_or_panic::<sr25519::Public>(seed).into();
	let authority_discovery = get_public_from_string_or_panic::<sr25519::Public>(seed).into();
	let mixnet = get_public_from_string_or_panic::<sr25519::Public>(seed).into();
	let beefy = get_public_from_string_or_panic::<ecdsa::Public>(seed).into();

	SessionKeys { grandpa, babe, im_online, authority_discovery, mixnet, beefy }
}
/// Returns the transaction extension tuple for a mortal transaction.
///
/// `nonce` seeds `CheckNonce`; `extra_fee` is the tip handed to the
/// asset-conversion fee-payment extension.
pub fn tx_ext(nonce: Nonce, extra_fee: Balance) -> TxExtension {
	// Fee payment goes through the asset-conversion path, wrapped so that
	// feeless calls can skip the charge entirely.
	let fee_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from(
		pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None),
	);

	// NOTE: the order of this tuple is part of the runtime's `TxExtension`
	// definition and must not change.
	(
		frame_system::AuthorizeCall::new(),
		frame_system::CheckNonZeroSender::new(),
		frame_system::CheckSpecVersion::new(),
		frame_system::CheckTxVersion::new(),
		frame_system::CheckGenesis::new(),
		frame_system::CheckEra::from(Era::mortal(256, 0)),
		frame_system::CheckNonce::from(nonce),
		frame_system::CheckWeight::new(),
		fee_payment,
		frame_metadata_hash_extension::CheckMetadataHash::new(false),
		pallet_revive::evm::tx_extension::SetOrigin::default(),
		frame_system::WeightReclaim::new(),
	)
}
/// Sign given `CheckedExtrinsic`, producing an `UncheckedExtrinsic` ready
/// for submission.
///
/// * `Signed` extrinsics are signed with the well-known keyring pair that
///   matches the signer account.
/// * `Bare` extrinsics are passed through unsigned.
/// * `General` extrinsics keep their extension version and payload verbatim.
///
/// # Panics
///
/// Panics if a `Signed` extrinsic's signer is not one of the well-known
/// `Sr25519Keyring` test accounts.
pub fn sign(
	xt: CheckedExtrinsic,
	spec_version: u32,
	tx_version: u32,
	genesis_hash: [u8; 32],
	metadata_hash: Option<[u8; 32]>,
) -> UncheckedExtrinsic {
	match xt.format {
		ExtrinsicFormat::Signed(signed, tx_ext) => {
			// Signature payload: `genesis_hash` deliberately appears twice —
			// it is the implicit for both the genesis check and the era
			// (mortality) check.
			let payload = (
				xt.function,
				tx_ext.clone(),
				spec_version,
				tx_version,
				genesis_hash,
				genesis_hash,
				metadata_hash,
			);
			// was `.unwrap()` — give the inevitable test-setup mistake a
			// useful panic message instead of a bare `Option::unwrap` failure.
			let key = Sr25519Keyring::from_account_id(&signed)
				.expect("signer must be a well-known `Sr25519Keyring` test account");
			// Payloads longer than 256 bytes are hashed before signing,
			// mirroring the runtime's signed-extension verification rule.
			let signature = payload
				.using_encoded(|b| {
					if b.len() > 256 {
						key.sign(&blake2_256(b))
					} else {
						key.sign(b)
					}
				})
				.into();
			generic::UncheckedExtrinsic::new_signed(
				payload.0,
				sp_runtime::MultiAddress::Id(signed),
				signature,
				tx_ext,
			)
			.into()
		},
		ExtrinsicFormat::Bare => generic::UncheckedExtrinsic::new_bare(xt.function).into(),
		ExtrinsicFormat::General(ext_version, tx_ext) => generic::UncheckedExtrinsic::from_parts(
			xt.function,
			generic::Preamble::General(ext_version, tx_ext),
		)
		.into(),
	}
}
+26
View File
@@ -0,0 +1,26 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! A set of testing utilities for Substrate Node.
#![warn(missing_docs)]
/// Benchmarking utilities.
pub mod bench;
/// Test client utilities.
pub mod client;
/// Genesis runtime configuration for tests.
pub mod genesis;
/// Well-known test accounts and transaction signing helpers.
pub mod keyring;