feat: initialize Kurdistan SDK - independent fork of Polkadot SDK
This commit is contained in:
@@ -0,0 +1,64 @@
|
||||
[package]
|
||||
name = "node-bench"
|
||||
version = "0.9.0-dev"
|
||||
authors.workspace = true
|
||||
description = "Substrate node integration benchmarks."
|
||||
edition.workspace = true
|
||||
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
publish = false
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
array-bytes = { workspace = true, default-features = true }
|
||||
async-trait = { workspace = true }
|
||||
clap = { features = ["derive"], workspace = true }
|
||||
derive_more = { features = ["display"], workspace = true }
|
||||
fs_extra = { workspace = true }
|
||||
futures = { features = ["thread-pool"], workspace = true }
|
||||
hash-db = { workspace = true, default-features = true }
|
||||
kitchensink-runtime = { workspace = true }
|
||||
kvdb = { workspace = true }
|
||||
kvdb-rocksdb = { workspace = true }
|
||||
log = { workspace = true, default-features = true }
|
||||
node-primitives = { workspace = true, default-features = true }
|
||||
node-testing = { workspace = true }
|
||||
parity-db = { workspace = true }
|
||||
rand = { features = ["small_rng"], workspace = true, default-features = true }
|
||||
sc-basic-authorship = { workspace = true, default-features = true }
|
||||
sc-client-api = { workspace = true, default-features = true }
|
||||
sc-transaction-pool = { workspace = true, default-features = true }
|
||||
sc-transaction-pool-api = { workspace = true, default-features = true }
|
||||
serde = { workspace = true, default-features = true }
|
||||
serde_json = { workspace = true, default-features = true }
|
||||
sp-consensus = { workspace = true, default-features = true }
|
||||
sp-core = { workspace = true, default-features = true }
|
||||
sp-inherents = { workspace = true, default-features = true }
|
||||
sp-runtime = { workspace = true, default-features = true }
|
||||
sp-state-machine = { workspace = true, default-features = true }
|
||||
sp-timestamp = { workspace = true }
|
||||
sp-tracing = { workspace = true, default-features = true }
|
||||
sp-trie = { workspace = true, default-features = true }
|
||||
tempfile = { workspace = true }
|
||||
|
||||
[features]
|
||||
runtime-benchmarks = [
|
||||
"kitchensink-runtime/runtime-benchmarks",
|
||||
"node-primitives/runtime-benchmarks",
|
||||
"node-testing/runtime-benchmarks",
|
||||
"sc-basic-authorship/runtime-benchmarks",
|
||||
"sc-client-api/runtime-benchmarks",
|
||||
"sc-transaction-pool-api/runtime-benchmarks",
|
||||
"sc-transaction-pool/runtime-benchmarks",
|
||||
"sp-consensus/runtime-benchmarks",
|
||||
"sp-inherents/runtime-benchmarks",
|
||||
"sp-runtime/runtime-benchmarks",
|
||||
"sp-state-machine/runtime-benchmarks",
|
||||
"sp-timestamp/runtime-benchmarks",
|
||||
"sp-trie/runtime-benchmarks",
|
||||
]
|
||||
@@ -0,0 +1,47 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
/// Logical size of a benchmarked block.
///
/// Each variant maps to a transaction count via [`SizeType::transactions`];
/// the `derive_more::Display` strings below are used as path segments when
/// naming/filtering benchmarks.
#[derive(Clone, Copy, Debug, derive_more::Display)]
pub enum SizeType {
	/// Block with no transactions.
	#[display(fmt = "empty")]
	Empty,
	/// Block with a small number of transactions (10).
	#[display(fmt = "small")]
	Small,
	/// Block with a medium number of transactions (100).
	#[display(fmt = "medium")]
	Medium,
	/// Block with a large number of transactions (500).
	#[display(fmt = "large")]
	Large,
	/// Block filled to its limits (no fixed transaction count).
	#[display(fmt = "full")]
	Full,
	/// Block with a user-supplied transaction count (`--transactions`).
	#[display(fmt = "custom")]
	Custom(usize),
}
|
||||
|
||||
impl SizeType {
|
||||
pub fn transactions(&self) -> Option<usize> {
|
||||
match self {
|
||||
SizeType::Empty => Some(0),
|
||||
SizeType::Small => Some(10),
|
||||
SizeType::Medium => Some(100),
|
||||
SizeType::Large => Some(500),
|
||||
SizeType::Full => None,
|
||||
// Custom SizeType will use the `--transactions` input parameter
|
||||
SizeType::Custom(val) => Some(*val),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,314 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Block construction benchmark.
|
||||
//!
|
||||
//! This benchmark is expected to measure block construction.
|
||||
//! We want to protect against cold-cache attacks, and so this
|
||||
//! benchmark should not rely on any caching (except those entries that
|
||||
//! DO NOT depend on user input). Thus transaction generation should be
|
||||
//! based on randomized data.
|
||||
|
||||
use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use node_primitives::Block;
|
||||
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
|
||||
use sc_transaction_pool_api::{
|
||||
ImportNotificationStream, PoolStatus, ReadyTransactions, TransactionFor, TransactionSource,
|
||||
TransactionStatusStreamFor, TxHash, TxInvalidityReportMap,
|
||||
};
|
||||
use sp_consensus::{Environment, Proposer};
|
||||
use sp_inherents::InherentDataProvider;
|
||||
use sp_runtime::OpaqueExtrinsic;
|
||||
|
||||
use crate::{
|
||||
common::SizeType,
|
||||
core::{self, Mode, Path},
|
||||
};
|
||||
|
||||
/// Parameters identifying one block-construction benchmark variant
/// (combined into the benchmark matrix by `main`).
pub struct ConstructionBenchmarkDescription {
	/// Signature scheme used for the generated accounts.
	pub key_types: KeyTypes,
	/// Kind of extrinsics the block is filled with.
	pub block_type: BlockType,
	/// How many transactions go into the block (see [`SizeType`]).
	pub size: SizeType,
	/// Database backend to benchmark against.
	pub database_type: DatabaseType,
}

/// A prepared construction benchmark: the generated database plus the
/// pre-built transactions served by the mock transaction pool.
pub struct ConstructionBenchmark {
	database: BenchDb,
	transactions: Transactions,
}
|
||||
|
||||
impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
	/// Filterable path, e.g. `::node::proposer::sr25519::transfer::rocksdb::medium`.
	fn path(&self) -> Path {
		let mut path = Path::new(&["node", "proposer"]);

		match self.key_types {
			KeyTypes::Sr25519 => path.push("sr25519"),
			KeyTypes::Ed25519 => path.push("ed25519"),
		}

		match self.block_type {
			BlockType::RandomTransfersKeepAlive => path.push("transfer"),
			BlockType::RandomTransfersReaping => path.push("transfer_reaping"),
			BlockType::Noop => path.push("noop"),
		}

		match self.database_type {
			DatabaseType::RocksDb => path.push("rocksdb"),
			DatabaseType::ParityDb => path.push("paritydb"),
		}

		// Final segment is the `Display` string of the size ("small", "full", ...).
		path.push(&format!("{}", self.size));

		path
	}

	/// Generate the bench database and pre-build the transactions the
	/// proposer will later pull from the mock pool.
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut extrinsics: Vec<Arc<PoolTransaction>> = Vec::new();

		// 50_000 accounts are generated up front for transaction material.
		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);

		let client = bench_db.client();

		let content_type = self.block_type.to_content(self.size.transactions());
		for transaction in bench_db.block_content(content_type, &client) {
			extrinsics.push(Arc::new(transaction.into()));
		}

		Box::new(ConstructionBenchmark {
			database: bench_db,
			transactions: Transactions(extrinsics),
		})
	}

	/// Human-readable name shown in logs and `--list` output.
	fn name(&self) -> Cow<'static, str> {
		format!(
			"Block construction ({:?}/{}, {:?} backend)",
			self.block_type, self.size, self.database_type,
		)
		.into()
	}
}
|
||||
|
||||
impl core::Benchmark for ConstructionBenchmark {
	/// One measured iteration: initialize a proposer over the genesis block
	/// and propose one block from the pre-generated transactions.
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let context = self.database.create_context();

		// Query (and discard) the runtime version — presumably to warm up the
		// runtime/executor before the measured section; TODO confirm intent.
		let _ = context
			.client
			.runtime_version_at(context.client.chain_info().genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;

		// In profile mode, pause so a profiler can be attached around the
		// interesting interval.
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		let mut proposer_factory = sc_basic_authorship::ProposerFactory::new(
			context.spawn_handle.clone(),
			context.client.clone(),
			self.transactions.clone().into(),
			None,
			None,
		);
		let timestamp_provider = sp_timestamp::InherentDataProvider::from_system_time();

		// NOTE(review): the timer starts before proposer initialization, so
		// the measurement covers `init` + inherent creation + `propose`,
		// not `propose` alone.
		let start = std::time::Instant::now();

		let proposer = futures::executor::block_on(
			proposer_factory.init(
				&context
					.client
					.header(context.client.chain_info().genesis_hash)
					.expect("Database error querying block #0")
					.expect("Block #0 should exist"),
			),
		)
		.expect("Proposer initialization failed");

		let inherent_data = futures::executor::block_on(timestamp_provider.create_inherent_data())
			.expect("Create inherent data failed");
		// Propose a block on top of genesis with a generous 20s deadline.
		let _block = futures::executor::block_on(Proposer::propose(
			proposer,
			inherent_data,
			Default::default(),
			std::time::Duration::from_secs(20),
			None,
		))
		.map(|r| r.block)
		.expect("Proposing failed");

		let elapsed = start.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		elapsed
	}
}
|
||||
|
||||
/// A single ready transaction held by the mock pool.
#[derive(Clone, Debug)]
pub struct PoolTransaction {
	// Encoded extrinsic served to the proposer.
	data: Arc<OpaqueExtrinsic>,
	// Pool hash; always `Hash::zero()` here (see the `From` impl) — real
	// hashes are not needed by this benchmark.
	hash: node_primitives::Hash,
}
|
||||
|
||||
impl From<OpaqueExtrinsic> for PoolTransaction {
|
||||
fn from(e: OpaqueExtrinsic) -> Self {
|
||||
PoolTransaction { data: Arc::from(e), hash: node_primitives::Hash::zero() }
|
||||
}
|
||||
}
|
||||
|
||||
/// Only `data` and `hash` are ever queried by the proposer in this benchmark;
/// the remaining accessors are deliberately left `unimplemented!()`.
impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction {
	type Transaction = Arc<OpaqueExtrinsic>;
	type Hash = node_primitives::Hash;

	fn data(&self) -> &Self::Transaction {
		&self.data
	}

	fn hash(&self) -> &Self::Hash {
		&self.hash
	}

	// Not exercised by the benchmark — panics if called.
	fn priority(&self) -> &u64 {
		unimplemented!()
	}

	fn longevity(&self) -> &u64 {
		unimplemented!()
	}

	fn requires(&self) -> &[Vec<u8>] {
		unimplemented!()
	}

	fn provides(&self) -> &[Vec<u8>] {
		unimplemented!()
	}

	fn is_propagable(&self) -> bool {
		unimplemented!()
	}
}
|
||||
|
||||
/// Fixed set of pre-generated transactions acting as the transaction pool.
#[derive(Clone, Debug)]
pub struct Transactions(Vec<Arc<PoolTransaction>>);
/// Owning iterator over the pool's transactions (returned by `ready_at`).
pub struct TransactionsIterator(std::vec::IntoIter<Arc<PoolTransaction>>);
|
||||
|
||||
impl Iterator for TransactionsIterator {
|
||||
type Item = Arc<PoolTransaction>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadyTransactions for TransactionsIterator {
	/// Invalidity reports are irrelevant for a static benchmark set — no-op.
	fn report_invalid(&mut self, _tx: &Self::Item) {}
}
|
||||
|
||||
/// Minimal `TransactionPool` backed by the fixed, pre-generated transaction
/// set. Only the methods the proposer actually calls are implemented
/// (`ready_at`, `report_invalid`); everything else panics via
/// `unimplemented!()`.
#[async_trait]
impl sc_transaction_pool_api::TransactionPool for Transactions {
	type Block = Block;
	type Hash = node_primitives::Hash;
	type InPoolTransaction = PoolTransaction;
	type Error = sc_transaction_pool_api::error::Error;

	/// Asynchronously imports a bunch of unverified transactions to the pool.
	/// Not needed here: the pool content is fixed at construction.
	async fn submit_at(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xts: Vec<TransactionFor<Self>>,
	) -> Result<Vec<Result<node_primitives::Hash, Self::Error>>, Self::Error> {
		unimplemented!()
	}

	/// Asynchronously imports one unverified transaction to the pool.
	/// Not needed for the benchmark.
	async fn submit_one(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xt: TransactionFor<Self>,
	) -> Result<TxHash<Self>, Self::Error> {
		unimplemented!()
	}

	// Status watching is not exercised by the proposer.
	async fn submit_and_watch(
		&self,
		_at: Self::Hash,
		_source: TransactionSource,
		_xt: TransactionFor<Self>,
	) -> Result<Pin<Box<TransactionStatusStreamFor<Self>>>, Self::Error> {
		unimplemented!()
	}

	/// Hand the proposer an iterator over all pre-generated transactions,
	/// regardless of `_at` — the set is the same for every block.
	async fn ready_at(
		&self,
		_at: Self::Hash,
	) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		Box::new(TransactionsIterator(self.0.clone().into_iter()))
	}

	fn ready(&self) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		unimplemented!()
	}

	/// Accept invalidity reports but drop nothing — returns an empty vec.
	async fn report_invalid(
		&self,
		_at: Option<Self::Hash>,
		_invalid_tx_errors: TxInvalidityReportMap<TxHash<Self>>,
	) -> Vec<Arc<Self::InPoolTransaction>> {
		Default::default()
	}

	fn futures(&self) -> Vec<Self::InPoolTransaction> {
		unimplemented!()
	}

	fn status(&self) -> PoolStatus {
		unimplemented!()
	}

	fn import_notification_stream(&self) -> ImportNotificationStream<TxHash<Self>> {
		unimplemented!()
	}

	fn on_broadcasted(&self, _propagations: HashMap<TxHash<Self>, Vec<String>>) {
		unimplemented!()
	}

	fn hash_of(&self, _xt: &TransactionFor<Self>) -> TxHash<Self> {
		unimplemented!()
	}

	fn ready_transaction(&self, _hash: &TxHash<Self>) -> Option<Arc<Self::InPoolTransaction>> {
		unimplemented!()
	}

	async fn ready_at_with_timeout(
		&self,
		_at: Self::Hash,
		_timeout: std::time::Duration,
	) -> Box<dyn ReadyTransactions<Item = Arc<Self::InPoolTransaction>> + Send> {
		unimplemented!()
	}
}
|
||||
@@ -0,0 +1,151 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use serde::Serialize;
|
||||
use std::{
|
||||
borrow::{Cow, ToOwned},
|
||||
fmt,
|
||||
};
|
||||
|
||||
/// Benchmark path, e.g. `::node::import::sr25519::transfer::rocksdb::medium`.
///
/// Used both for display (`full`) and for substring-based benchmark
/// filtering (`has`, driven by the `--filter` CLI argument).
pub struct Path(Vec<String>);

// The two separate `impl Path` blocks of the original are merged: splitting
// the constructor from the accessors served no purpose.
impl Path {
	/// Create a path from its initial static segments.
	pub fn new(initial: &'static [&'static str]) -> Self {
		Path(initial.iter().map(|x| x.to_string()).collect())
	}

	/// Append one segment to the path.
	pub fn push(&mut self, item: &str) {
		self.0.push(item.to_string());
	}

	/// Full path string with every segment prefixed by `::`
	/// (so it always starts with `::`).
	pub fn full(&self) -> String {
		self.0.iter().fold(String::new(), |mut val, next| {
			val.push_str("::");
			val.push_str(next);
			val
		})
	}

	/// Whether the full path contains `path` as a substring.
	pub fn has(&self, path: &str) -> bool {
		self.full().contains(path)
	}
}
|
||||
|
||||
/// Everything needed to describe a benchmark before it is run.
pub trait BenchmarkDescription {
	/// Filterable identifier of the benchmark (see [`Path`]).
	fn path(&self) -> Path;

	/// Consume the description and build the runnable benchmark
	/// (generates databases, blocks, transactions, ...).
	fn setup(self: Box<Self>) -> Box<dyn Benchmark>;

	/// Human-readable benchmark name.
	fn name(&self) -> Cow<'static, str>;
}

/// A runnable benchmark; each call to `run` performs one measured iteration.
pub trait Benchmark {
	/// Run one iteration and return its wall-clock duration.
	fn run(&mut self, mode: Mode) -> std::time::Duration;
}
|
||||
|
||||
/// Aggregated result of one benchmark; serialized for `--json` output.
#[derive(Debug, Clone, Serialize)]
pub struct BenchmarkOutput {
	/// Benchmark name (from `BenchmarkDescription::name`).
	name: String,
	/// Plain mean over all runs, in nanoseconds.
	raw_average: u64,
	/// Trimmed mean (fastest/slowest runs discarded in `run_benchmark`),
	/// in nanoseconds.
	average: u64,
}
|
||||
|
||||
/// Pretty-printer for a nanosecond count: picks ns/µs/ms/s with a precision
/// appropriate to the magnitude.
pub struct NsFormatter(pub u64);

impl fmt::Display for NsFormatter {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		let ns = self.0;
		// Thresholds are chosen so the printed value keeps a sensible number
		// of significant digits in each unit.
		if ns < 100 {
			write!(f, "{} ns", ns)
		} else if ns < 100_000 {
			write!(f, "{:.1} µs", ns as f64 / 1_000.0)
		} else if ns < 1_000_000 {
			write!(f, "{:.4} ms", ns as f64 / 1_000_000.0)
		} else if ns < 100_000_000 {
			write!(f, "{:.1} ms", ns as f64 / 1_000_000.0)
		} else {
			write!(f, "{:.4} s", ns as f64 / 1_000_000_000.0)
		}
	}
}
|
||||
|
||||
/// Benchmark execution mode.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Mode {
	/// Regular benchmark run.
	Regular,
	/// Like `Regular`, but with pauses around the measured section so the
	/// interesting interval can be selected in a profiler.
	Profile,
}

impl std::str::FromStr for Mode {
	type Err = &'static str;

	/// Parse `"regular"` / `"profile"` — the values accepted by the
	/// `--mode` CLI flag. (Parameter renamed from the original's `day`,
	/// a copy-paste leftover from the `FromStr` documentation example.)
	fn from_str(mode: &str) -> Result<Self, Self::Err> {
		match mode {
			"regular" => Ok(Mode::Regular),
			"profile" => Ok(Mode::Profile),
			_ => Err("Could not parse mode"),
		}
	}
}
|
||||
|
||||
impl fmt::Display for BenchmarkOutput {
	/// One-line summary: `avg` is the raw mean over all runs, `w_avg` the
	/// trimmed mean — both rendered via [`NsFormatter`].
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(
			f,
			"{}: avg {}, w_avg {}",
			self.name,
			NsFormatter(self.raw_average),
			NsFormatter(self.average),
		)
	}
}
|
||||
|
||||
pub fn run_benchmark(benchmark: Box<dyn BenchmarkDescription>, mode: Mode) -> BenchmarkOutput {
|
||||
let name = benchmark.name().to_owned();
|
||||
let mut benchmark = benchmark.setup();
|
||||
|
||||
let mut durations: Vec<u128> = vec![];
|
||||
for _ in 0..50 {
|
||||
let duration = benchmark.run(mode);
|
||||
durations.push(duration.as_nanos());
|
||||
}
|
||||
|
||||
durations.sort();
|
||||
|
||||
let raw_average = (durations.iter().sum::<u128>() / (durations.len() as u128)) as u64;
|
||||
let average = (durations.iter().skip(10).take(30).sum::<u128>() / 30) as u64;
|
||||
|
||||
BenchmarkOutput { name: name.into(), raw_average, average }
|
||||
}
|
||||
|
||||
// Build a flat `Vec<Box<dyn BenchmarkDescription>>` from a comma-separated
// mix of arms:
//   `(vars) in iterator => expr,` — one boxed description per iterated item;
//   `expr,`                       — a single boxed description.
// Arms are processed recursively; results are concatenated in order.
macro_rules! matrix(
	( $var:tt in $over:expr => $tt:expr, $( $rest:tt )* ) => {
		{
			let mut res = Vec::<Box<dyn crate::core::BenchmarkDescription>>::new();
			for $var in $over {
				res.push(Box::new($tt));
			}
			// Recurse on the remaining arms and append their output.
			res.extend(matrix!( $($rest)* ));
			res
		}
	};
	( $var:expr, $( $rest:tt )*) => {
		{
			let mut res = vec![Box::new($var) as Box<dyn crate::core::BenchmarkDescription>];
			res.extend(matrix!( $($rest)* ));
			res
		}
	};
	// Base case: no arms left.
	() => { vec![] }
);
|
||||
@@ -0,0 +1,69 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use kvdb::KeyValueDB;
|
||||
use node_primitives::Hash;
|
||||
use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut};
|
||||
|
||||
use crate::simple_trie::SimpleTrie;
|
||||
|
||||
/// Generate trie from given `key_values`.
///
/// Will fill your database `db` with trie data from `key_values` and
/// return the trie root.
pub fn generate_trie(
	db: Arc<dyn KeyValueDB>,
	key_values: impl IntoIterator<Item = (Vec<u8>, Vec<u8>)>,
) -> Hash {
	let mut root = Hash::default();

	// Phase 1: build the trie into an in-memory overlay on top of `db`.
	let (db, overlay) = {
		let mut overlay = HashMap::new();
		// Pre-seed the overlay with a hard-coded entry described by the
		// existing expect message as the "null key".
		// NOTE(review): the meaning of this constant hash is not visible
		// here — confirm against the runtime it mirrors.
		overlay.insert(
			array_bytes::hex2bytes(
				"03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314",
			)
			.expect("null key is valid"),
			Some(vec![0]),
		);
		let mut trie = SimpleTrie { db, overlay: &mut overlay };
		{
			// Inner scope so `trie_db` (which borrows `trie` and `root`)
			// is dropped before we take `trie.db` back.
			let mut trie_db =
				TrieDBMutBuilderV1::<crate::simple_trie::Hasher>::new(&mut trie, &mut root).build();
			for (key, value) in key_values {
				trie_db.insert(&key, &value).expect("trie insertion failed");
			}

			// Flush the trie nodes into the overlay and finalize `root`.
			trie_db.commit();
		}
		(trie.db, overlay)
	};

	// Phase 2: write the overlay to the backing database in one transaction
	// (column 0); `None` values are deletions.
	let mut transaction = db.transaction();
	for (key, value) in overlay.into_iter() {
		match value {
			Some(value) => transaction.put(0, &key[..], &value[..]),
			None => transaction.delete(0, &key[..]),
		}
	}
	db.write(transaction).expect("Failed to write transaction");

	root
}
|
||||
@@ -0,0 +1,134 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Block import benchmark.
|
||||
//!
|
||||
//! This benchmark is expected to measure block import operation of
|
||||
//! some more or less full block.
|
||||
//!
|
||||
//! As we also want to protect against cold-cache attacks, this
|
||||
//! benchmark should not rely on any caching (except those that
|
||||
//! DO NOT depend on user input). Thus block generation should be
|
||||
//! based on randomized operation.
|
||||
//!
|
||||
//! This is supposed to be very simple benchmark and is not subject
|
||||
//! to much configuring - just block full of randomized transactions.
|
||||
//! It is not supposed to measure runtime modules weight correctness
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use node_primitives::Block;
|
||||
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
|
||||
use sc_client_api::backend::Backend;
|
||||
|
||||
use crate::{
|
||||
common::SizeType,
|
||||
core::{self, Mode, Path},
|
||||
};
|
||||
|
||||
/// Parameters identifying one block-import benchmark variant
/// (combined into the benchmark matrix by `main`).
pub struct ImportBenchmarkDescription {
	/// Signature scheme used for the generated accounts.
	pub key_types: KeyTypes,
	/// Kind of extrinsics the imported block is filled with.
	pub block_type: BlockType,
	/// How many transactions go into the block (see [`SizeType`]).
	pub size: SizeType,
	/// Database backend to benchmark against.
	pub database_type: DatabaseType,
}

/// A prepared import benchmark: the generated database plus the pre-built
/// block to import.
pub struct ImportBenchmark {
	database: BenchDb,
	block: Block,
}
|
||||
|
||||
impl core::BenchmarkDescription for ImportBenchmarkDescription {
	/// Filterable path, e.g.
	/// `::node::import::sr25519::transfer_keep_alive::rocksdb::medium`.
	fn path(&self) -> Path {
		let mut path = Path::new(&["node", "import"]);

		match self.key_types {
			KeyTypes::Sr25519 => path.push("sr25519"),
			KeyTypes::Ed25519 => path.push("ed25519"),
		}

		match self.block_type {
			BlockType::RandomTransfersKeepAlive => path.push("transfer_keep_alive"),
			BlockType::RandomTransfersReaping => path.push("transfer_reaping"),
			BlockType::Noop => path.push("noop"),
		}

		match self.database_type {
			DatabaseType::RocksDb => path.push("rocksdb"),
			DatabaseType::ParityDb => path.push("paritydb"),
		}

		// Final segment is the `Display` string of the size ("small", "full", ...).
		path.push(&format!("{}", self.size));

		path
	}

	/// Generate the bench database and pre-build the block that `run` will
	/// import.
	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		// 50_000 accounts are generated up front for transaction material.
		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
		let block = bench_db.generate_block(self.block_type.to_content(self.size.transactions()));
		Box::new(ImportBenchmark { database: bench_db, block })
	}

	/// Human-readable name shown in logs and `--list` output.
	fn name(&self) -> Cow<'static, str> {
		format!(
			"Block import ({:?}/{}, {:?} backend)",
			self.block_type, self.size, self.database_type,
		)
		.into()
	}
}
|
||||
|
||||
impl core::Benchmark for ImportBenchmark {
	/// One measured iteration: import the pre-generated block into a fresh
	/// context and time only the import itself.
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let mut context = self.database.create_context();

		// Query (and discard) the runtime version — presumably to warm up the
		// runtime/executor before the measured section; TODO confirm intent.
		let _ = context
			.client
			.runtime_version_at(context.client.chain_info().genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;

		// In profile mode, pause so a profiler can be attached around the
		// interesting interval.
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		// Only the import call is timed.
		let start = std::time::Instant::now();
		context.import_block(self.block.clone());
		let elapsed = start.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		log::info!(
			target: "bench-logistics",
			"imported block with {} tx, took: {:#?}",
			self.block.extrinsics.len(),
			elapsed,
		);

		log::info!(
			target: "bench-logistics",
			"usage info: {}",
			context.backend.usage_info()
				.expect("RocksDB backend always provides usage info!"),
		);

		elapsed
	}
}
|
||||
@@ -0,0 +1,186 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
mod common;
|
||||
mod construct;
|
||||
#[macro_use]
|
||||
mod core;
|
||||
mod generator;
|
||||
mod import;
|
||||
mod simple_trie;
|
||||
mod state_sizes;
|
||||
mod tempdb;
|
||||
mod trie;
|
||||
mod txpool;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes};
|
||||
|
||||
use crate::{
|
||||
common::SizeType,
|
||||
construct::ConstructionBenchmarkDescription,
|
||||
core::{run_benchmark, Mode as BenchmarkMode},
|
||||
import::ImportBenchmarkDescription,
|
||||
tempdb::DatabaseType,
|
||||
trie::{DatabaseSize, TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription},
|
||||
txpool::PoolBenchmarkDescription,
|
||||
};
|
||||
|
||||
/// Command-line options of the `node-bench` binary (parsed via clap derive).
#[derive(Debug, Parser)]
#[command(name = "node-bench", about = "Node integration benchmarks")]
struct Opt {
	/// Show list of all available benchmarks.
	///
	/// Will output ("name", "path"). Benchmarks can then be filtered by path.
	#[arg(short, long)]
	list: bool,

	/// Machine readable json output.
	///
	/// This also suppresses all regular output (except to stderr)
	#[arg(short, long)]
	json: bool,

	/// Filter benchmarks.
	///
	/// Run with `--list` for the hint of what to filter.
	// Positional argument: substring matched against each benchmark's path.
	filter: Option<String>,

	/// Number of transactions for block import with `custom` size.
	#[arg(long)]
	transactions: Option<usize>,

	/// Mode
	///
	/// "regular" for regular benchmark
	///
	/// "profile" mode adds pauses between measurable runs,
	/// so that actual interval can be selected in the profiler of choice.
	// Parsed through `Mode`'s `FromStr` implementation.
	#[arg(short, long, default_value = "regular")]
	mode: BenchmarkMode,
}
|
||||
|
||||
fn main() {
	let opt = Opt::parse();

	// Logging would pollute the machine-readable output, so only initialize
	// it for the human-readable mode.
	if !opt.json {
		sp_tracing::try_init_simple();
	}

	// Pre-compute the (size × block type × backend) cartesian product for the
	// import benchmarks; `Custom` falls back to 0 when `--transactions` is
	// not given.
	let mut import_benchmarks = Vec::new();

	for size in [
		SizeType::Empty,
		SizeType::Small,
		SizeType::Medium,
		SizeType::Large,
		SizeType::Full,
		SizeType::Custom(opt.transactions.unwrap_or(0)),
	] {
		for block_type in [
			BlockType::RandomTransfersKeepAlive,
			BlockType::RandomTransfersReaping,
			BlockType::Noop,
		] {
			for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb] {
				import_benchmarks.push((size, block_type, database_type));
			}
		}
	}

	// Assemble the full benchmark list: import matrix, trie read/write over
	// all (database size × backend) combinations, two construction variants,
	// and the transaction pool benchmark.
	let benchmarks = matrix!(
		(size, block_type, database_type) in import_benchmarks.into_iter() =>
			ImportBenchmarkDescription {
				key_types: KeyTypes::Sr25519,
				size,
				block_type,
				database_type,
			},
		(size, db_type) in
			[
				DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
				DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
			]
			.iter().flat_map(|size|
				[
					DatabaseType::RocksDb, DatabaseType::ParityDb
				]
				.iter().map(move |db_type| (size, db_type)))
			=> TrieReadBenchmarkDescription { database_size: *size, database_type: *db_type },
		(size, db_type) in
			[
				DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
				DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
			]
			.iter().flat_map(|size|
				[
					DatabaseType::RocksDb, DatabaseType::ParityDb
				]
				.iter().map(move |db_type| (size, db_type)))
			=> TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type },
		ConstructionBenchmarkDescription {
			key_types: KeyTypes::Sr25519,
			block_type: BlockType::RandomTransfersKeepAlive,
			size: SizeType::Medium,
			database_type: BenchDataBaseType::RocksDb,
		},
		ConstructionBenchmarkDescription {
			key_types: KeyTypes::Sr25519,
			block_type: BlockType::RandomTransfersKeepAlive,
			size: SizeType::Large,
			database_type: BenchDataBaseType::RocksDb,
		},
		PoolBenchmarkDescription { database_type: BenchDataBaseType::RocksDb },
	);

	// `--list`: print (optionally filtered) names and paths, then exit
	// without running anything.
	if opt.list {
		println!("Available benchmarks:");
		if let Some(filter) = opt.filter.as_ref() {
			println!("\t(filtered by \"{}\")", filter);
		}
		for benchmark in benchmarks.iter() {
			if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) {
				println!("{}: {}", benchmark.name(), benchmark.path().full())
			}
		}
		return;
	}

	// Run every benchmark whose path matches the filter (or all, if none).
	let mut results = Vec::new();
	for benchmark in benchmarks {
		if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) {
			log::info!("Starting {}", benchmark.name());
			let result = run_benchmark(benchmark, opt.mode);
			log::info!("{}", result);

			results.push(result);
		}
	}

	// A filter that matched nothing is treated as a usage error.
	if results.is_empty() {
		eprintln!("No benchmark was found for query");
		std::process::exit(1);
	}

	if opt.json {
		let json_result: String =
			serde_json::to_string(&results).expect("Failed to construct json");
		println!("{}", json_result);
	}
}
|
||||
@@ -0,0 +1,72 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix};
|
||||
use kvdb::KeyValueDB;
|
||||
use node_primitives::Hash;
|
||||
use sp_trie::DBValue;
|
||||
|
||||
pub type Hasher = sp_core::Blake2Hasher;
|
||||
|
||||
/// Immutable generated trie database with root.
///
/// Reads consult the in-memory `overlay` first and fall back to the
/// persistent `db` (column 0); all writes go to `overlay` only — see the
/// `HashDB` impl below.
pub struct SimpleTrie<'a> {
	/// Persistent backing store; only column 0 is used.
	pub db: Arc<dyn KeyValueDB>,
	/// Staged changes layered over `db`; a `None` value marks a deletion.
	pub overlay: &'a mut HashMap<Vec<u8>, Option<Vec<u8>>>,
}
|
||||
|
||||
impl<'a> AsHashDB<Hasher, DBValue> for SimpleTrie<'a> {
	// Plain upcasts: `SimpleTrie` implements `HashDB` itself, so these just
	// re-borrow `self` at the trait-object type.
	fn as_hash_db(&self) -> &dyn hash_db::HashDB<Hasher, DBValue> {
		self
	}

	fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB<Hasher, DBValue> + 'b) {
		&mut *self
	}
}
|
||||
|
||||
impl<'a> HashDB<Hasher, DBValue> for SimpleTrie<'a> {
|
||||
fn get(&self, key: &Hash, prefix: Prefix) -> Option<DBValue> {
|
||||
let key = sp_trie::prefixed_key::<Hasher>(key, prefix);
|
||||
if let Some(value) = self.overlay.get(&key) {
|
||||
return value.clone();
|
||||
}
|
||||
self.db.get(0, &key).expect("Database backend error")
|
||||
}
|
||||
|
||||
fn contains(&self, hash: &Hash, prefix: Prefix) -> bool {
|
||||
self.get(hash, prefix).is_some()
|
||||
}
|
||||
|
||||
fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash {
|
||||
let key = Hasher::hash(value);
|
||||
self.emplace(key, prefix, value.to_vec());
|
||||
key
|
||||
}
|
||||
|
||||
fn emplace(&mut self, key: Hash, prefix: Prefix, value: DBValue) {
|
||||
let key = sp_trie::prefixed_key::<Hasher>(&key, prefix);
|
||||
self.overlay.insert(key, Some(value));
|
||||
}
|
||||
|
||||
fn remove(&mut self, key: &Hash, prefix: Prefix) {
|
||||
let key = sp_trie::prefixed_key::<Hasher>(key, prefix);
|
||||
self.overlay.insert(key, None);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,123 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
use kvdb::{DBKeyValue, DBTransaction, KeyValueDB};
|
||||
use kvdb_rocksdb::{Database, DatabaseConfig};
|
||||
use std::{io, path::PathBuf, sync::Arc};
|
||||
|
||||
/// Which key-value database backend a benchmark should run against.
#[derive(Clone, Copy, Debug)]
pub enum DatabaseType {
	/// RocksDB via `kvdb-rocksdb`.
	RocksDb,
	/// ParityDB via the `parity-db` crate (adapted by `ParityDbWrapper`).
	ParityDb,
}
|
||||
|
||||
/// A database living inside a temporary directory that is removed on drop.
pub struct TempDatabase(tempfile::TempDir);
|
||||
|
||||
/// Adapter exposing a `parity_db::Db` through the `KeyValueDB` interface.
/// Only the methods the benchmarks exercise are implemented; the rest panic
/// via `unimplemented!()`.
struct ParityDbWrapper(parity_db::Db);
|
||||
|
||||
impl KeyValueDB for ParityDbWrapper {
	/// Get a value by key.
	///
	/// Only the trailing 32 bytes of `key` are passed to parity-db — this
	/// assumes every key ends in a 32-byte hash (shorter keys would panic on
	/// the slice). `write` below truncates identically.
	fn get(&self, col: u32, key: &[u8]) -> io::Result<Option<Vec<u8>>> {
		Ok(self.0.get(col as u8, &key[key.len() - 32..]).expect("db error"))
	}

	/// Get a value by partial key. Only works for flushed data.
	fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result<Option<Vec<u8>>> {
		unimplemented!()
	}

	/// Write a transaction of changes to the buffer.
	fn write(&self, transaction: DBTransaction) -> io::Result<()> {
		self.0
			.commit(transaction.ops.iter().map(|op| match op {
				// Keys are truncated to their trailing 32 bytes, matching `get`.
				kvdb::DBOp::Insert { col, key, value } =>
					(*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
				kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None),
				// Prefix deletion is never issued by the benchmarks.
				kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(),
			}))
			.expect("db error");
		Ok(())
	}

	/// Iterate over flushed data for a given column.
	fn iter<'a>(&'a self, _col: u32) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a> {
		unimplemented!()
	}

	/// Iterate over flushed data for a given column, starting from a given prefix.
	fn iter_with_prefix<'a>(
		&'a self,
		_col: u32,
		_prefix: &'a [u8],
	) -> Box<dyn Iterator<Item = io::Result<DBKeyValue>> + 'a> {
		unimplemented!()
	}
}
|
||||
|
||||
impl TempDatabase {
|
||||
pub fn new() -> Self {
|
||||
let dir = tempfile::tempdir().expect("temp dir creation failed");
|
||||
log::trace!(
|
||||
target: "bench-logistics",
|
||||
"Created temp db at {}",
|
||||
dir.path().to_string_lossy(),
|
||||
);
|
||||
|
||||
TempDatabase(dir)
|
||||
}
|
||||
|
||||
pub fn open(&mut self, db_type: DatabaseType) -> Arc<dyn KeyValueDB> {
|
||||
match db_type {
|
||||
DatabaseType::RocksDb => {
|
||||
let db_cfg = DatabaseConfig::with_columns(1);
|
||||
let db = Database::open(&db_cfg, &self.0.path()).expect("Database backend error");
|
||||
Arc::new(db)
|
||||
},
|
||||
DatabaseType::ParityDb => Arc::new(ParityDbWrapper({
|
||||
let mut options = parity_db::Options::with_columns(self.0.path(), 1);
|
||||
let column_options = &mut options.columns[0];
|
||||
column_options.ref_counted = true;
|
||||
column_options.preimage = true;
|
||||
column_options.uniform = true;
|
||||
parity_db::Db::open_or_create(&options).expect("db open error")
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for TempDatabase {
|
||||
fn clone(&self) -> Self {
|
||||
let new_dir = tempfile::tempdir().expect("temp dir creation failed");
|
||||
let self_dir = self.0.path();
|
||||
|
||||
log::trace!(
|
||||
target: "bench-logistics",
|
||||
"Cloning db ({}) to {}",
|
||||
self_dir.to_string_lossy(),
|
||||
new_dir.path().to_string_lossy(),
|
||||
);
|
||||
let self_db_files = std::fs::read_dir(self_dir)
|
||||
.expect("failed to list file in seed dir")
|
||||
.map(|f_result| f_result.expect("failed to read file in seed db").path())
|
||||
.collect::<Vec<PathBuf>>();
|
||||
fs_extra::copy_items(&self_db_files, new_dir.path(), &fs_extra::dir::CopyOptions::new())
|
||||
.expect("Copy of seed database is ok");
|
||||
|
||||
TempDatabase(new_dir)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,371 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Trie benchmark (integrated).
|
||||
|
||||
use hash_db::Prefix;
|
||||
use kvdb::KeyValueDB;
|
||||
use rand::Rng;
|
||||
use sp_state_machine::Backend as _;
|
||||
use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
sync::{Arc, LazyLock},
|
||||
};
|
||||
|
||||
use node_primitives::Hash;
|
||||
|
||||
use crate::{
|
||||
core::{self, Mode, Path},
|
||||
generator::generate_trie,
|
||||
simple_trie::SimpleTrie,
|
||||
tempdb::{DatabaseType, TempDatabase},
|
||||
};
|
||||
|
||||
/// Number of warmup keys and of timed query keys; every database size must
/// be a multiple of this (asserted in `DatabaseSize::keys`).
pub const SAMPLE_SIZE: usize = 100;
/// Byte length of the value written by the trie write benchmark.
pub const TEST_WRITE_SIZE: usize = 128;

/// A single raw (key, value) pair.
pub type KeyValue = (Vec<u8>, Vec<u8>);
/// A list of raw (key, value) pairs.
pub type KeyValues = Vec<KeyValue>;
|
||||
|
||||
/// Size classes for the generated benchmark database; the `Display` string
/// becomes a benchmark path segment (see the `path()` impls below).
#[derive(Clone, Copy, Debug, derive_more::Display)]
pub enum DatabaseSize {
	#[display(fmt = "empty")]
	Empty,
	#[display(fmt = "smallest")]
	Smallest,
	#[display(fmt = "small")]
	Small,
	#[display(fmt = "medium")]
	Medium,
	#[display(fmt = "large")]
	Large,
	#[display(fmt = "huge")]
	Huge,
}
|
||||
|
||||
/// Value-size sampler built from a Kusama state histogram; constructed
/// lazily on first access.
static KUSAMA_STATE_DISTRIBUTION: LazyLock<SizePool> =
	LazyLock::new(|| SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION));
|
||||
|
||||
impl DatabaseSize {
|
||||
/// Should be multiple of SAMPLE_SIZE!
|
||||
fn keys(&self) -> usize {
|
||||
let val = match *self {
|
||||
Self::Empty => 200, // still need some keys to query
|
||||
Self::Smallest => 1_000,
|
||||
Self::Small => 10_000,
|
||||
Self::Medium => 100_000,
|
||||
Self::Large => 200_000,
|
||||
Self::Huge => 1_000_000,
|
||||
};
|
||||
|
||||
assert_eq!(val % SAMPLE_SIZE, 0);
|
||||
|
||||
val
|
||||
}
|
||||
}
|
||||
|
||||
/// Render `v` with comma thousands separators, e.g. `1234567` → `"1,234,567"`.
fn pretty_print(v: usize) -> String {
	let digits: Vec<char> = v.to_string().chars().collect();
	let mut out = String::with_capacity(digits.len() + digits.len() / 3);
	for (pos, digit) in digits.iter().enumerate() {
		// Insert a separator whenever a whole group of three digits remains.
		if pos != 0 && (digits.len() - pos) % 3 == 0 {
			out.push(',');
		}
		out.push(*digit);
	}
	out
}
|
||||
|
||||
/// Parameters of a trie read benchmark: how large the generated database is
/// and which backend stores it.
pub struct TrieReadBenchmarkDescription {
	pub database_size: DatabaseSize,
	pub database_type: DatabaseType,
}
|
||||
|
||||
/// Prepared state for a trie read benchmark run (built by `setup`).
pub struct TrieReadBenchmark {
	// Seed database; cloned per run so every run starts from identical state.
	database: TempDatabase,
	// Trie root of the generated state.
	root: Hash,
	// Keys read (and value-checked) before the timed section.
	warmup_keys: KeyValues,
	// Keys whose reads are actually timed.
	query_keys: KeyValues,
	database_type: DatabaseType,
}
|
||||
|
||||
impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
	fn path(&self) -> Path {
		// NOTE(review): the path encodes only the size, not the database
		// type, so the RocksDb and ParityDb variants share a filter path —
		// confirm this is intended.
		let mut path = Path::new(&["trie", "read"]);
		path.push(&format!("{}", self.database_size));
		path
	}

	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut database = TempDatabase::new();

		// All warmup keys share this random 16-byte-prefixed key so they
		// cluster under one subtree (see below).
		let mut rng = rand::thread_rng();
		let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng);

		let mut key_values = KeyValues::new();
		let mut warmup_keys = KeyValues::new();
		let mut query_keys = KeyValues::new();
		// Pick SAMPLE_SIZE warmup keys and SAMPLE_SIZE query keys, evenly
		// spread over the generated key space (keys() is a multiple of
		// SAMPLE_SIZE by assertion).
		let every_x_key = self.database_size.keys() / SAMPLE_SIZE;
		for idx in 0..self.database_size.keys() {
			let kv = (
				KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(),
				KUSAMA_STATE_DISTRIBUTION.value(&mut rng),
			);
			if idx % every_x_key == 0 {
				// warmup keys go to separate tree with high prob
				let mut actual_warmup_key = warmup_prefix.clone();
				actual_warmup_key[16..].copy_from_slice(&kv.0[16..]);
				warmup_keys.push((actual_warmup_key.clone(), kv.1.clone()));
				// Note: the derived warmup key is inserted in addition to the
				// original `kv` pushed at the bottom of the loop.
				key_values.push((actual_warmup_key.clone(), kv.1.clone()));
			} else if idx % every_x_key == 1 {
				query_keys.push(kv.clone());
			}

			key_values.push(kv)
		}

		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);
		assert_eq!(query_keys.len(), SAMPLE_SIZE);

		// Materialize the generated state into the chosen backend.
		let root = generate_trie(database.open(self.database_type), key_values);

		Box::new(TrieReadBenchmark {
			database,
			root,
			warmup_keys,
			query_keys,
			database_type: self.database_type,
		})
	}

	fn name(&self) -> Cow<'static, str> {
		format!(
			"Trie read benchmark({:?} database ({} keys), db_type: {:?})",
			self.database_size,
			pretty_print(self.database_size.keys()),
			self.database_type,
		)
		.into()
	}
}
|
||||
|
||||
/// Adapter exposing a raw `KeyValueDB` as trie node storage for the state
/// machine backend.
struct Storage(Arc<dyn KeyValueDB>);

impl sp_state_machine::Storage<sp_core::Blake2Hasher> for Storage {
	fn get(&self, key: &Hash, prefix: Prefix) -> Result<Option<Vec<u8>>, String> {
		// Trie nodes live under their prefixed key in column 0.
		let key = sp_trie::prefixed_key::<sp_core::Blake2Hasher>(key, prefix);
		self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e))
	}
}
|
||||
|
||||
impl core::Benchmark for TrieReadBenchmark {
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		// Work on a copy of the seed database so runs are independent.
		let mut db = self.database.clone();

		let storage: Arc<dyn sp_state_machine::Storage<sp_core::Blake2Hasher>> =
			Arc::new(Storage(db.open(self.database_type)));

		let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build();
		// Warm up (and verify) before any measurement is taken.
		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
			let value = trie_backend
				.storage(&warmup_key[..])
				.expect("Failed to get key: db error")
				.expect("Warmup key should exist");

			// sanity for warmup keys
			assert_eq!(&value, warmup_value);
		}

		// In profile mode, park so an external profiler can be attached.
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		// Timed section: read every query key through the trie backend.
		let started = std::time::Instant::now();
		for (key, _) in self.query_keys.iter() {
			let _ = trie_backend.storage(&key[..]);
		}
		let elapsed = started.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		// Report the average duration of a single read.
		elapsed / (SAMPLE_SIZE as u32)
	}
}
|
||||
|
||||
/// Parameters of a trie write benchmark: how large the generated database is
/// and which backend stores it.
pub struct TrieWriteBenchmarkDescription {
	pub database_size: DatabaseSize,
	pub database_type: DatabaseType,
}
|
||||
|
||||
impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
	fn path(&self) -> Path {
		// NOTE(review): like the read benchmark, the path omits the database
		// type — both backends share a filter path.
		let mut path = Path::new(&["trie", "write"]);
		path.push(&format!("{}", self.database_size));
		path
	}

	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		let mut database = TempDatabase::new();

		// All warmup keys share this random key's 16-byte prefix so they
		// cluster under one subtree.
		let mut rng = rand::thread_rng();
		let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng);

		let mut key_values = KeyValues::new();
		let mut warmup_keys = KeyValues::new();
		// Pick SAMPLE_SIZE evenly spread warmup keys (no query keys here —
		// the write benchmark times a single insert, not reads).
		let every_x_key = self.database_size.keys() / SAMPLE_SIZE;
		for idx in 0..self.database_size.keys() {
			let kv = (
				KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(),
				KUSAMA_STATE_DISTRIBUTION.value(&mut rng),
			);
			if idx % every_x_key == 0 {
				// warmup keys go to separate tree with high prob
				let mut actual_warmup_key = warmup_prefix.clone();
				actual_warmup_key[16..].copy_from_slice(&kv.0[16..]);
				warmup_keys.push((actual_warmup_key.clone(), kv.1.clone()));
				// The derived warmup key is inserted in addition to the
				// original `kv` pushed at the bottom of the loop.
				key_values.push((actual_warmup_key.clone(), kv.1.clone()));
			}

			key_values.push(kv)
		}

		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);

		// Materialize the generated state into the chosen backend.
		let root = generate_trie(database.open(self.database_type), key_values);

		Box::new(TrieWriteBenchmark {
			database,
			root,
			warmup_keys,
			database_type: self.database_type,
		})
	}

	fn name(&self) -> Cow<'static, str> {
		format!(
			"Trie write benchmark({:?} database ({} keys), db_type = {:?})",
			self.database_size,
			pretty_print(self.database_size.keys()),
			self.database_type,
		)
		.into()
	}
}
|
||||
|
||||
/// Prepared state for a trie write benchmark run (built by `setup`).
struct TrieWriteBenchmark {
	// Seed database; cloned per run so every run starts from identical state.
	database: TempDatabase,
	// Trie root of the generated state.
	root: Hash,
	// Keys read (and value-checked) before the timed section.
	warmup_keys: KeyValues,
	database_type: DatabaseType,
}
|
||||
|
||||
impl core::Benchmark for TrieWriteBenchmark {
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let mut rng = rand::thread_rng();
		// Work on a copy of the seed database so runs are independent.
		let mut db = self.database.clone();
		let kvdb = db.open(self.database_type);

		// Updated in place by the trie commit below.
		let mut new_root = self.root;

		let mut overlay = HashMap::new();
		let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay };
		let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build();

		// Warm up (and verify) before any measurement is taken.
		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
			let value = trie_db_mut
				.get(&warmup_key[..])
				.expect("Failed to get key: db error")
				.expect("Warmup key should exist");

			// sanity for warmup keys
			assert_eq!(&value, warmup_value);
		}

		// One fresh random key/value pair is what gets written and timed.
		let test_key = random_vec(&mut rng, 32);
		let test_val = random_vec(&mut rng, TEST_WRITE_SIZE);

		// In profile mode, park so an external profiler can be attached.
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		let started = std::time::Instant::now();

		trie_db_mut.insert(&test_key, &test_val).expect("Should be inserted ok");
		// Commit recomputes `new_root` and stages nodes into the overlay;
		// dropping releases the borrow so the overlay can be flushed below.
		trie_db_mut.commit();
		drop(trie_db_mut);

		// The timed section deliberately includes flushing the staged overlay
		// (inserts and tombstones) into the real database.
		let mut transaction = kvdb.transaction();
		for (key, value) in overlay.into_iter() {
			match value {
				Some(value) => transaction.put(0, &key[..], &value[..]),
				None => transaction.delete(0, &key[..]),
			}
		}
		kvdb.write(transaction).expect("Failed to write transaction");

		let elapsed = started.elapsed();

		// sanity check: the insert must have produced a different root
		assert!(new_root != self.root);

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}

		elapsed
	}
}
|
||||
|
||||
fn random_vec<R: Rng>(rng: &mut R, len: usize) -> Vec<u8> {
|
||||
let mut val = vec![0u8; len];
|
||||
rng.fill_bytes(&mut val[..]);
|
||||
val
|
||||
}
|
||||
|
||||
/// Sampler over a value-size histogram.
struct SizePool {
	// Cumulative count → value size; keys are running totals so a uniform
	// draw in `0..total` maps to a size via a range lookup (see `value`).
	distribution: std::collections::BTreeMap<u32, u32>,
	// Sum of all histogram counts.
	total: u32,
}
|
||||
|
||||
impl SizePool {
|
||||
fn from_histogram(h: &[(u32, u32)]) -> SizePool {
|
||||
let mut distribution = std::collections::BTreeMap::default();
|
||||
let mut total = 0;
|
||||
for (size, count) in h {
|
||||
total += count;
|
||||
distribution.insert(total, *size);
|
||||
}
|
||||
SizePool { distribution, total }
|
||||
}
|
||||
|
||||
fn value<R: Rng>(&self, rng: &mut R) -> Vec<u8> {
|
||||
let sr = (rng.next_u64() % self.total as u64) as u32;
|
||||
let mut range = self
|
||||
.distribution
|
||||
.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded));
|
||||
let size = *range.next().unwrap().1 as usize;
|
||||
random_vec(rng, size)
|
||||
}
|
||||
|
||||
fn key<R: Rng>(&self, rng: &mut R) -> Vec<u8> {
|
||||
random_vec(rng, 32)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,102 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
//! Transaction pool integrated benchmarks.
|
||||
//!
|
||||
//! The goal of this benchmark is to figure out time needed to fill
|
||||
//! the transaction pool for the next block.
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes};
|
||||
|
||||
use sc_transaction_pool::BasicPool;
|
||||
use sc_transaction_pool_api::{TransactionPool, TransactionSource};
|
||||
|
||||
use crate::core::{self, Mode, Path};
|
||||
|
||||
/// Parameters of the transaction pool benchmark.
pub struct PoolBenchmarkDescription {
	pub database_type: DatabaseType,
}
|
||||
|
||||
/// Prepared state for the transaction pool benchmark (built by `setup`).
pub struct PoolBenchmark {
	// Bench database used to create the client and generate transactions.
	database: BenchDb,
}
|
||||
|
||||
impl core::BenchmarkDescription for PoolBenchmarkDescription {
	fn path(&self) -> Path {
		Path::new(&["node", "txpool"])
	}

	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
		// 50_000 presumably sizes the generated key set — see
		// `BenchDb::with_key_types` for the exact meaning.
		Box::new(PoolBenchmark {
			database: BenchDb::with_key_types(self.database_type, 50_000, KeyTypes::Sr25519),
		})
	}

	fn name(&self) -> Cow<'static, str> {
		"Transaction pool benchmark".into()
	}
}
|
||||
|
||||
impl core::Benchmark for PoolBenchmark {
	fn run(&mut self, mode: Mode) -> std::time::Duration {
		let context = self.database.create_context();
		let genesis_hash = context.client.chain_info().genesis_hash;

		// Fetch the runtime version up front — presumably to warm the client
		// before the timed section; the value itself is discarded.
		let _ = context
			.client
			.runtime_version_at(genesis_hash)
			.expect("Failed to get runtime version")
			.spec_version;

		// In profile mode, park so an external profiler can be attached.
		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(3));
		}

		let executor = sp_core::testing::TaskExecutor::new();
		let txpool = BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			executor,
			context.client.clone(),
		);

		// Generate keep-alive transfer transactions (`Some(100)` presumably
		// caps the count — see `BlockType::to_content`).
		let generated_transactions = self
			.database
			.block_content(
				BlockType::RandomTransfersKeepAlive.to_content(Some(100)),
				&context.client,
			)
			.into_iter()
			.collect::<Vec<_>>();

		// Timed section: submit every transaction and wait until the pool has
		// processed them all.
		let start = std::time::Instant::now();
		let submissions = generated_transactions
			.into_iter()
			.map(|tx| txpool.submit_one(genesis_hash, TransactionSource::External, tx));
		futures::executor::block_on(futures::future::join_all(submissions));
		let elapsed = start.elapsed();

		if mode == Mode::Profile {
			std::thread::park_timeout(std::time::Duration::from_secs(1));
		}
		elapsed
	}
}
|
||||
Reference in New Issue
Block a user