mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-04-26 04:07:57 +00:00
80616f6d03
[litep2p](https://github.com/altonen/litep2p) is a libp2p-compatible P2P networking library. It supports all of the features of `rust-libp2p` that are currently being utilized by Polkadot SDK. Compared to `rust-libp2p`, `litep2p` has a quite different architecture which is why the new `litep2p` network backend is only able to use a little of the existing code in `sc-network`. The design has been mainly influenced by how we'd wish to structure our networking-related code in Polkadot SDK: independent higher-levels protocols directly communicating with the network over links that support bidirectional backpressure. A good example would be `NotificationHandle`/`RequestResponseHandle` abstractions which allow, e.g., `SyncingEngine` to directly communicate with peers to announce/request blocks. I've tried running `polkadot --network-backend litep2p` with a few different peer configurations and there is a noticeable reduction in networking CPU usage. For high load (`--out-peers 200`), networking CPU usage goes down from ~110% to ~30% (80 pp) and for normal load (`--out-peers 40`), the usage goes down from ~55% to ~18% (37 pp). These should not be taken as final numbers because: a) there are still some low-hanging optimization fruits, such as enabling [receive window auto-tuning](https://github.com/libp2p/rust-yamux/pull/176), integrating `Peerset` more closely with `litep2p` or improving memory usage of the WebSocket transport b) fixing bugs/instabilities that incorrectly cause `litep2p` to do less work will increase the networking CPU usage c) verification in a more diverse set of tests/conditions is needed Nevertheless, these numbers should give an early estimate for CPU usage of the new networking backend. 
This PR consists of three separate changes: * introduce a generic `PeerId` (wrapper around `Multihash`) so that we don't have to use `NetworkService::PeerId` in every part of the code that uses a `PeerId` * introduce `NetworkBackend` trait, implement it for the libp2p network stack and make Polkadot SDK generic over `NetworkBackend` * implement `NetworkBackend` for litep2p The new library should be considered experimental which is why `rust-libp2p` will remain as the default option for the time being. This PR currently depends on the master branch of `litep2p` but I'll cut a new release for the library once all review comments have been addressed. --------- Signed-off-by: Alexandru Vasile <alexandru.vasile@parity.io> Co-authored-by: Dmitry Markin <dmitry@markin.tech> Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Alexandru Vasile <alexandru.vasile@parity.io>
248 lines
8.2 KiB
Rust
248 lines
8.2 KiB
Rust
// This file is part of Substrate.
|
|
|
|
// Copyright (C) Parity Technologies (UK) Ltd.
|
|
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU General Public License for more details.
|
|
|
|
// You should have received a copy of the GNU General Public License
|
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
|
|
use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput};
|
|
|
|
use kitchensink_runtime::{constants::currency::*, BalancesCall};
|
|
use node_cli::service::{create_extrinsic, FullClient};
|
|
use sc_block_builder::{BlockBuilderBuilder, BuiltBlock};
|
|
use sc_consensus::{
|
|
block_import::{BlockImportParams, ForkChoiceStrategy},
|
|
BlockImport, StateAction,
|
|
};
|
|
use sc_service::{
|
|
config::{
|
|
BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig,
|
|
PruningMode, RpcBatchRequestConfig, WasmExecutionMethod, WasmtimeInstantiationStrategy,
|
|
},
|
|
BasePath, Configuration, Role,
|
|
};
|
|
use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed};
|
|
use sp_consensus::BlockOrigin;
|
|
use sp_keyring::Sr25519Keyring;
|
|
use sp_runtime::{
|
|
transaction_validity::{InvalidTransaction, TransactionValidityError},
|
|
AccountId32, MultiAddress, OpaqueExtrinsic,
|
|
};
|
|
use staging_node_cli as node_cli;
|
|
use tokio::runtime::Handle;
|
|
|
|
fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
|
|
let base_path = BasePath::new_temp_dir()
|
|
.expect("getting the base path of a temporary path doesn't fail; qed");
|
|
let root = base_path.path().to_path_buf();
|
|
|
|
let network_config = NetworkConfiguration::new(
|
|
Sr25519Keyring::Alice.to_seed(),
|
|
"network/test/0.1",
|
|
Default::default(),
|
|
None,
|
|
);
|
|
|
|
let spec = Box::new(node_cli::chain_spec::development_config());
|
|
|
|
let config = Configuration {
|
|
impl_name: "BenchmarkImpl".into(),
|
|
impl_version: "1.0".into(),
|
|
// We don't use the authority role since that would start producing blocks
|
|
// in the background which would mess with our benchmark.
|
|
role: Role::Full,
|
|
tokio_handle,
|
|
transaction_pool: Default::default(),
|
|
network: network_config,
|
|
keystore: KeystoreConfig::InMemory,
|
|
database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
|
|
trie_cache_maximum_size: Some(64 * 1024 * 1024),
|
|
state_pruning: Some(PruningMode::ArchiveAll),
|
|
blocks_pruning: BlocksPruning::KeepAll,
|
|
chain_spec: spec,
|
|
wasm_method: WasmExecutionMethod::Compiled {
|
|
instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
|
|
},
|
|
rpc_addr: None,
|
|
rpc_max_connections: Default::default(),
|
|
rpc_cors: None,
|
|
rpc_methods: Default::default(),
|
|
rpc_max_request_size: Default::default(),
|
|
rpc_max_response_size: Default::default(),
|
|
rpc_id_provider: Default::default(),
|
|
rpc_max_subs_per_conn: Default::default(),
|
|
rpc_port: 9944,
|
|
rpc_message_buffer_capacity: Default::default(),
|
|
rpc_batch_config: RpcBatchRequestConfig::Unlimited,
|
|
rpc_rate_limit: None,
|
|
prometheus_config: None,
|
|
telemetry_endpoints: None,
|
|
default_heap_pages: None,
|
|
offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false },
|
|
force_authoring: false,
|
|
disable_grandpa: false,
|
|
dev_key_seed: Some(Sr25519Keyring::Alice.to_seed()),
|
|
tracing_targets: None,
|
|
tracing_receiver: Default::default(),
|
|
max_runtime_instances: 8,
|
|
runtime_cache_size: 2,
|
|
announce_block: true,
|
|
data_path: base_path.path().into(),
|
|
base_path,
|
|
informant_output_format: Default::default(),
|
|
wasm_runtime_overrides: None,
|
|
};
|
|
|
|
node_cli::service::new_full_base::<sc_network::NetworkWorker<_, _>>(
|
|
config,
|
|
None,
|
|
false,
|
|
|_, _| (),
|
|
)
|
|
.expect("creating a full node doesn't fail")
|
|
}
|
|
|
|
fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic {
|
|
kitchensink_runtime::UncheckedExtrinsic {
|
|
signature: None,
|
|
function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }),
|
|
}
|
|
.into()
|
|
}
|
|
|
|
fn import_block(mut client: &FullClient, built: BuiltBlock<node_primitives::Block>) {
|
|
let mut params = BlockImportParams::new(BlockOrigin::File, built.block.header);
|
|
params.state_action =
|
|
StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes));
|
|
params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
|
|
futures::executor::block_on(client.import_block(params))
|
|
.expect("importing a block doesn't fail");
|
|
}
|
|
|
|
fn prepare_benchmark(client: &FullClient) -> (usize, Vec<OpaqueExtrinsic>) {
|
|
const MINIMUM_PERIOD_FOR_BLOCKS: u64 = 1500;
|
|
|
|
let mut max_transfer_count = 0;
|
|
let mut extrinsics = Vec::new();
|
|
let mut block_builder = BlockBuilderBuilder::new(client)
|
|
.on_parent_block(client.chain_info().best_hash)
|
|
.with_parent_block_number(client.chain_info().best_number)
|
|
.build()
|
|
.unwrap();
|
|
|
|
// Every block needs one timestamp extrinsic.
|
|
let extrinsic_set_time = extrinsic_set_time(1 + MINIMUM_PERIOD_FOR_BLOCKS);
|
|
block_builder.push(extrinsic_set_time.clone()).unwrap();
|
|
extrinsics.push(extrinsic_set_time);
|
|
|
|
// Creating those is surprisingly costly, so let's only do it once and later just `clone` them.
|
|
let src = Sr25519Keyring::Alice.pair();
|
|
let dst: MultiAddress<AccountId32, u32> = Sr25519Keyring::Bob.to_account_id().into();
|
|
|
|
// Add as many transfer extrinsics as possible into a single block.
|
|
for nonce in 0.. {
|
|
let extrinsic: OpaqueExtrinsic = create_extrinsic(
|
|
client,
|
|
src.clone(),
|
|
BalancesCall::transfer_allow_death { dest: dst.clone(), value: 1 * DOLLARS },
|
|
Some(nonce),
|
|
)
|
|
.into();
|
|
|
|
match block_builder.push(extrinsic.clone()) {
|
|
Ok(_) => {},
|
|
Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid(
|
|
InvalidTransaction::ExhaustsResources,
|
|
)))) => break,
|
|
Err(error) => panic!("{}", error),
|
|
}
|
|
|
|
extrinsics.push(extrinsic);
|
|
max_transfer_count += 1;
|
|
}
|
|
|
|
(max_transfer_count, extrinsics)
|
|
}
|
|
|
|
fn block_production(c: &mut Criterion) {
|
|
sp_tracing::try_init_simple();
|
|
|
|
let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed");
|
|
let tokio_handle = runtime.handle().clone();
|
|
|
|
let node = new_node(tokio_handle.clone());
|
|
let client = &*node.client;
|
|
|
|
// Building the very first block is around ~30x slower than any subsequent one,
|
|
// so let's make sure it's built and imported before we benchmark anything.
|
|
let mut block_builder = BlockBuilderBuilder::new(client)
|
|
.on_parent_block(client.chain_info().best_hash)
|
|
.with_parent_block_number(client.chain_info().best_number)
|
|
.build()
|
|
.unwrap();
|
|
block_builder.push(extrinsic_set_time(1)).unwrap();
|
|
import_block(client, block_builder.build().unwrap());
|
|
|
|
let (max_transfer_count, extrinsics) = prepare_benchmark(&client);
|
|
log::info!("Maximum transfer count: {}", max_transfer_count);
|
|
|
|
let mut group = c.benchmark_group("Block production");
|
|
|
|
group.sample_size(10);
|
|
group.throughput(Throughput::Elements(max_transfer_count as u64));
|
|
|
|
let chain = client.chain_info();
|
|
let best_hash = chain.best_hash;
|
|
let best_number = chain.best_number;
|
|
|
|
group.bench_function(format!("{} transfers (no proof)", max_transfer_count), |b| {
|
|
b.iter_batched(
|
|
|| extrinsics.clone(),
|
|
|extrinsics| {
|
|
let mut block_builder = BlockBuilderBuilder::new(client)
|
|
.on_parent_block(best_hash)
|
|
.with_parent_block_number(best_number)
|
|
.build()
|
|
.unwrap();
|
|
for extrinsic in extrinsics {
|
|
block_builder.push(extrinsic).unwrap();
|
|
}
|
|
block_builder.build().unwrap()
|
|
},
|
|
BatchSize::SmallInput,
|
|
)
|
|
});
|
|
|
|
group.bench_function(format!("{} transfers (with proof)", max_transfer_count), |b| {
|
|
b.iter_batched(
|
|
|| extrinsics.clone(),
|
|
|extrinsics| {
|
|
let mut block_builder = BlockBuilderBuilder::new(client)
|
|
.on_parent_block(best_hash)
|
|
.with_parent_block_number(best_number)
|
|
.build()
|
|
.unwrap();
|
|
for extrinsic in extrinsics {
|
|
block_builder.push(extrinsic).unwrap();
|
|
}
|
|
block_builder.build().unwrap()
|
|
},
|
|
BatchSize::SmallInput,
|
|
)
|
|
});
|
|
}
|
|
|
|
// Register the benchmark group and let Criterion generate the `main` entry point.
criterion_group!(benches, block_production);
criterion_main!(benches);
|