feat: initialize Kurdistan SDK - independent fork of Polkadot SDK
@@ -0,0 +1,31 @@
FROM docker.io/library/ubuntu:20.04

# metadata
ARG VCS_REF
ARG BUILD_DATE
ARG IMAGE_NAME

LABEL io.parity.image.authors="devops-team@parity.io" \
	io.parity.image.vendor="Parity Technologies" \
	io.parity.image.title="${IMAGE_NAME}" \
	io.parity.image.description="Subkey: key generating utility for Substrate." \
	io.parity.image.source="https://github.com/paritytech/substrate/blob/${VCS_REF}/scripts/ci/docker/subkey.Dockerfile" \
	io.parity.image.revision="${VCS_REF}" \
	io.parity.image.created="${BUILD_DATE}" \
	io.parity.image.documentation="https://github.com/paritytech/substrate/tree/${VCS_REF}/subkey"

# show backtraces
ENV RUST_BACKTRACE 1

# add user
RUN useradd -m -u 1000 -U -s /bin/sh -d /subkey subkey

# add subkey binary to docker image
COPY ./subkey /usr/local/bin

USER subkey

# check if executable works in this container
RUN /usr/local/bin/subkey --version

ENTRYPOINT ["/usr/local/bin/subkey"]
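For reference, a local build of this image might look like the sketch below; the image tag and the presence of a prebuilt `subkey` binary in the build context are assumptions (CI normally supplies both).

```bash
# Hypothetical local build; CI normally injects these build args.
docker build \
  --build-arg VCS_REF="$(git rev-parse HEAD)" \
  --build-arg BUILD_DATE="$(date -u +'%Y-%m-%dT%H:%M:%SZ')" \
  --build-arg IMAGE_NAME="parity/subkey" \
  -f subkey.Dockerfile \
  -t parity/subkey:latest .
```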
@@ -0,0 +1,11 @@
# Subkey

The `subkey` program is a key management utility for Substrate-based blockchains. You can use the `subkey` program to
perform the following tasks:

* Generate and inspect cryptographically secure public and private key pairs.
* Restore keys from secret phrases and raw seeds.
* Sign and verify signatures on messages.
* Sign and verify signatures for encoded transactions.
* Derive hierarchical deterministic child key pairs.

[Documentation](https://docs.pezkuwichain.io/reference/command-line-tools/subkey/)
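As a quick illustration of the first two tasks, a typical session might look like this (the secret phrase below is a placeholder):

```bash
# Generate a new sr25519 key pair; prints the secret phrase, seed, public key and SS58 address.
subkey generate

# Re-derive the public key and address from an existing secret phrase.
subkey inspect "<twelve word secret phrase>"
```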
@@ -0,0 +1,45 @@
FROM docker.io/library/ubuntu:20.04

# metadata
ARG VCS_REF
ARG BUILD_DATE
ARG IMAGE_NAME

LABEL io.parity.image.authors="devops-team@parity.io" \
	io.parity.image.vendor="Parity Technologies" \
	io.parity.image.title="${IMAGE_NAME}" \
	io.parity.image.description="Substrate: The platform for blockchain innovators." \
	io.parity.image.source="https://github.com/paritytech/substrate/blob/${VCS_REF}/scripts/ci/docker/Dockerfile" \
	io.parity.image.revision="${VCS_REF}" \
	io.parity.image.created="${BUILD_DATE}" \
	io.parity.image.documentation="https://wiki.parity.io/Parity-Substrate"

# show backtraces
ENV RUST_BACKTRACE 1

# install tools and dependencies
RUN apt-get update && \
	DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \
	DEBIAN_FRONTEND=noninteractive apt-get install -y \
		libssl1.1 \
		ca-certificates \
		curl && \
	# apt cleanup
	apt-get autoremove -y && \
	apt-get clean && \
	find /var/lib/apt/lists/ -type f -not -name lock -delete; \
	# add user
	useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate

# add substrate binary to docker image
COPY ./substrate /usr/local/bin

USER substrate

# check if executable works in this container
RUN /usr/local/bin/substrate --version

EXPOSE 30333 9933 9944
VOLUME ["/substrate"]

ENTRYPOINT ["/usr/local/bin/substrate"]
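A sketch of running the resulting image; the image tag, the named volume and the `--dev` chain flag are assumptions for illustration:

```bash
# Publish the p2p (30333), HTTP RPC (9933) and WebSocket RPC (9944) ports and
# keep chain data in the /substrate volume declared by the Dockerfile.
docker run -d --name substrate-node \
  -p 30333:30333 -p 9933:9933 -p 9944:9944 \
  -v substrate-data:/substrate \
  parity/substrate:latest --dev
```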
@@ -0,0 +1 @@
# Substrate Docker Image
@@ -0,0 +1,239 @@
rule_files:
  - /dev/stdin

evaluation_interval: 1m

tests:
  - interval: 1m
    input_series:
      - series: 'substrate_sub_libp2p_peers_count{
          job="substrate",
          pod="substrate-abcdef01234-abcdef",
          instance="substrate-abcdef01234-abcdef",
          }'
        values: '3 2+0x4 1+0x9' # 3 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1

      - series: 'substrate_sub_txpool_validations_scheduled{
          job="substrate",
          pod="substrate-abcdef01234-abcdef",
          instance="substrate-abcdef01234-abcdef",
          }'
        values: '11+1x10 22+2x30 10043x5'

      - series: 'substrate_sub_txpool_validations_finished{
          job="substrate",
          pod="substrate-abcdef01234-abcdef",
          instance="substrate-abcdef01234-abcdef",
          }'
        values: '0+1x42 42x5'

      - series: 'substrate_block_height{
          status="best", job="substrate",
          pod="substrate-abcdef01234-abcdef",
          instance="substrate-abcdef01234-abcdef",
          }'
        values: '1+1x3 4+0x13' # 1 2 3 4 4 4 4 4 4 4 4 4 ...

      - series: 'substrate_block_height{
          status="finalized",
          job="substrate",
          pod="substrate-abcdef01234-abcdef",
          instance="substrate-abcdef01234-abcdef",
          }'
        values: '1+1x3 4+0x13' # 1 2 3 4 4 4 4 4 4 4 4 4 ...

    alert_rule_test:

      ######################################################################
      # Block production
      ######################################################################

      - eval_time: 6m
        alertname: BlockProductionSlow
        exp_alerts:
      - eval_time: 7m
        alertname: BlockProductionSlow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: best
            exp_annotations:
              message: "Best block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 3 minutes."

      - eval_time: 14m
        alertname: BlockProductionSlow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: best
            exp_annotations:
              message: "Best block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 3 minutes."
          - exp_labels:
              severity: critical
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: best
            exp_annotations:
              message: "Best block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 10 minutes."

      ######################################################################
      # Block finalization
      ######################################################################

      - eval_time: 6m
        alertname: BlockFinalizationSlow
        exp_alerts:
      - eval_time: 7m
        alertname: BlockFinalizationSlow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: finalized
            exp_annotations:
              message: "Finalized block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 3 minutes."

      - eval_time: 14m
        alertname: BlockFinalizationSlow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: finalized
            exp_annotations:
              message: "Finalized block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 3 minutes."
          - exp_labels:
              severity: critical
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
              status: finalized
            exp_annotations:
              message: "Finalized block on instance
                substrate-abcdef01234-abcdef increases by less than 1 per
                minute for more than 10 minutes."

      ######################################################################
      # Transaction queue
      ######################################################################

      - eval_time: 11m
        alertname: TransactionQueueSizeIncreasing
        # Number of validations scheduled and finished both grow at a rate
        # of 1 in the first 10 minutes, thereby the queue is not increasing
        # in size, thus don't expect an alert.
        exp_alerts:
      - eval_time: 22m
        alertname: TransactionQueueSizeIncreasing
        # Number of validations scheduled is growing twice as fast as the
        # number of validations finished after minute 10. Thus expect
        # warning alert after 20 minutes.
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The transaction pool size on node
                substrate-abcdef01234-abcdef has been monotonically
                increasing for more than 10 minutes."
      - eval_time: 43m
        alertname: TransactionQueueSizeIncreasing
        # Number of validations scheduled is growing twice as fast as the
        # number of validations finished after minute 10. Thus expect
        # both the 10 minute and the 30 minute warning alert after 40 minutes.
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The transaction pool size on node
                substrate-abcdef01234-abcdef has been monotonically
                increasing for more than 10 minutes."
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The transaction pool size on node
                substrate-abcdef01234-abcdef has been monotonically
                increasing for more than 30 minutes."
      - eval_time: 49m
        alertname: TransactionQueueSizeHigh
        # After minute 43 the number of validations scheduled jumps up
        # drastically while the number of validations finished stays the
        # same. Thus expect an alert.
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The transaction pool size on node
                substrate-abcdef01234-abcdef has been above 10_000 for more
                than 5 minutes."

      ######################################################################
      # Networking
      ######################################################################

      - eval_time: 3m # Values: 3 2 2
        alertname: NumberOfPeersLow
        exp_alerts:
      - eval_time: 4m # Values: 2 2 2
        alertname: NumberOfPeersLow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The node substrate-abcdef01234-abcdef has less
                than 3 peers for more than 3 minutes"

      - eval_time: 16m # Values: 3 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 1
        alertname: NumberOfPeersLow
        exp_alerts:
          - exp_labels:
              severity: warning
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The node substrate-abcdef01234-abcdef has less
                than 3 peers for more than 3 minutes"
          - exp_labels:
              severity: critical
              pod: substrate-abcdef01234-abcdef
              instance: substrate-abcdef01234-abcdef
              job: substrate
            exp_annotations:
              message: "The node substrate-abcdef01234-abcdef has less
                than 3 peers for more than 15 minutes"
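Because `rule_files` points at `/dev/stdin`, the alerting rules have to be piped in when these unit tests run. A plausible promtool invocation, assuming the rules and the tests above are stored under the file names used here, is:

```bash
# Feed the alerting rules on stdin and evaluate the unit tests above against them.
cat alerting-rules.yaml | promtool test rules alerting-rule-tests.yaml
```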
@@ -0,0 +1,167 @@
groups:
  - name: substrate.rules
    rules:

      ##############################################################################
      # Block production
      ##############################################################################

      - alert: BlockProductionSlow
        annotations:
          message: 'Best block on instance {{ $labels.instance }} increases by
            less than 1 per minute for more than 3 minutes.'
        expr: increase(substrate_block_height{status="best"}[1m]) < 1
        for: 3m
        labels:
          severity: warning
      - alert: BlockProductionSlow
        annotations:
          message: 'Best block on instance {{ $labels.instance }} increases by
            less than 1 per minute for more than 10 minutes.'
        expr: increase(substrate_block_height{status="best"}[1m]) < 1
        for: 10m
        labels:
          severity: critical

      ##############################################################################
      # Block finalization
      ##############################################################################

      - alert: BlockFinalizationSlow
        expr: increase(substrate_block_height{status="finalized"}[1m]) < 1
        for: 3m
        labels:
          severity: warning
        annotations:
          message: 'Finalized block on instance {{ $labels.instance }} increases by
            less than 1 per minute for more than 3 minutes.'
      - alert: BlockFinalizationSlow
        expr: increase(substrate_block_height{status="finalized"}[1m]) < 1
        for: 10m
        labels:
          severity: critical
        annotations:
          message: 'Finalized block on instance {{ $labels.instance }} increases by
            less than 1 per minute for more than 10 minutes.'
      - alert: BlockFinalizationLaggingBehind
        # Under the assumption of an average block production of 6 seconds,
        # "best" and "finalized" being more than 10 blocks apart would imply
        # more than a 1 minute delay between block production and finalization.
        expr: '(substrate_block_height{status="best"} - ignoring(status)
          substrate_block_height{status="finalized"}) > 10'
        for: 8m
        labels:
          severity: critical
        annotations:
          message: "Block finalization on instance {{ $labels.instance }} is behind
            block production by {{ $value }} for more than 8 minutes."

      ##############################################################################
      # Transaction queue
      ##############################################################################

      - alert: TransactionQueueSizeIncreasing
        expr: 'increase(substrate_sub_txpool_validations_scheduled[5m]) -
          increase(substrate_sub_txpool_validations_finished[5m]) > 0'
        for: 10m
        labels:
          severity: warning
        annotations:
          message: 'The transaction pool size on node {{ $labels.instance }} has
            been monotonically increasing for more than 10 minutes.'
      - alert: TransactionQueueSizeIncreasing
        expr: 'increase(substrate_sub_txpool_validations_scheduled[5m]) -
          increase(substrate_sub_txpool_validations_finished[5m]) > 0'
        for: 30m
        labels:
          severity: warning
        annotations:
          message: 'The transaction pool size on node {{ $labels.instance }} has
            been monotonically increasing for more than 30 minutes.'
      - alert: TransactionQueueSizeHigh
        expr: 'substrate_sub_txpool_validations_scheduled -
          substrate_sub_txpool_validations_finished > 10000'
        for: 5m
        labels:
          severity: warning
        annotations:
          message: 'The transaction pool size on node {{ $labels.instance }} has
            been above 10_000 for more than 5 minutes.'

      ##############################################################################
      # Networking
      ##############################################################################

      - alert: NumberOfPeersLow
        expr: substrate_sub_libp2p_peers_count < 3
        for: 3m
        labels:
          severity: warning
        annotations:
          message: 'The node {{ $labels.instance }} has less than 3 peers for more
            than 3 minutes'
      - alert: NumberOfPeersLow
        expr: substrate_sub_libp2p_peers_count < 3
        for: 15m
        labels:
          severity: critical
        annotations:
          message: 'The node {{ $labels.instance }} has less than 3 peers for more
            than 15 minutes'
      - alert: NoIncomingConnection
        expr: increase(substrate_sub_libp2p_incoming_connections_total[20m]) == 0
        labels:
          severity: warning
        annotations:
          message: 'The node {{ $labels.instance }} has not received any new incoming
            TCP connection in the past 20 minutes. Is it connected to the Internet?'

      ##############################################################################
      # System
      ##############################################################################

      - alert: NumberOfFileDescriptorsHigh
        expr: 'node_filefd_allocated{chain!=""} > 10000'
        for: 3m
        labels:
          severity: warning
        annotations:
          message: 'The node {{ $labels.instance }} has more than 10_000 file
            descriptors allocated for more than 3 minutes'

      ##############################################################################
      # Others
      ##############################################################################

      - alert: AuthorityDiscoveryDiscoveryFailureHigh
        expr: 'substrate_authority_discovery_handle_value_found_event_failure /
          ignoring(name)
          substrate_authority_discovery_dht_event_received{name="value_found"} > 0.5'
        for: 2h
        labels:
          severity: warning
        annotations:
          message: 'Authority discovery on node {{ $labels.instance }} fails to
            process more than 50 % of the values found on the DHT for more than 2
            hours.'

      - alert: UnboundedChannelPersistentlyLarge
        expr: 'substrate_unbounded_channel_size >= 200'
        for: 5m
        labels:
          severity: warning
        annotations:
          message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains
            more than 200 items for more than 5 minutes. Node might be frozen.'

      - alert: UnboundedChannelVeryLarge
        expr: '(
            (substrate_unbounded_channel_len{action = "send"} -
            ignoring(action) substrate_unbounded_channel_len{action = "received"})
            or on(instance) substrate_unbounded_channel_len{action = "send"}
          ) > 15000'
        labels:
          severity: warning
        annotations:
          message: 'Channel {{ $labels.entity }} on node {{ $labels.instance }} contains more than
            15000 items.'
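Before deploying, the rule file itself can be syntax-checked with promtool; the file name is an assumption:

```bash
# Validate the alerting rules before loading them into Prometheus.
promtool check rules alerting-rules.yaml
```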
@@ -0,0 +1,7 @@
# Substrate Dashboard

Shared templated Grafana dashboards.

To import the dashboards, follow the [Grafana
documentation](https://grafana.com/docs/grafana/latest/reference/export_import/).
You can see an example setup [here](./substrate-networking.json).
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,18 @@
#!/usr/bin/env bash

set -e

export TERM=xterm
SUBSTRATE_FOLDER="/substrate"
GIT_ROOT=`git rev-parse --show-toplevel`
PROJECT_ROOT=${GIT_ROOT}${SUBSTRATE_FOLDER}

if [ "$#" -ne 1 ]; then
  echo "node-template-release.sh path_to_target_archive"
  exit 1
fi

PATH_TO_ARCHIVE=$1

cd $PROJECT_ROOT/scripts/ci/node-template-release
cargo run $PROJECT_ROOT/bin/node-template $PROJECT_ROOT/$PATH_TO_ARCHIVE
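A hypothetical invocation, assuming the script is checked in under `scripts/ci/` inside the `/substrate` folder it references:

```bash
# Builds, tests and packages the node template; the archive path is interpreted
# relative to the /substrate project root.
./substrate/scripts/ci/node-template-release.sh node-template.tar.gz
```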
@@ -0,0 +1,24 @@
[package]
name = "node-template-release"
version = "3.0.0"
authors.workspace = true
edition.workspace = true
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
homepage.workspace = true
publish = false

[lints]
workspace = true

[package.metadata.docs.rs]
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
clap = { features = ["derive"], workspace = true }
flate2 = { workspace = true }
fs_extra = { workspace = true }
glob = { workspace = true }
itertools = { workspace = true }
tar = { workspace = true }
tempfile = { workspace = true }
toml_edit = { workspace = true }
@@ -0,0 +1,472 @@
// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use std::{
	collections::HashMap,
	ffi::OsString,
	fs::{self, File, OpenOptions},
	path::{Path, PathBuf},
	process::Command,
};

use clap::Parser;
use flate2::{write::GzEncoder, Compression};
use fs_extra::dir::{self, CopyOptions};
use glob;
use itertools::Itertools;
use tar;
use tempfile;
use toml_edit::{self, value, Array, Item, Table};

const SUBSTRATE_GIT_URL: &str = "https://github.com/pezkuwichain/pezkuwi-sdk.git";

type CargoToml = toml_edit::Document;

#[derive(Debug, PartialEq)]
struct Dependency {
	name: String,
	version: Option<String>,
	default_features: Option<bool>,
}

type Dependencies = HashMap<String, HashMap<String, Dependency>>;

#[derive(Parser)]
struct Options {
	/// The path to the `node-template` source.
	#[arg()]
	node_template: PathBuf,
	/// The path where to output the generated `tar.gz` file.
	#[arg()]
	output: PathBuf,
}

/// Copy the `node-template` to the given path.
fn copy_node_template(node_template: &Path, node_template_folder: &OsString, dest_path: &Path) {
	let options = CopyOptions::new();
	dir::copy(node_template, dest_path, &options).expect("Copies node-template to tmp dir");

	let dest_path = dest_path.join(node_template_folder);

	dir::get_dir_content(dest_path.join("env-setup"))
		.expect("`env-setup` directory should exist")
		.files
		.iter()
		.for_each(|f| {
			fs::copy(
				f,
				dest_path.join(PathBuf::from(f).file_name().expect("File has a file name.")),
			)
			.expect("Copying from `env-setup` directory works");
		});
	dir::remove(dest_path.join("env-setup")).expect("Deleting `env-setup` works");
}

/// Find all `Cargo.toml` files in the given path.
fn find_cargo_tomls(path: &PathBuf) -> Vec<PathBuf> {
	let path = format!("{}/**/Cargo.toml", path.display());

	let glob = glob::glob(&path).expect("Generates globbing pattern");

	let mut result = Vec::new();
	glob.into_iter().for_each(|file| match file {
		Ok(file) => result.push(file),
		Err(e) => println!("{:?}", e),
	});

	if result.is_empty() {
		panic!("Did not find any `Cargo.toml` files.");
	}

	result
}

/// Parse the given `Cargo.toml`.
fn parse_cargo_toml(file: &Path) -> CargoToml {
	fs::read_to_string(file)
		.unwrap_or_else(|e| panic!("Failed to read `{}`: {}", file.display(), e))
		.parse()
		.unwrap_or_else(|e| panic!("Failed to parse `{}`: {}", file.display(), e))
}

/// Write the given `Cargo.toml` to the given path.
fn write_cargo_toml(path: &Path, cargo_toml: CargoToml) {
	fs::write(path, cargo_toml.to_string())
		.unwrap_or_else(|e| panic!("Failed to write `{}`: {}", path.display(), e));
}

/// Gets the latest commit id of the repository given by `path`.
fn get_git_commit_id(path: &Path) -> String {
	let mut dir = path;
	while !dir.join(".git").exists() {
		dir = dir
			.parent()
			.expect(&format!("Node template ({}) should be in a git repository.", path.display()));
	}

	let git = dir.join(".git");
	let head = git.join("HEAD");
	let head_contents = fs::read_to_string(head).expect("Repository should have a HEAD");
	let branch = head_contents.strip_prefix("ref: ").expect(".git/HEAD to start 'ref: '").trim();
	let mut commit = fs::read_to_string(git.join(branch)).expect("Head references a commit");
	commit.truncate(commit.trim_end().len());
	commit
}

/// Rewrites git dependencies:
/// - inserts `workspace = true`;
/// - removes `path`;
/// - removes `version`;
/// - removes `default-features`
/// - and returns the dependencies that were rewritten.
fn update_git_dependencies<F: Copy + Fn(&str) -> bool>(
	cargo_toml: &mut CargoToml,
	path_filter: F,
) -> Dependencies {
	let process_dep = |dep: (toml_edit::KeyMut, &mut Item)| -> Option<Dependency> {
		let (key, value) = dep;
		value
			.as_table_like_mut()
			.filter(|dep| {
				dep.get("path").and_then(|path| path.as_str()).map(path_filter).unwrap_or(false)
			})
			.map(|dep| {
				dep.insert("workspace", toml_edit::value(true));
				dep.remove("path");

				Dependency {
					name: key.get().to_string(),
					version: dep
						.remove("version")
						.and_then(|version| version.as_str().map(|s| s.to_string())),
					default_features: dep.remove("default-features").and_then(|b| b.as_bool()),
				}
			})
	};

	["dependencies", "build-dependencies", "dev-dependencies"]
		.into_iter()
		.map(|table| -> (String, HashMap<String, Dependency>) {
			(
				table.to_string(),
				cargo_toml[table]
					.as_table_mut()
					.into_iter()
					.flat_map(|deps| deps.iter_mut().filter_map(process_dep))
					.map(|dep| (dep.name.clone(), dep))
					.collect(),
			)
		})
		.collect()
}

/// Processes all `Cargo.toml` files, aggregates dependencies and saves the changes.
fn process_cargo_tomls(cargo_tomls: &Vec<PathBuf>) -> Dependencies {
	/// Merge dependencies from one collection in another.
	fn merge_deps(into: &mut Dependencies, from: Dependencies) {
		from.into_iter().for_each(|(table, deps)| {
			into.entry(table).or_insert_with(HashMap::new).extend(deps);
		});
	}

	cargo_tomls.iter().fold(Dependencies::new(), |mut acc, path| {
		let mut cargo_toml = parse_cargo_toml(&path);

		let mut cargo_toml_path = path.clone();
		cargo_toml_path.pop(); // remove `Cargo.toml` from the path
		let deps = update_git_dependencies(&mut cargo_toml, |dep_path| {
			!cargo_toml_path.join(dep_path).exists()
		});

		write_cargo_toml(&path, cargo_toml);
		merge_deps(&mut acc, deps);
		acc
	})
}

/// Update the top level (workspace) `Cargo.toml` file.
///
/// - Adds `workspace` definition
/// - Adds dependencies
/// - Adds `profile.release` = `panic = unwind`
fn update_root_cargo_toml(
	cargo_toml: &mut CargoToml,
	members: &[String],
	deps: Dependencies,
	commit_id: &str,
) {
	let mut workspace = Table::new();

	workspace.insert("resolver", value("2"));

	workspace.insert("members", value(Array::from_iter(members.iter())));
	let mut workspace_dependencies = Table::new();
	deps.values()
		.flatten()
		.sorted_by_key(|(name, _)| *name)
		.for_each(|(name, dep)| {
			if let Some(version) = &dep.version {
				workspace_dependencies[name]["version"] = value(version);
			}
			if let Some(default_features) = dep.default_features {
				workspace_dependencies[name]["default-features"] = value(default_features);
			}
			workspace_dependencies[name]["git"] = value(SUBSTRATE_GIT_URL);
			workspace_dependencies[name]["rev"] = value(commit_id);
		});

	let mut package = Table::new();
	package.insert("edition", value("2021"));
	workspace.insert("package", Item::Table(package));

	workspace.insert("dependencies", Item::Table(workspace_dependencies));

	workspace.insert("lints", Item::Table(Table::new()));
	cargo_toml.insert("workspace", Item::Table(workspace));

	let mut panic_unwind = Table::new();
	panic_unwind.insert("panic", value("unwind"));
	let mut profile = Table::new();
	profile.insert("release", Item::Table(panic_unwind));
	cargo_toml.insert("profile", Item::Table(profile.into()));
}

fn process_root_cargo_toml(
	root_cargo_toml_path: &Path,
	root_deps: Dependencies,
	cargo_tomls: &[PathBuf],
	node_template_path: &PathBuf,
	commit_id: &str,
) {
	let mut root_cargo_toml = parse_cargo_toml(root_cargo_toml_path);
	let workspace_members = cargo_tomls
		.iter()
		.map(|p| {
			p.strip_prefix(node_template_path)
				.expect("Workspace member is a child of the node template path!")
				.parent()
				// We get the `Cargo.toml` paths as workspace members, but for the `members` field
				// we just need the path.
				.expect("The given path ends with `Cargo.toml` as file name!")
				.display()
				.to_string()
		})
		.collect::<Vec<_>>();

	update_root_cargo_toml(&mut root_cargo_toml, &workspace_members, root_deps, commit_id);
	write_cargo_toml(&root_cargo_toml_path, root_cargo_toml);
}

/// Build and test the generated node-template
fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) {
	// Build node
	assert!(Command::new("cargo")
		.args(&["build", "--all"])
		.current_dir(path)
		.status()
		.expect("Compiles node")
		.success());

	// Test node
	assert!(Command::new("cargo")
		.args(&["test", "--all"])
		.current_dir(path)
		.status()
		.expect("Tests node")
		.success());

	// Remove all `target` directories
	for toml in cargo_tomls {
		let mut target_path = toml.clone();
		target_path.pop();
		target_path = target_path.join("target");

		if target_path.exists() {
			fs::remove_dir_all(&target_path)
				.expect(&format!("Removes `{}`", target_path.display()));
		}
	}
}

fn main() {
	let options = Options::parse();

	// Copy node-template to a temp build dir.
	let build_dir = tempfile::tempdir().expect("Creates temp build dir");
	let node_template_folder = options
		.node_template
		.canonicalize()
		.expect("Node template path exists")
		.file_name()
		.expect("Node template folder is last element of path")
		.to_owned();
	copy_node_template(&options.node_template, &node_template_folder, build_dir.path());

	// The path to the node-template in the build dir.
	let node_template_path = build_dir.path().join(node_template_folder);
	let root_cargo_toml_path = node_template_path.join("Cargo.toml");

	// Get all `Cargo.toml` files in the node-template
	let mut cargo_tomls = find_cargo_tomls(&node_template_path);

	// Check if top level Cargo.toml exists. If not, create one in the destination,
	// else remove it from the list, as this requires some special treatments.
	if let Some(index) = cargo_tomls.iter().position(|x| *x == root_cargo_toml_path) {
		cargo_tomls.remove(index);
	} else {
		OpenOptions::new()
			.create(true)
			.write(true)
			.open(root_cargo_toml_path.clone())
			.expect("Create root level `Cargo.toml` failed.");
	}

	// Process all `Cargo.toml` files.
	let root_deps = process_cargo_tomls(&cargo_tomls);
	process_root_cargo_toml(
		&root_cargo_toml_path,
		root_deps,
		&cargo_tomls,
		&node_template_path,
		&get_git_commit_id(&options.node_template),
	);

	// Add root rustfmt to node template build path.
	let node_template_rustfmt_toml_path = node_template_path.join("rustfmt.toml");
	let root_rustfmt_toml = &options.node_template.join("../../rustfmt.toml");
	if root_rustfmt_toml.exists() {
		fs::copy(&root_rustfmt_toml, &node_template_rustfmt_toml_path)
			.expect("Copying rustfmt.toml.");
	}

	build_and_test(&node_template_path, &cargo_tomls);

	let output = GzEncoder::new(
		File::create(&options.output).expect("Creates output file"),
		Compression::default(),
	);
	let mut tar = tar::Builder::new(output);
	tar.append_dir_all("substrate-node-template", node_template_path)
		.expect("Writes substrate-node-template archive");
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn test_update_git_dependencies() {
		let toml = r#"
[dev-dependencies]
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }

[dependencies]
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
sp-io = { version = "7.0.0", path = "../../../../primitives/io" }
frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../../frame/system" }
"#;
		let mut cargo_toml = toml.parse::<CargoToml>().expect("invalid doc");
		let actual_deps = update_git_dependencies(&mut cargo_toml, |_| true);

		assert_eq!(actual_deps.len(), 3);
		assert_eq!(actual_deps.get("dependencies").unwrap().len(), 2);
		assert_eq!(actual_deps.get("dev-dependencies").unwrap().len(), 0);
		assert_eq!(
			actual_deps.get("dependencies").unwrap().get("sp-io").unwrap(),
			&Dependency {
				name: "sp-io".into(),
				version: Some("7.0.0".into()),
				default_features: None
			}
		);
		assert_eq!(
			actual_deps.get("dependencies").unwrap().get("frame-system").unwrap(),
			&Dependency {
				name: "frame-system".into(),
				version: Some("4.0.0-dev".into()),
				default_features: Some(false),
			}
		);

		let expected_toml = r#"
[dev-dependencies]
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }

[dependencies]
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
sp-io = { workspace = true }
frame-system = { workspace = true }
"#;
		assert_eq!(cargo_toml.to_string(), expected_toml);
	}

	#[test]
	fn test_update_root_cargo_toml() {
		let mut cargo_toml = CargoToml::new();
		update_root_cargo_toml(
			&mut cargo_toml,
			&vec!["node".into(), "pallets/template".into(), "runtime".into()],
			Dependencies::from([
				(
					"dependencies".into(),
					HashMap::from([
						(
							"sp-io".into(),
							Dependency {
								name: "sp-io".into(),
								version: Some("7.0.0".into()),
								default_features: None,
							},
						),
						(
							"frame-system".into(),
							Dependency {
								name: "frame-system".into(),
								version: Some("4.0.0-dev".into()),
								default_features: Some(true),
							},
						),
					]),
				),
				("dev-dependencies".into(), HashMap::new()),
				("build-dependencies".into(), HashMap::new()),
			]),
			"commit_id",
		);

		let expected_toml = r#"[workspace]
resolver = "2"
members = ["node", "pallets/template", "runtime"]

[workspace.package]
edition = "2021"

[workspace.dependencies]
frame-system = { version = "4.0.0-dev", default-features = true, git = "https://github.com/pezkuwichain/pezkuwi-sdk.git", rev = "commit_id" }
sp-io = { version = "7.0.0", git = "https://github.com/pezkuwichain/pezkuwi-sdk.git", rev = "commit_id" }

[workspace.lints]

[profile]

[profile.release]
panic = "unwind"
"#;
		assert_eq!(cargo_toml.to_string(), expected_toml);
	}
}
@@ -0,0 +1,186 @@
#!/usr/bin/env bash

# This file is part of Substrate.
# Copyright (C) Parity Technologies (UK) Ltd.
# SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# 	http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script has three parts which all use the Substrate runtime:
# - Pallet benchmarking to update the pallet weights
# - Overhead benchmarking for the Extrinsic and Block weights
# - Machine benchmarking
#
# Should be run on a reference machine to gain accurate benchmarks
# current reference machine: https://github.com/paritytech/substrate/pull/5848

while getopts 'bfp:v' flag; do
  case "${flag}" in
    b)
      # Skip build.
      skip_build='true'
      ;;
    f)
      # Fail if any sub-command in a pipe fails, not just the last one.
      set -o pipefail
      # Fail on undeclared variables.
      set -u
      # Fail if any sub-command fails.
      set -e
      # Fail on traps.
      set -E
      ;;
    p)
      # Start at pallet
      start_pallet="${OPTARG}"
      ;;
    v)
      # Echo all executed commands.
      set -x
      ;;
    *)
      # Exit early.
      echo "Bad options. Check Script."
      exit 1
      ;;
  esac
done


if [ "$skip_build" != true ]
then
  echo "[+] Compiling Substrate benchmarks..."
  cargo build --profile=production --locked --features=runtime-benchmarks --bin substrate-node
fi

# The executable to use.
# Parent directory because of the monorepo structure.
SUBSTRATE=../target/production/substrate-node

# Manually exclude some pallets.
EXCLUDED_PALLETS=(
  # Helper pallets
  "pallet_election_provider_support_benchmarking"
  # Pallets without automatic benchmarking
  "pallet_babe"
  "pallet_grandpa"
  "pallet_mmr"
  "pallet_offences"
  # Only used for testing, does not need real weights.
  "frame_benchmarking_pallet_pov"
)

# Load all pallet names in an array.
ALL_PALLETS=($(
  $SUBSTRATE benchmark pallet --list=pallets --no-csv-header --chain=dev
))

# Filter out the excluded pallets by concatenating the arrays and discarding duplicates.
PALLETS=($({ printf '%s\n' "${ALL_PALLETS[@]}" "${EXCLUDED_PALLETS[@]}"; } | sort | uniq -u))

echo "[+] Benchmarking ${#PALLETS[@]} Substrate pallets by excluding ${#EXCLUDED_PALLETS[@]} from ${#ALL_PALLETS[@]}."

# Define the error file.
ERR_FILE="benchmarking_errors.txt"
# Delete the error file before each run.
rm -f $ERR_FILE

# Benchmark each pallet.
for PALLET in "${PALLETS[@]}"; do
  # If `-p` is used, skip benchmarks until the start pallet.
  if [ ! -z "$start_pallet" ] && [ "$start_pallet" != "$PALLET" ]
  then
    echo "[+] Skipping ${PALLET}..."
    continue
  else
    unset start_pallet
  fi

  FOLDER="$(echo "${PALLET#*_}" | tr '_' '-')";
  WEIGHT_FILE="./frame/${FOLDER}/src/weights.rs"

  TEMPLATE_FILE_NAME="frame-weight-template.hbs"
  if [ $(cargo metadata --locked --format-version 1 --no-deps | jq --arg pallet "${PALLET//_/-}" -r '.packages[] | select(.name == $pallet) | .dependencies | any(.name == "pezkuwi-sdk-frame")') = true ]
  then
    TEMPLATE_FILE_NAME="frame-umbrella-weight-template.hbs"
  fi
  TEMPLATE_FILE="./.maintain/${TEMPLATE_FILE_NAME}"

  # Special handling of custom weight paths.
  if [ "$PALLET" == "frame_system_extensions" ] || [ "$PALLET" == "frame-system-extensions" ]
  then
    WEIGHT_FILE="./frame/system/src/extensions/weights.rs"
  elif [ "$PALLET" == "pallet_asset_conversion_tx_payment" ] || [ "$PALLET" == "pallet-asset-conversion-tx-payment" ]
  then
    WEIGHT_FILE="./frame/transaction-payment/asset-conversion-tx-payment/src/weights.rs"
  elif [ "$PALLET" == "pallet_asset_tx_payment" ] || [ "$PALLET" == "pallet-asset-tx-payment" ]
  then
    WEIGHT_FILE="./frame/transaction-payment/asset-tx-payment/src/weights.rs"
  elif [ "$PALLET" == "pallet_asset_conversion_ops" ] || [ "$PALLET" == "pallet-asset-conversion-ops" ]
  then
    WEIGHT_FILE="./frame/asset-conversion/ops/src/weights.rs"
  fi

  echo "[+] Benchmarking $PALLET with weight file $WEIGHT_FILE";

  OUTPUT=$(
    $SUBSTRATE benchmark pallet \
    --chain=dev \
    --steps=50 \
    --repeat=20 \
    --pallet="$PALLET" \
    --extrinsic="*" \
    --wasm-execution=compiled \
    --heap-pages=4096 \
    --output="$WEIGHT_FILE" \
    --header="./HEADER-APACHE2" \
    --template="$TEMPLATE_FILE" 2>&1
  )
  if [ $? -ne 0 ]; then
    echo "$OUTPUT" >> "$ERR_FILE"
    echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..."
  fi
done

# Update the block and extrinsic overhead weights.
echo "[+] Benchmarking block and extrinsic overheads..."
OUTPUT=$(
  $SUBSTRATE benchmark overhead \
  --chain=dev \
  --wasm-execution=compiled \
  --weight-path="./frame/support/src/weights/" \
  --header="./HEADER-APACHE2" \
  --warmup=10 \
  --repeat=100 2>&1
)
if [ $? -ne 0 ]; then
  echo "$OUTPUT" >> "$ERR_FILE"
  echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..."
fi

echo "[+] Benchmarking the machine..."
OUTPUT=$(
  $SUBSTRATE benchmark machine --chain=dev 2>&1
)
if [ $? -ne 0 ]; then
  # Do not write the error to the error file since it is not a benchmarking error.
  echo "[-] Failed the machine benchmark:\n$OUTPUT"
fi

# Check if the error file exists.
if [ -f "$ERR_FILE" ]; then
  echo "[-] Some benchmarks failed. See: $ERR_FILE"
  exit 1
else
  echo "[+] All benchmarks passed."
  exit 0
fi
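As a usage sketch (the script path is an assumption; the flags correspond to the `getopts` block at the top of the script):

```bash
# Benchmark all pallets starting at pallet_balances, skipping the build (-b),
# with strict error handling (-f) and command echoing (-v).
./scripts/run_all_benchmarks.sh -b -f -v -p pallet_balances
```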