Fix node-metrics test (#1287)

This commit is contained in:
Bastian Köcher
2023-08-30 09:48:29 +02:00
committed by GitHub
parent d81c8cbaa7
commit cbd745c846
78 changed files with 2 additions and 4295 deletions
+2
View File
@@ -196,6 +196,8 @@ test-node-metrics:
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
script:
# Build the required workers.
- cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker --profile testnet --verbose --locked
- mkdir -p artifacts
- time cargo test --profile testnet
--locked
-4
View File
@@ -1,4 +0,0 @@
changelog.md
*.json
release*.md
.env
-23
View File
@@ -1,23 +0,0 @@
# frozen_string_literal: true
# Dependencies for the changelog generation tooling (see bin/changelog).
source 'https://rubygems.org'
git_source(:github) { |repo_name| "https://github.com/#{repo_name}" }
# GitHub REST API client, used by SubRef.
gem 'octokit', '~> 4'
gem 'git_diff_parser', '~> 3'
# Parses Cargo.lock (TOML) when resolving dependency git refs.
gem 'toml', '~> 0.3.0'
gem 'rake', group: :dev
gem 'optparse', '~> 0.1.1'
gem 'logger', '~> 1.4'
# Pinned to an exact version — presumably intentional; confirm before bumping.
gem 'changelogerator', '0.10.1'
gem 'test-unit', group: :dev
gem 'rubocop', group: :dev, require: false
@@ -1,84 +0,0 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.0)
public_suffix (>= 2.0.2, < 5.0)
ast (2.4.2)
changelogerator (0.10.1)
git_diff_parser (~> 3)
octokit (~> 4)
faraday (1.8.0)
faraday-em_http (~> 1.0)
faraday-em_synchrony (~> 1.0)
faraday-excon (~> 1.1)
faraday-httpclient (~> 1.0.1)
faraday-net_http (~> 1.0)
faraday-net_http_persistent (~> 1.1)
faraday-patron (~> 1.0)
faraday-rack (~> 1.0)
multipart-post (>= 1.2, < 3)
ruby2_keywords (>= 0.0.4)
faraday-em_http (1.0.0)
faraday-em_synchrony (1.0.0)
faraday-excon (1.1.0)
faraday-httpclient (1.0.1)
faraday-net_http (1.0.1)
faraday-net_http_persistent (1.2.0)
faraday-patron (1.0.0)
faraday-rack (1.0.0)
git_diff_parser (3.2.0)
logger (1.4.4)
multipart-post (2.1.1)
octokit (4.21.0)
faraday (>= 0.9)
sawyer (~> 0.8.0, >= 0.5.3)
optparse (0.1.1)
parallel (1.21.0)
parser (3.0.2.0)
ast (~> 2.4.1)
parslet (2.0.0)
power_assert (2.0.1)
public_suffix (4.0.6)
rainbow (3.0.0)
rake (13.0.6)
regexp_parser (2.1.1)
rexml (3.2.5)
rubocop (1.23.0)
parallel (~> 1.10)
parser (>= 3.0.0.0)
rainbow (>= 2.2.2, < 4.0)
regexp_parser (>= 1.8, < 3.0)
rexml
rubocop-ast (>= 1.12.0, < 2.0)
ruby-progressbar (~> 1.7)
unicode-display_width (>= 1.4.0, < 3.0)
rubocop-ast (1.13.0)
parser (>= 3.0.1.1)
ruby-progressbar (1.11.0)
ruby2_keywords (0.0.5)
sawyer (0.8.2)
addressable (>= 2.3.5)
faraday (> 0.8, < 2.0)
test-unit (3.5.1)
power_assert
toml (0.3.0)
parslet (>= 1.8.0, < 3.0.0)
unicode-display_width (2.1.0)
PLATFORMS
x86_64-darwin-20
x86_64-darwin-22
DEPENDENCIES
changelogerator (= 0.10.1)
git_diff_parser (~> 3)
logger (~> 1.4)
octokit (~> 4)
optparse (~> 0.1.1)
rake
rubocop
test-unit
toml (~> 0.3.0)
BUNDLED WITH
2.4.6
-80
View File
@@ -1,80 +0,0 @@
# Changelog
Currently, the changelog is built locally. It will be moved to CI once labels stabilize.
For now, a bit of preparation is required before you can run the script:
- fetch the srtool digests
- store them under the `digests` folder as `<chain>-srtool-digest.json`
- ensure the `.env` file is up to date with correct information. See below for an example
The content of the release notes is generated from the template files under the `scripts/ci/changelog/templates` folder. For readability and maintenance, the template is split into several small snippets.
Run:
```
./bin/changelog <ref_until> [<ref_since>]
```
For instance:
```
./bin/changelog v0.9.18
```
A file called `release-notes.md` will be generated and can be used for the release.
## ENV
You may use the following ENV for testing:
```
RUSTC_STABLE="rustc 1.56.1 (59eed8a2a 2021-11-01)"
RUSTC_NIGHTLY="rustc 1.57.0-nightly (51e514c0f 2021-09-12)"
PRE_RELEASE=true
HIDE_SRTOOL_SHELL=true
DEBUG=1
NO_CACHE=1
```
## Considered labels
The following list will likely evolve over time and it will be hard to keep it in sync.
In any case, if you want to find all the labels that are used, search for `meta` in the templates.
Currently, the considered labels are:
- Priority: C<N> labels
- Audit: D<N> labels
- E4 => new host function
- E2 => database migration
- B0 => silent, not showing up
- B1 => noteworthy
- T0 => node
- T1 => runtime
Note that labels with the same letter are mutually exclusive.
A PR should not have both `B0` and `B5`, or both `C1` and `C9`. In case of conflicts, the template will
decide which label will be considered.
## Dev and debugging
### Hot Reload
The following command allows **Hot Reload**:
```
fswatch templates -e ".*\.md$" | xargs -n1 -I{} ./bin/changelog v0.9.18
```
### Caching
By default, if the changelog data from Github is already present, the calls to the Github API will be skipped
and the local version of the data will be used. This is much faster.
If you know that some labels have changed in Github, you will want to refresh the data.
You can then either delete manually the `<chain>.json` file or `export NO_CACHE=1` to force refreshing the data.
## Full PR list
At times, it may be useful to get a raw full PR list.
In order to produce this list, you first need to fetch the latest `context.json` from the `release-notes-context` artifacts you can find [here](https://github.com/paritytech/polkadot/actions/workflows/release-30_publish-draft-release.yml). You may store this `context.json` under `scripts/ci/changelog`.
Using the `full_pr_list.md.tera` template, you can generate the `raw` list of changes:
```
cd scripts/ci/changelog
tera --env --env-key env --template templates/full_pr_list.md.tera context.json
```
-105
View File
@@ -1,105 +0,0 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# call for instance as:
# ./bin/changelog <to> [<from>] [<output_file>]
# for instance, for the release notes of v1.2.3:
# ./bin/changelog v1.2.3
# or
# ./bin/changelog v1.2.3 v1.2.2
#
# You may set the ENV NO_CACHE to force fetching from Github
# You should also ensure you set the ENV: GITHUB_TOKEN
require_relative '../lib/changelog'
require 'logger'
logger = Logger.new($stdout)
logger.level = Logger::DEBUG
logger.debug('Starting')
# Record the external tool's version so runs can be reproduced from the logs.
changelogerator_version = `changelogerator --version`
logger.debug(changelogerator_version)
owner = 'paritytech'
repo = 'polkadot'
gh_polkadot = SubRef.new(format('%<owner>s/%<repo>s', { owner: owner, repo: repo }))
# Default range: from the last published release tag up to HEAD.
last_release_ref = gh_polkadot.get_last_ref()
polkadot_ref2 = ARGV[0] || 'HEAD'
polkadot_ref1 = ARGV[1] || last_release_ref
output = ARGV[2] || 'release-notes.md'
# Exported for the tera templates, which read REF1/REF2 from the environment.
ENV['REF1'] = polkadot_ref1
ENV['REF2'] = polkadot_ref2
# Map each polkadot ref to the substrate revision pinned via the sp-io crate.
substrate_ref1 = gh_polkadot.get_dependency_reference(polkadot_ref1, 'sp-io')
substrate_ref2 = gh_polkadot.get_dependency_reference(polkadot_ref2, 'sp-io')
logger.debug("Polkadot from: #{polkadot_ref1}")
logger.debug("Polkadot to: #{polkadot_ref2}")
logger.debug("Substrate from: #{substrate_ref1}")
logger.debug("Substrate to: #{substrate_ref2}")
substrate_data = 'substrate.json'
polkadot_data = 'polkadot.json'
logger.debug("Using SUBSTRATE: #{substrate_data}")
logger.debug("Using POLKADOT: #{polkadot_data}")
logger.warn('NO_CACHE set') if ENV['NO_CACHE']
# Fetch changelog data for polkadot, unless a cached copy exists and is wanted.
if ENV['NO_CACHE'] || !File.file?(polkadot_data)
logger.debug(format('Fetching data for Polkadot into %s', polkadot_data))
cmd = format('changelogerator %<owner>s/%<repo>s -f %<from>s -t %<to>s > %<output>s',
{ owner: owner, repo: 'polkadot', from: polkadot_ref1, to: polkadot_ref2, output: polkadot_data })
system(cmd)
else
logger.debug("Re-using:#{polkadot_data}")
end
# Same caching strategy for the substrate changelog data.
if ENV['NO_CACHE'] || !File.file?(substrate_data)
logger.debug(format('Fetching data for Substrate into %s', substrate_data))
cmd = format('changelogerator %<owner>s/%<repo>s -f %<from>s -t %<to>s > %<output>s',
{ owner: owner, repo: 'substrate', from: substrate_ref1, to: substrate_ref2, output: substrate_data })
system(cmd)
else
logger.debug("Re-using:#{substrate_data}")
end
# Per-runtime srtool digest files; each overridable via ENV (useful for tests).
KUSAMA_DIGEST = ENV['KUSAMA_DIGEST'] || 'digests/kusama_srtool_output.json'
WESTEND_DIGEST = ENV['WESTEND_DIGEST'] || 'digests/westend_srtool_output.json'
ROCOCO_DIGEST = ENV['ROCOCO_DIGEST'] || 'digests/rococo_srtool_output.json'
POLKADOT_DIGEST = ENV['POLKADOT_DIGEST'] || 'digests/polkadot_srtool_output.json'
# Here we compose all the pieces together into one
# single big json file.
cmd = format('jq \
--slurpfile substrate %s \
--slurpfile polkadot %s \
--slurpfile srtool_kusama %s \
--slurpfile srtool_westend %s \
--slurpfile srtool_rococo %s \
--slurpfile srtool_polkadot %s \
-n \'{
substrate: $substrate[0],
polkadot: $polkadot[0],
srtool: [
{ name: "kusama", data: $srtool_kusama[0] },
{ name: "westend", data: $srtool_westend[0] },
{ name: "rococo", data: $srtool_rococo[0] },
{ name: "polkadot", data: $srtool_polkadot[0] }
] }\' > context.json', substrate_data, polkadot_data,
KUSAMA_DIGEST,
WESTEND_DIGEST,
ROCOCO_DIGEST,
POLKADOT_DIGEST)
system(cmd)
# Render the final release notes from the tera templates using context.json.
cmd = format('tera --env --env-key env --include-path templates \
--template templates/template.md.tera context.json > %s', output)
system(cmd)
@@ -1 +0,0 @@
*.json
@@ -1,38 +0,0 @@
# frozen_string_literal: true
# A Class to find Substrate references
class SubRef
require 'octokit'
require 'toml'
attr_reader :client, :repository
# github_repo: e.g. 'paritytech/polkadot'. Requires ENV['GITHUB_TOKEN']
# for authenticated GitHub API access.
def initialize(github_repo)
@client = Octokit::Client.new(
access_token: ENV['GITHUB_TOKEN']
)
@repository = @client.repository(github_repo)
end
# This function checks the Cargo.lock of a given
# Rust project, for a given package, and fetches
# the dependency git ref.
# NOTE(review): uses Base64 without an explicit `require 'base64'` —
# presumably loaded transitively via octokit; confirm on newer Rubies
# where base64 is a bundled gem.
def get_dependency_reference(ref, package)
cargo = TOML::Parser.new(
Base64.decode64(
@client.contents(
@repository.full_name,
path: 'Cargo.lock',
query: { ref: ref.to_s }
).content
)
).parsed
# A lock entry's source looks like '<git url>#<commit sha>'; keep the sha.
cargo['package'].find { |p| p['name'] == package }['source'].split('#').last
end
# Get the git ref of the last release for the repo.
# repo is given in the form paritytech/polkadot
def get_last_ref()
'refs/tags/' + @client.latest_release(@repository.full_name).tag_name
end
end
@@ -1,10 +0,0 @@
{# This file uses the Markdown format with additional templating such as this comment. -#}
{# Such a comment will not show up in the rendered release notes. -#}
{# The content of this file (if any) will be inserted at the top of the release notes -#}
{# and generated for each new release candidate. -#}
{# Ensure you leave an empty line at both top and bottom of this file. -#}
<!-- Such a comment will be rendered but remain invisible in the rendered markdown -->
<!-- Edit below this line -->
<!-- Edit above this line -->
@@ -1,43 +0,0 @@
{# This macro shows ONE change #}
{%- macro change(c, cml="[C]", dot="[P]", sub="[S]") -%}
{%- if c.meta.C and c.meta.C.agg.max >= 7 -%}
{%- set prio = " ‼️ HIGH" -%}
{%- elif c.meta.C and c.meta.C.agg.max >= 3 -%}
{%- set prio = " ❗️ Medium" -%}
{%- elif c.meta.C and c.meta.C.agg.max < 3 -%}
{%- set prio = " Low" -%}
{%- else -%}
{%- set prio = "" -%}
{%- endif -%}
{%- set audit = "" -%}
{%- if c.meta.D and c.meta.D.D1 -%}
{%- set audit = "✅ audited " -%}
{%- elif c.meta.D and c.meta.D.D2 -%}
{%- set audit = "✅ trivial " -%}
{%- elif c.meta.D and c.meta.D.D3 -%}
{%- set audit = "✅ trivial " -%}
{%- elif c.meta.D and c.meta.D.D5 -%}
{%- set audit = "⏳ pending non-critical audit " -%}
{%- else -%}
{%- set audit = "" -%}
{%- endif -%}
{%- if c.html_url is containing("polkadot") -%}
{%- set repo = dot -%}
{%- elif c.html_url is containing("substrate") -%}
{%- set repo = sub -%}
{%- else -%}
{%- set repo = " " -%}
{%- endif -%}
{%- if c.meta.T and c.meta.T.T6 -%}
{%- set xcm = " [✉️ XCM]" -%}
{%- else -%}
{%- set xcm = "" -%}
{%- endif -%}
{{- repo }} {{ audit }}[`#{{c.number}}`]({{c.html_url}}) {{- prio }} - {{ c.title | capitalize | truncate(length=120, end="…") }}{{xcm }}
{%- endmacro change -%}
@@ -1,15 +0,0 @@
{# This include generates the section showing the changes #}
## Changes
### Legend
- {{ DOT }} Polkadot
- {{ SUB }} Substrate
{% include "changes_client.md.tera" %}
{% include "changes_runtime.md.tera" %}
{% include "changes_api.md.tera" %}
{% include "changes_misc.md.tera" %}
@@ -1,17 +0,0 @@
{% import "change.md.tera" as m_c -%}
### API
{#- The changes are sorted by merge date #}
{%- for pr in changes | sort(attribute="merged_at") %}
{%- if pr.meta.B %}
{%- if pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.T and pr.meta.T.T2 and not pr.title is containing("ompanion") %}
- {{ m_c::change(c=pr) }}
{%- endif -%}
{% endif -%}
{% endif -%}
{% endfor %}
@@ -1,17 +0,0 @@
{% import "change.md.tera" as m_c -%}
### Client
{#- The changes are sorted by merge date #}
{%- for pr in changes | sort(attribute="merged_at") %}
{%- if pr.meta.B %}
{%- if pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.T and pr.meta.T.T0 and not pr.title is containing("ompanion") %}
- {{ m_c::change(c=pr) }}
{%- endif -%}
{% endif -%}
{% endif -%}
{% endfor %}
@@ -1,42 +0,0 @@
{%- import "change.md.tera" as m_c -%}
{%- set_global misc_count = 0 -%}
{#- First pass to count #}
{%- for pr in changes -%}
{%- if pr.meta.B %}
{%- if pr.meta.B.B0 -%}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.T and pr.meta.T.agg.max > 2 %}
{%- set_global misc_count = misc_count + 1 -%}
{%- endif -%}
{% endif -%}
{% endif -%}
{% endfor -%}
<!-- Found {{ misc_count }} misc PRs -->
{%- if misc_count > 0 %}
### Misc
{% if misc_count > 10 %}
There are other misc. changes. You can expand the list below to view them all.
<details><summary>Other misc. changes</summary>
{% endif -%}
{#- The changes are sorted by merge date #}
{%- for pr in changes | sort(attribute="merged_at") %}
{%- if pr.meta.B and not pr.title is containing("ompanion") %}
{%- if pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.T and pr.meta.T.agg.max > 2 %}
- {{ m_c::change(c=pr) }}
{%- endif -%}
{% endif -%}
{% endif -%}
{% endfor %}
{% if misc_count > 10 %}
</details>
{% endif -%}
{% endif -%}
@@ -1,19 +0,0 @@
{%- import "change.md.tera" as m_c -%}
### Runtime
{#- The changes are sorted by merge date -#}
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.B -%}
{%- if pr.meta.B.B0 -%}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.T and pr.meta.T.T1 and not pr.title is containing("ompanion") %}
- {{ m_c::change(c=pr) }}
{%- endif -%}
{%- endif -%}
{%- endif -%}
{%- endfor %}
@@ -1,7 +0,0 @@
## Rust compiler versions
This release was built and tested against the following versions of `rustc`.
Other versions may work.
- Rust Stable: `{{ env.RUSTC_STABLE }}`
- Rust Nightly: `{{ env.RUSTC_NIGHTLY }}`
@@ -1,8 +0,0 @@
{%- set to_ignore = changes | filter(attribute="meta.B.B0") %}
<!--
changes:
- total: {{ changes | length }}
- silent: {{ to_ignore | length }}
- remaining: {{ changes | length - to_ignore | length }}
-->
@@ -1,11 +0,0 @@
## Docker image
The docker image for this release can be found at [Docker hub](https://hub.docker.com/r/parity/polkadot/tags?page=1&ordering=last_updated)
(It will be available a few minutes after the release has been published).
You may pull it using:
```
docker pull parity/polkadot:latest
```
@@ -1,16 +0,0 @@
{# This is a helper template to get the FULL PR list #}
{# It is not used in the release notes #}
# PR list
## substrate
{%- for change in substrate.changes %}
- [S] [`{{ change.number }}`]({{ change.html_url }}) - {{ change.title }}
{%- endfor %}
## polkadot
{%- for change in polkadot.changes %}
- [P] [`{{ change.number }}`]({{ change.html_url }}) - {{ change.title }}
{%- endfor %}
@@ -1,22 +0,0 @@
{% import "high_priority.md.tera" as m_p -%}
## Upgrade Priority
{%- set polkadot_prio = 0 -%}
{%- set substrate_prio = 0 -%}
{# We fetch the various priorities #}
{%- if polkadot.meta.C -%}
{%- set polkadot_prio = polkadot.meta.C.max -%}
{%- endif -%}
{%- if substrate.meta.C -%}
{%- set substrate_prio = substrate.meta.C.max -%}
{%- endif -%}
{# We compute the global priority #}
{%- set global_prio = polkadot_prio -%}
{%- if substrate_prio > global_prio -%}
{%- set global_prio = substrate_prio -%}
{%- endif -%}
{#- We show the result #}
{{ m_p::high_priority(p=global_prio, changes=changes) }}
@@ -1,38 +0,0 @@
{%- import "change.md.tera" as m_c -%}
{# This macro convert a priority level into readable output #}
{%- macro high_priority(p, changes) -%}
{%- if p >= 7 -%}
{%- set prio = "‼️ HIGH" -%}
{%- set text = "This is a **high priority** release and you must upgrade as soon as possible." -%}
{%- elif p >= 3 -%}
{%- set prio = "❗️ Medium" -%}
{%- set text = "This is a medium priority release and you should upgrade in a timely manner." -%}
{%- else -%}
{%- set prio = "Low" -%}
{%- set text = "This is a low priority release and you may upgrade at your convenience." -%}
{%- endif %}
<!-- detected max prio is: {{p}} -->
{%- if prio %}
{{prio}}: {{text}}
{% if p >= 3 %}
The changes motivating this priority level are:
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.C -%}
{%- if pr.meta.C.agg.max >= p %}
- {{ m_c::change(c=pr) }}
{%- if pr.meta.T and pr.meta.T.T1 %} (RUNTIME)
{% endif %}
{%- endif -%}
{%- endif -%}
{%- endfor -%}
{%- else -%}
<!-- No relevant Priority label has been detected for p={{ p }} -->
{%- endif -%}
{%- endif -%}
{%- endmacro priority -%}
@@ -1,12 +0,0 @@
{%- import "change.md.tera" as m_c -%}
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.B and pr.meta.B.B0 -%}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.E and pr.meta.E.E3 -%}
- {{ m_c::change(c=pr) }}
{% endif -%}
{% endif -%}
{%- endfor -%}
@@ -1,44 +0,0 @@
{%- import "change.md.tera" as m_c -%}
{%- set_global host_fn_count = 0 -%}
{%- set_global upgrade_first = 0 -%}
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.B and pr.meta.B.B0 -%}
{#- We skip silent ones -#}
{%- else -%}
{%- if pr.meta.E and pr.meta.E.E3 -%}
{%- set_global host_fn_count = host_fn_count + 1 -%}
- {{ m_c::change(c=pr) }}
{% endif -%}
{%- if pr.meta.E and pr.meta.E.E4 -%}
{%- set_global upgrade_first = upgrade_first + 1 -%}
- {{ m_c::change(c=pr) }}
{% endif -%}
{% endif -%}
{%- endfor -%}
<!-- {{ upgrade_first }} changes require node upgrade -->
{%- if upgrade_first != 0 %}
## Node upgrade required
⚠️ There is a runtime change that will require nodes to be upgraded BEFORE the runtime upgrade.
⚠️ It is critical that you update your client before the chain switches to the new runtime.
{%- endif %}
<!-- {{ host_fn_count }} host functions were detected -->
## Host functions
{% if host_fn_count == 0 %}
️ This release does not contain any change related to host functions.
{% elif host_fn_count == 1 -%}
{# ---- #}
️ The runtimes in this release contain one change related to **host function**s:
{% include "host_functions-list.md.tera" -%}
{%- else -%}
️ The runtimes in this release contain {{ host_fn_count }} changes related to **host function**s:
{% include "host_functions-list.md.tera" -%}
{%- endif %}
@@ -1,30 +0,0 @@
{% import "change.md.tera" as m_c %}
{%- set_global db_migration_count = 0 -%}
{%- for pr in changes -%}
{%- if pr.meta.B and pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- elif pr.meta.E and pr.meta.E.E1 -%}
{%- set_global db_migration_count = db_migration_count + 1 -%}
{%- endif -%}
{%- endfor %}
## Database Migrations
Database migrations are operations upgrading the database to the latest state.
Some migrations may break compatibility, making a backup of your database is highly recommended.
{% if db_migration_count == 0 -%}
️ There is no database migration in this release.
{%- elif db_migration_count == 1 -%}
⚠️ There is one database migration in this release:
{%- else -%}
⚠️ There are {{ db_migration_count }} database migrations in this release:
{%- endif %}
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.B and pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- elif pr.meta.E and pr.meta.E.E1 -%}
- {{ m_c::change(c=pr) }}
{% endif -%}
{% endfor -%}
@@ -1,29 +0,0 @@
{%- import "change.md.tera" as m_c %}
{%- set_global runtime_migration_count = 0 -%}
{%- for pr in changes -%}
{%- if pr.meta.B and pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- elif pr.meta.E and pr.meta.E.E0 -%}
{%- set_global runtime_migration_count = runtime_migration_count + 1 -%}
{%- endif -%}
{%- endfor %}
## Runtime Migrations
Runtime migrations are operations running once during a runtime upgrade.
{% if runtime_migration_count == 0 -%}
️ There is no runtime migration in this release.
{%- elif runtime_migration_count == 1 -%}
⚠️ There is one runtime migration in this release:
{%- else -%}
⚠️ There are {{ runtime_migration_count }} runtime migrations in this release:
{%- endif %}
{% for pr in changes | sort(attribute="merged_at") -%}
{%- if pr.meta.B and pr.meta.B.B0 %}
{#- We skip silent ones -#}
{%- elif pr.meta.E and pr.meta.E.E0 -%}
- {{ m_c::change(c=pr) }}
{% endif -%}
{% endfor -%}
@@ -1,11 +0,0 @@
{%- if env.PRE_RELEASE == "true" -%}
<details><summary>⚠️ This is a pre-release</summary>
**Release candidates** are **pre-releases** and may not be final.
Although they are reasonably tested, there may be additional changes or issues
before an official release is tagged. Use at your own discretion, and consider
only using final releases on critical production infrastructure.
</details>
{% else -%}
<!-- NOT a pre-release-->
{%- endif %}
@@ -1,28 +0,0 @@
{# This macro shows one runtime #}
{%- macro runtime(runtime) -%}
### {{ runtime.name | capitalize }}
{%- if runtime.data.runtimes.compressed.subwasm.compression.compressed %}
{%- set compressed = "Yes" %}
{%- else %}
{%- set compressed = "No" %}
{%- endif %}
{%- set comp_ratio = 100 - (runtime.data.runtimes.compressed.subwasm.compression.size_compressed / runtime.data.runtimes.compressed.subwasm.compression.size_decompressed *100) %}
<!-- commit : {{ runtime.data.commit }} -->
<!-- tag : {{ runtime.data.tag }} -->
<!-- branch : {{ runtime.data.branch }} -->
<!-- pkg : {{ runtime.data.pkg }} -->
```
🏋️ Runtime Size: {{ runtime.data.runtimes.compressed.subwasm.size | filesizeformat }} ({{ runtime.data.runtimes.compressed.subwasm.size }} bytes)
🔥 Core Version: {{ runtime.data.runtimes.compressed.subwasm.core_version.specName }}-{{ runtime.data.runtimes.compressed.subwasm.core_version.specVersion }} ({{ runtime.data.runtimes.compressed.subwasm.core_version.implName }}-{{ runtime.data.runtimes.compressed.subwasm.core_version.implVersion }}.tx{{ runtime.data.runtimes.compressed.subwasm.core_version.transactionVersion }}.au{{ runtime.data.runtimes.compressed.subwasm.core_version.authoringVersion }})
🗜 Compressed: {{ compressed }}: {{ comp_ratio | round(method="ceil", precision=2) }}%
🎁 Metadata version: V{{ runtime.data.runtimes.compressed.subwasm.metadata_version }}
🗳️ system.setCode hash: {{ runtime.data.runtimes.compressed.subwasm.proposal_hash }}
🗳️ authorizeUpgrade hash: {{ runtime.data.runtimes.compressed.subwasm.parachain_authorize_upgrade_hash }}
🗳️ Blake2-256 hash: {{ runtime.data.runtimes.compressed.subwasm.blake2_256 }}
📦 IPFS: {{ runtime.data.runtimes.compressed.subwasm.ipfs_hash }}
```
{%- endmacro runtime %}
@@ -1,19 +0,0 @@
{# This include shows the list and details of the runtimes #}
{%- import "runtime.md.tera" as m_r -%}
{# --- #}
## Runtimes
{% set rtm = srtool[0] -%}
The information about the runtimes included in this release can be found below.
The runtimes have been built using [{{ rtm.data.gen }}](https://github.com/paritytech/srtool) and `{{ rtm.data.rustc }}`.
{%- for runtime in srtool | sort(attribute="name") %}
{%- set HIDE_VAR = "HIDE_SRTOOL_" ~ runtime.name | upper %}
{%- if not env is containing(HIDE_VAR) %}
{{ m_r::runtime(runtime=runtime) }}
{%- endif %}
{%- endfor %}
@@ -1,37 +0,0 @@
{# This is the entry point of the template -#}
<!-- repository: {{ polkadot.repository.name }} -->
{% include "pre_release.md.tera" -%}
{% if env.PRE_RELEASE == "true" -%}
This pre-release contains the changes from `{{ env.REF1 | replace(from="refs/tags/", to="") }}` to `{{ env.REF2 | replace(from="refs/tags/", to="") }}`.
{%- else -%}
This release contains the changes from `{{ env.REF1 | replace(from="refs/tags/", to="") }}` to `{{ env.REF2 | replace(from="refs/tags/", to="") }}`.
{% endif -%}
{%- set changes = polkadot.changes | concat(with=substrate.changes) -%}
{%- include "debug.md.tera" -%}
{%- set CML = "[C]" -%}
{%- set DOT = "[P]" -%}
{%- set SUB = "[S]" -%}
{# -- Manual free notes section -- #}
{% include "_free_notes.md.tera" -%}
{# -- Important automatic section -- #}
{% include "global_priority.md.tera" -%}
{% include "host_functions.md.tera" -%}
{% include "migrations-db.md.tera" -%}
{% include "migrations-runtime.md.tera" -%}
{# --------------------------------- #}
{% include "compiler.md.tera" -%}
{% include "runtimes.md.tera" -%}
{% include "changes.md.tera" -%}
{% include "docker_image.md.tera" -%}
@@ -1,23 +0,0 @@
# frozen_string_literal: true
require_relative '../lib/changelog'
require 'test/unit'
# Tests for SubRef. These hit the live GitHub API and therefore need a
# valid GITHUB_TOKEN in the environment to run.
class TestChangelog < Test::Unit::TestCase
# A known polkadot commit should resolve 'sc-cli' to a fixed substrate sha.
def test_get_dep_ref_polkadot
c = SubRef.new('paritytech/polkadot')
ref = '13c2695'
package = 'sc-cli'
result = c.get_dependency_reference(ref, package)
assert_equal('7db0768a85dc36a3f2a44d042b32f3715c00a90d', result)
end
# An unknown ref should raise rather than return garbage.
def test_get_dep_ref_invalid_ref
c = SubRef.new('paritytech/polkadot')
ref = '9999999'
package = 'sc-cli'
assert_raise do
c.get_dependency_reference(ref, package)
end
end
end
-265
View File
@@ -1,265 +0,0 @@
#!/bin/sh
api_base="https://api.github.com/repos"
# Function to take 2 git tags/commits and get any lines from commit messages
# that contain something that looks like a PR reference: e.g., (#1234)
# Usage: sanitised_git_logs <from_ref> <to_ref>
# Outputs: one matching commit subject per line, leading '* ' stripped.
sanitised_git_logs(){
git --no-pager log --pretty=format:"%s" "$1...$2" |
# Only find messages referencing a PR
grep -E '\(#[0-9]+\)' |
# Strip any asterisks
sed 's/^* //g'
}
# Checks whether a tag on github has been verified
# repo: 'organization/repo'
# tagver: 'v1.2.3'
# Usage: check_tag $repo $tagver
# Returns: 0 if the tag exists and its signature is verified,
#          1 if it exists but is not verified,
#          2 if the tag (or its object SHA) could not be found.
check_tag () {
repo=$1
tagver=$2
# Prefer the privileged release token when present; fall back to the PR token.
if [ -n "$GITHUB_RELEASE_TOKEN" ]; then
echo '[+] Fetching tag using privileged token'
tag_out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver")
else
echo '[+] Fetching tag using unprivileged token'
tag_out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver")
fi
tag_sha=$(echo "$tag_out" | jq -r .object.sha)
object_url=$(echo "$tag_out" | jq -r .object.url)
if [ "$tag_sha" = "null" ]; then
return 2
fi
echo "[+] Tag object SHA: $tag_sha"
# NOTE(review): this second request always uses GITHUB_RELEASE_TOKEN, even
# when the tag above was fetched with the PR token — confirm intentional.
verified_str=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$object_url" | jq -r .verification.verified)
if [ "$verified_str" = "true" ]; then
# Verified, everything is good
return 0
else
# Not verified. Bad juju.
return 1
fi
}
# Checks whether a given PR has a given label.
# repo: 'organization/repo'
# pr_id: 12345
# label: B1-silent
# Usage: has_label $repo $pr_id $label
# Returns: 0 when the PR carries the label, non-zero otherwise.
has_label(){
repo="$1"
pr_id="$2"
label="$3"
# These will exist if the function is called in Gitlab.
# If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set
# already.
if [ -n "$GITHUB_RELEASE_TOKEN" ]; then
GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN"
elif [ -n "$GITHUB_PR_TOKEN" ]; then
GITHUB_TOKEN="$GITHUB_PR_TOKEN"
fi
out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id")
# Succeeds only when jq finds a label object whose name matches; tr strips
# CR/LF so jq receives a single well-formed JSON document.
[ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ]
}
# Triggers the GitLab pipeline job that applies label ${1} to the PR
# identified by CI_COMMIT_REF_NAME.
# Usage: github_label <label-name>
# Reads: CI_JOB_TOKEN, CI_COMMIT_REF_NAME, GITLAB_API, GITHUB_API_PROJECT
github_label () {
echo
echo "# run github-api job for labeling it ${1}"
curl -sS -X POST \
-F "token=${CI_JOB_TOKEN}" \
-F "ref=master" \
-F "variables[LABEL]=${1}" \
-F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \
-F "variables[PROJECT]=paritytech/polkadot" \
"${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline"
}
# Formats a message into a JSON string for posting to Matrix
# message: 'any plaintext message'
# formatted_message: '<strong>optional message formatted in <em>html</em></strong>'
# Usage: structure_message $content $formatted_content (optional)
# Outputs: the JSON message body on stdout.
structure_message() {
if [ -z "$2" ]; then
# jq's {$body} shorthand expands to {"body": $body}; stdin is /dev/null
# because only the --arg values are used to build the object.
body=$(jq -Rs --arg body "$1" '{"msgtype": "m.text", $body}' < /dev/null)
else
body=$(jq -Rs --arg body "$1" --arg formatted_body "$2" '{"msgtype": "m.text", $body, "format": "org.matrix.custom.html", $formatted_body}' < /dev/null)
fi
echo "$body"
}
# Post a message to a matrix room
# body: '{body: "JSON string produced by structure_message"}'
# room_id: !fsfSRjgjBWEWffws:matrix.parity.io
# access_token: see https://matrix.org/docs/guides/client-server-api/
# Usage: send_message $body (json formatted) $room_id $access_token
# NOTE: the token ends up in the URL/argv — avoid tracing this with set -x.
send_message() {
curl -XPOST -d "$1" "https://m.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3"
}
# Pretty-printing functions
# boldprint: print each argument as an ANSI-bold line framed by '|' rows.
boldprint () { printf "|\n| \033[1m%s\033[0m\n|\n" "${@}"; }
# boldcat: reprint every line of stdin ANSI-bold, framed by '|' rows.
boldcat () { printf "|\n"; while read -r l; do printf "| \033[1m%s\033[0m\n" "${l}"; done; printf "|\n" ; }
# Exit the calling script early (status 0) when the current PR is a
# Substrate companion PR — its build already happened in substrate.
# Reads: CI_COMMIT_REF_NAME (the PR number), GITHUB_PR_TOKEN
skip_if_companion_pr() {
url="https://api.github.com/repos/paritytech/polkadot/pulls/${CI_COMMIT_REF_NAME}"
echo "[+] API URL: $url"
pr_title=$(curl -sSL -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .title)
echo "[+] PR title: $pr_title"
# A title starting with "companion" (case-insensitive) marks a companion PR.
if echo "$pr_title" | grep -qi '^companion'; then
echo "[!] PR is a companion PR. Build is already done in substrate"
# NOTE: exits the whole script, not just this function.
exit 0
else
echo "[+] PR is not a companion PR. Proceeding test"
fi
}
# Fetches the tag name of the latest release from a repository
# repo: 'organisation/repo'
# Usage: latest_release 'paritytech/polkadot'
# Outputs: the tag name (e.g. 'v0.9.18') on stdout; "null" when absent.
latest_release() {
curl -s "$api_base/$1/releases/latest" | jq -r '.tag_name'
}
# Check for runtime changes between two commits. This is defined as any changes
# to /primitives/src/* and any *production* chains under /runtime
# Usage: has_runtime_changes <from_ref> <to_ref>
# Returns: 0 when the range touches runtime/primitives paths, 1 otherwise.
has_runtime_changes() {
  from=$1
  to=$2
  # grep -q's exit status (0 = at least one matching path) is propagated
  # directly as this function's return value.
  git diff --name-only "${from}...${to}" \
    | grep -q -e '^runtime/polkadot' -e '^runtime/kusama' -e '^primitives/src/' -e '^runtime/common'
}
# given a bootnode and the path to a chainspec file, this function will create a new chainspec file
# with only the bootnode specified and test whether that bootnode provides peers
# The optional third argument is the index of the bootnode in the list of bootnodes, this is just used to pick an ephemeral
# port for the node to run on. If you're only testing one, it'll just use the first ephemeral port
# BOOTNODE: /dns/polkadot-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWEPmjoRpDSUuiTjvyNDd8fejZ9eNWH5bE965nyBMDrB4o
# CHAINSPEC_FILE: /path/to/polkadot.json
# Returns: 0 when at least MIN_PEERS peers were observed via the bootnode
#          within the polling window, 1 otherwise.
check_bootnode(){
  BOOTNODE=$1
  BASE_CHAINSPEC=$2
  RUNTIME=$(basename "$BASE_CHAINSPEC" | cut -d '.' -f 1)
  MIN_PEERS=1
  # Generate a temporary chainspec file containing only the bootnode we care about
  TMP_CHAINSPEC_FILE="$RUNTIME.$(echo "$BOOTNODE" | tr '/' '_').tmp.json"
  # BUGFIX: read the chainspec from the function argument ($BASE_CHAINSPEC);
  # the previous code read "$CHAINSPEC_FILE", a variable that is never set here.
  jq ".bootNodes = [\"$BOOTNODE\"] " < "$BASE_CHAINSPEC" > "$TMP_CHAINSPEC_FILE"
  # Grab an unused port by binding to port 0 and then immediately closing the socket
  # This is a bit of a hack, but it's the only way to do it in the shell
  RPC_PORT=$(python -c "import socket; s=socket.socket(); s.bind(('', 0)); print(s.getsockname()[1]); s.close()")
  echo "[+] Checking bootnode $BOOTNODE"
  polkadot --chain "$TMP_CHAINSPEC_FILE" --no-mdns --rpc-port="$RPC_PORT" --tmp > /dev/null 2>&1 &
  # Capture $! immediately after launching; reading it after other commands
  # risks picking up the wrong PID.
  POLKADOT_PID=$!
  # Wait a few seconds for the node to start up
  sleep 5
  MAX_POLLS=10
  TIME_BETWEEN_POLLS=3
  for _ in $(seq 1 "$MAX_POLLS"); do
    # Check the health endpoint of the RPC node
    PEERS="$(curl -s -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"system_health","params":[],"id":1}' http://localhost:"$RPC_PORT" | jq -r '.result.peers')"
    # Sometimes due to machine load or other reasons, we don't get a response from the RPC node
    # If $PEERS is an empty variable, make it 0 so we can still do the comparison
    if [ -z "$PEERS" ]; then
      PEERS=0
    fi
    if [ "$PEERS" -ge "$MIN_PEERS" ]; then
      echo "[+] $PEERS peers found for $BOOTNODE"
      echo " Bootnode appears contactable"
      kill "$POLKADOT_PID"
      # Delete the temporary chainspec file now we're done running the node
      rm "$TMP_CHAINSPEC_FILE"
      return 0
    fi
    sleep "$TIME_BETWEEN_POLLS"
  done
  kill "$POLKADOT_PID"
  # Delete the temporary chainspec file now we're done running the node
  rm "$TMP_CHAINSPEC_FILE"
  echo "[!] No peers found for $BOOTNODE"
  echo " Bootnode appears unreachable"
  return 1
}
# Download every asset of a GitHub release into ./release-artifacts.
# Assumes the ENV are set:
# - RELEASE_ID
# - GITHUB_TOKEN
# - REPO in the form paritytech/polkadot
# NOTE(review): uses bash arrays and pushd/popd although this library's
# shebang is #!/bin/sh — this function requires bash; confirm callers
# only source it from bash.
fetch_release_artifacts() {
echo "Release ID : $RELEASE_ID"
echo "Repo : $REPO"
curl -L -s \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${GITHUB_TOKEN}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
https://api.github.com/repos/${REPO}/releases/${RELEASE_ID} > release.json
# Get Asset ids
ids=($(jq -r '.assets[].id' < release.json ))
count=$(jq '.assets|length' < release.json )
# Fetch artifacts
mkdir -p "./release-artifacts"
pushd "./release-artifacts" > /dev/null
iter=1
for id in "${ids[@]}"
do
echo " - $iter/$count: downloading asset id: $id..."
# -OJ: save under the server-provided filename (Content-Disposition).
curl -s -OJ -L -H "Accept: application/octet-stream" \
-H "Authorization: Token ${GITHUB_TOKEN}" \
"https://api.github.com/repos/${REPO}/releases/assets/$id"
iter=$((iter + 1))
done
pwd
ls -al --color
popd > /dev/null
}
# Check the checksum for a given binary
function check_sha256() {
echo "Checking SHA256 for $1"
shasum -qc $1.sha256
}
# Import GPG keys of the release team members
# This is done in parallel as it can take a while sometimes
# Globals (read): GPG_KEYSERVER (optional, defaults to keyserver.ubuntu.com)
function import_gpg_keys() {
  GPG_KEYSERVER=${GPG_KEYSERVER:-"keyserver.ubuntu.com"}
  SEC="9D4B2B6EB8F97156D19669A9FF0812D491B96798"
  WILL="2835EAF92072BC01D188AF2C4A092B93E97CE1E2"
  EGOR="E6FC4D4782EB0FA64A4903CCDB7D3555DD3932D3"
  MARA="533C920F40E73A21EEB7E9EBF27AEA7E7594C9CF"
  MORGAN="2E92A9D8B15D7891363D1AE8AF9E6C43F7F8C4CF"

  echo "Importing GPG keys from $GPG_KEYSERVER in parallel"
  local key
  for key in $SEC $WILL $EGOR $MARA $MORGAN; do
    (
      echo "Importing GPG key $key"
      gpg --no-tty --quiet --keyserver "$GPG_KEYSERVER" --recv-keys "$key"
      # Answer "5" (ultimate trust) + "y" so later signature checks do not
      # warn about an untrusted key.
      echo -e "5\ny\n" | gpg --no-tty --command-fd 0 --expert --edit-key "$key" trust
    ) &
  done
  # Barrier: only return once every parallel import finished.
  wait
}
# Check the GPG signature for a given binary
# Arguments: $1 - path of the binary; "$1.asc" must exist next to it
# Returns: gpg's status, non-zero when the signature does not verify
function check_gpg() {
  echo "Checking GPG Signature for $1"
  # Quote both paths so file names with spaces verify correctly.
  gpg --no-tty --verify -q "$1.asc" "$1"
}
@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Sample call:
# $0 /path/to/folder_with_binary
# This script replaces the former dedicated Dockerfile
# and shows how to use the generic binary_injected.dockerfile
set -e

PROJECT_ROOT=$(git rev-parse --show-toplevel)

# Comma separated list of binaries to inject into the image.
export BINARY=adder-collator,undying-collator
export BIN_FOLDER=$1

"$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh"
@@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Extracts the adder-collator and undying-collator binaries out of the
# paritypr/colander CI image, then builds an injected image from them.
set -e

TMP=$(mktemp -d)
ENGINE=${ENGINE:-podman}

# TODO: Switch to /bin/bash when the image is built from parity/base-bin

# Fetch some binaries. ENGINE is intentionally left unquoted so it may carry
# extra words (e.g. "sudo docker").
$ENGINE run --user root --rm -i \
  --pull always \
  -v "$TMP:/export" \
  --entrypoint /usr/bin/bash \
  paritypr/colander:master -c \
  'cp "$(which adder-collator)" /export'

$ENGINE run --user root --rm -i \
  --pull always \
  -v "$TMP:/export" \
  --entrypoint /usr/bin/bash \
  paritypr/colander:master -c \
  'cp "$(which undying-collator)" /export'

./build-injected.sh "$TMP"
@@ -1,48 +0,0 @@
FROM docker.io/parity/base-bin
# This file allows building a Generic container image
# based on one or multiple pre-built Linux binaries.
# Some defaults are set to polkadot but all can be overridden.
SHELL ["/bin/bash", "-c"]
# metadata — all values are expected to be passed via --build-arg by the
# build-injected.sh helper script.
ARG VCS_REF
ARG BUILD_DATE
ARG IMAGE_NAME
# That can be a single one or a comma separated list
ARG BINARY=polkadot
# NOTE(review): BIN_FOLDER is declared but not referenced below; presumably
# it is consumed by the build script which copies binaries into bin/ — confirm.
ARG BIN_FOLDER=.
ARG DOC_URL=https://github.com/paritytech/polkadot
ARG DESCRIPTION="Polkadot: a platform for web3"
ARG AUTHORS="devops-team@parity.io"
ARG VENDOR="Parity Technologies"
LABEL io.parity.image.authors=${AUTHORS} \
io.parity.image.vendor="${VENDOR}" \
io.parity.image.revision="${VCS_REF}" \
io.parity.image.title="${IMAGE_NAME}" \
io.parity.image.created="${BUILD_DATE}" \
io.parity.image.documentation="${DOC_URL}" \
io.parity.image.description="${DESCRIPTION}" \
io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/binary_injected.Dockerfile"
USER root
WORKDIR /app
# add polkadot binary to docker image
# sample for polkadot: COPY ./polkadot ./polkadot-*-worker /usr/local/bin/
COPY entrypoint.sh .
# All binaries prepared by the build script under bin/ in the build context.
COPY "bin/*" "/usr/local/bin/"
RUN chmod -R a+rx "/usr/local/bin"
# Drop privileges for runtime.
USER parity
# The entrypoint script picks the first entry of this (possibly
# comma separated) list as the binary to start.
ENV BINARY=${BINARY}
# ENTRYPOINT
ENTRYPOINT ["/app/entrypoint.sh"]
# We call the help by default
CMD ["--help"]
@@ -1,92 +0,0 @@
#!/usr/bin/env bash
set -e
# This script allows building a Container Image from a Linux
# binary that is injected into a base-image.
#
# Inputs (all via env, all optional): ENGINE, REGISTRY, PROJECT_ROOT,
# DOCKERFILE, DOCKER_OWNER, BINARY (comma separated list), VERSION,
# BIN_FOLDER, IMAGE, DESCRIPTION, VCS_REF, TAGS (comma separated list),
# SKIP_IMAGE_VALIDATION.
ENGINE=${ENGINE:-podman}
# podman needs --format docker to accept all the Dockerfile directives used
# by the injected Dockerfile; docker requires no extra flag.
if [ "$ENGINE" == "podman" ]; then
PODMAN_FLAGS="--format docker"
else
PODMAN_FLAGS=""
fi
CONTEXT=$(mktemp -d)
REGISTRY=${REGISTRY:-docker.io}
# The following line ensure we know the project root
PROJECT_ROOT=${PROJECT_ROOT:-$(git rev-parse --show-toplevel)}
DOCKERFILE=${DOCKERFILE:-$PROJECT_ROOT/scripts/ci/dockerfiles/binary_injected.Dockerfile}
# Default version: first "version" entry found in the workspace Cargo.toml.
VERSION_TOML=$(grep "^version " $PROJECT_ROOT/Cargo.toml | grep -oE "([0-9\.]+-?[0-9]+)")
# The following VARs have defaults that can be overridden
DOCKER_OWNER=${DOCKER_OWNER:-parity}
# We may get 1..n binaries, comma separated
BINARY=${BINARY:-polkadot}
IFS=',' read -r -a BINARIES <<< "$BINARY"
VERSION=${VERSION:-$VERSION_TOML}
BIN_FOLDER=${BIN_FOLDER:-.}
# The image is named after the first binary of the list.
IMAGE=${IMAGE:-${REGISTRY}/${DOCKER_OWNER}/${BINARIES[0]}}
DESCRIPTION_DEFAULT="Injected Container image built for ${BINARY}"
DESCRIPTION=${DESCRIPTION:-$DESCRIPTION_DEFAULT}
VCS_REF=${VCS_REF:-01234567}
# Build the image
echo "Using engine: $ENGINE"
echo "Using Dockerfile: $DOCKERFILE"
echo "Using context: $CONTEXT"
echo "Building ${IMAGE}:latest container image for ${BINARY} v${VERSION} from ${BIN_FOLDER} hang on!"
echo "BIN_FOLDER=$BIN_FOLDER"
echo "CONTEXT=$CONTEXT"
# We need all binaries and resources available in the Container build "CONTEXT"
mkdir -p $CONTEXT/bin
for bin in "${BINARIES[@]}"
do
echo "Copying $BIN_FOLDER/$bin to context: $CONTEXT/bin"
cp "$BIN_FOLDER/$bin" "$CONTEXT/bin"
done
cp "$PROJECT_ROOT/scripts/ci/dockerfiles/entrypoint.sh" "$CONTEXT"
echo "Building image: ${IMAGE}"
# NOTE(review): TAGS is a scalar env var expanded with array syntax; in
# practice this yields the scalar (or "latest") — confirm intended.
TAGS=${TAGS[@]:-latest}
IFS=',' read -r -a TAG_ARRAY <<< "$TAGS"
TAG_ARGS=" "
echo "The image ${IMAGE} will be tagged with ${TAG_ARRAY[*]}"
for tag in "${TAG_ARRAY[@]}"; do
TAG_ARGS+="--tag ${IMAGE}:${tag} "
done
echo "$TAG_ARGS"
# PODMAN_FLAGS and TAG_ARGS are intentionally unquoted below so they can
# expand to multiple arguments (or to nothing).
# time \
$ENGINE build \
${PODMAN_FLAGS} \
--build-arg VCS_REF="${VCS_REF}" \
--build-arg BUILD_DATE=$(date -u '+%Y-%m-%dT%H:%M:%SZ') \
--build-arg IMAGE_NAME="${IMAGE}" \
--build-arg BINARY="${BINARY}" \
--build-arg BIN_FOLDER="${BIN_FOLDER}" \
--build-arg DESCRIPTION="${DESCRIPTION}" \
${TAG_ARGS} \
-f "${DOCKERFILE}" \
${CONTEXT}
echo "Your Container image for ${IMAGE} is ready"
$ENGINE images
# Smoke test the image unless the caller opted out.
if [[ -z "${SKIP_IMAGE_VALIDATION}" ]]; then
echo "Check the image ${IMAGE}:${TAG_ARRAY[0]}"
$ENGINE run --rm -i "${IMAGE}:${TAG_ARRAY[0]}" --version
echo "Query binaries"
$ENGINE run --rm -i --entrypoint /bin/bash "${IMAGE}:${TAG_ARRAY[0]}" -c 'echo BINARY: $BINARY'
fi
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Generic container entrypoint: starts the first binary listed in $BINARY and
# forwards all container arguments to it.

# Sanity check
if [ -z "$BINARY" ]
then
    echo "BINARY ENV not defined, this should never be the case. Aborting..."
    exit 1
fi

# If the user built the image with multiple binaries,
# we consider the first one to be the canonical one
# To start with another binary, the user can either:
# - use the --entrypoint option
# - pass the ENV BINARY with a single binary
IFS=',' read -r -a BINARIES <<< "$BINARY"
BIN0=${BINARIES[0]}
echo "Starting binary $BIN0"
# exec replaces this shell so the binary runs as PID 1 and receives signals
# (e.g. docker stop's SIGTERM) directly; quoting "$@" preserves arguments
# that contain spaces.
exec "$BIN0" "$@"
@@ -1,14 +0,0 @@
#!/usr/bin/env bash
# Sample call:
# $0 /path/to/folder_with_binary
# This script replaces the former dedicated Dockerfile
# and shows how to use the generic binary_injected.dockerfile
set -e

PROJECT_ROOT=$(git rev-parse --show-toplevel)

# Comma separated list of binaries to inject into the image.
export BINARY=malus,polkadot-execute-worker,polkadot-prepare-worker
export BIN_FOLDER=$1
# export TAGS=...

"$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh"
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
# Fetches the malus binary out of the paritypr/malus CI image, then builds an
# injected image from it (tagged with a few sample tags).
set -e

TMP=$(mktemp -d)
ENGINE=${ENGINE:-podman}
export TAGS=latest,beta,7777,1.0.2-rc23

# Fetch some binaries. ENGINE is intentionally left unquoted so it may carry
# extra words (e.g. "sudo docker").
$ENGINE run --user root --rm -i \
  --pull always \
  -v "$TMP:/export" \
  --entrypoint /bin/bash \
  paritypr/malus:7217 -c \
  'cp "$(which malus)" /export'

echo "Checking binaries we got:"
ls -al "$TMP"

./build-injected.sh "$TMP"
@@ -1,9 +0,0 @@
# Self built Docker image
The Polkadot repo contains several options to build Docker images for Polkadot.
This folder contains a self-contained image that does not require a pre-built Linux binary.
Instead, the image can be built on any host that has Docker installed, and Polkadot is
compiled inside Docker. That also means that no Rust toolchain is required on the host
machine for the build to succeed.
@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Sample call:
# $0 /path/to/folder_with_binary
# This script replaces the former dedicated Dockerfile
# and shows how to use the generic binary_injected.dockerfile
set -e

PROJECT_ROOT=$(git rev-parse --show-toplevel)

# Comma separated list of binaries to inject into the image.
export BINARY=polkadot,polkadot-execute-worker,polkadot-prepare-worker
export BIN_FOLDER=$1

"$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh"
@@ -1,50 +0,0 @@
# Local two-node Polkadot test network (Alice + Bob) using the published
# parity/polkadot image; each node has its own named data volume and a static
# IP on the "testing_net" bridge network.
version: '3'
services:
# Alice: host ports map p2p (30333), http rpc (9933), ws rpc (9944)
# and prometheus metrics (9615). The fixed --node-key gives Alice a stable
# peer id that Bob's --bootnodes address below relies on.
node_alice:
ports:
- "30333:30333"
- "9933:9933"
- "9944:9944"
- "9615:9615"
image: parity/polkadot:latest
volumes:
- "polkadot-data-alice:/data"
command: |
--chain=polkadot-local
--alice
-d /data
--node-key 0000000000000000000000000000000000000000000000000000000000000001
networks:
testing_net:
ipv4_address: 172.28.1.1
# Bob: host ports are shifted so they do not clash with Alice's.
node_bob:
ports:
- "30344:30333"
- "9935:9933"
- "9945:9944"
- "29615:9615"
image: parity/polkadot:latest
volumes:
- "polkadot-data-bob:/data"
links:
- "node_alice:alice"
command: |
--chain=polkadot-local
--bob
-d /data
--bootnodes '/ip4/172.28.1.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR'
networks:
testing_net:
ipv4_address: 172.28.1.2
# Named volumes keep chain data across container restarts.
volumes:
polkadot-data-alice:
polkadot-data-bob:
networks:
testing_net:
ipam:
driver: default
config:
- subnet: 172.28.0.0/16
@@ -1,22 +0,0 @@
# Single-node Polkadot service; every port is bound to 127.0.0.1 only and
# chain data persists in the named "polkadot-data" volume.
version: '3'
services:
polkadot:
image: parity/polkadot:latest
ports:
- "127.0.0.1:30333:30333/tcp"
- "127.0.0.1:9933:9933/tcp"
- "127.0.0.1:9944:9944/tcp"
- "127.0.0.1:9615:9615/tcp"
volumes:
- "polkadot-data:/data"
# NOTE(review): the unsafe-*-external flags expose the RPC interfaces beyond
# localhost inside the container; only the 127.0.0.1 port bindings above
# shield them from the outside — confirm this is intended.
command: |
--unsafe-rpc-external
--unsafe-ws-external
--rpc-cors all
--prometheus-external
volumes:
polkadot-data:
@@ -1,7 +0,0 @@
# Polkadot official Docker image
## [Polkadot](https://polkadot.network/)
## [GitHub](https://github.com/paritytech/polkadot)
## [Polkadot Wiki](https://wiki.polkadot.network/)
@@ -1,36 +0,0 @@
# This is the build stage for Polkadot. Here we create the binary in a temporary image.
FROM docker.io/paritytech/ci-linux:production as builder
WORKDIR /polkadot
COPY . /polkadot
RUN cargo build --locked --release
# This is the 2nd stage: a very small image where we copy the Polkadot binary.
FROM docker.io/parity/base-bin:latest
# NOTE(review): VCS_REF is used in the source label below but no ARG VCS_REF
# is declared in this stage, so it expands to an empty string — confirm.
LABEL description="Multistage Docker image for Polkadot: a platform for web3" \
io.parity.image.type="builder" \
io.parity.image.authors="chevdor@gmail.com, devops-team@parity.io" \
io.parity.image.vendor="Parity Technologies" \
io.parity.image.description="Polkadot: a platform for web3" \
io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile" \
io.parity.image.documentation="https://github.com/paritytech/polkadot/"
COPY --from=builder /polkadot/target/release/polkadot /usr/local/bin
# Create an unprivileged user and make /data the canonical chain-data
# location, symlinked into the user's default base path.
RUN useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \
mkdir -p /data /polkadot/.local/share && \
chown -R polkadot:polkadot /data && \
ln -s /data /polkadot/.local/share/polkadot && \
# unclutter and minimize the attack surface
rm -rf /usr/bin /usr/sbin && \
# check if executable works in this container
/usr/local/bin/polkadot --version
USER polkadot
# p2p, http rpc, ws rpc, prometheus metrics
EXPOSE 30333 9933 9944 9615
VOLUME ["/data"]
ENTRYPOINT ["/usr/local/bin/polkadot"]
@@ -1,53 +0,0 @@
FROM docker.io/library/ubuntu:20.04
# metadata
ARG VCS_REF
ARG BUILD_DATE
# Version of the polkadot apt package to install, prefixed with "v".
ARG POLKADOT_VERSION
# Key that signs the parity apt repository.
ARG POLKADOT_GPGKEY=9D4B2B6EB8F97156D19669A9FF0812D491B96798
ARG GPG_KEYSERVER="keyserver.ubuntu.com"
LABEL io.parity.image.authors="devops-team@parity.io" \
io.parity.image.vendor="Parity Technologies" \
io.parity.image.title="parity/polkadot" \
io.parity.image.description="Polkadot: a platform for web3. This is the official Parity image with an injected binary." \
io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile" \
io.parity.image.revision="${VCS_REF}" \
io.parity.image.created="${BUILD_DATE}" \
io.parity.image.documentation="https://github.com/paritytech/polkadot/"
# show backtraces
ENV RUST_BACKTRACE 1
# install tools and dependencies
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
libssl1.1 \
ca-certificates \
gnupg && \
useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \
# add repo's gpg keys and install the published polkadot binary
gpg --keyserver ${GPG_KEYSERVER} --recv-keys ${POLKADOT_GPGKEY} && \
gpg --export ${POLKADOT_GPGKEY} > /usr/share/keyrings/parity.gpg && \
echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list && \
apt-get update && \
# ${POLKADOT_VERSION#?} strips the first character (the "v" prefix).
apt-get install -y --no-install-recommends polkadot=${POLKADOT_VERSION#?} && \
# apt cleanup
apt-get autoremove -y && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* ; \
mkdir -p /data /polkadot/.local/share && \
chown -R polkadot:polkadot /data && \
ln -s /data /polkadot/.local/share/polkadot
USER polkadot
# check if executable works in this container
RUN /usr/bin/polkadot --version
RUN /usr/bin/polkadot-execute-worker --version
RUN /usr/bin/polkadot-prepare-worker --version
# p2p, http rpc, ws rpc
EXPOSE 30333 9933 9944
VOLUME ["/polkadot"]
ENTRYPOINT ["/usr/bin/polkadot"]
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Extracts the polkadot binary from the published parity/polkadot image and
# rebuilds an injected image from it.
set -e

TMP=$(mktemp -d)
ENGINE=${ENGINE:-podman}

# You need to build an injected image first

# Fetch some binaries. ENGINE is intentionally left unquoted so it may carry
# extra words (e.g. "sudo docker").
$ENGINE run --user root --rm -i \
  -v "$TMP:/export" \
  --entrypoint /bin/bash \
  parity/polkadot -c \
  'cp "$(which polkadot)" /export'

echo "Checking binaries we got:"
tree "$TMP"

./build-injected.sh "$TMP"
@@ -1,37 +0,0 @@
# staking-miner container image
## Build using the Builder
```
./build.sh
```
## Build the injected Image
You first need a valid Linux binary to inject. Let's assume this binary is located in `BIN_FOLDER`.
```
./build-injected.sh "$BIN_FOLDER"
```
## Test
Here is how to test the image. We can generate a valid seed, but the staking-miner will quickly notice that the
account is not funded and therefore "does not exist".
You may pass any ENV supported by the binary and must provide at least a few such as `SEED` and `URI`:
```
ENV SEED=""
ENV URI="wss://rpc.polkadot.io:443"
ENV RUST_LOG="info"
```
```
export SEED=$(subkey generate -n polkadot --output-type json | jq -r .secretSeed)
podman run --rm -it \
-e URI="wss://rpc.polkadot.io:443" \
-e RUST_LOG="info" \
-e SEED \
localhost/parity/staking-miner \
dry-run seq-phragmen
```
@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Sample call:
# $0 /path/to/folder_with_staking-miner_binary
# This script replaces the former dedicated staking-miner "injected" Dockerfile
# and shows how to use the generic binary_injected.dockerfile
set -e

PROJECT_ROOT=$(git rev-parse --show-toplevel)

export BINARY=staking-miner
export BIN_FOLDER=$1

"$PROJECT_ROOT/scripts/ci/dockerfiles/build-injected.sh"
@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Builds the staking-miner container image from source using the Builder
# Dockerfile (no pre-built binary required).
set -e

PROJECT_ROOT=$(git rev-parse --show-toplevel)
ENGINE=${ENGINE:-podman}

echo "Building the staking-miner using the Builder image"
echo "PROJECT_ROOT=$PROJECT_ROOT"
# ENGINE is intentionally unquoted so it may carry extra words.
$ENGINE build -t staking-miner -f staking-miner_builder.Dockerfile "$PROJECT_ROOT"
@@ -1,3 +0,0 @@
# Staking-miner Docker image
## [GitHub](https://github.com/paritytech/polkadot/tree/master/utils/staking-miner)
@@ -1,43 +0,0 @@
FROM paritytech/ci-linux:production as builder
# metadata
ARG VCS_REF
ARG BUILD_DATE
ARG IMAGE_NAME="staking-miner"
ARG PROFILE=production
LABEL description="This is the build stage. Here we create the binary."
WORKDIR /app
COPY . /app
RUN cargo build --locked --profile $PROFILE --package staking-miner
# ===== SECOND STAGE ======
FROM docker.io/parity/base-bin:latest
LABEL description="This is the 2nd stage: a very small image where we copy the binary."
# NOTE(review): VCS_REF, IMAGE_NAME and BUILD_DATE are only declared as ARGs
# in the first stage; without re-declaring them here the labels below expand
# to empty strings — confirm.
LABEL io.parity.image.authors="devops-team@parity.io" \
io.parity.image.vendor="Parity Technologies" \
io.parity.image.title="${IMAGE_NAME}" \
io.parity.image.description="${IMAGE_NAME} for substrate based chains" \
io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/scripts/ci/dockerfiles/${IMAGE_NAME}/${IMAGE_NAME}_builder.Dockerfile" \
io.parity.image.revision="${VCS_REF}" \
io.parity.image.created="${BUILD_DATE}" \
io.parity.image.documentation="https://github.com/paritytech/polkadot/"
# NOTE(review): the builder stage defaults to PROFILE=production while this
# stage defaults to PROFILE=release, so the COPY below only matches when both
# are overridden consistently via --build-arg — confirm.
ARG PROFILE=release
COPY --from=builder /app/target/$PROFILE/staking-miner /usr/local/bin
# show backtraces
ENV RUST_BACKTRACE 1
USER parity
# Runtime configuration consumed by the staking-miner binary.
ENV SEED=""
ENV URI="wss://rpc.polkadot.io"
ENV RUST_LOG="info"
# check if the binary works in this container
RUN /usr/local/bin/staking-miner --version
ENTRYPOINT [ "/usr/local/bin/staking-miner" ]
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Extracts the staking-miner binary from a previously built injected image and
# rebuilds an injected image from it.
set -e

TMP=$(mktemp -d)
ENGINE=${ENGINE:-podman}

# You need to build an injected image first

# Fetch some binaries. ENGINE is intentionally left unquoted so it may carry
# extra words (e.g. "sudo docker").
$ENGINE run --user root --rm -i \
  -v "$TMP:/export" \
  --entrypoint /bin/bash \
  parity/staking-miner -c \
  'cp "$(which staking-miner)" /export'

echo "Checking binaries we got:"
tree "$TMP"

./build-injected.sh "$TMP"
-127
View File
@@ -1,127 +0,0 @@
#!/usr/bin/env bash
# This script helps running sanity checks on a release branch
# It is intended to be ran from the repo and from the release branch
# NOTE: The diener runs do take time and are not really required because
# if we missed the diener runs, the Cargo.lock that we check won't pass
# the tests. See https://github.com/bkchr/diener/issues/17
# Derive the repo name (e.g. "polkadot") from the push remote URL.
grv=$(git remote --verbose | grep push)
export RUST_LOG=none
REPO=$(echo "$grv" | cut -d ' ' -f1 | cut -d$'\t' -f2 | sed 's/.*github.com\/\(.*\)/\1/g' | cut -d '/' -f2 | cut -d '.' -f1 | sort | uniq)
echo "[+] Detected repo: $REPO"
# Refuse to run on anything that does not look like a release branch.
BRANCH=$(git branch --show-current)
if ! [[ "$BRANCH" =~ ^release.*$ || "$BRANCH" =~ ^polkadot.*$ ]]; then
echo "This script is meant to run only on a RELEASE branch."
echo "Try one of the following branch:"
git branch -r --format "%(refname:short)" --sort=-committerdate | grep -Ei '/?release' | head
exit 1
fi
echo "[+] Working on $BRANCH"
# Tries to get the version of the release from the branch name.
# input: release-foo-v0.9.22 or release-bar-v9220 or release-foo-v0.9.220
# output: 0.9.22
# Arguments: $1 - branch name containing a "-v<version>" suffix
# Outputs: the normalized MAJOR.MINOR.PATCH version on stdout
get_version() {
  local branch=$1
  # Everything after the first "-v" is the raw version string.
  [[ $branch =~ -v(.*) ]]
  local version=${BASH_REMATCH[1]}

  if [[ $version =~ \. ]]; then
    # Dotted form, e.g. "0.9.22" or "0.9.220": keep at most the first two
    # digits of the patch component (0.9.220 -> 0.9.22).
    local major minor patch
    major=$(($(echo "$version" | cut -d '.' -f1)))
    minor=$(($(echo "$version" | cut -d '.' -f2)))
    patch=$(($(echo "$version" | cut -d '.' -f3)))
    echo "$major.$minor.${patch:0:2}"
  else
    # Packed numeric form, e.g. 9220 -> 0.9.22:
    # major = v / 100000, minor = next 3 digits / 1000, patch = rest / 10.
    local major minor patch remainder
    major=$((version / 100000))
    remainder=$((version - major * 100000))
    minor=$((remainder / 1000))
    remainder=$((remainder - minor * 1000))
    patch=$((remainder / 10))
    echo "$major.$minor.$patch"
  fi
}
# Return the name of the release branch for a given repo and version.
# Arguments: $1 - repo (polkadot or substrate), $2 - version (e.g. 0.9.22)
# Outputs: the branch name on stdout; exits 1 for an unknown repo
get_release_branch() {
  repo=$1
  version=$2

  if [[ "$repo" == "polkadot" ]]; then
    echo "release-v$version"
  elif [[ "$repo" == "substrate" ]]; then
    echo "polkadot-v$version"
  else
    echo "Repo $repo is not supported, exiting"
    exit 1
  fi
}
# repo = substrate / polkadot
# Check that Cargo.lock pins exactly one branch of the given repo, and that
# it is the expected one.
# Arguments: $1 - repo name, $2 - expected branch
# Returns: 0 when exactly one matching branch and no other branch is found
check_release_branch_repo() {
  local repo=$1
  local branch=$2
  echo "[+] Checking deps for $repo=$branch"

  # Number of unique lock entries pointing at the expected branch…
  local positive negative
  positive=$(grep "$repo?branch=$branch" Cargo.lock | sort -u | wc -l)
  # …and at any other branch of the same repo.
  negative=$(grep "$repo?branch=" Cargo.lock | grep -v -- "$branch" | sort -u | wc -l)

  if [[ $positive -eq 1 && $negative -eq 0 ]]; then
    echo -e "[+] ✅ Looking good"
    grep "$repo?branch=" Cargo.lock | sort -u | sed 's/^/\t - /'
    return 0
  else
    echo -e "[+] ❌ Something seems to be wrong, we want 1 unique match and 0 non match (1, 0) and we got ($((positive)), $((negative)))"
    grep "$repo?branch=" Cargo.lock | sort -u | sed 's/^/\t - /'
    return 1
  fi
}
# Check a release branch
# Verifies the Cargo.lock pins for substrate and, when a second argument is
# given, for polkadot as well.
# Arguments: $1 - substrate branch, $2 - polkadot branch (optional)
# Returns: 0 when every checked repo looks good (sum of both statuses)
check_release_branches() {
SUBSTRATE_BRANCH=$1
POLKADOT_BRANCH=$2
check_release_branch_repo substrate $SUBSTRATE_BRANCH
ret_a1=$?
ret_b1=0
# Only polkadot-dependent repos (e.g. cumulus) pass a second branch.
if [ $POLKADOT_BRANCH ]; then
check_release_branch_repo polkadot $POLKADOT_BRANCH
ret_b1=$?
fi
# Non-zero when either check failed.
STATUS=$(($ret_a1 + $ret_b1))
return $STATUS
}
VERSION=$(get_version $BRANCH)
echo "[+] Target version: v$VERSION"
# Each repo checks the Cargo.lock pins of its upstream dependencies.
case $REPO in
polkadot)
substrate=$(get_release_branch substrate $VERSION)
check_release_branches $substrate
;;
cumulus)
polkadot=$(get_release_branch polkadot $VERSION)
substrate=$(get_release_branch substrate $VERSION)
check_release_branches $substrate $polkadot
;;
*)
echo "REPO $REPO is not supported, exiting"
exit 1
;;
esac
@@ -1,71 +0,0 @@
#!/usr/bin/env bash
# In this script, we check each bootnode for a given chainspec file and ensure they are contactable.
# We do this by removing every bootnode from the chainspec with the exception of the one
# we want to check. Then we spin up a node using this new chainspec, wait a little while
# and then check our local node's RPC endpoint for the number of peers. If the node hasn't
# been able to contact any other nodes, we can reason that the bootnode we used is not well-connected
# or is otherwise uncontactable.
# shellcheck source=scripts/ci/common/lib.sh
source "$(dirname "${0}")/../common/lib.sh"
# Usage: $0 <chainspec.json>; the runtime name is derived from the file name.
CHAINSPEC_FILE="$1"
RUNTIME=$(basename "$CHAINSPEC_FILE" | cut -d '.' -f 1)
trap cleanup EXIT INT TERM
# Kill any spawned polkadot processes and exit with the given code.
# NOTE(review): when invoked via the trap, $1 is empty and "exit" then
# re-uses the previous command's status — confirm that is intended.
cleanup(){
echo "[+] Script interrupted or ended. Cleaning up..."
# Kill all the polkadot processes
killall polkadot > /dev/null 2>&1
exit $1
}
# count the number of bootnodes
BOOTNODES=$( jq -r '.bootNodes | length' "$CHAINSPEC_FILE" )
# Make a temporary dir for chainspec files
# Store an array of the bad bootnodes
BAD_BOOTNODES=()
GOOD_BOOTNODES=()
PIDS=()
echo "[+] Checking $BOOTNODES bootnodes for $RUNTIME"
# Spawn one background check per bootnode (check_bootnode comes from lib.sh);
# PIDS[i] corresponds to bootnode i, which the RESPS loops below rely on.
for i in $(seq 0 $((BOOTNODES-1))); do
BOOTNODE=$( jq -r .bootNodes["$i"] < "$CHAINSPEC_FILE" )
# Check each bootnode in parallel
check_bootnode "$BOOTNODE" "$CHAINSPEC_FILE" &
PIDS+=($!)
# Hold off 5 seconds between attempting to spawn nodes to stop the machine from getting overloaded
sleep 5
done
RESPS=()
# Wait for all the nodes to finish
for pid in "${PIDS[@]}"; do
wait "$pid"
RESPS+=($?)
done
echo
# For any bootnodes that failed, add them to the bad bootnodes array
for i in "${!RESPS[@]}"; do
if [ "${RESPS[$i]}" -ne 0 ]; then
BAD_BOOTNODES+=("$( jq -r .bootNodes["$i"] < "$CHAINSPEC_FILE" )")
fi
done
# For any bootnodes that succeeded, add them to the good bootnodes array
for i in "${!RESPS[@]}"; do
if [ "${RESPS[$i]}" -eq 0 ]; then
GOOD_BOOTNODES+=("$( jq -r .bootNodes["$i"] < "$CHAINSPEC_FILE" )")
fi
done
# If we've got any uncontactable bootnodes for this runtime, print them
if [ ${#BAD_BOOTNODES[@]} -gt 0 ]; then
echo "[!] Bad bootnodes found for $RUNTIME:"
for i in "${BAD_BOOTNODES[@]}"; do
echo " $i"
done
cleanup 1
else
echo "[+] All bootnodes for $RUNTIME are contactable"
cleanup 0
fi
@@ -1,75 +0,0 @@
#!/usr/bin/env bash
#shellcheck source=../common/lib.sh
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh"
repo="$GITHUB_REPOSITORY"
pr="$GITHUB_PR"
# Succeeds (returns 0) when the PR carries at least one of the given labels.
# Globals (read): repo, pr
# Arguments: one or more label names
ensure_labels() {
  local candidate
  for candidate in "$@"; do
    if has_label "$repo" "$pr" "$candidate"; then
      return 0
    fi
  done
  return 1
}
# Must have one of the following labels
releasenotes_labels=(
'B0-silent'
'B1-releasenotes'
'B7-runtimenoteworthy'
)
# Must be an ordered list of priorities, lowest first
priority_labels=(
'C1-low 📌'
'C3-medium 📣'
'C7-high ❗️'
'C9-critical ‼️'
)
# Audit (D) labels; one is required whenever runtime code changed.
audit_labels=(
'D1-audited 👍'
'D2-notlive 💤'
'D3-trivial 🧸'
'D5-nicetohaveaudit ⚠️'
'D9-needsaudit 👮'
)
echo "[+] Checking release notes (B) labels for $CI_COMMIT_BRANCH"
if ensure_labels "${releasenotes_labels[@]}"; then
echo "[+] Release notes label detected. All is well."
else
echo "[!] Release notes label not detected. Please add one of: ${releasenotes_labels[*]}"
exit 1
fi
echo "[+] Checking release priority (C) labels for $CI_COMMIT_BRANCH"
if ensure_labels "${priority_labels[@]}"; then
echo "[+] Release priority label detected. All is well."
else
echo "[!] Release priority label not detected. Please add one of: ${priority_labels[*]}"
exit 1
fi
# has_runtime_changes comes from the sourced common/lib.sh
if has_runtime_changes "${BASE_SHA}" "${HEAD_SHA}"; then
echo "[+] Runtime changes detected. Checking audit (D) labels"
if ensure_labels "${audit_labels[@]}"; then
echo "[+] Release audit label detected. All is well."
else
echo "[!] Release audit label not detected. Please add one of: ${audit_labels[*]}"
exit 1
fi
fi
# If the priority is anything other than the lowest, we *must not* have a B0-silent
# label
if has_label "$repo" "$GITHUB_PR" 'B0-silent' &&
! has_label "$repo" "$GITHUB_PR" "${priority_labels[0]}"; then
echo "[!] Changes with a priority higher than C1-low *MUST* have a B- label that is not B0-Silent"
exit 1
fi
exit 0
@@ -1,42 +0,0 @@
#!/bin/bash
set -e
# shellcheck source=scripts/ci/common/lib.sh
source "$(dirname "${0}")/../common/lib.sh"
# This script checks any new bootnodes added since the last git commit
RUNTIMES=( kusama westend polkadot )
WAS_ERROR=0
for RUNTIME in "${RUNTIMES[@]}"; do
CHAINSPEC_FILE="node/service/chain-specs/$RUNTIME.json"
# Get the bootnodes from master's chainspec
git show origin/master:"$CHAINSPEC_FILE" | jq '{"oldNodes": .bootNodes}' > "$RUNTIME-old-bootnodes.json"
# Get the bootnodes from the current branch's chainspec
git show HEAD:"$CHAINSPEC_FILE" | jq '{"newNodes": .bootNodes}' > "$RUNTIME-new-bootnodes.json"
# Make a chainspec containing only the new bootnodes
# (set difference newNodes - oldNodes computed by jq).
jq ".bootNodes = $(jq -rs '.[0] * .[1] | .newNodes-.oldNodes' \
"$RUNTIME-new-bootnodes.json" "$RUNTIME-old-bootnodes.json")" \
< "node/service/chain-specs/$RUNTIME.json" \
> "$RUNTIME-new-chainspec.json"
# exit early if the new chainspec has no bootnodes
if [ "$(jq -r '.bootNodes | length' "$RUNTIME-new-chainspec.json")" -eq 0 ]; then
echo "[+] No new bootnodes for $RUNTIME"
# Clean up the temporary files
rm "$RUNTIME-new-chainspec.json" "$RUNTIME-old-bootnodes.json" "$RUNTIME-new-bootnodes.json"
continue
fi
# Check the new bootnodes
# Record the failure but keep checking the remaining runtimes.
if ! "scripts/ci/github/check_bootnodes.sh" "$RUNTIME-new-chainspec.json"; then
WAS_ERROR=1
fi
# Clean up the temporary files
rm "$RUNTIME-new-chainspec.json" "$RUNTIME-old-bootnodes.json" "$RUNTIME-new-bootnodes.json"
done
if [ $WAS_ERROR -eq 1 ]; then
echo "[!] One of the new bootnodes failed to connect. Please check logs above."
exit 1
fi
@@ -1,20 +0,0 @@
#!/usr/bin/env bash
# Emits a collapsible markdown section comparing the runtime weight files of
# the current checkout against a given reference, using swc.
# Usage: $0 <runtime> <ref>
# Need to set globstar for ** magic
shopt -s globstar
RUNTIME=$1
VERSION=$2
echo "<details>"
echo "<summary>Weight changes for $RUNTIME</summary>"
echo
swc compare commits \
--method asymptotic \
--offline \
--path-pattern "./runtime/$RUNTIME/src/weights/**/*.rs" \
--no-color \
--format markdown \
--strip-path-prefix "runtime/$RUNTIME/src/weights/" \
"$VERSION"
#--ignore-errors
echo
echo "</details>"
@@ -1,55 +0,0 @@
#!/usr/bin/env bash
# This script is used in a Github Workflow. It helps filtering out what is interesting
# when comparing metadata and spot what would require a tx version bump.
# shellcheck disable=SC2002,SC2086
# Usage: $0 <metadata-diff-file>
# NOTE(review): the "\n" sequences echoed below are printed literally under
# bash (no -e) — presumably intended for the consuming workflow; confirm.
FILE=$1
# Highlight lines marked as deleted ("[-]") in the metadata diff file.
# Globals (read): FILE
function find_deletions() {
  echo "\n## Deletions\n"
  RES=$(grep -n '\[\-\]' "$FILE" | tr -s " ")
  if [ -n "$RES" ]; then
    printf '%s\n' "$RES" | awk '{ printf "%s\\n", $0 }'
  else
    echo "n/a"
  fi
}
# Highlight indexes whose value changed, e.g. "idx: 3 -> 4".
# Globals (read): FILE
function find_index_changes() {
  echo "\n## Index changes\n"
  RES=$(grep -E -n -i 'idx:\s*([0-9]+)\s*(->)\s*([0-9]+)' "$FILE" | tr -s " ")
  if [ -n "$RES" ]; then
    printf '%s\n' "$RES" | awk '{ printf "%s\\n", $0 }'
  else
    echo "n/a"
  fi
}
# Highlight values that decreased
# Globals (read): FILE
# NOTE(review): LIST=("$OUT") is quoted, so the IFS=';' does not actually
# split OUT — the loop runs once over the whole semicolon-joined string;
# confirm whether a per-match grep was intended.
function find_decreases() {
echo "\n## Decreases\n"
OUT=$(cat "$FILE" | grep -E -i -o '([0-9]+)\s*(->)\s*([0-9]+)' | awk '$1 > $3 { printf "%s;", $0 }')
IFS=$';' LIST=("$OUT")
unset RES
for line in "${LIST[@]}"; do
# NOTE(review): the escaped quotes become literal parts of the grep
# pattern; they only match if the file contains quotes — verify.
RES="$RES\n$(cat "$FILE" | grep -E -i -n \"$line\" | tr -s " ")"
done
if [ "$RES" ]; then
echo "$RES" | awk '{ printf "%s\\n", $0 }' | sort -u -g | uniq
else
echo "n/a"
fi
}
# Summary trailer; the functions read the global FILE, so the "$FILE"
# argument passed below is currently unused by them.
echo "\n------------------------------ SUMMARY -------------------------------"
echo "\n⚠️ This filter is here to help spotting changes that should be reviewed carefully."
echo "\n⚠️ It catches only index changes, deletions and value decreases".
find_deletions "$FILE"
find_index_changes "$FILE"
find_decreases "$FILE"
echo "\n----------------------------------------------------------------------\n"
@@ -1,148 +0,0 @@
# frozen_string_literal: true
require 'base64'
require 'changelogerator'
require 'erb'
require 'git'
require 'json'
require 'octokit'
require 'toml'
require_relative './lib.rb'
# A debug logger that is only active when NOT running in CI.
def logger(s)
  return unless ENV['CI'] != 'true'

  puts "▶ DEBUG: %s" % [s]
end
# Check if all the required ENV are set and log their status.
# This is especially convenient when testing locally; it is a no-op in CI.
def check_env()
  return if ENV['CI'] == 'true'

  logger("Running locally")
  vars = ['GITHUB_REF', 'GITHUB_TOKEN', 'GITHUB_WORKSPACE', 'GITHUB_REPOSITORY', 'RUSTC_STABLE', 'RUSTC_NIGHTLY']
  vars.each { |x|
    env = (ENV[x] || "")
    # Both branches of the original conditional logged the identical line, so
    # a single call suffices; "set:" is true when the value is non-empty.
    logger("- %s:\tset: %s, len: %d" % [x, env.length > 0 || false, env.length])
  }
end
check_env()
current_ref = ENV['GITHUB_REF']
token = ENV['GITHUB_TOKEN']
logger("Connecting to Github")
# Authenticated client; the token is also reused for the Changelog calls.
github_client = Octokit::Client.new(
access_token: token
)
polkadot_path = ENV['GITHUB_WORKSPACE'] + '/polkadot/'
# Generate an ERB renderer based on the template .erb file
renderer = ERB.new(
File.read(File.join(polkadot_path, 'scripts/ci/github/polkadot_release.erb')),
trim_mode: '<>'
)
# get ref of last polkadot release
last_ref = 'refs/tags/' + github_client.latest_release(ENV['GITHUB_REPOSITORY']).tag_name
logger("Last ref: " + last_ref)
logger("Generate changelog for Polkadot")
# Changes between the previous release tag and the ref being released.
polkadot_cl = Changelog.new(
'paritytech/polkadot', last_ref, current_ref, token: token
)
# Gets the substrate commit hash used for a given polkadot ref by reading the
# sc-cli package entry out of that ref's Cargo.lock.
def get_substrate_commit(client, ref)
  lockfile = client.contents(
    ENV['GITHUB_REPOSITORY'],
    path: 'Cargo.lock',
    query: { ref: ref.to_s }
  ).content
  cargo = TOML::Parser.new(Base64.decode64(lockfile)).parsed
  cargo['package'].find { |p| p['name'] == 'sc-cli' }['source'].split('#').last
end
substrate_prev_sha = get_substrate_commit(github_client, last_ref)
substrate_cur_sha = get_substrate_commit(github_client, current_ref)
logger("Generate changelog for Substrate")
substrate_cl = Changelog.new(
'paritytech/substrate', substrate_prev_sha, substrate_cur_sha,
token: token,
prefix: true
)
# Combine all changes into a single array and filter out companions
all_changes = (polkadot_cl.changes + substrate_cl.changes).reject do |c|
c[:title] =~ /[Cc]ompanion/
end
# Set all the variables needed for a release
misc_changes = Changelog.changes_with_label(all_changes, 'B1-releasenotes')
client_changes = Changelog.changes_with_label(all_changes, 'B5-clientnoteworthy')
runtime_changes = Changelog.changes_with_label(all_changes, 'B7-runtimenoteworthy')
# Add the audit status for runtime changes
# Each branch prefixes the title with the matching audit (D) label status;
# the fall-through marks changes carrying no recognized audit label.
runtime_changes.each do |c|
if c[:labels].any? { |l| l[:name] == 'D1-audited 👍' }
c[:pretty_title] = "✅ `audited` #{c[:pretty_title]}"
next
end
if c[:labels].any? { |l| l[:name] == 'D2-notlive 💤' }
c[:pretty_title] = "✅ `not live` #{c[:pretty_title]}"
next
end
if c[:labels].any? { |l| l[:name] == 'D3-trivial 🧸' }
c[:pretty_title] = "✅ `trivial` #{c[:pretty_title]}"
next
end
if c[:labels].any? { |l| l[:name] == 'D5-nicetohaveaudit ⚠️' }
c[:pretty_title] = "⏳ `pending non-critical audit` #{c[:pretty_title]}"
next
end
if c[:labels].any? { |l| l[:name] == 'D9-needsaudit 👮' }
c[:pretty_title] = "❌ `AWAITING AUDIT` #{c[:pretty_title]}"
next
end
c[:pretty_title] = "⭕️ `unknown audit requirements` #{c[:pretty_title]}"
end
# The priority of users upgraded is determined by the highest-priority
# *Client* change
release_priority = Changelog.highest_priority_for_changes(client_changes)
# Pulled from the previous Github step
rustc_stable = ENV['RUSTC_STABLE']
rustc_nightly = ENV['RUSTC_NIGHTLY']
polkadot_runtime = get_runtime('polkadot', polkadot_path)
kusama_runtime = get_runtime('kusama', polkadot_path)
westend_runtime = get_runtime('westend', polkadot_path)
rococo_runtime = get_runtime('rococo', polkadot_path)
# These json files should have been downloaded as part of the build-runtimes
# github action
# NOTE(review): only polkadot and kusama srtool json files are loaded here
# although westend/rococo runtime versions are fetched above — confirm the
# template does not need the other two.
polkadot_json = JSON.parse(
File.read(
"#{ENV['GITHUB_WORKSPACE']}/polkadot-srtool-json/polkadot_srtool_output.json"
)
)
kusama_json = JSON.parse(
File.read(
"#{ENV['GITHUB_WORKSPACE']}/kusama-srtool-json/kusama_srtool_output.json"
)
)
puts renderer.result
-10
View File
@@ -1,10 +0,0 @@
# frozen_string_literal: true
# Gets the runtime version for a given runtime from the filesystem.
# Optionally accepts a path that is the root of the project which defaults to
# the current working directory
# NOTE(review): defined with keyword arguments, yet the release script in this
# repo appears to call it positionally (get_runtime('polkadot', path)), which
# would raise ArgumentError — confirm the intended call convention.
def get_runtime(runtime: nil, path: '.', runtime_dir: 'runtime')
# Returns the first run of digits on the first line mentioning spec_version.
File.open(path + "/#{runtime_dir}/#{runtime}/src/lib.rs") do |f|
f.find { |l| l =~ /spec_version/ }.match(/[0-9]+/)[0]
end
end
@@ -1,42 +0,0 @@
<%= print release_priority[:text] %> <%= puts " due to changes: *#{Changelog.changes_with_label(all_changes, release_priority[:label]).map(&:pretty_title).join(", ")}*" if release_priority[:priority] > 1 %>
Native runtimes:
- Polkadot: **<%= polkadot_runtime %>**
- Kusama: **<%= kusama_runtime %>**
- Westend: **<%= westend_runtime %>**
This release was tested against the following versions of `rustc`. Other versions may work.
- <%= rustc_stable %>
- <%= rustc_nightly %>
WASM runtimes built with [<%= polkadot_json['info']['generator']['name'] %> v<%= polkadot_json['info']['generator']['version'] %>](https://github.com/paritytech/srtool) using `<%= polkadot_json['rustc'] %>`.
Proposal hashes:
* `polkadot_runtime-v<%= polkadot_runtime %>.compact.compressed.wasm`: `<%= polkadot_json['runtimes']['compressed']['prop'] %>`
* `kusama_runtime-v<%= kusama_runtime %>.compact.compressed.wasm`: `<%= kusama_json['runtimes']['compressed']['prop'] %>`
<% unless misc_changes.empty? %>
## Changes
<% misc_changes.each do |c| %>
* <%= c[:pretty_title] %>
<% end %>
<% end %>
<% unless client_changes.empty? %>
## Client
<% client_changes.each do |c| %>
* <%= c[:pretty_title] %>
<% end %>
<% end %>
<% unless runtime_changes.empty? %>
## Runtime
<% runtime_changes.each do |c| %>
* <%= c[:pretty_title] %>
<% end %>
<% end %>
-13
View File
@@ -1,13 +0,0 @@
#!/usr/bin/env bash

# Run one honggfuzz target (named by $1) for at most 5 hours and fail the
# job if the fuzzer exits before the timeout — i.e. if it found a panic.
#
# `timeout` exits with 124 when the 5h limit is reached; any other status
# means `cargo hfuzz` stopped on its own (crash/panic found, or a setup
# error). Quote "$1" so target names survive word-splitting/globbing.
timeout --signal INT 5h cargo hfuzz run "$1"
status=$?

if [ "$status" -ne 124 ]; then
  echo "Found a panic!"
  # TODO: provide Minimal Reproducible Input
  # TODO: message on Matrix
  exit 1
else
  echo "Didn't find any problem in 5 hours of fuzzing"
fi
@@ -1,56 +0,0 @@
#!/bin/bash

# Verify that every weight file of the given runtime carries a recent
# `//! DATE:` header, i.e. was regenerated by the latest benchmark run.
# Usage: $0 <runtime-name>
ROOT="$(dirname "$0")/../../.."
RUNTIME="$1"

# If we're on a mac, use gdate for date command (requires coreutils installed via brew)
# ($DATE is used by check_date below; GNU `date -d` semantics are required.)
if [[ "$OSTYPE" == "darwin"* ]]; then
DATE="gdate"
else
DATE="date"
fi
# check_date LAST_RUN
# Succeeds (returns 0) when LAST_RUN — an ISO YYYY-MM-DD date string — is
# strictly newer than the cutoff of two days before today; fails (returns 1)
# otherwise. ISO dates order correctly as plain strings, so a lexicographic
# comparison suffices. Uses the file-level $DATE command (gdate on macOS).
check_date() {
  local last_run="$1"
  local today cutoff
  today="$($DATE +%Y-%m-%d)"
  cutoff="$($DATE -d "$today - 2 days" +%Y-%m-%d)"
  [[ "$last_run" > "$cutoff" ]]
}
# check_weights FILE CUR_DATE
# Extract the `//! DATE:` header from one weight file and verify (via
# check_date) that the benchmark ran within the last two days.
#   $1 - path to the weight .rs file
#   $2 - current date; assigned to CUR_DATE but not used — TODO confirm
#        whether the second argument can be dropped at the call site.
# Returns 0 when the file is fresh (or has no DATE header at all), 1 when
# the recorded date is older than the cutoff.
check_weights(){
FILE=$1
CUR_DATE=$2
DATE_REGEX='[0-9]{4}-[0-9]{2}-[0-9]{2}'
LAST_UPDATE="$(grep -E "//! DATE: $DATE_REGEX" "$FILE" | sed -r "s/.*DATE: ($DATE_REGEX).*/\1/")"
# NOTE(review): a file without a DATE header is *skipped* (returns 0), not
# treated as an error — confirm this leniency is intentional.
if [ -z "$LAST_UPDATE" ]; then
echo "Skipping $FILE, no date found."
return 0
fi
if ! check_date "$LAST_UPDATE" ; then
echo "ERROR: $FILE was not updated for the current date. Last update: $LAST_UPDATE"
return 1
fi
# echo "OK: $FILE"
}
echo "Checking weights for $RUNTIME"
CUR_DATE="$(date +%Y-%m-%d)"
HAS_ERROR=0
# Check every weight file of the runtime; remember failures but keep going
# so all stale files are reported in one run.
# NOTE(review): if the glob matches nothing, the literal pattern is passed
# to check_weights, whose grep then finds no date and skips it silently —
# confirm an empty weights dir should not be an error.
for FILE in "$ROOT"/runtime/"$RUNTIME"/src/weights/*.rs; do
if ! check_weights "$FILE" "$CUR_DATE"; then
HAS_ERROR=1
fi
done
if [ $HAS_ERROR -eq 1 ]; then
echo "ERROR: One or more weights files were not updated during the last benchmark run. Check the logs above."
exit 1
fi
@@ -1,82 +0,0 @@
#!/usr/bin/env bash

# Compare extrinsic indexing/ordering between the current build and the
# latest released polkadot binary, per runtime. If the metadata differs and
# transaction_version was not bumped, the check fails.
set -e
# Include the common functions library
#shellcheck source=../common/lib.sh
. "$(dirname "${0}")/../common/lib.sh"

# Current build listens on 9944, released binary on 9945.
HEAD_BIN=./artifacts/polkadot
HEAD_WS=ws://localhost:9944
RELEASE_WS=ws://localhost:9945
runtimes=(
"westend"
"kusama"
"polkadot"
)

# First we fetch the latest released binary
latest_release=$(latest_release 'paritytech/polkadot')
RELEASE_BIN="./polkadot-$latest_release"
echo "[+] Fetching binary for Polkadot version $latest_release"
curl -L "https://github.com/paritytech/polkadot/releases/download/$latest_release/polkadot" > "$RELEASE_BIN" || exit 1
chmod +x "$RELEASE_BIN"

for RUNTIME in "${runtimes[@]}"; do
echo "[+] Checking runtime: ${RUNTIME}"
# transaction_version lines from the release branch vs. the working tree.
release_transaction_version=$(
git show "origin/release:runtime/${RUNTIME}/src/lib.rs" | \
grep 'transaction_version'
)
current_transaction_version=$(
grep 'transaction_version' "./runtime/${RUNTIME}/src/lib.rs"
)
echo "[+] Release: ${release_transaction_version}"
echo "[+] Ours: ${current_transaction_version}"
# NOTE(review): a bump short-circuits with `exit 0`, which skips the
# remaining runtimes in the list — confirm this is intentional.
if [ ! "$release_transaction_version" = "$current_transaction_version" ]; then
echo "[+] Transaction version for ${RUNTIME} has been bumped since last release."
exit 0
fi
# Start running the nodes in the background
$HEAD_BIN --chain="$RUNTIME-local" --tmp &
$RELEASE_BIN --chain="$RUNTIME-local" --ws-port 9945 --tmp &
jobs
# Sleep a little to allow the nodes to spin up and start listening
# Poll for up to $TIMEOUT seconds until both RPC ports (9944/9945) are open.
TIMEOUT=5
for i in $(seq $TIMEOUT); do
sleep 1
if [ "$(lsof -nP -iTCP -sTCP:LISTEN | grep -c '994[45]')" == 2 ]; then
echo "[+] Both nodes listening"
break
fi
if [ "$i" == $TIMEOUT ]; then
echo "[!] Both nodes not listening after $i seconds. Exiting"
exit 1
fi
done
sleep 5
# Diff the two nodes' metadata; keep only lines reporting changed call
# indices. `|| true` keeps `set -e` happy when nothing changed.
changed_extrinsics=$(
polkadot-js-metadata-cmp "$RELEASE_WS" "$HEAD_WS" \
| sed 's/^ \+//g' | grep -e 'idx: [0-9]\+ -> [0-9]\+' || true
)
if [ -n "$changed_extrinsics" ]; then
echo "[!] Extrinsics indexing/ordering has changed in the ${RUNTIME} runtime! If this change is intentional, please bump transaction_version in lib.rs. Changed extrinsics:"
echo "$changed_extrinsics"
exit 1
fi
echo "[+] No change in extrinsics ordering for the ${RUNTIME} runtime"
# Tear down both background nodes before the next runtime iteration.
jobs -p | xargs kill; sleep 5
done
# Sleep a little to let the jobs die properly
sleep 5
-204
View File
@@ -1,204 +0,0 @@
#!/usr/bin/env bash

# Check for any changes in any runtime directories (e.g., ^runtime/polkadot) as
# well as directories common to all runtimes (e.g., ^runtime/common). If there
# are no changes, check if the Substrate git SHA in Cargo.lock has been
# changed. If so, pull the repo and verify if {spec,impl}_versions have been
# altered since the previous Substrate version used.
#
# If there were changes to any runtimes or common dirs, we iterate over each
# runtime (defined in the $runtimes() array), and check if {spec,impl}_version
# have been changed since the last release.

set -e # fail on any error

#Include the common functions library
#shellcheck source=../common/lib.sh
. "$(dirname "${0}")/../common/lib.sh"

SUBSTRATE_REPO="https://github.com/paritytech/substrate"
# Regex-escaped form used to match `source = "git+..."` lines in Cargo.lock.
SUBSTRATE_REPO_CARGO="git\+${SUBSTRATE_REPO}"
SUBSTRATE_VERSIONS_FILE="bin/node/runtime/src/lib.rs"

# figure out the latest release tag
boldprint "make sure we have all tags (including those from the release branch)"
git fetch --depth="${GIT_DEPTH:-100}" origin release
git fetch --depth="${GIT_DEPTH:-100}" origin 'refs/tags/*:refs/tags/*'
# Highest version-sorted tag of the form vX.Y.Z or vX.Y.Z-N.
LATEST_TAG="$(git tag -l | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-?[0-9]*$' | sort -V | tail -n 1)"
boldprint "latest release tag ${LATEST_TAG}"

boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}"
git --no-pager log --graph --oneline --decorate=short -n 10

boldprint "make sure the master branch is available in shallow clones"
git fetch --depth="${GIT_DEPTH:-100}" origin master

# Runtimes and shared directories checked by the per-runtime loop below.
runtimes=(
"kusama"
"polkadot"
"westend"
"rococo"
)

common_dirs=(
"common"
)
function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; }
# When no runtime sources changed, the only remaining concern is a Substrate
# ref bump in Cargo.lock that itself changed {spec,impl}_version upstream.
boldprint "check if the wasm sources changed since ${LATEST_TAG}"
if ! has_runtime_changes "${LATEST_TAG}" "${CI_COMMIT_SHA}"; then
boldprint "no changes to any runtime source code detected"
# continue checking if Cargo.lock was updated with a new substrate reference
# and if that change includes a {spec|impl}_version update.
SUBSTRATE_REFS_CHANGED="$(
git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \
| sed -n -r "s~^[\+\-]source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | wc -l
)"

# check Cargo.lock for substrate ref change: 0 unique SHAs means the ref is
# untouched, 2 means exactly one old and one new ref (a clean update),
# anything else is ambiguous and unsupported.
case "${SUBSTRATE_REFS_CHANGED}" in
(0)
boldprint "substrate refs not changed in Cargo.lock"
exit 0
;;
(2)
boldprint "substrate refs updated since ${LATEST_TAG}"
;;
(*)
boldprint "check unsupported: more than one commit targeted in repo ${SUBSTRATE_REPO_CARGO}"
exit 1
esac

# Old (removed) and new (added) substrate commit SHAs from the lockfile diff.
SUBSTRATE_PREV_REF="$(
git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \
| sed -n -r "s~^\-source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1
)"

SUBSTRATE_NEW_REF="$(
git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" Cargo.lock \
| sed -n -r "s~^\+source = \"${SUBSTRATE_REPO_CARGO}#([a-f0-9]+)\".*$~\1~p" | sort -u | head -n 1
)"

boldcat <<EOT
previous substrate commit id ${SUBSTRATE_PREV_REF}
new substrate commit id ${SUBSTRATE_NEW_REF}
EOT

# okay so now need to fetch the substrate repository and check whether spec_version or impl_version has changed there
SUBSTRATE_CLONE_DIR="$(mktemp -t -d substrate-XXXXXX)"
trap 'rm -rf "${SUBSTRATE_CLONE_DIR}"' INT QUIT TERM ABRT EXIT

git clone --depth="${GIT_DEPTH:-100}" --no-tags \
"${SUBSTRATE_REPO}" "${SUBSTRATE_CLONE_DIR}"

# check if there are changes to the spec|impl versions; exit cleanly when not
git -C "${SUBSTRATE_CLONE_DIR}" diff \
"${SUBSTRATE_PREV_REF}..${SUBSTRATE_NEW_REF}" "${SUBSTRATE_VERSIONS_FILE}" \
| grep -E '^[\+\-][[:space:]]+(spec|impl)_version: +([0-9]+),$' || exit 0

boldcat <<EOT
spec_version or impl_version have changed in substrate after updating Cargo.lock
please make sure versions are bumped in polkadot accordingly
EOT
fi
failed_runtime_checks=()

# Iterate over each runtime defined at the start of the script
for RUNTIME in "${runtimes[@]}"
do
# Check if there were changes to this specific runtime or common directories.
# If not, we can skip to the next runtime
regex="^runtime/$(join_by '|^runtime/' "$RUNTIME" "${common_dirs[@]}")"
if ! git diff --name-only "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" \
| grep -E -q -e "$regex"; then
continue
fi

# check for spec_version updates: if the spec versions changed, then there is
# consensus-critical logic that has changed. the runtime wasm blobs must be
# rebuilt.
# (added lines vs. removed lines of `spec_version: N,` in the tag..HEAD diff)
add_spec_version="$(
git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \
| sed -n -r "s/^\+[[:space:]]+spec_version: +([0-9]+),$/\1/p"
)"
sub_spec_version="$(
git diff "refs/tags/${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \
| sed -n -r "s/^\-[[:space:]]+spec_version: +([0-9]+),$/\1/p"
)"

# see if the version and the binary blob changed
if [ "${add_spec_version}" != "${sub_spec_version}" ]
then

boldcat <<EOT
## RUNTIME: ${RUNTIME} ##

changes to the ${RUNTIME} runtime sources and changes in the spec version.

spec_version: ${sub_spec_version} -> ${add_spec_version}

EOT
continue

else
# check for impl_version updates: if only the impl versions changed, we assume
# there is no consensus-critical logic that has changed.
add_impl_version="$(
git diff refs/tags/"${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \
| sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p'
)"
sub_impl_version="$(
git diff refs/tags/"${LATEST_TAG}...${CI_COMMIT_SHA}" "runtime/${RUNTIME}/src/lib.rs" \
| sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p'
)"

# see if the impl version changed
if [ "${add_impl_version}" != "${sub_impl_version}" ]
then

boldcat <<EOT

## RUNTIME: ${RUNTIME} ##

changes to the ${RUNTIME} runtime sources and changes in the impl version.

impl_version: ${sub_impl_version} -> ${add_impl_version}

EOT
continue

fi

# Reached only when the runtime's sources changed but neither spec_version
# nor impl_version did — recorded and reported as a failure below the loop.
failed_runtime_checks+=("$RUNTIME")
fi
done
# Fail the job when at least one runtime changed its sources without the
# matching spec_version/impl_version bump detected in the loop above.
# FIX: use ${#failed_runtime_checks[@]} (element count); the previous
# ${#failed_runtime_checks} is the string length of element 0, which only
# worked by accident (0 for an empty array, >0 for any non-empty name).
if [ ${#failed_runtime_checks[@]} -gt 0 ]; then

boldcat <<EOT
wasm source files changed or the spec version in the substrate reference in
the Cargo.lock but not the spec/impl version. If changes made do not alter
logic, just bump 'impl_version'. If they do change logic, bump
'spec_version'.

source file directories:
- runtime

version files: ${failed_runtime_checks[@]}

EOT

exit 1
fi

exit 0
-344
View File
@@ -1,344 +0,0 @@
150
2D
A&V
accessor/MS
AccountId
activations
acyclic
adversary/SM
allocator/SM
annualised
anonymize/D
Apache-2.0/M
API
APIs
arg/MS
assignee/SM
async
asynchrony
autogenerated
backable
backend/MS
benchmark/DSMG
BFT/M
bitfield/MS
bitwise
blake2/MS
blockchain/MS
borked
broadcast/UDSMG
BTC/S
canonicalization
canonicalize/D
CentOS
CLI/MS
codebase/SM
codec/SM
commit/D
comparator
computable
conclude/UD
config/MS
could've
crowdfund
crowdloan/MSG
crypto/MS
CSM
Cucumber/MS
customizable/B
DDoS
Debian/M
decodable/MS
decrement
deduplicated
deduplication
deinitializing
dequeue/SD
dequeuing
deregister
deserialize/G
DHT
disincentivize/D
dispatchable/SM
DLEQ
DM
DMP/SM
DMQ
DoS
DOT
DOTs
ECDSA
ed25519
encodable
enqueue/D
enqueue/DMSG
entrypoint/MS
enum
ERC-20
ETH/S
ethereum/MS
externality/MS
extrinsic
extrinsics
fedora/M
finalize/B
FRAME/MS
FSMs
functor
fungibility
gameable
getter/MS
GiB/S
GKE
GNUNet
GPL/M
GPLv3/M
Grafana/MS
Gurke/MS
gurke/MS
Handler/MS
HMP/SM
HRMP
HSM
https
iff
implementer/MS
includable
include/BG
increment/DSMG
inherent
inherents
initialize/CRG
initializer
instantiate/B
instantiation/SM
intrinsic
intrinsics
invariant/MS
invariants
inverter/MS
invertible
io
IP/S
isn
isolatable
isolate/BG
iterable
jaeger/MS
js
judgement/S
keccak256/M
keypair/MS
keystore/MS
Kovan
KSM/S
Kubernetes/MS
kusama/S
KYC/M
lib
libp2p
lifecycle/MS
liveness
lookahead/MS
lookup/MS
LRU
mainnet/MS
malus/MS
MB/M
Mbit
merkle/MS
Merklized
metadata/M
middleware/MS
Millau
misbehavior/SM
misbehaviors
misvalidate/D
MIT/M
MMR
modularity
mpsc
MPSC
MQC/SM
msg
multisig/S
multivalidator/SM
mutators
mutex
natively
NFA
NFT/SM
no_std
nonces
NPoS
NTB
offboard/DMSG
onboard/DMSG
oneshot/MS
onwards
OOM/S
OPENISH
others'
ourself
overseer/MS
ownerless
p2p
parablock/MS
parachain/MS
ParaId
parameterization
parameterize/D
parathread/MS
participations
passthrough
PDK
peerset/MS
permission/D
pessimization
phragmen
picosecond/SM
PoA/MS
polkadot/MS
Polkadot/MS
PoS/MS
PoV/MS
PoW/MS
PR
precheck
prechecking
preconfigured
preimage/MS
preopen
prepend/G
prevalidating
prevalidation
preverify/G
programmatically
prometheus/MS
provisioner/MS
proxy/DMSG
proxy/G
proxying
PRs
PVF/S
querier
README/MS
redhat/M
register/CD
relayer
repo/MS
requesters
reservable
responder/SM
retriability
reverify
ROC
roundtrip/MS
routable
rpc
RPC/MS
runtime/MS
rustc/MS
SAFT
scalability
scalable
Schnorr
schnorrkel
SDF
sending/S
sharding
shareable
Simnet/MS
spawn/SR
spawner
sr25519
SS58
SSL
startup/MS
stateful
Statemine
str
struct/MS
subcommand/SM
substream
subsystem/MS
subsystems'
supermajority
SURI
sybil
systemwide
taskmanager/MS
TCP
teleport/D
teleport/RG
teleportation/SM
teleporter/SM
teleporters
template/GSM
testnet/MS
tera/M
teleports
timeframe
timestamp/MS
topologies
tradeoff
transitionary
trie/MS
trustless/Y
TTL
tuple/SM
typesystem
ubuntu/M
UDP
UI
unapplied
unassign
unconcluded
unexpectable
unfinalize/B
unfinalized
union/MSG
unordered
unreceived
unreserve
unreserving
unroutable
unservable/B
unshare/D
untrusted
untyped
unvested
URI
utilize
v0
v1
v2
validator/SM
ve
vec
verifier
verify/R
versa
Versi
version/DMSG
VMP/SM
VPS
VRF/SM
vstaging
VStaging
w3f/MS
wakeup
wakeups
warming/S
wasm/M
wasmtime
Westend/M
wildcard/MS
WND/S
Wococo
WS
XCM/S
XCMP/M
yeet
yml
zsh
@@ -1,174 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "build" stage
build-linux-stable:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .common-refs
- .compiler-info
- .collect-artifacts
variables:
RUST_TOOLCHAIN: stable
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
# Ensure we run the UI tests.
RUN_UI_TESTS: 1
script:
- time cargo build --locked --profile testnet --features pyroscope,fast-runtime --verbose --bins
# pack artifacts
- mkdir -p ./artifacts
- VERSION="${CI_COMMIT_REF_NAME}" # will be tag or branch name
- mv ./target/testnet/polkadot ./artifacts/.
- mv ./target/testnet/polkadot-prepare-worker ./artifacts/.
- mv ./target/testnet/polkadot-execute-worker ./artifacts/.
- pushd artifacts
- sha256sum polkadot | tee polkadot.sha256
- shasum -c polkadot.sha256
- popd
- EXTRATAG="${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}"
- echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
- echo -n ${VERSION} > ./artifacts/VERSION
- echo -n ${EXTRATAG} > ./artifacts/EXTRATAG
- echo -n ${CI_JOB_ID} > ./artifacts/BUILD_LINUX_JOB_ID
- RELEASE_VERSION=$(./artifacts/polkadot -V | awk '{print $2}'| awk -F "-" '{print $1}')
- echo -n "v${RELEASE_VERSION}" > ./artifacts/BUILD_RELEASE_VERSION
build-test-collators:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .common-refs
- .compiler-info
- .collect-artifacts
script:
- time cargo build --locked --profile testnet --verbose -p test-parachain-adder-collator
- time cargo build --locked --profile testnet --verbose -p test-parachain-undying-collator
# pack artifacts
- mkdir -p ./artifacts
- mv ./target/testnet/adder-collator ./artifacts/.
- mv ./target/testnet/undying-collator ./artifacts/.
- echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION
- echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG
- echo "adder-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
- echo "undying-collator version = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
build-malus:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .common-refs
- .compiler-info
- .collect-artifacts
script:
- time cargo build --locked --profile testnet --verbose -p polkadot-test-malus
# pack artifacts
- mkdir -p ./artifacts
- mv ./target/testnet/malus ./artifacts/.
- mv ./target/testnet/polkadot-execute-worker ./artifacts/.
- mv ./target/testnet/polkadot-prepare-worker ./artifacts/.
- echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION
- echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG
- echo "polkadot-test-malus = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
build-staking-miner:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .common-refs
- .compiler-info
- .collect-artifacts
script:
- time cargo build --locked --release --package staking-miner
# pack artifacts
- mkdir -p ./artifacts
- mv ./target/release/staking-miner ./artifacts/.
- echo -n "${CI_COMMIT_REF_NAME}" > ./artifacts/VERSION
- echo -n "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" > ./artifacts/EXTRATAG
- echo "staking-miner = $(cat ./artifacts/VERSION) (EXTRATAG = $(cat ./artifacts/EXTRATAG))"
build-rustdoc:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in test.yml
needs:
- job: test-deterministic-wasm
artifacts: false
extends:
- .docker-env
- .test-refs
variables:
SKIP_WASM_BUILD: 1
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc"
when: on_success
expire_in: 1 days
paths:
- ./crate-docs/
script:
# FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"` and `--all-features`
# FIXME: return to stable when https://github.com/rust-lang/rust/issues/96937 gets into stable
- time cargo doc --workspace --verbose --no-deps
- rm -f ./target/doc/.lock
- mv ./target/doc ./crate-docs
# FIXME: remove me after CI image gets nonroot
- chown -R nonroot:nonroot ./crate-docs
- echo "<meta http-equiv=refresh content=0;url=polkadot_service/index.html>" > ./crate-docs/index.html
build-implementers-guide:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in test.yml
needs:
- job: test-deterministic-wasm
artifacts: false
extends:
- .kubernetes-env
- .test-refs
- .collect-artifacts-short
# git depth is set on purpose: https://github.com/paritytech/polkadot/issues/6284
variables:
GIT_STRATEGY: clone
GIT_DEPTH: 0
CI_IMAGE: paritytech/mdbook-utils:e14aae4a-20221123
script:
- mdbook build ./roadmap/implementers-guide
- mkdir -p artifacts
- mv roadmap/implementers-guide/book artifacts/
build-short-benchmark:
stage: build
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .test-refs
- .collect-artifacts
script:
- cargo build --profile release --locked --features=runtime-benchmarks
- mkdir artifacts
- cp ./target/release/polkadot ./artifacts/
@@ -1,136 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "check" stage
check-runtime:
stage: check
image: paritytech/tools:latest
extends:
- .kubernetes-env
rules:
- if: $CI_COMMIT_REF_NAME =~ /^release-v[0-9]+\.[0-9]+.*$/ # i.e. release-v0.9.27
variables:
GITLAB_API: "https://gitlab.parity.io/api/v4"
GITHUB_API_PROJECT: "parity%2Finfrastructure%2Fgithub-api"
script:
- ./scripts/ci/gitlab/check_runtime.sh
allow_failure: true
cargo-fmt:
stage: check
extends:
- .docker-env
- .test-refs
script:
- cargo +nightly --version
- cargo +nightly fmt --all -- --check
allow_failure: true
# Disabled in https://github.com/paritytech/polkadot/pull/7512
.spellcheck_disabled:
stage: check
extends:
- .docker-env
- .test-refs
script:
- cargo spellcheck --version
# compare with the commit parent to the PR, given it's from a default branch
- git fetch origin +${CI_DEFAULT_BRANCH}:${CI_DEFAULT_BRANCH}
- echo "___Spellcheck is going to check your diff___"
- cargo spellcheck list-files -vvv $(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges))
- time cargo spellcheck check -vvv --cfg=scripts/ci/gitlab/spellcheck.toml --checkers hunspell --code 1
$(git diff --diff-filter=AM --name-only $(git merge-base ${CI_COMMIT_SHA} ${CI_DEFAULT_BRANCH} -- :^bridges))
allow_failure: true
check-try-runtime:
stage: check
extends:
- .docker-env
- .test-refs
- .compiler-info
script:
# Check that everything compiles with `try-runtime` feature flag.
- cargo check --locked --features try-runtime --all
# More info can be found here: https://github.com/paritytech/polkadot/pull/5865
.check-runtime-migration:
stage: check
extends:
- .docker-env
- .test-pr-refs
- .compiler-info
script:
- |
export RUST_LOG=remote-ext=debug,runtime=debug
echo "---------- Running try-runtime for ${NETWORK} ----------"
time cargo install --locked --git https://github.com/paritytech/try-runtime-cli --rev a93c9b5abe5d31a4cf1936204f7e5c489184b521
time cargo build --release --locked -p "$NETWORK"-runtime --features try-runtime
time try-runtime \
--runtime ./target/release/wbuild/"$NETWORK"-runtime/target/wasm32-unknown-unknown/release/"$NETWORK"_runtime.wasm \
on-runtime-upgrade --checks=pre-and-post live --uri wss://${NETWORK}-try-runtime-node.parity-chains.parity.io:443
check-runtime-migration-polkadot:
stage: check
extends:
- .docker-env
- .test-pr-refs
- .compiler-info
- .check-runtime-migration
variables:
NETWORK: "polkadot"
allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107
check-runtime-migration-kusama:
stage: check
extends:
- .docker-env
- .test-pr-refs
- .compiler-info
- .check-runtime-migration
variables:
NETWORK: "kusama"
allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107
check-runtime-migration-westend:
stage: check
extends:
- .docker-env
- .test-pr-refs
- .compiler-info
- .check-runtime-migration
variables:
NETWORK: "westend"
allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107
check-runtime-migration-rococo:
stage: check
extends:
- .docker-env
- .test-pr-refs
- .compiler-info
- .check-runtime-migration
variables:
NETWORK: "rococo"
allow_failure: true # FIXME https://github.com/paritytech/substrate/issues/13107
# is broken, need to fix
check-no-default-features:
stage: check
extends:
- .docker-env
- .test-refs
- .compiler-info
script:
# Check that polkadot-cli will compile no default features.
- pushd ./node/service && cargo check --locked --no-default-features && popd
- pushd ./cli && cargo check --locked --no-default-features --features "service" && popd
- exit 0
# this is artificial job to run some build and tests using DAG
job-starter:
stage: check
image: paritytech/tools:latest
extends:
- .kubernetes-env
- .common-refs
script:
- echo ok
@@ -1,276 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "publish" stage
# This image is used in testnets
# Release image is handled by the Github Action here:
# .github/workflows/publish-docker-release.yml
publish-polkadot-debug-image:
stage: publish
extends:
- .kubernetes-env
- .build-push-image
rules:
# Don't run when triggered from another pipeline
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
variables:
IMAGE_NAME: "polkadot-debug"
BINARY: "polkadot,polkadot-execute-worker,polkadot-prepare-worker"
needs:
- job: build-linux-stable
artifacts: true
after_script:
- !reference [.build-push-image, after_script]
# pass artifacts to the zombienet-tests job
# https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance
- echo "PARACHAINS_IMAGE_NAME=${IMAGE}" > ./artifacts/parachains.env
- echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env
artifacts:
reports:
# this artifact is used in zombienet-tests job
dotenv: ./artifacts/parachains.env
expire_in: 1 days
publish-test-collators-image:
# service image for Simnet
stage: publish
extends:
- .kubernetes-env
- .build-push-image
- .zombienet-refs
variables:
IMAGE_NAME: "colander"
BINARY: "adder-collator,undying-collator"
needs:
- job: build-test-collators
artifacts: true
after_script:
- !reference [.build-push-image, after_script]
# pass artifacts to the zombienet-tests job
- echo "COLLATOR_IMAGE_NAME=${IMAGE}" > ./artifacts/collator.env
- echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env
artifacts:
reports:
# this artifact is used in zombienet-tests job
dotenv: ./artifacts/collator.env
publish-malus-image:
# service image for Simnet
stage: publish
extends:
- .kubernetes-env
- .build-push-image
- .zombienet-refs
variables:
IMAGE_NAME: "malus"
BINARY: "malus,polkadot-execute-worker,polkadot-prepare-worker"
needs:
- job: build-malus
artifacts: true
after_script:
- !reference [.build-push-image, after_script]
# pass artifacts to the zombienet-tests job
- echo "MALUS_IMAGE_NAME=${IMAGE}" > ./artifacts/malus.env
- echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env
artifacts:
reports:
# this artifact is used in zombienet-tests job
dotenv: ./artifacts/malus.env
publish-staking-miner-image:
stage: publish
extends:
- .kubernetes-env
- .build-push-image
- .publish-refs
variables:
IMAGE_NAME: "staking-miner"
BINARY: "staking-miner"
DOCKER_OWNER: "paritytech"
DOCKER_USER: "${Docker_Hub_User_Parity}"
DOCKER_PASS: "${Docker_Hub_Pass_Parity}"
needs:
- job: build-staking-miner
artifacts: true
publish-polkadot-image-description:
stage: publish
image: paritytech/dockerhub-description
variables:
DOCKER_USERNAME: ${Docker_Hub_User_Parity}
DOCKER_PASSWORD: ${Docker_Hub_Pass_Parity}
DOCKERHUB_REPOSITORY: parity/polkadot
SHORT_DESCRIPTION: "Polkadot Official Docker Image"
README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md
rules:
- if: $CI_COMMIT_REF_NAME == "master"
changes:
- scripts/ci/dockerfiles/polkadot/polkadot_Dockerfile.README.md
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
script:
- cd / && sh entrypoint.sh
tags:
- kubernetes-parity-build
publish-staking-miner-image-description:
stage: publish
image: paritytech/dockerhub-description
variables:
DOCKER_USERNAME: ${Docker_Hub_User_Parity}
DOCKER_PASSWORD: ${Docker_Hub_Pass_Parity}
DOCKERHUB_REPOSITORY: paritytech/staking-miner
SHORT_DESCRIPTION: "Staking-miner Docker Image"
README_FILEPATH: $CI_PROJECT_DIR/scripts/ci/dockerfiles/staking-miner/staking-miner_Dockerfile.README.md
rules:
- if: $CI_COMMIT_REF_NAME == "master"
changes:
- scripts/ci/dockerfiles/staking-miner/staking-miner_Dockerfile.README.md
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
script:
- cd / && sh entrypoint.sh
tags:
- kubernetes-parity-build
publish-s3-release:
stage: publish
extends:
- .kubernetes-env
needs:
- job: build-linux-stable
artifacts: true
variables:
CI_IMAGE: paritytech/awscli:latest
GIT_STRATEGY: none
PREFIX: "builds/polkadot/${ARCH}-${DOCKER_OS}"
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
# publishing binaries nightly
- if: $CI_PIPELINE_SOURCE == "schedule"
before_script:
- !reference [.build-push-image, before_script]
script:
- echo "uploading objects to https://releases.parity.io/${PREFIX}/${VERSION}"
- aws s3 sync --acl public-read ./artifacts/ s3://${AWS_BUCKET}/${PREFIX}/${VERSION}/
- echo "update objects at https://releases.parity.io/${PREFIX}/${EXTRATAG}"
- find ./artifacts -type f | while read file; do
name="${file#./artifacts/}";
aws s3api copy-object
--copy-source ${AWS_BUCKET}/${PREFIX}/${VERSION}/${name}
--bucket ${AWS_BUCKET} --key ${PREFIX}/${EXTRATAG}/${name};
done
- |
cat <<-EOM
|
| polkadot binary paths:
|
| - https://releases.parity.io/${PREFIX}/${EXTRATAG}/polkadot
| - https://releases.parity.io/${PREFIX}/${VERSION}/polkadot
|
EOM
after_script:
- aws s3 ls s3://${AWS_BUCKET}/${PREFIX}/${EXTRATAG}/
--recursive --human-readable --summarize
publish-rustdoc:
stage: publish
extends:
- .kubernetes-env
variables:
CI_IMAGE: paritytech/tools:latest
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME == "master"
# `needs:` can be removed after CI image gets nonroot. In this case `needs:` stops other
# artifacts from being downloaded by this job.
needs:
- job: build-rustdoc
artifacts: true
- job: build-implementers-guide
artifacts: true
script:
# Save README and docs
- cp -r ./crate-docs/ /tmp/doc/
- cp -r ./artifacts/book/ /tmp/
# setup ssh
- eval $(ssh-agent)
- ssh-add - <<< ${GITHUB_SSH_PRIV_KEY}
- mkdir ~/.ssh && touch ~/.ssh/known_hosts
- ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
# Set git config
- git config user.email "devops-team@parity.io"
- git config user.name "${GITHUB_USER}"
- git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git"
- git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
- git fetch origin gh-pages
- git checkout gh-pages
# Remove everything and restore generated docs and README
- cp index.html /tmp
- cp README.md /tmp
- rm -rf ./*
# dir for rustdoc
- mkdir -p doc
# dir for implementors guide
- mkdir -p book
- mv /tmp/doc/* doc/
- mv /tmp/book/html/* book/
- mv /tmp/index.html .
- mv /tmp/README.md .
# Upload files
- git add --all --force
# `git commit` has an exit code of > 0 if there is nothing to commit.
# This causes GitLab to exit immediately and marks this job failed.
# We don't want to mark the entire job failed if there's nothing to
# publish though, hence the `|| true`.
- git commit -m "Updated docs for ${CI_COMMIT_REF_NAME}" ||
echo "___Nothing to commit___"
- git push origin gh-pages --force
- echo "___Rustdoc was successfully published to https://paritytech.github.io/polkadot/___"
after_script:
- rm -rf .git/ ./*
.update-substrate-template-repository:
stage: publish
extends: .kubernetes-env
variables:
GIT_STRATEGY: none
rules:
# The template is only updated for FINAL releases
# i.e. the rule should not cover RC or patch releases
- if: $CI_COMMIT_TAG =~ /^v[0-9]+\.[0-9]+$/ # e.g. v1.0
- if: $CI_COMMIT_TAG =~ /^v[0-9]+\.[0-9]+\.[0-9]+$/ # e.g. v1.0.0
script:
- git clone --depth=1 --branch="$PIPELINE_SCRIPTS_TAG" https://github.com/paritytech/pipeline-scripts
- export POLKADOT_BRANCH="polkadot-$CI_COMMIT_TAG"
- git clone --depth=1 --branch="$POLKADOT_BRANCH" https://github.com/paritytech/"$TEMPLATE_SOURCE"
- cd "$TEMPLATE_SOURCE"
- ../pipeline-scripts/update_substrate_template.sh
--repo-name "$TARGET_REPOSITORY"
--template-path "$TEMPLATE_PATH"
--github-api-token "$GITHUB_TOKEN"
--polkadot-branch "$POLKADOT_BRANCH"
# Ref: https://github.com/paritytech/opstooling/issues/111
update-node-template:
extends: .update-substrate-template-repository
variables:
TARGET_REPOSITORY: substrate-node-template
TEMPLATE_SOURCE: substrate
TEMPLATE_PATH: bin/node-template
# Ref: https://github.com/paritytech/opstooling/issues/111
update-parachain-template:
extends: .update-substrate-template-repository
variables:
TARGET_REPOSITORY: substrate-parachain-template
TEMPLATE_SOURCE: cumulus
TEMPLATE_PATH: parachain-template
@@ -1,27 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "short-benchmarks" stage
# Run all pallet benchmarks only once to check if there are any errors
short-benchmark-polkadot: &short-bench
stage: short-benchmarks
extends:
- .test-pr-refs
- .docker-env
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
needs:
- job: build-short-benchmark
artifacts: true
variables:
RUNTIME: polkadot
script:
- ./artifacts/polkadot benchmark pallet --wasm-execution compiled --chain $RUNTIME-dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1
short-benchmark-kusama:
<<: *short-bench
variables:
RUNTIME: kusama
short-benchmark-westend:
<<: *short-bench
variables:
RUNTIME: westend
@@ -1,119 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "test" stage
# It's more like a check and it belongs to the previous stage, but we want to run this job with real tests in parallel
find-fail-ci-phrase:
stage: test
variables:
CI_IMAGE: "paritytech/tools:latest"
ASSERT_REGEX: "FAIL-CI"
GIT_DEPTH: 1
extends:
- .kubernetes-env
script:
- set +e
- rg --line-number --hidden --type rust --glob '!{.git,target}' "$ASSERT_REGEX" .; exit_status=$?
- if [ $exit_status -eq 0 ]; then
echo "$ASSERT_REGEX was found, exiting with 1";
exit 1;
else
echo "No $ASSERT_REGEX was found, exiting with 0";
exit 0;
fi
test-linux-stable:
stage: test
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .common-refs
- .pipeline-stopper-artifacts
before_script:
- !reference [.compiler-info, before_script]
- !reference [.pipeline-stopper-vars, before_script]
variables:
RUST_TOOLCHAIN: stable
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
script:
- time cargo test --workspace --profile testnet --verbose --locked --features=runtime-benchmarks,runtime-metrics,try-runtime,ci-only-tests
# Run `polkadot-runtime-parachains` tests a second time because `paras_inherent::enter` tests are gated by not having
# the `runtime-benchmarks` feature enabled.
- time cargo test --profile testnet --verbose --locked --features=runtime-metrics,try-runtime -p polkadot-runtime-parachains
test-linux-oldkernel-stable:
extends: test-linux-stable
tags:
- oldkernel-vm
.check-dependent-project: &check-dependent-project
stage: test
extends:
- .docker-env
- .test-pr-refs
script:
- git clone
--depth=1
"--branch=$PIPELINE_SCRIPTS_TAG"
https://github.com/paritytech/pipeline-scripts
- ./pipeline-scripts/check_dependent_project.sh
--org paritytech
--dependent-repo "$DEPENDENT_REPO"
--github-api-token "$GITHUB_PR_TOKEN"
--extra-dependencies "$EXTRA_DEPENDENCIES"
--companion-overrides "$COMPANION_OVERRIDES"
check-dependent-cumulus:
<<: *check-dependent-project
variables:
DEPENDENT_REPO: cumulus
EXTRA_DEPENDENCIES: substrate
COMPANION_OVERRIDES: |
polkadot: release-v*
cumulus: polkadot-v*
test-node-metrics:
stage: test
extends:
- .docker-env
- .test-refs
- .compiler-info
variables:
RUST_TOOLCHAIN: stable
# Enable debug assertions since we are running optimized builds for testing
# but still want to have debug assertions.
RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings"
script:
# Build the required workers.
- cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker --profile testnet --verbose --locked
# Run tests.
- time cargo test --profile testnet --verbose --locked --features=runtime-metrics -p polkadot-node-metrics
test-deterministic-wasm:
stage: test
extends:
- .docker-env
- .test-refs
- .compiler-info
script:
- ./scripts/ci/gitlab/test_deterministic_wasm.sh
cargo-clippy:
stage: test
# this is an artificial job dependency, for pipeline optimization using GitLab's DAGs
# the job can be found in check.yml
needs:
- job: job-starter
artifacts: false
extends:
- .docker-env
- .test-refs
script:
- echo $RUSTFLAGS
- cargo version && cargo clippy --version
- SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo clippy -q --locked --all-targets --workspace
@@ -1,39 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "weights" stage
update_polkadot_weights: &update-weights
# The update-weights pipeline defaults to `interruptible: false` so that we'll be able to
# reach and run the benchmarking jobs despite the "Auto-cancel redundant pipelines" CI setting.
# The setting is relevant because future pipelines (e.g. created for new commits or other schedules)
# might otherwise cancel the benchmark jobs early.
interruptible: false
stage: weights
timeout: 1d
when: manual
image: $CI_IMAGE
variables:
RUNTIME: polkadot
artifacts:
paths:
- ${RUNTIME}_weights_${CI_COMMIT_SHORT_SHA}.patch
script:
- ./scripts/ci/run_benches_for_runtime.sh $RUNTIME
- git diff -P > ${RUNTIME}_weights_${CI_COMMIT_SHORT_SHA}.patch
# uses the "shell" executors
tags:
- weights
update_kusama_weights:
<<: *update-weights
variables:
RUNTIME: kusama
update_westend_weights:
<<: *update-weights
variables:
RUNTIME: westend
update_rococo_weights:
<<: *update-weights
variables:
RUNTIME: rococo
@@ -1,444 +0,0 @@
# This file is part of .gitlab-ci.yml
# Here are all jobs that are executed during "zombienet" stage
zombienet-tests-parachains-smoke-test:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-malus-image
- job: publish-test-collators-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${MALUS_IMAGE_NAME} ${MALUS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE="docker.io/paritypr/colander:7292" # The collator image is fixed
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0001-parachains-smoke-test.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-parachains-pvf:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0001-parachains-pvf.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-parachains-disputes:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: publish-malus-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${MALUS_IMAGE_NAME} ${MALUS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0002-parachains-disputes.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-parachains-disputes-garbage-candidate:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: publish-malus-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${MALUS_IMAGE_NAME} ${MALUS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0003-parachains-garbage-candidate.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-parachains-disputes-past-session:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: publish-malus-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${MALUS_IMAGE_NAME} ${MALUS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0004-parachains-disputes-past-session.zndsl"
allow_failure: true
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-test-parachains-upgrade-smoke-test:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-malus-image
- job: publish-test-collators-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke"
before_script:
- echo "ZombieNet Tests Config"
- echo "${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}"
- echo "docker.io/parity/polkadot-collator:latest"
- echo "${ZOMBIENET_IMAGE}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export COL_IMAGE="docker.io/parity/polkadot-collator:latest" # Use latest cumulus image
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0002-parachains-upgrade-smoke-test.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-misc-paritydb:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
artifacts: true
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/misc"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0001-paritydb.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-misc-upgrade-node:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: build-linux-stable
artifacts: true
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/misc"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest"
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
- BUILD_LINUX_JOB_ID="$(cat ./artifacts/BUILD_LINUX_JOB_ID)"
- export POLKADOT_PR_ARTIFACTS_URL="https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/${BUILD_LINUX_JOB_ID}/artifacts/raw/artifacts"
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0002-upgrade-node.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-malus-dispute-valid:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
- job: publish-malus-image
- job: publish-test-collators-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/node/malus/integrationtests"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${MALUS_IMAGE_NAME} ${MALUS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie*
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0001-dispute-valid-block.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-deregister-register-validator:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
artifacts: true
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/smoke"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie*
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export MALUS_IMAGE=${MALUS_IMAGE_NAME}:${MALUS_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0003-deregister-register-validator-smoke.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-beefy-and-mmr:
stage: zombienet
image: "${ZOMBIENET_IMAGE}"
extends:
- .kubernetes-env
- .zombienet-refs
needs:
- job: publish-polkadot-debug-image
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/functional"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie*
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="0003-beefy-and-mmr.zndsl"
allow_failure: true
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-async-backing-compatibility:
stage: zombienet
extends:
- .kubernetes-env
- .zombienet-refs
image: "${ZOMBIENET_IMAGE}"
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: build-linux-stable
artifacts: true
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)"
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="001-async-backing-compatibility.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
zombienet-tests-async-backing-runtime-upgrade:
  stage: zombienet
  extends:
    - .kubernetes-env
    - .zombienet-refs
  image: "${ZOMBIENET_IMAGE}"
  needs:
    - job: publish-polkadot-debug-image
    - job: publish-test-collators-image
    - job: build-linux-stable
      artifacts: true
  variables:
    RUN_IN_CONTAINER: "1"
    GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing"
  before_script:
    - echo "Zombie-net Tests Config"
    - echo "${ZOMBIENET_IMAGE_NAME}"
    - echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
    - echo "${GH_DIR}"
    - export DEBUG=zombie,zombie::network-node
    - BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)"
    # FIX: read the job id from the build-linux-stable artifacts (as the
    # misc-upgrade-node job does); previously POLKADOT_PR_BIN_URL interpolated
    # an unset ${BUILD_LINUX_JOB_ID} and produced a broken URL.
    - BUILD_LINUX_JOB_ID="$(cat ./artifacts/BUILD_LINUX_JOB_ID)"
    - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
    - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"
    - export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
    - export POLKADOT_PR_BIN_URL="https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/${BUILD_LINUX_JOB_ID}/artifacts/raw/artifacts/polkadot"
  script:
    - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
      --github-remote-dir="${GH_DIR}"
      --test="002-async-backing-runtime-upgrade.zndsl"
  allow_failure: false
  retry: 2
  tags:
    - zombienet-polkadot-integration-test
zombienet-tests-async-backing-collator-mix:
stage: zombienet
extends:
- .kubernetes-env
- .zombienet-refs
image: "${ZOMBIENET_IMAGE}"
needs:
- job: publish-polkadot-debug-image
- job: publish-test-collators-image
- job: build-linux-stable
artifacts: true
variables:
RUN_IN_CONTAINER: "1"
GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing"
before_script:
- echo "Zombie-net Tests Config"
- echo "${ZOMBIENET_IMAGE_NAME}"
- echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}"
- echo "${GH_DIR}"
- export DEBUG=zombie,zombie::network-node
- BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)"
- export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG}
- export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"
- export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG}
script:
- /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh
--github-remote-dir="${GH_DIR}"
--test="003-async-backing-collator-mix.zndsl"
allow_failure: false
retry: 2
tags:
- zombienet-polkadot-integration-test
-6
View File
@@ -1,6 +0,0 @@
#!/bin/sh
# Git clean filter that normalizes CI YAML formatting with prettier.
# Meant to be installed via:
#   git config filter.ci-prettier.clean "scripts/ci/gitlab/prettier.sh"
# Reads the file contents on stdin and writes the formatted result to stdout.
# `exec` replaces the shell with prettier instead of forking a child, so the
# filter's exit status is prettier's own.
exec prettier --parser yaml
@@ -1,34 +0,0 @@
[hunspell]
lang = "en_US"
search_dirs = ["."]
extra_dictionaries = ["lingua.dic"]
skip_os_lookups = true
use_builtin = true
[hunspell.quirks]
# He tagged it as 'TheGreatestOfAllTimes'
transform_regex = [
# `Type`'s
"^'([^\\s])'$",
# 5x
# 10.7%
"^[0-9_]+(?:\\.[0-9]*)?(x|%)$",
# Transforms'
"^(.*)'$",
# backslashes
"^\\+$",
"^[0-9]*+k|MB|Mb|ms|Mbit|nd|th|rd$",
# single char `=` `>` `%` ..
"^=|>|<|%$",
# 22_100
"^(?:[0-9]+_)+[0-9]+$",
# V5, v5, P1.2, etc
"[A-Za-z][0-9]",
# ~50
"~[0-9]+",
"ABI",
"bool",
"sigil",
]
allow_concatenation = true
allow_dashes = true
@@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Verify that the runtime WASM builds are deterministic: build the runtimes
# twice from a clean state and check the artifacts are byte-identical.
#
# Fail fast: without `set -e` a failed build step would be silently ignored
# and the final checksum comparison could report a misleading result.
set -euo pipefail

#shellcheck source=../common/lib.sh
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh"

# First build of the runtimes.
WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime

# Record checksums of the produced WASM blobs.
sha256sum target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256

# clean up - FIXME: can we reuse some of the artifacts?
cargo clean

# Build again from scratch.
WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime

# Confirm the second build produced bit-identical artifacts.
sha256sum -c checksum.sha256
@@ -1,76 +0,0 @@
#!/bin/bash
# Runs all benchmarks for all pallets, for a given runtime, provided by $1.
# Should be run on a reference machine to gain accurate benchmarks
# (current reference machine: https://github.com/paritytech/substrate/pull/5848).
#
# Individual pallet failures do NOT abort the run; their output is collected
# in $ERR_FILE and a summary is printed at the end.

# Abort early with a usage message instead of benchmarking the bogus "-dev"
# chain when no runtime is given.
runtime="${1:?usage: $0 <runtime>  e.g. $0 polkadot}"

echo "[+] Compiling benchmarks..."
cargo build --profile production --locked --features=runtime-benchmarks

# Load all pallet names in an array.
# (word-splitting of the command substitution is intentional here)
PALLETS=($(
  ./target/production/polkadot benchmark pallet --list --chain="${runtime}-dev" |\
    tail -n+2 |\
    cut -d',' -f1 |\
    sort |\
    uniq
))

echo "[+] Benchmarking ${#PALLETS[@]} pallets for runtime $runtime"

# Define the error file.
ERR_FILE="benchmarking_errors.txt"
# Delete the error file before each run.
rm -f -- "$ERR_FILE"

# Benchmark each pallet.
for PALLET in "${PALLETS[@]}"; do
  echo "[+] Benchmarking $PALLET for $runtime";

  output_file=""
  if [[ $PALLET == *"::"* ]]; then
    # translates e.g. "pallet_foo::bar" to "pallet_foo_bar"
    output_file="${PALLET//::/_}.rs"
  fi

  # Checking the assignment directly captures the benchmark's exit status
  # without the fragile `$?` pattern (ShellCheck SC2181).
  if ! OUTPUT=$(
    ./target/production/polkadot benchmark pallet \
    --chain="${runtime}-dev" \
    --steps=50 \
    --repeat=20 \
    --pallet="$PALLET" \
    --extrinsic="*" \
    --wasm-execution=compiled \
    --header=./file_header.txt \
    --output="./runtime/${runtime}/src/weights/${output_file}" 2>&1
  ); then
    echo "$OUTPUT" >> "$ERR_FILE"
    echo "[-] Failed to benchmark $PALLET. Error written to $ERR_FILE; continuing..."
  fi
done

# Update the block and extrinsic overhead weights.
echo "[+] Benchmarking block and extrinsic overheads..."
if ! OUTPUT=$(
  ./target/production/polkadot benchmark overhead \
  --chain="${runtime}-dev" \
  --wasm-execution=compiled \
  --weight-path="runtime/${runtime}/constants/src/weights/" \
  --warmup=10 \
  --repeat=100 \
  --header=./file_header.txt
); then
  echo "$OUTPUT" >> "$ERR_FILE"
  echo "[-] Failed to benchmark the block and extrinsic overheads. Error written to $ERR_FILE; continuing..."
fi

# The error file only exists if at least one benchmark failed.
if [ -f "$ERR_FILE" ]; then
  echo "[-] Some benchmarks failed. See: $ERR_FILE"
else
  echo "[+] All benchmarks passed."
fi