Import Polkadot

Signed-off-by: alvicsam <alvicsam@gmail.com>
alvicsam committed 2023-08-25 11:03:34 +02:00
1179 changed files with 354108 additions and 0 deletions
@@ -0,0 +1,33 @@
#
# An auto-defined `clippy` feature was introduced,
# but it was found to clash with user-defined features,
# so it was renamed to `cargo-clippy`.
#
# If you want standard clippy, run:
# RUSTFLAGS= cargo clippy
[target.'cfg(feature = "cargo-clippy")']
rustflags = [
"-Aclippy::all",
"-Dclippy::correctness",
"-Aclippy::if-same-then-else",
"-Aclippy::clone-double-ref",
"-Dclippy::complexity",
"-Aclippy::zero-prefixed-literal", # 00_1000_000
"-Aclippy::type_complexity", # raison d'etre
"-Aclippy::nonminimal-bool", # maybe
"-Aclippy::borrowed-box", # Reasonable to fix this one
"-Aclippy::too-many-arguments", # (Turning this on would lead to)
"-Aclippy::unnecessary_cast", # Types may change
"-Aclippy::identity-op", # One case where we do 0 +
"-Aclippy::useless_conversion", # Types may change
"-Aclippy::unit_arg", # styalistic.
"-Aclippy::option-map-unit-fn", # styalistic
"-Aclippy::bind_instead_of_map", # styalistic
"-Aclippy::erasing_op", # E.g. 0 * DOLLARS
"-Aclippy::eq_op", # In tests we test equality.
"-Aclippy::while_immutable_condition", # false positives
"-Aclippy::needless_option_as_deref", # false positives
"-Aclippy::derivable_impls", # false positives
"-Aclippy::stable_sort_primitive", # prefer stable sort
"-Aclippy::extra-unused-type-parameters", # stylistic
]
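The flags above apply only under the auto-injected `cargo-clippy` feature, and an environment-level `RUSTFLAGS` takes precedence over config-level rustflags, which is why clearing it restores stock behaviour. A minimal sketch of both invocations (`--all-targets` is just an illustrative choice):

```bash
# Lint with the repo's curated set (the rustflags above are picked up automatically):
cargo clippy --all-targets

# Override the config-level rustflags with an empty RUSTFLAGS to get standard clippy:
RUSTFLAGS= cargo clippy --all-targets
```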
@@ -0,0 +1,2 @@
doc
**/target
@@ -0,0 +1,26 @@
root = true
[*.rs]
indent_style=tab
indent_size=tab
tab_width=4
max_line_length=120
end_of_line=lf
charset=utf-8
trim_trailing_whitespace=true
insert_final_newline=true
[*.yml]
indent_style=space
indent_size=2
tab_width=8
end_of_line=lf
charset=utf-8
trim_trailing_whitespace=true
insert_final_newline=true
[*.sh]
indent_style=space
indent_size=2
tab_width=8
end_of_line=lf
@@ -0,0 +1,2 @@
/.gitlab-ci.yml filter=ci-prettier
/scripts/ci/gitlab/pipeline/*.yml filter=ci-prettier
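These attributes have no effect until a `ci-prettier` filter is actually defined in git config; the concrete command is part of the repo's tooling, so the following is only a hypothetical local wiring:

```bash
# Hypothetical: define the ci-prettier filter locally so the attribute does something.
# The real prettier invocation used by the CI tooling may differ.
git config filter.ci-prettier.clean "prettier --parser yaml"
```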
@@ -0,0 +1,6 @@
# CI
/.github/ @paritytech/ci @chevdor
/scripts/ci/ @paritytech/ci @chevdor
/.gitlab-ci.yml @paritytech/ci
# lingua.dic is not managed by CI team
/scripts/ci/gitlab/lingua.dic
@@ -0,0 +1,13 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
- It would help if you submitted info about the system you are running, e.g.: operating system, kernel version, amount of available memory and swap, etc.
- Logs could be very helpful. If possible, submit the whole log. Please format it as ```code blocks```.
- Describe the role your node plays, e.g. validator, full node or light client.
- Were any command-line options passed?
@@ -0,0 +1,52 @@
---
name: Release issue template
about: Tracking issue for new releases
title: Polkadot {{ env.VERSION }} Release checklist
---
# Release Checklist
This is the release checklist for Polkadot {{ env.VERSION }}. **All** of the following
checks should be completed before publishing a new release of the
Polkadot/Kusama/Westend/Rococo runtime or client. The current release candidate can be
checked out with `git checkout release-{{ env.VERSION }}`.
### Runtime Releases
These checks should be performed on the codebase prior to forking to a release-
candidate branch.
- [ ] Verify [`spec_version`](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#spec-version) has been incremented since the
last release for any native runtimes from any existing use on public
(non-private) networks. If the runtime was published (release or pre-release), either
the `spec_version` or `impl` must be bumped.
- [ ] Verify previously [completed migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#old-migrations-removed) are
removed for any public (non-private/test) networks.
- [ ] Verify pallet and [extrinsic ordering](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#extrinsic-ordering) has stayed
the same. Bump `transaction_version` if not.
- [ ] Verify new extrinsics have been correctly whitelisted/blacklisted for
[proxy filters](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#proxy-filtering).
- [ ] Verify [benchmarks](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#benchmarks) have been updated for any modified
runtime logic.
The following checks can be performed after we have forked off to the release-
candidate branch or started an additional release candidate branch (rc-2, rc-3, etc.):
- [ ] Verify [new migrations](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#new-migrations) complete successfully, and the
runtime state is correctly updated for any public (non-private/test)
networks.
- [ ] Verify the [Polkadot JS API](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#polkadot-js) is up to date with the latest
runtime changes.
- [ ] Check with the Signer's team to make sure metadata update QR codes are lined up.
- [ ] Push runtime upgrade to Westend and verify network stability.
### All Releases
- [ ] Check that the new client versions have [run on the network](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#burn-in)
without issue for 12+ hours on >75% of our validator nodes.
- [ ] Check that a draft release has been created at
https://github.com/paritytech/polkadot/releases with relevant [release
notes](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#release-notes)
- [ ] Check that [build artifacts](https://github.com/paritytech/polkadot/blob/master/doc/release-checklist.md#build-artifacts) have been added to the
draft-release
- [ ] Check that all items listed in the [milestone](https://github.com/paritytech/polkadot/milestones) are included in the release.
- [ ] Ensure that no `freenotes` were added to the release branch after the latest generated RC.
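As a sketch of working with a release candidate locally (the version below is illustrative):

```bash
# Check out the current release candidate branch and list its existing RC tags:
git fetch origin
git checkout release-v0.9.43
git tag -l "v0.9.43-rc*" | sort -V
```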
@@ -0,0 +1,26 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
# Handle updates for crates from github.com/paritytech/substrate manually.
ignore:
- dependency-name: "substrate-*"
- dependency-name: "sc-*"
- dependency-name: "sp-*"
- dependency-name: "frame-*"
- dependency-name: "fork-tree"
- dependency-name: "frame-remote-externalities"
- dependency-name: "pallet-*"
- dependency-name: "beefy-*"
- dependency-name: "try-runtime-*"
- dependency-name: "test-runner"
- dependency-name: "generate-bags"
- dependency-name: "sub-tokens"
schedule:
interval: "daily"
- package-ecosystem: "github-actions"
directory: "/"
labels: ["A2-insubstantial", "B0-silent", "C1-low", "E2-dependencies"]
schedule:
interval: "daily"
@@ -0,0 +1,42 @@
# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team
locks-review-team: locks-review
team-leads-team: polkadot-review
action-review-team: ci
rules:
- name: Runtime files
check_type: changed_files
condition:
include: ^runtime\/(kusama|polkadot)\/src\/.+\.rs$
exclude: ^runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$
all_distinct:
- min_approvals: 1
teams:
- locks-review
- min_approvals: 1
teams:
- polkadot-review
- name: Core developers
check_type: changed_files
condition:
include: .*
# excluding files from 'Runtime files' and 'CI files' rules
exclude: ^runtime/(kusama|polkadot)/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^(?!.*\.dic$|.*spellcheck\.toml$)scripts/ci/.*|^\.github/.*
min_approvals: 3
teams:
- core-devs
- name: CI files
check_type: changed_files
condition:
# dictionary files are excluded
include: ^\.gitlab-ci\.yml|^(?!.*\.dic$|.*spellcheck\.toml$)scripts/ci/.*|^\.github/.*
min_approvals: 2
teams:
- ci
- release-engineering
prevent-review-request:
teams:
- core-devs
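A quick way to sanity-check these patterns is to run a candidate path through the same regex locally. Note that the include pattern below is plain ERE, while the exclude patterns above use Perl lookaheads (`(?!...)`) and would need `grep -P`:

```bash
# Does this file fall under the "Runtime files" rule (1 locks-review + 1 polkadot-review)?
echo "runtime/kusama/src/lib.rs" \
  | grep -qE '^runtime/(kusama|polkadot)/src/.+\.rs$' && echo "runtime rule applies"
```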
@@ -0,0 +1,24 @@
name: Notify devops when burn-in label applied
on:
pull_request:
types: [labeled]
jobs:
notify-devops:
runs-on: ubuntu-latest
strategy:
matrix:
channel:
- name: 'Team: DevOps'
room: '!lUslSijLMgNcEKcAiE:parity.io'
steps:
- name: Send Matrix message to ${{ matrix.channel.name }}
if: startsWith(github.event.label.name, 'A1-')
uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
with:
room_id: ${{ matrix.channel.room }}
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
@room Burn-in request received for the following PR: ${{ github.event.pull_request.html_url }}
@@ -0,0 +1,50 @@
name: Check D labels
on:
pull_request:
types: [labeled, opened, synchronize, unlabeled]
paths:
- runtime/polkadot/**
- runtime/kusama/**
- runtime/common/**
- primitives/src/**
jobs:
check-labels:
runs-on: ubuntu-latest
steps:
- name: Pull image
env:
IMAGE: paritytech/ruled_labels:0.4.0
run: docker pull $IMAGE
- name: Check labels
env:
IMAGE: paritytech/ruled_labels:0.4.0
MOUNT: /work
GITHUB_PR: ${{ github.event.pull_request.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
API_BASE: https://api.github.com/repos
REPO: ${{ github.repository }}
RULES_PATH: labels/ruled_labels
CHECK_SPECS: specs_polkadot.yaml
run: |
echo "REPO: ${REPO}"
echo "GITHUB_PR: ${GITHUB_PR}"
# Clone repo with labels specs
git clone https://github.com/paritytech/labels
# Fetch the labels for the PR under test
labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
if [ -z "${labels}" ]; then
docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags audit --no-label
fi
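# Strip the trailing comma left by `tr` so the list can be passed to --labels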
labels_args=${labels: :-1}
printf "Checking labels: %s\n" "${labels_args}"
# Prevent the shell from splitting labels with spaces
IFS=","
# --dev enables a more verbose debug mode
docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags audit
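The same check can be reproduced locally, assuming Docker and a clone of paritytech/labels (the label names below are illustrative):

```bash
git clone https://github.com/paritytech/labels
docker run --rm -i -v $PWD/labels/ruled_labels/:/work paritytech/ruled_labels:0.4.0 \
  check /work/specs_polkadot.yaml --tags audit --labels "B0-silent,C1-low"
```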
@@ -0,0 +1,31 @@
# checks all networks we care about (kusama, polkadot, westend) and ensures
# the bootnodes in their respective chainspecs are contactable
name: Check all bootnodes
on:
push:
branches:
# Catches v1.2.3 and v1.2.3-rc1
- release-v[0-9]+.[0-9]+.[0-9]+*
jobs:
check_bootnodes:
strategy:
fail-fast: false
matrix:
runtime: [westend, kusama, polkadot]
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Install polkadot
shell: bash
run: |
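# Resolve the download URL of the latest released polkadot binary via the GitHub API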
curl -L "$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest \
| jq -r '.assets | .[] | select(.name == "polkadot").browser_download_url')" \
| sudo tee /usr/local/bin/polkadot > /dev/null
sudo chmod +x /usr/local/bin/polkadot
polkadot --version
- name: Check ${{ matrix.runtime }} bootnodes
shell: bash
run: scripts/ci/github/check_bootnodes.sh node/service/chain-specs/${{ matrix.runtime }}.json
@@ -0,0 +1,45 @@
name: Check labels
on:
pull_request:
types: [labeled, opened, synchronize, unlabeled]
jobs:
check-labels:
runs-on: ubuntu-latest
steps:
- name: Pull image
env:
IMAGE: paritytech/ruled_labels:0.4.0
run: docker pull $IMAGE
- name: Check labels
env:
IMAGE: paritytech/ruled_labels:0.4.0
MOUNT: /work
GITHUB_PR: ${{ github.event.pull_request.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
API_BASE: https://api.github.com/repos
REPO: ${{ github.repository }}
RULES_PATH: labels/ruled_labels
CHECK_SPECS: specs_polkadot.yaml
run: |
echo "REPO: ${REPO}"
echo "GITHUB_PR: ${GITHUB_PR}"
# Clone repo with labels specs
git clone https://github.com/paritytech/labels
# Fetch the labels for the PR under test
labels=$( curl -H "Authorization: token ${GITHUB_TOKEN}" -s "$API_BASE/${REPO}/pulls/${GITHUB_PR}" | jq '.labels | .[] | .name' | tr "\n" ",")
if [ -z "${labels}" ]; then
docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --tags PR --no-label
fi
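# Strip the trailing comma left by `tr` so the list can be passed to --labels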
labels_args=${labels: :-1}
printf "Checking labels: %s\n" "${labels_args}"
# Prevent the shell from splitting labels with spaces
IFS=","
# --dev enables a more verbose debug mode
docker run --rm -i -v $PWD/${RULES_PATH}/:$MOUNT $IMAGE check $MOUNT/$CHECK_SPECS --labels ${labels_args} --dev --tags PR
@@ -0,0 +1,26 @@
name: Check licenses
on:
pull_request:
jobs:
check-licenses:
runs-on: ubuntu-22.04
steps:
- name: Checkout sources
uses: actions/checkout@v3
- uses: actions/setup-node@v3.8.1
with:
node-version: '18.x'
registry-url: 'https://npm.pkg.github.com'
scope: '@paritytech'
- name: Check the licenses
run: |
shopt -s globstar
npx @paritytech/license-scanner@0.0.5 scan \
--ensure-licenses=Apache-2.0 \
--ensure-licenses=GPL-3.0-only \
./**/*.rs
env:
NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,28 @@
# If a chainspec file is updated with new bootnodes, we check to make sure those bootnodes are contactable
name: Check new bootnodes
on:
pull_request:
paths:
- 'node/service/chain-specs/*.json'
jobs:
check_bootnodes:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install polkadot
shell: bash
run: |
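# Resolve the download URL of the latest released polkadot binary via the GitHub API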
curl -L "$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest \
| jq -r '.assets | .[] | select(.name == "polkadot").browser_download_url')" \
| sudo tee /usr/local/bin/polkadot > /dev/null
sudo chmod +x /usr/local/bin/polkadot
polkadot --version
- name: Check new bootnodes
shell: bash
run: |
scripts/ci/github/check_new_bootnodes.sh
@@ -0,0 +1,49 @@
name: Check updated weights
on:
pull_request:
paths:
- 'runtime/*/src/weights/**'
jobs:
check_weights_files:
strategy:
fail-fast: false
matrix:
runtime: [westend, kusama, polkadot, rococo]
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Check weights files
shell: bash
run: |
scripts/ci/github/verify_updated_weights.sh ${{ matrix.runtime }}
# This job uses https://github.com/ggwpez/substrate-weight-compare to compare the weights of the current
# release with the last release, then adds them as a comment to the PR.
check_weight_changes:
strategy:
fail-fast: false
matrix:
runtime: [westend, kusama, polkadot, rococo]
runs-on: ubuntu-latest
steps:
- name: Get latest release
run: |
LAST_RELEASE=$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest | jq -r .tag_name)
echo "LAST_RELEASE=$LAST_RELEASE" >> $GITHUB_ENV
- name: Checkout current sources
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Check weight changes
shell: bash
run: |
cargo install --git https://github.com/ggwpez/substrate-weight-compare swc
./scripts/ci/github/check_weights_swc.sh ${{ matrix.runtime }} "$LAST_RELEASE" | tee swc_output_${{ matrix.runtime }}.md
- name: Add comment
uses: thollander/actions-comment-pull-request@v2
with:
filePath: ./swc_output_${{ matrix.runtime }}.md
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
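The comparison can also be run locally by recombining the steps above (nothing here beyond what the job itself does):

```bash
cargo install --git https://github.com/ggwpez/substrate-weight-compare swc
LAST_RELEASE=$(curl -s https://api.github.com/repos/paritytech/polkadot/releases/latest | jq -r .tag_name)
./scripts/ci/github/check_weights_swc.sh polkadot "$LAST_RELEASE"
```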
@@ -0,0 +1,137 @@
name: Run nightly fuzzer jobs
on:
schedule:
- cron: '0 0 * * *'
jobs:
xcm-fuzzer:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Install minimal stable Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Install minimal nightly Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
target: wasm32-unknown-unknown
- name: Install honggfuzz deps
run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
- name: Install honggfuzz
uses: actions-rs/cargo@v1
with:
command: install
args: honggfuzz --version "0.5.54"
- name: Build fuzzer binaries
working-directory: xcm/xcm-simulator/fuzzer/
run: cargo hfuzz build
- name: Run fuzzer
working-directory: xcm/xcm-simulator/fuzzer/
run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh xcm-fuzzer
erasure-coding-round-trip:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Cache Seed
id: cache-seed-round-trip
uses: actions/cache@v3
with:
path: erasure-coding/fuzzer/hfuzz_workspace
key: ${{ runner.os }}-erasure-coding
- name: Install minimal stable Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Install minimal nightly Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
target: wasm32-unknown-unknown
- name: Install honggfuzz deps
run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
- name: Install honggfuzz
uses: actions-rs/cargo@v1
with:
command: install
args: honggfuzz --version "0.5.54"
- name: Build fuzzer binaries
working-directory: erasure-coding/fuzzer
run: cargo hfuzz build
- name: Run fuzzer
working-directory: erasure-coding/fuzzer
run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh round_trip
erasure-coding-reconstruct:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Cache Seed
id: cache-seed-reconstruct
uses: actions/cache@v3
with:
path: erasure-coding/fuzzer/hfuzz_workspace
key: ${{ runner.os }}-erasure-coding
- name: Install minimal stable Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Install minimal nightly Rust
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
target: wasm32-unknown-unknown
- name: Install honggfuzz deps
run: sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
- name: Install honggfuzz
uses: actions-rs/cargo@v1
with:
command: install
args: honggfuzz --version "0.5.54"
- name: Build fuzzer binaries
working-directory: erasure-coding/fuzzer
run: cargo hfuzz build
- name: Run fuzzer
working-directory: erasure-coding/fuzzer
run: bash $GITHUB_WORKSPACE/scripts/ci/github/run_fuzzer.sh reconstruct
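For iterating on a fuzz target locally, the honggfuzz-rs workflow these jobs wrap looks roughly like this (`run_fuzzer.sh` may set extra environment; this is the bare sketch):

```bash
sudo apt-get install --no-install-recommends binutils-dev libunwind8-dev
cargo install honggfuzz --version 0.5.54
cd xcm/xcm-simulator/fuzzer
cargo hfuzz build
cargo hfuzz run xcm-fuzzer   # runs until interrupted; crashes land in hfuzz_workspace/
```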
@@ -0,0 +1,42 @@
name: Assign reviewers
on:
pull_request:
branches:
- master
- main
types:
- opened
- reopened
- synchronize
- review_requested
- review_request_removed
- ready_for_review
- converted_to_draft
pull_request_review:
jobs:
pr-custom-review:
runs-on: ubuntu-latest
steps:
- name: Skip if pull request is in Draft
# `if: github.event.pull_request.draft == true` should be kept here, at
# the step level, rather than at the job level. The latter is not
# recommended because when the PR is moved from "Draft" to "Ready to
# review" the workflow will immediately be passing (since it was skipped),
# even though it hasn't actually run, since it takes a few seconds for
# the workflow to start. This is also disclosed in:
# https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17
# That scenario would open an opportunity for the check to be bypassed:
# 1. Get your PR approved
# 2. Move it to Draft
# 3. Push whatever commits you want
# 4. Move it to "Ready for review"; now the workflow is passing (it was
# skipped) and "Check reviews" is also passing (it won't be updated
# until the workflow is finished)
if: github.event.pull_request.draft == true
run: exit 1
- name: pr-custom-review
uses: paritytech/pr-custom-review@action-v3
with:
checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews
@@ -0,0 +1,21 @@
name: Release - Branch check
on:
push:
branches:
# Catches v1.2.3 and v1.2.3-rc1
- release-v[0-9]+.[0-9]+.[0-9]+*
workflow_dispatch:
jobs:
check_branch:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Run check
shell: bash
run: ./scripts/ci/github/check-rel-br
@@ -0,0 +1,71 @@
name: Release - RC automation
on:
push:
branches:
# Catches v1.2.3 and v1.2.3-rc1
- release-v[0-9]+.[0-9]+.[0-9]+*
jobs:
tag_rc:
runs-on: ubuntu-latest
strategy:
matrix:
channel:
- name: "RelEng: Polkadot Release Coordination"
room: '!cqAmzdIcbOFwrdrubV:parity.io'
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 0
- id: compute_tag
name: Compute next rc tag
shell: bash
run: |
# Get last rc tag if exists, else set it to {version}-rc1
version=${GITHUB_REF#refs/heads/release-}
echo "$version"
echo "version=$version" >> $GITHUB_OUTPUT
git tag -l
last_rc=$(git tag -l "$version-rc*" | sort -V | tail -n 1)
if [ -n "$last_rc" ]; then
suffix=$(echo "$last_rc" | grep -Eo '[0-9]+$')
echo $suffix
((suffix++))
echo $suffix
echo "new_tag=$version-rc$suffix" >> $GITHUB_OUTPUT
echo "first_rc=false" >> $GITHUB_OUTPUT
else
echo "new_tag=$version-rc1" >> $GITHUB_OUTPUT
echo "first_rc=true" >> $GITHUB_OUTPUT
fi
- name: Apply new tag
uses: tvdias/github-tagger@ed7350546e3e503b5e942dffd65bc8751a95e49d # v0.0.2
with:
# We can't use the normal GITHUB_TOKEN for the following reason:
# https://docs.github.com/en/actions/reference/events-that-trigger-workflows#triggering-new-workflows-using-a-personal-access-token
# RELEASE_BRANCH_TOKEN requires public_repo OAuth scope
repo-token: "${{ secrets.RELEASE_BRANCH_TOKEN }}"
tag: ${{ steps.compute_tag.outputs.new_tag }}
- id: create-issue
uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
# Only create the issue if it's the first release candidate
if: steps.compute_tag.outputs.first_rc == 'true'
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VERSION: ${{ steps.compute_tag.outputs.version }}
with:
filename: .github/ISSUE_TEMPLATE/release.md
- name: Send Matrix message to ${{ matrix.channel.name }}
uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
if: steps.create-issue.outputs.url != ''
with:
room_id: ${{ matrix.channel.room }}
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
Release process for polkadot ${{ steps.compute_tag.outputs.version }} has been started.<br/>
Tracking issue: ${{ steps.create-issue.outputs.url }}
@@ -0,0 +1,81 @@
# This workflow performs the Extrinsic Ordering Check on demand using a binary
name: Release - Extrinsic Ordering Check
on:
workflow_dispatch:
inputs:
reference_url:
description: The WebSocket url of the reference node
default: wss://kusama-rpc.polkadot.io
required: true
binary_url:
description: A url to a Linux binary for the node containing the runtime to test
default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.10/polkadot
required: true
chain:
description: The name of the chain under test. Usually, you would pass a local chain
default: kusama-local
required: true
jobs:
check:
name: Run check
runs-on: ubuntu-latest
env:
CHAIN: ${{github.event.inputs.chain}}
BIN_URL: ${{github.event.inputs.binary_url}}
REF_URL: ${{github.event.inputs.reference_url}}
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Fetch binary
run: |
echo Fetching $BIN_URL
wget $BIN_URL
chmod a+x polkadot
./polkadot --version
- name: Start local node
run: |
echo Running on $CHAIN
./polkadot --chain=$CHAIN &
- name: Prepare output
run: |
VERSION=$(./polkadot --version)
echo "Metadata comparison:" >> output.txt
echo "Date: $(date)" >> output.txt
echo "Reference: $REF_URL" >> output.txt
echo "Target version: $VERSION" >> output.txt
echo "Chain: $CHAIN" >> output.txt
echo "----------------------------------------------------------------------" >> output.txt
- name: Pull polkadot-js-tools image
run: docker pull jacogr/polkadot-js-tools
- name: Compare the metadata
run: |
CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata $REF_URL ws://localhost:9944"
echo -e "Running:\n$CMD"
$CMD >> output.txt
sed -z -i 's/\n\n/\n/g' output.txt
cat output.txt | egrep -n -i ''
SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
echo -e $SUMMARY
echo -e $SUMMARY >> output.txt
- name: Show result
run: |
cat output.txt
- name: Stop our local node
run: pkill polkadot
- name: Save output as artifact
uses: actions/upload-artifact@v3
with:
name: ${{ env.CHAIN }}
path: |
output.txt
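The core of the check is a single polkadot-js-tools invocation; a hedged local equivalent, against a node you started yourself, would be:

```bash
# Compare on-chain metadata of a reference endpoint against a local node:
./polkadot --chain=kusama-local &
docker run --pull always --network host jacogr/polkadot-js-tools \
  metadata wss://kusama-rpc.polkadot.io ws://localhost:9944 > output.txt
```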
@@ -0,0 +1,97 @@
# This workflow performs the Extrinsic Ordering Check on demand using two reference binaries
name: Release - Extrinsic API Check with reference bins
on:
workflow_dispatch:
inputs:
reference_binary_url:
description: A url to a Linux binary for the node containing the reference runtime to test against
default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.26/polkadot
required: true
binary_url:
description: A url to a Linux binary for the node containing the runtime to test
default: https://releases.parity.io/polkadot/x86_64-debian:stretch/v0.9.27-rc1/polkadot
required: true
jobs:
check:
name: Run check
runs-on: ubuntu-latest
env:
BIN_URL: ${{github.event.inputs.binary_url}}
REF_URL: ${{github.event.inputs.reference_binary_url}}
strategy:
fail-fast: false
matrix:
chain: [polkadot, kusama, westend, rococo]
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Fetch reference binary
run: |
echo Fetching $REF_URL
curl $REF_URL -o polkadot-ref
chmod a+x polkadot-ref
./polkadot-ref --version
- name: Fetch test binary
run: |
echo Fetching $BIN_URL
curl $BIN_URL -o polkadot
chmod a+x polkadot
./polkadot --version
- name: Start local reference node
run: |
echo Running reference on ${{ matrix.chain }}-local
./polkadot-ref --chain=${{ matrix.chain }}-local --rpc-port=9934 --ws-port=9945 --base-path=polkadot-ref-base/ &
- name: Start local test node
run: |
echo Running test on ${{ matrix.chain }}-local
./polkadot --chain=${{ matrix.chain }}-local &
- name: Prepare output
run: |
REF_VERSION=$(./polkadot-ref --version)
BIN_VERSION=$(./polkadot --version)
echo "Metadata comparison:" >> output.txt
echo "Date: $(date)" >> output.txt
echo "Ref. binary: $REF_URL" >> output.txt
echo "Test binary: $BIN_URL" >> output.txt
echo "Ref. version: $REF_VERSION" >> output.txt
echo "Test version: $BIN_VERSION" >> output.txt
echo "Chain: ${{ matrix.chain }}-local" >> output.txt
echo "----------------------------------------------------------------------" >> output.txt
- name: Pull polkadot-js-tools image
run: docker pull jacogr/polkadot-js-tools
- name: Compare the metadata
run: |
CMD="docker run --pull always --network host jacogr/polkadot-js-tools metadata ws://localhost:9945 ws://localhost:9944"
echo -e "Running:\n$CMD"
$CMD >> output.txt
sed -z -i 's/\n\n/\n/g' output.txt
cat output.txt | egrep -n -i ''
SUMMARY=$(./scripts/ci/github/extrinsic-ordering-filter.sh output.txt)
echo -e $SUMMARY
echo -e $SUMMARY >> output.txt
- name: Show result
run: |
cat output.txt
- name: Save output as artifact
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.chain }}
path: |
output.txt
- name: Stop our local nodes
run: |
pkill polkadot-ref
pkill polkadot
@@ -0,0 +1,199 @@
name: Release - Publish draft
on:
push:
tags:
# Catches v1.2.3 and v1.2.3-rc1
- v[0-9]+.[0-9]+.[0-9]+*
jobs:
get-rust-versions:
runs-on: ubuntu-latest
container:
image: paritytech/ci-linux:production
outputs:
rustc-stable: ${{ steps.get-rust-versions.outputs.stable }}
rustc-nightly: ${{ steps.get-rust-versions.outputs.nightly }}
steps:
- id: get-rust-versions
run: |
echo "stable=$(rustc +stable --version)" >> $GITHUB_OUTPUT
echo "nightly=$(rustc +nightly --version)" >> $GITHUB_OUTPUT
build-runtimes:
runs-on: ubuntu-latest
strategy:
matrix:
runtime: ["polkadot", "kusama", "westend", "rococo"]
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Cache target dir
uses: actions/cache@v3
with:
path: "${{ github.workspace }}/runtime/${{ matrix.runtime }}/target"
key: srtool-target-${{ matrix.runtime }}-${{ github.sha }}
restore-keys: |
srtool-target-${{ matrix.runtime }}-
srtool-target-
- name: Build ${{ matrix.runtime }} runtime
id: srtool_build
uses: chevdor/srtool-actions@v0.8.0
with:
image: paritytech/srtool
chain: ${{ matrix.runtime }}
- name: Store srtool digest to disk
run: |
echo '${{ steps.srtool_build.outputs.json }}' | jq > ${{ matrix.runtime }}_srtool_output.json
- name: Upload ${{ matrix.runtime }} srtool json
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.runtime }}-srtool-json
path: ${{ matrix.runtime }}_srtool_output.json
- name: Upload ${{ matrix.runtime }} runtime
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.runtime }}-runtime
path: |
${{ steps.srtool_build.outputs.wasm_compressed }}
publish-draft-release:
runs-on: ubuntu-latest
needs: ["get-rust-versions", "build-runtimes"]
outputs:
release_url: ${{ steps.create-release.outputs.html_url }}
asset_upload_url: ${{ steps.create-release.outputs.upload_url }}
steps:
- name: Checkout sources
uses: actions/checkout@v3
with:
fetch-depth: 0
path: polkadot
- name: Set up Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: 3.0.0
- name: Download srtool json output
uses: actions/download-artifact@v3
- name: Prepare tooling
run: |
cd polkadot/scripts/ci/changelog
gem install bundler changelogerator:0.9.1
bundle install
changelogerator --help
URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.1/tera-cli_linux_amd64.deb
wget $URL -O tera.deb
sudo dpkg -i tera.deb
tera --version
- name: Generate release notes
env:
RUSTC_STABLE: ${{ needs.get-rust-versions.outputs.rustc-stable }}
RUSTC_NIGHTLY: ${{ needs.get-rust-versions.outputs.rustc-nightly }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NO_CACHE: 1
DEBUG: 1
ROCOCO_DIGEST: ${{ github.workspace}}/rococo-srtool-json/rococo_srtool_output.json
WESTEND_DIGEST: ${{ github.workspace}}/westend-srtool-json/westend_srtool_output.json
KUSAMA_DIGEST: ${{ github.workspace}}/kusama-srtool-json/kusama_srtool_output.json
POLKADOT_DIGEST: ${{ github.workspace}}/polkadot-srtool-json/polkadot_srtool_output.json
PRE_RELEASE: ${{ github.event.inputs.pre_release }}
run: |
find ${{env.GITHUB_WORKSPACE}} -type f -name "*_srtool_output.json"
ls -al $ROCOCO_DIGEST
ls -al $WESTEND_DIGEST
ls -al $KUSAMA_DIGEST
ls -al $POLKADOT_DIGEST
cd polkadot/scripts/ci/changelog
./bin/changelog ${GITHUB_REF}
ls -al release-notes.md
ls -al context.json
- name: Archive artifact context.json
uses: actions/upload-artifact@v3
with:
name: release-notes-context
path: |
polkadot/scripts/ci/changelog/context.json
**/*_srtool_output.json
- name: Create draft release
id: create-release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Polkadot ${{ github.ref }}
body_path: ./polkadot/scripts/ci/changelog/release-notes.md
draft: true
publish-runtimes:
runs-on: ubuntu-latest
needs: ["publish-draft-release"]
env:
RUNTIME_DIR: runtime
strategy:
matrix:
runtime: ["polkadot", "kusama", "westend", "rococo"]
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Download artifacts
uses: actions/download-artifact@v3
- name: Set up Ruby
uses: ruby/setup-ruby@v1
with:
ruby-version: 3.0.0
- name: Get runtime version
id: get-runtime-ver
run: |
echo "require './scripts/ci/github/lib.rb'" > script.rb
echo "puts get_runtime(runtime: \"${{ matrix.runtime }}\", runtime_dir: \"$RUNTIME_DIR\")" >> script.rb
echo "Current folder: $PWD"
ls "$RUNTIME_DIR/${{ matrix.runtime }}"
runtime_ver=$(ruby script.rb)
echo "Found version: >$runtime_ver<"
echo "runtime_ver=$runtime_ver" >> $GITHUB_OUTPUT
- name: Upload compressed ${{ matrix.runtime }} wasm
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.publish-draft-release.outputs.asset_upload_url }}
asset_path: "${{ matrix.runtime }}-runtime/${{ matrix.runtime }}_runtime.compact.compressed.wasm"
asset_name: ${{ matrix.runtime }}_runtime-v${{ steps.get-runtime-ver.outputs.runtime_ver }}.compact.compressed.wasm
asset_content_type: application/wasm
post_to_matrix:
runs-on: ubuntu-latest
needs: publish-draft-release
strategy:
matrix:
channel:
- name: "RelEng: Polkadot Release Coordination"
room: '!cqAmzdIcbOFwrdrubV:parity.io'
steps:
- name: Send Matrix message to ${{ matrix.channel.name }}
uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
with:
room_id: ${{ matrix.channel.room }}
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
**New version of polkadot tagged**: ${{ github.ref }}<br/>
Draft release created: ${{ needs.publish-draft-release.outputs.release_url }}
@@ -0,0 +1,132 @@
name: Release - Publish RC Container image
# see https://github.com/paritytech/release-engineering/issues/97#issuecomment-1651372277
on:
workflow_dispatch:
inputs:
release_id:
description: |
Release ID.
You can find it using the command:
curl -s \
-H "Authorization: Bearer ${GITHUB_TOKEN}" https://api.github.com/repos/$OWNER/$REPO/releases | \
jq '.[] | { name: .name, id: .id }'
required: true
type: string
registry:
description: "Container registry"
required: true
type: string
default: docker.io
owner:
description: Owner of the container image repo
required: true
type: string
default: parity
env:
RELEASE_ID: ${{ inputs.release_id }}
ENGINE: docker
REGISTRY: ${{ inputs.registry }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCKER_OWNER: ${{ inputs.owner || github.repository_owner }}
REPO: ${{ github.repository }}
jobs:
fetch-artifacts:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Fetch all artifacts
run: |
. ./scripts/ci/common/lib.sh
fetch_release_artifacts
- name: Cache the artifacts
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
key: artifacts-${{ github.sha }}
path: |
./release-artifacts/**/*
build-container:
runs-on: ubuntu-latest
needs: fetch-artifacts
strategy:
matrix:
binary: ["polkadot", "staking-miner"]
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Get artifacts from cache
uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
with:
key: artifacts-${{ github.sha }}
fail-on-cache-miss: true
path: |
./release-artifacts/**/*
- name: Check sha256 ${{ matrix.binary }}
working-directory: ./release-artifacts
run: |
. ../scripts/ci/common/lib.sh
echo "Checking binary ${{ matrix.binary }}"
check_sha256 ${{ matrix.binary }} && echo "OK" || echo "ERR"
- name: Check GPG ${{ matrix.binary }}
working-directory: ./release-artifacts
run: |
. ../scripts/ci/common/lib.sh
import_gpg_keys
check_gpg ${{ matrix.binary }}
- name: Fetch commit and tag
id: fetch_refs
run: |
release=release-${{ inputs.release_id }} && \
echo "release=${release}" >> $GITHUB_OUTPUT
commit=$(git rev-parse --short HEAD) && \
echo "commit=${commit}" >> $GITHUB_OUTPUT
tag=$(git name-rev --tags --name-only $(git rev-parse HEAD)) && \
[ "${tag}" != "undefined" ] && echo "tag=${tag}" >> $GITHUB_OUTPUT || \
echo "No tag, doing without"
- name: Build Injected Container image for ${{ matrix.binary }}
env:
BIN_FOLDER: ./release-artifacts
BINARY: ${{ matrix.binary }}
TAGS: ${{join(steps.fetch_refs.outputs.*, ',')}}
run: |
echo "Building container for ${{ matrix.binary }}"
./scripts/ci/dockerfiles/build-injected.sh
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Push Container image for ${{ matrix.binary }}
id: docker_push
env:
BINARY: ${{ matrix.binary }}
run: |
$ENGINE images | grep ${BINARY}
$ENGINE push --all-tags ${REGISTRY}/${DOCKER_OWNER}/${BINARY}
- name: Check version for the published image for ${{ matrix.binary }}
env:
BINARY: ${{ matrix.binary }}
RELEASE_TAG: ${{ steps.fetch_refs.outputs.release }}
run: |
echo "Checking tag ${RELEASE_TAG} for image ${REGISTRY}/${DOCKER_OWNER}/${BINARY}"
$ENGINE run -i ${REGISTRY}/${DOCKER_OWNER}/${BINARY}:${RELEASE_TAG} --version
@@ -0,0 +1,44 @@
name: Release - Publish Docker image for new releases
on:
release:
types:
- published
jobs:
main:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v4
with:
push: true
file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile
tags: |
parity/polkadot:latest
parity/polkadot:${{ github.event.release.tag_name }}
build-args: |
POLKADOT_VERSION=${{ github.event.release.tag_name }}
VCS_REF=${{ github.ref }}
BUILD_DATE=${{ github.event.release.published_at }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
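Once published, the image can be sanity-checked with a plain pull and version run:

```bash
docker pull parity/polkadot:latest
docker run --rm parity/polkadot:latest --version
```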
@@ -0,0 +1,51 @@
name: Release - Publish Docker image (manual dispatch)
on:
workflow_dispatch:
inputs:
version:
description: version to build/release
default: v0.9.18
required: true
date:
description: release date of version
default: "2022-02-23T19:11:58Z"
required: true
jobs:
main:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@95cb08cb2672c73d4ffd2f422e6d11953d2a9c70 # v2.1.0
- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to Dockerhub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v4
with:
push: true
file: scripts/ci/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile
tags: |
parity/polkadot:latest
parity/polkadot:${{ github.event.inputs.version }}
build-args: |
POLKADOT_VERSION=${{ github.event.inputs.version }}
VCS_REF=${{ github.ref }}
BUILD_DATE=${{ github.event.inputs.date }}
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
@@ -0,0 +1,49 @@
name: Release - Send new release notification to matrix channels
on:
release:
types:
- published
jobs:
ping_matrix:
strategy:
matrix:
channel:
- name: '#KusamaValidatorLounge:polkadot.builders'
room: '!LhjZccBOqFNYKLdmbb:polkadot.builders'
pre-release: false
- name: '#kusama-announcements:matrix.parity.io'
room: '!FMwxpQnYhRCNDRsYGI:matrix.parity.io'
pre-release: false
- name: '#polkadotvalidatorlounge:web3.foundation'
room: '!NZrbtteFeqYKCUGQtr:matrix.parity.io'
pre-release: false
- name: '#polkadot-announcements:matrix.parity.io'
room: '!UqHPWiCBGZWxrmYBkF:matrix.parity.io'
pre-release: false
- name: "RelEng: Polkadot Release Coordination"
room: '!cqAmzdIcbOFwrdrubV:parity.io'
pre-release: true
- name: 'Ledger <> Polkadot Coordination'
room: '!EoIhaKfGPmFOBrNSHT:web3.foundation'
pre-release: true
- name: 'General: Rust, Polkadot, Substrate'
room: '!aJymqQYtCjjqImFLSb:parity.io'
pre-release: false
- name: 'Team: DevOps'
room: '!lUslSijLMgNcEKcAiE:parity.io'
pre-release: true
runs-on: ubuntu-latest
steps:
- name: Send Matrix message to ${{ matrix.channel.name }}
if: github.event.release.prerelease == false || matrix.channel.pre-release
uses: s3krit/matrix-message-action@70ad3fb812ee0e45ff8999d6af11cafad11a6ecf # v0.0.3
with:
room_id: ${{ matrix.channel.room }}
access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }}
server: m.parity.io
message: |
***Polkadot ${{github.event.release.tag_name}} has been released!***<br/>
${{github.event.release.html_url}}<br/><br/>
${{github.event.release.body}}<br/>
@@ -0,0 +1,16 @@
**/target/
**/*.rs.bk
*.swp
.wasm-binaries
runtime/wasm/target/
**/._*
.idea
.vscode
polkadot.*
!polkadot.service
.DS_Store
.env
artifacts
release-artifacts
release.json
@@ -0,0 +1,287 @@
# .gitlab-ci.yml
#
# polkadot
#
# Pipelines can be triggered manually in the web.
#
# Please do not add new jobs without "rules:" and a "*-env" anchor. There are &test-refs anchors
# for everything; "docker-env" is used for Rust jobs and "kubernetes-env" for everything else.
# Please mention the "image:" container name to be used with it, as there is no default one.
# All jobs are ordered by duration using the DAG mechanism. Currently, test-linux-stable is the
# longest job, and the others are scheduled to complete during it so that fewer runners are
# occupied at any one moment.
stages:
- .pre
- weights
- check
- test
- build
- publish
- zombienet
- short-benchmarks
workflow:
rules:
- if: $CI_COMMIT_TAG
- if: $CI_COMMIT_BRANCH
variables:
GIT_STRATEGY: fetch
GIT_DEPTH: 100
CI_SERVER_NAME: "GitLab CI"
CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE]
BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29"
BUILDAH_COMMAND: "buildah --storage-driver overlay2"
DOCKER_OS: "debian:stretch"
ARCH: "x86_64"
ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.65"
default:
cache: {}
retry:
max: 2
when:
- runner_system_failure
- unknown_failure
- api_failure
interruptible: true
.common-before-script:
before_script:
- !reference [.job-switcher, before_script]
- !reference [.timestamp, before_script]
.collect-artifacts:
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
when: on_success
expire_in: 7 days
paths:
- ./artifacts/
.collect-artifacts-short:
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
when: on_success
expire_in: 1 day
paths:
- ./artifacts/
# collecting vars for pipeline stopper
# they will be used if the job fails
.pipeline-stopper-vars:
before_script:
- echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env
- echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
- echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env
- echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env
.pipeline-stopper-artifacts:
artifacts:
reports:
dotenv: pipeline-stopper.env
.job-switcher:
before_script:
- if echo "$CI_DISABLED_JOBS" | grep -xF "$CI_JOB_NAME"; then echo "The job has been cancelled in CI settings"; exit 0; fi
.kubernetes-env:
image: "${CI_IMAGE}"
before_script:
- !reference [.common-before-script, before_script]
tags:
- kubernetes-parity-build
.docker-env:
image: "${CI_IMAGE}"
before_script:
- !reference [.common-before-script, before_script]
tags:
- linux-docker-vm-c2
.compiler-info:
before_script:
- !reference [.common-before-script, before_script]
- rustup show
- cargo --version
.test-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
.common-refs:
# these jobs run always*
rules:
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
- if: $CI_COMMIT_REF_NAME =~ /^release-v[0-9]+\.[0-9]+.*$/ # i.e. release-v0.9.27
.test-pr-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
.zombienet-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "schedule"
when: never
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
.deploy-testnet-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
.publish-refs:
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_PIPELINE_SOURCE == "web" &&
$CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
.build-push-image:
variables:
CI_IMAGE: "${BUILDAH_IMAGE}"
REGISTRY: "docker.io"
DOCKER_OWNER: "paritypr"
DOCKER_USER: "${PARITYPR_USER}"
DOCKER_PASS: "${PARITYPR_PASS}"
IMAGE: "${REGISTRY}/${DOCKER_OWNER}/${IMAGE_NAME}"
ENGINE: "${BUILDAH_COMMAND}"
BUILDAH_FORMAT: "docker"
SKIP_IMAGE_VALIDATION: 1
PROJECT_ROOT: "."
BIN_FOLDER: "./artifacts"
VCS_REF: "${CI_COMMIT_SHA}"
before_script:
- !reference [.common-before-script, before_script]
- test -s ./artifacts/VERSION || exit 1
- test -s ./artifacts/EXTRATAG || exit 1
- export VERSION="$(cat ./artifacts/VERSION)"
- EXTRATAG="$(cat ./artifacts/EXTRATAG)"
- echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})"
script:
- test "$DOCKER_USER" -a "$DOCKER_PASS" ||
( echo "no docker credentials provided"; exit 1 )
- TAGS="${VERSION},${EXTRATAG}" scripts/ci/dockerfiles/build-injected.sh
- echo "$DOCKER_PASS" |
buildah login --username "$DOCKER_USER" --password-stdin "${REGISTRY}"
- $BUILDAH_COMMAND info
- $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$VERSION"
- $BUILDAH_COMMAND push --format=v2s2 "$IMAGE:$EXTRATAG"
after_script:
- buildah logout --all
#### stage: .pre
# By default our pipelines are interruptible, but some special pipelines shouldn't be interrupted:
# * multi-project pipelines such as the ones triggered by the scripts repo
#
# In those cases, we add an uninterruptible .pre job; once that one has started,
# the entire pipeline becomes uninterruptible.
uninterruptible-pipeline:
extends: .kubernetes-env
variables:
CI_IMAGE: "paritytech/tools:latest"
stage: .pre
interruptible: false
rules:
- if: $CI_PIPELINE_SOURCE == "pipeline"
script: "true"
include:
# weights jobs
- scripts/ci/gitlab/pipeline/weights.yml
# check jobs
- scripts/ci/gitlab/pipeline/check.yml
# test jobs
- scripts/ci/gitlab/pipeline/test.yml
# build jobs
- scripts/ci/gitlab/pipeline/build.yml
# short-benchmarks jobs
- scripts/ci/gitlab/pipeline/short-benchmarks.yml
# publish jobs
- scripts/ci/gitlab/pipeline/publish.yml
# zombienet jobs
- scripts/ci/gitlab/pipeline/zombienet.yml
# timestamp handler
- project: parity/infrastructure/ci_cd/shared
ref: main
file: /common/timestamp.yml
- project: parity/infrastructure/ci_cd/shared
ref: main
file: /common/ci-unified.yml
#### stage: .post
deploy-parity-testnet:
stage: .post
extends:
- .deploy-testnet-refs
variables:
POLKADOT_CI_COMMIT_NAME: "${CI_COMMIT_REF_NAME}"
POLKADOT_CI_COMMIT_REF: "${CI_COMMIT_SHORT_SHA}"
allow_failure: false
trigger: "parity/infrastructure/parity-testnet"
# This job cancels the whole pipeline if any of provided jobs fail.
# In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests
# to fail the pipeline as soon as possible to shorten the feedback loop.
.cancel-pipeline-template:
stage: .post
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
when: on_failure
variables:
PROJECT_ID: "${CI_PROJECT_ID}"
PROJECT_NAME: "${CI_PROJECT_NAME}"
PIPELINE_ID: "${CI_PIPELINE_ID}"
FAILED_JOB_URL: "${FAILED_JOB_URL}"
FAILED_JOB_NAME: "${FAILED_JOB_NAME}"
PR_NUM: "${PR_NUM}"
trigger:
project: "parity/infrastructure/ci_cd/pipeline-stopper"
branch: "as-improve"
remove-cancel-pipeline-message:
stage: .post
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
variables:
PROJECT_ID: "${CI_PROJECT_ID}"
PROJECT_NAME: "${CI_PROJECT_NAME}"
PIPELINE_ID: "${CI_PIPELINE_ID}"
FAILED_JOB_URL: "https://gitlab.com"
FAILED_JOB_NAME: "nope"
PR_NUM: "${CI_COMMIT_REF_NAME}"
trigger:
project: "parity/infrastructure/ci_cd/pipeline-stopper"
cancel-pipeline-test-linux-stable:
extends: .cancel-pipeline-template
needs:
- job: test-linux-stable
@@ -0,0 +1,48 @@
%define debug_package %{nil}
Name: polkadot
Summary: Implementation of a https://polkadot.network node in Rust based on the Substrate framework.
Version: @@VERSION@@
Release: @@RELEASE@@%{?dist}
License: GPLv3
Group: Applications/System
Source0: %{name}-%{version}.tar.gz
Requires: systemd, shadow-utils
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
%description
%{summary}
%prep
%setup -q
%install
rm -rf %{buildroot}
mkdir -p %{buildroot}
cp -a * %{buildroot}
%post
config_file="/etc/default/polkadot"
getent group polkadot >/dev/null || groupadd -r polkadot
getent passwd polkadot >/dev/null || \
useradd -r -g polkadot -d /home/polkadot -m -s /sbin/nologin \
-c "User account for running polkadot as a service" polkadot
if [ ! -e "$config_file" ]; then
echo 'POLKADOT_CLI_ARGS=""' > "$config_file"
fi
exit 0
%clean
rm -rf %{buildroot}
%files
%defattr(-,root,root,-)
%{_bindir}/*
/usr/lib/systemd/system/polkadot.service
@@ -0,0 +1,52 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
### Facilitation, Not Strongarming
We recognise that this software is merely a tool for users to create and maintain their blockchain of preference. We see that blockchains are naturally community platforms with users being the ultimate decision makers. We assert that good software will maximise user agency by facilitating user-expression on the network. As such:
* This project will strive to give users as much choice as is both reasonable and possible over what protocol they adhere to; but
* use of the project's technical forums, commenting systems, pull requests and issue trackers as a means to express individual protocol preferences is forbidden.
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at <admin@parity.io>. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://contributor-covenant.org/version/1/4
[homepage]: https://contributor-covenant.org
@@ -0,0 +1,46 @@
# Contributing
## Rules
There are a few basic ground-rules for contributors (including the maintainer(s) of the project):
- **No `--force` pushes** or modifying the Git history in any way. If you need to rebase, ensure you do it in your own repo.
- **Non-master branches**, prefixed with a short name moniker (e.g. `gav-my-feature`) must be used for ongoing work.
- **All modifications** must be made in a **pull-request** to solicit feedback from other contributors.
- A pull-request _must not be merged until CI_ has finished successfully.
- Contributors should adhere to the [house coding style](https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md).
### Merging pull requests once CI is successful
- A pull request that does not alter any logic (e.g. comments, dependencies, docs) may be tagged [`insubstantial`](https://github.com/paritytech/polkadot/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3AA2-insubstantial) and merged by its author.
- A pull request with no large change to logic that is an urgent fix may be merged after a non-author contributor has reviewed it well.
- All other PRs should sit for 48 hours with the [`pleasereview`](https://github.com/paritytech/polkadot/pulls?q=is:pr+is:open+label:A0-pleasereview) tag in order to garner feedback.
- No PR should be merged until all reviews' comments are addressed.
### Reviewing pull requests
When reviewing a pull request, the end-goal is to suggest useful changes to the author. Reviews should finish with approval unless there are issues that would result in:
- Buggy behavior.
- Undue maintenance burden.
- Breaking with house coding style.
- Pessimization (i.e. reduction of speed as measured in the projects benchmarks).
- Feature reduction (i.e. it removes some aspect of functionality that a significant minority of users rely on).
- Uselessness (i.e. it does not strictly add a feature or fix a known issue).
### Reviews may not be used as an effective veto for a PR because
- There exists a somewhat cleaner/better/faster way of accomplishing the same feature/fix.
- It does not fit well with some other contributors' longer-term vision for the project.
## Releases
Declaring formal releases remains the prerogative of the project maintainer(s).
## Changes to this arrangement
This is an experiment and feedback is welcome! This document may also be subject to pull-requests or changes by contributors where you believe you have something valuable to add or change.
## Heritage
These contributing guidelines are modified from the "OPEN Open Source Project" guidelines for the Level project: <https://github.com/Level/community/blob/master/CONTRIBUTING.md>
(File diff suppressed because it is too large: +15397 lines)
@@ -0,0 +1,256 @@
[[bin]]
name = "polkadot"
path = "src/main.rs"
[[bin]]
name = "polkadot-execute-worker"
path = "src/bin/execute-worker.rs"
[[bin]]
name = "polkadot-prepare-worker"
path = "src/bin/prepare-worker.rs"
[package]
name = "polkadot"
description = "Implementation of a `https://polkadot.network` node in Rust based on the Substrate framework."
rust-version = "1.64.0" # workspace properties
readme = "README.md"
default-run = "polkadot"
authors.workspace = true
edition.workspace = true
license.workspace = true
version.workspace = true
[workspace.package]
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
license = "GPL-3.0-only"
repository = "https://github.com/paritytech/polkadot.git"
version = "0.9.43"
[dependencies]
color-eyre = { version = "0.6.1", default-features = false }
tikv-jemallocator = "0.5.0"
# Crates in our workspace, defined as dependencies so we can pass them feature flags.
polkadot-cli = { path = "cli", features = [ "polkadot-native", "kusama-native", "westend-native", "rococo-native" ] }
polkadot-node-core-pvf = { path = "node/core/pvf" }
polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" }
polkadot-overseer = { path = "node/overseer" }
# Needed for worker binaries.
polkadot-node-core-pvf-common = { path = "node/core/pvf/common", features = ["test-utils"] }
polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" }
[dev-dependencies]
assert_cmd = "2.0.4"
nix = { version = "0.26.1", features = ["signal"] }
tempfile = "3.2.0"
tokio = "1.24.2"
substrate-rpc-client = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-core-primitives = { path = "core-primitives" }
[build-dependencies]
substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
[workspace]
members = [
"cli",
"core-primitives",
"erasure-coding",
"erasure-coding/fuzzer",
"primitives",
"primitives/test-helpers",
"runtime/common",
"runtime/common/slot_range_helper",
"runtime/metrics",
"runtime/parachains",
"runtime/polkadot",
"runtime/polkadot/constants",
"runtime/kusama",
"runtime/kusama/constants",
"runtime/rococo",
"runtime/rococo/constants",
"runtime/westend",
"runtime/westend/constants",
"runtime/test-runtime",
"runtime/test-runtime/constants",
"statement-table",
"xcm",
"xcm/xcm-builder",
"xcm/xcm-executor",
"xcm/xcm-executor/integration-tests",
"xcm/xcm-simulator",
"xcm/xcm-simulator/example",
"xcm/xcm-simulator/fuzzer",
"xcm/pallet-xcm",
"xcm/pallet-xcm-benchmarks",
"xcm/procedural",
"node/collation-generation",
"node/core/approval-voting",
"node/core/av-store",
"node/core/backing",
"node/core/bitfield-signing",
"node/core/candidate-validation",
"node/core/chain-api",
"node/core/chain-selection",
"node/core/dispute-coordinator",
"node/core/parachains-inherent",
"node/core/prospective-parachains",
"node/core/provisioner",
"node/core/pvf",
"node/core/pvf/common",
"node/core/pvf/execute-worker",
"node/core/pvf/prepare-worker",
"node/core/pvf-checker",
"node/core/runtime-api",
"node/network/approval-distribution",
"node/network/bridge",
"node/network/protocol",
"node/network/statement-distribution",
"node/network/bitfield-distribution",
"node/network/availability-distribution",
"node/network/availability-recovery",
"node/network/collator-protocol",
"node/network/gossip-support",
"node/network/dispute-distribution",
"node/overseer",
"node/malus",
"node/primitives",
"node/service",
"node/subsystem",
"node/subsystem-types",
"node/subsystem-test-helpers",
"node/subsystem-util",
"node/jaeger",
"node/gum",
"node/gum/proc-macro",
"node/metrics",
"node/test/client",
"node/test/performance-test",
"node/test/service",
"node/zombienet-backchannel",
"rpc",
"parachain",
"parachain/test-parachains",
"parachain/test-parachains/adder",
"parachain/test-parachains/adder/collator",
"parachain/test-parachains/halt",
"parachain/test-parachains/undying",
"parachain/test-parachains/undying/collator",
"utils/staking-miner",
"utils/remote-ext-tests/bags-list",
"utils/generate-bags",
]
[badges]
maintenance = { status = "actively-developed" }
# The list of dependencies below (which can be both direct and indirect dependencies) are crates
# that are suspected to be CPU-intensive, and that are unlikely to require debugging (as some of
# their debug info might be missing) or to require to be frequently recompiled. We compile these
# dependencies with `opt-level=3` even in "dev" mode in order to make "dev" mode more usable.
# The majority of these crates are cryptographic libraries.
#
# If you see an error mentioning "profile package spec ... did not match any packages", it
# probably concerns this list.
#
# This list is ordered alphabetically.
[profile.dev.package]
blake2 = { opt-level = 3 }
blake2b_simd = { opt-level = 3 }
chacha20poly1305 = { opt-level = 3 }
cranelift-codegen = { opt-level = 3 }
cranelift-wasm = { opt-level = 3 }
crc32fast = { opt-level = 3 }
crossbeam-deque = { opt-level = 3 }
crypto-mac = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
ed25519-dalek = { opt-level = 3 }
futures-channel = { opt-level = 3 }
hash-db = { opt-level = 3 }
hashbrown = { opt-level = 3 }
hmac = { opt-level = 3 }
httparse = { opt-level = 3 }
integer-sqrt = { opt-level = 3 }
keccak = { opt-level = 3 }
libm = { opt-level = 3 }
librocksdb-sys = { opt-level = 3 }
libsecp256k1 = { opt-level = 3 }
libz-sys = { opt-level = 3 }
mio = { opt-level = 3 }
nalgebra = { opt-level = 3 }
num-bigint = { opt-level = 3 }
parking_lot = { opt-level = 3 }
parking_lot_core = { opt-level = 3 }
percent-encoding = { opt-level = 3 }
primitive-types = { opt-level = 3 }
reed-solomon-novelpoly = { opt-level = 3 }
ring = { opt-level = 3 }
rustls = { opt-level = 3 }
sha2 = { opt-level = 3 }
sha3 = { opt-level = 3 }
smallvec = { opt-level = 3 }
snow = { opt-level = 3 }
substrate-bip39 = { opt-level = 3 }
twox-hash = { opt-level = 3 }
uint = { opt-level = 3 }
x25519-dalek = { opt-level = 3 }
yamux = { opt-level = 3 }
zeroize = { opt-level = 3 }
[profile.release]
# Polkadot runtime requires unwinding.
panic = "unwind"
opt-level = 3
# make sure dev builds with backtrace do
# not slow us down
[profile.dev.package.backtrace]
inherits = "release"
[profile.production]
inherits = "release"
lto = true
codegen-units = 1
[profile.testnet]
inherits = "release"
debug = 1 # debug symbols are useful for profilers
debug-assertions = true
overflow-checks = true
[features]
runtime-benchmarks = [ "polkadot-cli/runtime-benchmarks" ]
try-runtime = [ "polkadot-cli/try-runtime" ]
fast-runtime = [ "polkadot-cli/fast-runtime" ]
runtime-metrics = [ "polkadot-cli/runtime-metrics" ]
pyroscope = ["polkadot-cli/pyroscope"]
jemalloc-allocator = ["polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"]
network-protocol-staging = ["polkadot-cli/network-protocol-staging"]
# Enables timeout-based tests supposed to be run only in CI environment as they may be flaky
# when run locally depending on system load
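# (they can also be enabled locally, e.g. `cargo test --features ci-only-tests`)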
ci-only-tests = ["polkadot-node-core-pvf/ci-only-tests"]
# Configuration for building a .deb package - for use with `cargo-deb`
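# (typically invoked as `cargo deb` after `cargo install cargo-deb`)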
[package.metadata.deb]
name = "polkadot"
extended-description = "Implementation of a https://polkadot.network node in Rust based on the Substrate framework."
section = "misc"
maintainer = "security@parity.io"
license-file = ["LICENSE", "0"]
# https://www.debian.org/doc/debian-policy/ch-maintainerscripts.html
maintainer-scripts = "scripts/packaging/deb-maintainer-scripts"
assets = [
["target/release/polkadot", "/usr/bin/", "755"],
["target/release/polkadot-prepare-worker", "/usr/lib/polkadot/", "755"],
["target/release/polkadot-execute-worker", "/usr/lib/polkadot/", "755"],
["scripts/packaging/polkadot.service", "/lib/systemd/system/", "644"]
]
conf-files = [
"/etc/default/polkadot"
]
[package.metadata.spellcheck]
config = "./scripts/ci/gitlab/spellcheck.toml"
+675
View File
@@ -0,0 +1,675 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{one line to give the program's name and a brief idea of what it does.}
Copyright (C) {year} {name of author}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
{project} Copyright (C) {year} {fullname}
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+13
View File
@@ -0,0 +1,13 @@
Dear contributors and users,
We would like to inform you that we have recently made significant changes to our repository structure. In order to streamline our development process and foster better contributions, we have merged three separate repositories, Cumulus, Substrate and Polkadot, into a single new repository: [the Polkadot SDK](https://github.com/paritytech/polkadot-sdk). Please support us by giving the new repo a star ⭐️.
By consolidating our codebase, we aim to enhance collaboration and provide a more efficient platform for future development.
If you currently have an open pull request in any of the merged repositories, we kindly request that you resubmit your PR in the new repository. This will ensure that your contributions are considered within the updated context and enable us to review and merge them more effectively.
We appreciate your understanding and ongoing support throughout this transition. Should you have any questions or require further assistance, please don't hesitate to [reach out to us](https://forum.polkadot.network/t/psa-parity-is-currently-working-on-merging-the-polkadot-stack-repositories-into-one-single-repository/2883).
Best Regards,
Parity Technologies
+57
View File
@@ -0,0 +1,57 @@
Polkadot Release Process
------------------------
### Branches
* release-candidate branch: The branch used for staging of the next release.
Named like `release-v0.8.26`
### Notes
* The release-candidate branch *must* be made in the paritytech/polkadot repo in
order for release automation to work correctly
* Any new pushes/merges to the release-candidate branch (for example,
refs/heads/release-v0.8.26) will result in the rc index being bumped (e.g., v0.8.26-rc1
to v0.8.26-rc2) and new wasms built.
### Release workflow
Below are the steps of the release workflow. Steps prefixed with NOACTION are
automated and require no human action.
1. To initiate the release process:
1. branch master off to a release candidate branch:
- `git checkout master; git pull; git checkout -b release-v0.8.26`
2. In the [substrate](https://github.com/paritytech/substrate) repo, check out the commit used by polkadot (this can be found using the following command in the *polkadot* repo: `grep 'paritytech/substrate' Cargo.lock | grep -E '[0-9a-f]{40}' | sort | uniq`)
3. Branch off this **substrate** commit into its own branch: `git checkout -b polkadot-v0.8.26; git push origin refs/heads/polkadot-v0.8.26`
4. In the **polkadot** repository, use [diener](https://github.com/bkchr/diener/) to switch to this branch: `diener update --branch "polkadot-v0.8.26" --substrate`. Update Cargo.lock (to do this, you can run `cargo build` and then ctrl+c once it finishes fetching and begins compiling)
5. Push the **polkadot** `release-v0.8.26` branch to Github: `git push origin refs/heads/release-v0.8.26`
2. NOACTION: The current HEAD of the release-candidate branch is tagged `v0.8.26-rc1`
3. NOACTION: A draft release and runtime WASMs are created for this
release-candidate automatically. A link to the draft release will be linked in
the internal polkadot matrix channel.
4. NOACTION: A new Github issue is created containing a checklist of manual
steps to be completed before we are confident with the release. This will be
linked in Matrix.
5. Complete the steps in the issue created in step 4, signing them off as
completed
6. (optional) If a fix is required to the release-candidate:
1. Merge the fix with `master` first
2. Cherry-pick the commit from `master` to `release-v0.8.26`, fixing any
merge conflicts. Try to avoid unnecessarily bumping crates.
3. Push the release-candidate branch to Github - this is now the new release-candidate
4. Depending on the cherry-picked changes, it may be necessary to perform some
or all of the manual tests again.
5. If there are **substrate** changes required, these should be cherry-picked to the substrate `polkadot-v0.8.26` branch and pushed, and the version of substrate used in **polkadot** updated using `cargo update -p sp-io`
7. Once happy with the release-candidate, tag the current top commit in the release candidate branch and push to Github: `git tag -s -m 'v0.8.26' v0.8.26; git push --tags`
8. NOACTION: The HEAD of the `release` branch will be tagged with `v0.8.26`,
and a final draft release will be created on Github.
### Security releases
Occasionally there may be changes that need to be made to the most recently
released version of Polkadot, without taking *every* change to `master` since
the last release; for example, when a security vulnerability is found and
releasing a fixed version is a matter of some expediency. In cases
like this, the fix should first be merged with master, cherry-picked to a branch
forked from `release`, tested, and then finally merged with `release`. A
sensible versioning scheme for changes like this is `vX.Y.Z-1`.
+101
View File
@@ -0,0 +1,101 @@
# Security Policy
Parity Technologies is committed to resolving security vulnerabilities in our software quickly and carefully. We take the necessary steps to minimize risk, provide timely information, and deliver vulnerability fixes and mitigations required to address security issues.
## Reporting a Vulnerability
Security vulnerabilities in Parity software should be reported by email to security@parity.io. If you think your report might be eligible for the Parity Bug Bounty Program, your email should be sent to bugbounty@parity.io.
Your report should include the following:
- your name
- description of the vulnerability
- attack scenario (if any)
- components
- reproduction
- other details
Try to include as much information in your report as you can, including a description of the vulnerability, its potential impact, and steps for reproducing it. Be sure to use a descriptive subject line.
You'll receive a response to your email within two business days indicating the next steps in handling your report. We encourage finders to use encrypted communication channels to protect the confidentiality of vulnerability reports. You can encrypt your report using our public key. This key is on [MIT's key server](https://pgp.mit.edu/pks/lookup?op=get&search=0x5D0F03018D07DE73) and reproduced below.
After the initial reply to your report, our team will endeavor to keep you informed of the progress being made towards a fix. These updates will be sent at least every five business days.
Thank you for taking the time to responsibly disclose any vulnerabilities you find.
## Responsible Investigation and Reporting
Responsible investigation and reporting includes, but isn't limited to, the following:
- Don't violate the privacy of other users, destroy data, etc.
- Don't defraud or harm Parity Technologies Ltd or its users during your research; you should make a good faith effort to not interrupt or degrade our services.
- Don't target our physical security measures, or attempt to use social engineering, spam, distributed denial-of-service (DDoS) attacks, etc.
- Initially report the bug only to us and not to anyone else.
- Give us a reasonable amount of time to fix the bug before disclosing it to anyone else, and give us adequate written warning before disclosing it to anyone else.
- In general, please investigate and report bugs in a way that makes a reasonable, good faith effort not to be disruptive or harmful to us or our users. Otherwise your actions might be interpreted as an attack rather than an effort to be helpful.
## Bug Bounty Program
Our Bug Bounty Program allows us to recognise and reward members of the Parity community for helping us find and address significant bugs, in accordance with the terms of the Parity Bug Bounty Program. A detailed description on eligibility, rewards, legal information and terms & conditions for contributors can be found on [our website](https://paritytech.io/bug-bounty.html).
## Plaintext PGP Key
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF0vHwQBEADKui4qAo4bzdzRhMm+uhUpYGf8jjjmET3zJ8kKQIpp6JTsV+HJ
6m1We0QYeMRXoOYH1xVHBf2zNCuHS0nSQdUCQA7SHWsPB05STa2hvlR7fSdQnCCp
gnLOJWXvvedlRDIAhvqI6cwLdUlXgVSKEwrwmrpiBhh4NxI3qX+LyIa+Ovkchu2S
d/YCnE4GqojSGRfJYiGwe2N+sF7OfaoKhQuTrtdDExHrMU4cWnTXW2wyxTr4xkj9
jS2WeLVZWflvkDHT8JD9N6jNxBVEF/Qvjk83zI0kCOzkhek8x+YUgfLq3/rHOYbX
3pW21ccHYPacHjHWvKE+xRebjeEhJ4KxKHfCVjQcxybwDBqDka1AniZt4CQ7UORf
MU/ue2oSZ9nNg0uMdb/0AbQPZ04OlMcYPAPWzFL08nVPox9wT9uqlL6JtcOeC90h
oOeDmfgwmjMmdwWTRgt9qQjcbgXzVvuAzIGbzj1X3MdLspWdHs/d2+US4nji1TkN
oYIW7vE+xkd3aB+NZunIlm9Rwd/0mSgDg+DaNa5KceOLhq0/qKgcXC/RRU29I8II
tusRoR/oesGJGYTjh4k6PJkG+nvDPsoQrwYT44bhnniS1xYkxWYXF99JFI7LgMdD
e1SgKeIDVpvm873k82E6arp5655Wod1XOjaXBggCwFp84eKcEZEN+1qEWwARAQAB
tClQYXJpdHkgU2VjdXJpdHkgVGVhbSA8c2VjdXJpdHlAcGFyaXR5LmlvPokCVAQT
AQoAPhYhBJ1LK264+XFW0ZZpqf8IEtSRuWeYBQJdLx8EAhsDBQkDwmcABQsJCAcC
BhUKCQgLAgQWAgMBAh4BAheAAAoJEP8IEtSRuWeYL84QAI6NwnwS561DWYYRAd4y
ocGPr3CnwFSt1GjkSkRy3B+tMhzexBg1y7EbLRUefIrO4LwOlywtRk8tTRGgEI4i
5xRLHbOkeolfgCFSpOj5d8cMKCt5HEIv18hsv6dkrzlSYA5NLX/GRBEh3F/0sGny
vCXapfxa1cx72sU7631JBK7t2Tf+MfwxdfyFZ9TI9WdtP5AfVjgTkIVkEDFcZPTc
n3CYXqTYFIBCNUD8LP4iTi3xUt7pTGJQQoFT8l15nJCgzRYQ+tXpoTRlf+/LtXmw
6iidPV87E06jHdK9666rBouIabAtx7i0/4kwo+bSZ8DiSKRUaehiHGd212HSEmdF
jxquWE4pEzoUowYznhSIfR+WWIqRBHxEYarP4m98Hi+VXZ7Fw1ytzO8+BAKnLXnj
2W2+T9qJks5gqVEoaWNnqpvya6JA11QZvZ0w7Om2carDc2ILNm2Xx9J0mRUye8P0
KxcgqJuKNGFtugebQAsXagkxOKsdKna1PlDlxEfTf6AgI3ST8qSiMAwaaIMB/REF
VKUapGoslQX4tOCjibI2pzEgE//D8NAaSVu2A9+BUcFERdZRxsI7fydIXNeZ2R46
N2qfW+DP3YR/14QgdRxDItEavUoE1vByRXwIufKAkVemOZzIoFXKFsDeXwqTVW5i
6CXu6OddZ3QHDiT9TEbRny4QuQINBF0vKCwBEACnP5J7LEGbpxNBrPvGdxZUo0YA
U8RgeKDRPxJTvMo27V1IPZGaKRCRq8LBfg/eHhqZhQ7SLJBjBljd8kuT5dHDBTRe
jE1UIOhmnlSlrEJjAmpVO08irlGpq1o+8mGcvkBsR0poCVjeNeSnwYfRnR+c3GK5
Er6/JRqfN4mJvnEC9/Pbm6C7ql6YLKxC3yqzF97JL5brbbuozrW7nixY/yAI8619
VlBIMP7PAUbGcnSQyuV5b/Wr2Sgr6NJclnNSLjh2U9/Du6w/0tDGlMBts8HjRnWJ
BXbkTdQKCTaqgK68kTKSiN1/x+lynxHC2AavMpH/08Kopg2ZCzJowMKIgcB+4Z/I
DJKZWHWKumhaZMGXcWgzgcByog9IpamuROEZFJNEUAFf7YIncEckPSif4looiOdS
VurKZGvYXXaGSsZbGgHxI5CWu7ZxMdLBLvtOcCYmRQrG+g/h+PGU5BT0bNAfNTkm
V3/n1B/TWbpWRmB3AwT2emQivXHkaubGI0VivhaO43AuI9JWoqiMqFtxbuTeoxwD
xlu2Dzcp0v+AR4T5cIG9D5/+yiPc25aIY7cIKxuNFHIDL4td5fwSGC7vU6998PIG
2Y48TGBnw7zpEfDfMayqAeBjX0YU6PTNsvS5O6bP3j4ojTOUYD7Z8QdCvgISDID3
WMGAdmSwmCRvsQ/OJwARAQABiQI8BBgBCgAmFiEEnUsrbrj5cVbRlmmp/wgS1JG5
Z5gFAl0vKCwCGwwFCQB2pwAACgkQ/wgS1JG5Z5hdbw//ZqR+JcWm59NUIHjauETJ
sYDYhcAfa3txTacRn5uPz/TQiTd7wZ82+G8Et0ZnpEHy6eWyBqHpG0hiPhFBzxjY
nhjHl8jJeyo2mQIVJhzkL58BHBZk8WM2TlaU7VxZ6TYOmP2y3qf6FD6mCcrQ4Fml
E9f0lyVUoI/5Zs9oF0izRk8vkwaY3UvLM7XEY6nM8GnFG8kaiZMYmx26Zo7Uz31G
7EGGZFsrVDXfNhSJyz79Gyn+Lx9jOTdoR0sH/THYIIosE83awMGE6jKeuDYTbVWu
+ZtHQef+pRteki3wvNLJK+kC1y3BtHqDJS9Lqx0s8SCiVozlC+fZfC9hCtU7bXJK
0UJZ4qjSvj6whzfaNgOZAqJpmwgOnd8W/3YJk1DwUeX98FcU38MR23SOkx2EDdDE
77Kdu62vTs/tLmOTuyKBvYPaHaYulYjQTxurG+o8vhHtaL87ARvuq+83dj+nO5z3
5O9vkcVJYWjOEnJe7ZvCTxeLJehpCmHIbyUuDx5P24MWVbyXOxIlxNxTqlub5GlW
rQF6Qsa/0k9TRk7Htbct6fAA0/VahJS0g096MrTH8AxBXDNE8lIoNeGikVlaxK9Z
S+aannlWYIJymZ4FygIPPaRlzhAoXBuJd8OaR5giC7dS1xquxKOiQEXTGsLeGFaI
BZYiIhW7GG4ozvKDqyNm4eg=
=yKcB
-----END PGP PUBLIC KEY BLOCK-----
```
+22
View File
@@ -0,0 +1,22 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
fn main() {
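// `generate_cargo_keys` emits `cargo:rustc-env` version keys (for example the
// impl version) that the node and CLI read at compile time via `env!`.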
substrate_build_script_utils::generate_cargo_keys();
// For the node/worker version check, make sure we always rebuild the node and binary workers
// when the version changes.
substrate_build_script_utils::rerun_if_git_head_changed();
}
+78
View File
@@ -0,0 +1,78 @@
[package]
name = "polkadot-cli"
description = "Polkadot Relay-chain Client Node"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[package.metadata.wasm-pack.profile.release]
# `wasm-opt` has some problems on Linux, see
# https://github.com/rustwasm/wasm-pack/issues/781 etc.
wasm-opt = false
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
clap = { version = "4.0.9", features = ["derive"], optional = true }
log = "0.4.17"
thiserror = "1.0.31"
futures = "0.3.21"
pyro = { package = "pyroscope", version = "0.5.3", optional = true }
pyroscope_pprofrs = { version = "0.2", optional = true }
service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true }
polkadot-performance-test = { path = "../node/test/performance-test", optional = true }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" }
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
try-runtime-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
polkadot-node-metrics = { path = "../node/metrics" }
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true }
sc-sysinfo = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-storage-monitor = { git = "https://github.com/paritytech/substrate", branch = "master" }
[build-dependencies]
substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = ["db", "cli", "full-node"]
db = ["service/db"]
cli = [
"clap",
"sc-cli",
"sc-service",
"sc-tracing",
"frame-benchmarking-cli",
"try-runtime-cli",
"service",
]
runtime-benchmarks = [
"service/runtime-benchmarks",
"polkadot-node-metrics/runtime-benchmarks",
"polkadot-performance-test?/runtime-benchmarks"
]
full-node = ["service/full-node"]
try-runtime = ["service/try-runtime", "try-runtime-cli/try-runtime"]
fast-runtime = ["service/fast-runtime"]
pyroscope = ["pyro", "pyroscope_pprofrs"]
hostperfcheck = ["polkadot-performance-test"]
# Configure the native runtimes to use. Polkadot is enabled by default.
#
# Validators require the native runtime currently
polkadot-native = ["service/polkadot-native"]
kusama-native = ["service/kusama-native"]
westend-native = ["service/westend-native"]
rococo-native = ["service/rococo-native"]
malus = ["full-node", "service/malus"]
runtime-metrics = ["service/runtime-metrics", "polkadot-node-metrics/runtime-metrics"]
network-protocol-staging = ["service/network-protocol-staging"]
+25
View File
@@ -0,0 +1,25 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
fn main() {
if let Ok(profile) = std::env::var("PROFILE") {
println!("cargo:rustc-cfg=build_type=\"{}\"", profile);
}
substrate_build_script_utils::generate_cargo_keys();
// For the node/worker version check, make sure we always rebuild the node when the version
// changes.
substrate_build_script_utils::rerun_if_git_head_changed();
}
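The `cargo:rustc-cfg=build_type` line above exposes the active build profile to the compiled crate as a `cfg` flag. A minimal sketch of a downstream consumer (hypothetical, not part of this crate):

```rust
// Hypothetical consumer of the `build_type` cfg emitted by the build script above.
#[cfg(build_type = "release")]
fn build_profile() -> &'static str {
	"release"
}

#[cfg(not(build_type = "release"))]
fn build_profile() -> &'static str {
	"debug"
}

fn main() {
	// Prints "release" for `cargo build --release`, "debug" otherwise.
	println!("compiled as a {} build", build_profile());
}
```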
+157
View File
@@ -0,0 +1,157 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Polkadot CLI library.
use clap::Parser;
use std::path::PathBuf;
/// The version of the node. The passed-in version of the workers should match this.
pub const NODE_VERSION: &'static str = env!("SUBSTRATE_CLI_IMPL_VERSION");
#[allow(missing_docs)]
#[derive(Debug, Parser)]
pub enum Subcommand {
/// Build a chain specification.
BuildSpec(sc_cli::BuildSpecCmd),
/// Validate blocks.
CheckBlock(sc_cli::CheckBlockCmd),
/// Export blocks.
ExportBlocks(sc_cli::ExportBlocksCmd),
/// Export the state of a given block into a chain spec.
ExportState(sc_cli::ExportStateCmd),
/// Import blocks.
ImportBlocks(sc_cli::ImportBlocksCmd),
/// Remove the whole chain.
PurgeChain(sc_cli::PurgeChainCmd),
/// Revert the chain to a previous state.
Revert(sc_cli::RevertCmd),
/// Sub-commands concerned with benchmarking.
/// The pallet benchmarking moved to the `pallet` sub-command.
#[command(subcommand)]
Benchmark(frame_benchmarking_cli::BenchmarkCmd),
/// Runs performance checks such as PVF compilation in order to measure the machine's
/// capability of running a validator.
HostPerfCheck,
/// Try-runtime has migrated to a standalone CLI
/// (<https://github.com/paritytech/try-runtime-cli>). The subcommand exists as a stub and
/// deprecation notice. It will be removed entirely some time after January 2024.
TryRuntime,
/// Key management CLI utilities
#[command(subcommand)]
Key(sc_cli::KeySubcommand),
/// Db meta columns information.
ChainInfo(sc_cli::ChainInfoCmd),
}
#[allow(missing_docs)]
#[derive(Debug, Parser)]
#[group(skip)]
pub struct RunCmd {
#[clap(flatten)]
pub base: sc_cli::RunCmd,
/// Force using Kusama native runtime.
#[arg(long = "force-kusama")]
pub force_kusama: bool,
/// Force using Westend native runtime.
#[arg(long = "force-westend")]
pub force_westend: bool,
/// Force using Rococo native runtime.
#[arg(long = "force-rococo")]
pub force_rococo: bool,
/// Setup a GRANDPA scheduled voting pause.
///
/// This parameter takes two values, namely a block number and a delay (in
/// blocks). After the given block number is finalized the GRANDPA voter
/// will temporarily stop voting for new blocks until the given delay has
/// elapsed (i.e. until a block at height `pause_block + delay` is imported).
#[arg(long = "grandpa-pause", num_args = 2)]
pub grandpa_pause: Vec<u32>,
/// Disable the BEEFY gadget
/// (currently enabled by default on Rococo, Wococo and Versi).
#[arg(long)]
pub no_beefy: bool,
/// Add the destination address to the jaeger agent.
///
/// Must be a valid socket address, of format `IP:Port`,
/// commonly `127.0.0.1:6831`.
#[arg(long)]
pub jaeger_agent: Option<String>,
/// Add the destination address to the `pyroscope` agent.
///
/// Must be a valid socket address, of format `IP:Port`,
/// commonly `127.0.0.1:4040`.
#[arg(long)]
pub pyroscope_server: Option<String>,
/// Disable automatic hardware benchmarks.
///
/// By default these benchmarks are automatically run at startup and measure
/// the CPU speed, the memory bandwidth and the disk speed.
///
/// The results are then printed out in the logs, and also sent as part of
/// telemetry, if telemetry is enabled.
#[arg(long)]
pub no_hardware_benchmarks: bool,
/// Overseer message capacity override.
///
/// **Dangerous!** Do not touch unless explicitly advised to.
#[arg(long)]
pub overseer_channel_capacity_override: Option<usize>,
/// Path to the directory where auxiliary worker binaries reside. If not specified, the main
/// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY:
/// if the path points to an executable rather than a directory, that executable is used both as
/// preparation and execution worker.
#[arg(long, value_name = "PATH")]
pub workers_path: Option<PathBuf>,
/// TESTING ONLY: disable the version check between nodes and workers.
#[arg(long, hide = true)]
pub disable_worker_version_check: bool,
}
#[allow(missing_docs)]
#[derive(Debug, Parser)]
pub struct Cli {
#[command(subcommand)]
pub subcommand: Option<Subcommand>,
#[clap(flatten)]
pub run: RunCmd,
#[clap(flatten)]
pub storage_monitor: sc_storage_monitor::StorageMonitorParams,
}
+567
View File
@@ -0,0 +1,567 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use crate::cli::{Cli, Subcommand, NODE_VERSION};
use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE};
use futures::future::TryFutureExt;
use log::{info, warn};
use sc_cli::SubstrateCli;
use service::{
self,
benchmarking::{benchmark_inherent_data, RemarkBuilder, TransferKeepAliveBuilder},
HeaderBackend, IdentifyVariant,
};
use sp_core::crypto::Ss58AddressFormatRegistry;
use sp_keyring::Sr25519Keyring;
use std::net::ToSocketAddrs;
pub use crate::{error::Error, service::BlockId};
#[cfg(feature = "hostperfcheck")]
pub use polkadot_performance_test::PerfCheckError;
#[cfg(feature = "pyroscope")]
use pyroscope_pprofrs::{pprof_backend, PprofConfig};
impl From<String> for Error {
fn from(s: String) -> Self {
Self::Other(s)
}
}
type Result<T> = std::result::Result<T, Error>;
fn get_exec_name() -> Option<String> {
std::env::current_exe()
.ok()
.and_then(|pb| pb.file_name().map(|s| s.to_os_string()))
.and_then(|s| s.into_string().ok())
}
impl SubstrateCli for Cli {
fn impl_name() -> String {
"Parity Polkadot".into()
}
fn impl_version() -> String {
NODE_VERSION.into()
}
fn description() -> String {
env!("CARGO_PKG_DESCRIPTION").into()
}
fn author() -> String {
env!("CARGO_PKG_AUTHORS").into()
}
fn support_url() -> String {
"https://github.com/paritytech/polkadot/issues/new".into()
}
fn copyright_start_year() -> i32 {
2017
}
fn executable_name() -> String {
"polkadot".into()
}
fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
let id = if id == "" {
let n = get_exec_name().unwrap_or_default();
["polkadot", "kusama", "westend", "rococo", "versi"]
.iter()
.cloned()
.find(|&chain| n.starts_with(chain))
.unwrap_or("polkadot")
} else {
id
};
Ok(match id {
"kusama" => Box::new(service::chain_spec::kusama_config()?),
#[cfg(feature = "kusama-native")]
"kusama-dev" => Box::new(service::chain_spec::kusama_development_config()?),
#[cfg(feature = "kusama-native")]
"kusama-local" => Box::new(service::chain_spec::kusama_local_testnet_config()?),
#[cfg(feature = "kusama-native")]
"kusama-staging" => Box::new(service::chain_spec::kusama_staging_testnet_config()?),
#[cfg(not(feature = "kusama-native"))]
name if name.starts_with("kusama-") && !name.ends_with(".json") =>
Err(format!("`{}` only supported with `kusama-native` feature enabled.", name))?,
"polkadot" => Box::new(service::chain_spec::polkadot_config()?),
#[cfg(feature = "polkadot-native")]
"polkadot-dev" | "dev" => Box::new(service::chain_spec::polkadot_development_config()?),
#[cfg(feature = "polkadot-native")]
"polkadot-local" => Box::new(service::chain_spec::polkadot_local_testnet_config()?),
"rococo" => Box::new(service::chain_spec::rococo_config()?),
#[cfg(feature = "rococo-native")]
"rococo-dev" => Box::new(service::chain_spec::rococo_development_config()?),
#[cfg(feature = "rococo-native")]
"rococo-local" => Box::new(service::chain_spec::rococo_local_testnet_config()?),
#[cfg(feature = "rococo-native")]
"rococo-staging" => Box::new(service::chain_spec::rococo_staging_testnet_config()?),
#[cfg(not(feature = "rococo-native"))]
name if name.starts_with("rococo-") && !name.ends_with(".json") =>
Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?,
"westend" => Box::new(service::chain_spec::westend_config()?),
#[cfg(feature = "westend-native")]
"westend-dev" => Box::new(service::chain_spec::westend_development_config()?),
#[cfg(feature = "westend-native")]
"westend-local" => Box::new(service::chain_spec::westend_local_testnet_config()?),
#[cfg(feature = "westend-native")]
"westend-staging" => Box::new(service::chain_spec::westend_staging_testnet_config()?),
#[cfg(not(feature = "westend-native"))]
name if name.starts_with("westend-") && !name.ends_with(".json") =>
Err(format!("`{}` only supported with `westend-native` feature enabled.", name))?,
"wococo" => Box::new(service::chain_spec::wococo_config()?),
#[cfg(feature = "rococo-native")]
"wococo-dev" => Box::new(service::chain_spec::wococo_development_config()?),
#[cfg(feature = "rococo-native")]
"wococo-local" => Box::new(service::chain_spec::wococo_local_testnet_config()?),
#[cfg(not(feature = "rococo-native"))]
name if name.starts_with("wococo-") =>
Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?,
#[cfg(feature = "rococo-native")]
"versi-dev" => Box::new(service::chain_spec::versi_development_config()?),
#[cfg(feature = "rococo-native")]
"versi-local" => Box::new(service::chain_spec::versi_local_testnet_config()?),
#[cfg(feature = "rococo-native")]
"versi-staging" => Box::new(service::chain_spec::versi_staging_testnet_config()?),
#[cfg(not(feature = "rococo-native"))]
name if name.starts_with("versi-") =>
Err(format!("`{}` only supported with `rococo-native` feature enabled.", name))?,
path => {
let path = std::path::PathBuf::from(path);
let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?)
as Box<dyn service::ChainSpec>;
// When `force_*` is given or the file name starts with the name of one of the known
// chains, we use the chain spec for the specific chain.
if self.run.force_rococo ||
chain_spec.is_rococo() ||
chain_spec.is_wococo() ||
chain_spec.is_versi()
{
Box::new(service::RococoChainSpec::from_json_file(path)?)
} else if self.run.force_kusama || chain_spec.is_kusama() {
Box::new(service::KusamaChainSpec::from_json_file(path)?)
} else if self.run.force_westend || chain_spec.is_westend() {
Box::new(service::WestendChainSpec::from_json_file(path)?)
} else {
chain_spec
}
},
})
}
}
fn set_default_ss58_version(spec: &Box<dyn service::ChainSpec>) {
let ss58_version = if spec.is_kusama() {
Ss58AddressFormatRegistry::KusamaAccount
} else if spec.is_westend() {
Ss58AddressFormatRegistry::SubstrateAccount
} else {
Ss58AddressFormatRegistry::PolkadotAccount
}
.into();
sp_core::crypto::set_default_ss58_version(ss58_version);
}
const DEV_ONLY_ERROR_PATTERN: &'static str =
"can only use subcommand with --chain [polkadot-dev, kusama-dev, westend-dev, rococo-dev, wococo-dev], got ";
fn ensure_dev(spec: &Box<dyn service::ChainSpec>) -> std::result::Result<(), String> {
if spec.is_dev() {
Ok(())
} else {
Err(format!("{}{}", DEV_ONLY_ERROR_PATTERN, spec.id()))
}
}
/// Runs performance checks.
/// Should only be used in a release build, since the check would take too much time otherwise.
fn host_perf_check() -> Result<()> {
#[cfg(not(feature = "hostperfcheck"))]
{
return Err(Error::FeatureNotEnabled { feature: "hostperfcheck" }.into())
}
#[cfg(all(not(build_type = "release"), feature = "hostperfcheck"))]
{
return Err(PerfCheckError::WrongBuildType.into())
}
#[cfg(all(feature = "hostperfcheck", build_type = "release"))]
{
crate::host_perf_check::host_perf_check()?;
return Ok(())
}
}
/// Launch a node, accepting arguments just like a regular node,
/// but accepting an alternative overseer generator to adjust behavior
/// for integration tests as needed.
/// `malus_finality_delay` restricts finality votes of this node
/// to be at most `best_block - malus_finality_delay` in height.
#[cfg(feature = "malus")]
pub fn run_node(
run: Cli,
overseer_gen: impl service::OverseerGen,
malus_finality_delay: Option<u32>,
) -> Result<()> {
run_node_inner(run, overseer_gen, malus_finality_delay, |_logger_builder, _config| {})
}
fn run_node_inner<F>(
cli: Cli,
overseer_gen: impl service::OverseerGen,
maybe_malus_finality_delay: Option<u32>,
logger_hook: F,
) -> Result<()>
where
F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration),
{
let runner = cli
.create_runner_with_logger_hook::<sc_cli::RunCmd, F>(&cli.run.base, logger_hook)
.map_err(Error::from)?;
let chain_spec = &runner.config().chain_spec;
// By default, enable BEEFY on all networks except Polkadot (for now), unless
// explicitly disabled through CLI.
let mut enable_beefy = !chain_spec.is_polkadot() && !cli.run.no_beefy;
// BEEFY doesn't (yet) support warp sync:
// Until we implement https://github.com/paritytech/substrate/issues/14756
// - disallow warp sync for validators,
// - disable BEEFY when warp syncing for non-validators.
if enable_beefy && runner.config().network.sync_mode.is_warp() {
if runner.config().role.is_authority() {
return Err(Error::Other(
"Warp sync not supported for validator nodes running BEEFY.".into(),
))
} else {
// disable BEEFY for non-validator nodes that are warp syncing
warn!("🥩 BEEFY not supported when warp syncing. Disabling BEEFY.");
enable_beefy = false;
}
}
set_default_ss58_version(chain_spec);
let grandpa_pause = if cli.run.grandpa_pause.is_empty() {
None
} else {
Some((cli.run.grandpa_pause[0], cli.run.grandpa_pause[1]))
};
if chain_spec.is_kusama() {
info!("----------------------------");
info!("This chain is not in any way");
info!(" endorsed by the ");
info!(" KUSAMA FOUNDATION ");
info!("----------------------------");
}
let jaeger_agent = if let Some(ref jaeger_agent) = cli.run.jaeger_agent {
Some(
jaeger_agent
.to_socket_addrs()
.map_err(Error::AddressResolutionFailure)?
.next()
.ok_or_else(|| Error::AddressResolutionMissing)?,
)
} else {
None
};
let node_version =
if cli.run.disable_worker_version_check { None } else { Some(NODE_VERSION.to_string()) };
runner.run_node_until_exit(move |config| async move {
let hwbench = (!cli.run.no_hardware_benchmarks)
.then_some(config.database.path().map(|database_path| {
let _ = std::fs::create_dir_all(&database_path);
sc_sysinfo::gather_hwbench(Some(database_path))
}))
.flatten();
let database_source = config.database.clone();
let task_manager = service::build_full(
config,
service::NewFullParams {
is_parachain_node: service::IsParachainNode::No,
grandpa_pause,
enable_beefy,
jaeger_agent,
telemetry_worker_handle: None,
node_version,
workers_path: cli.run.workers_path,
workers_names: None,
overseer_gen,
overseer_message_channel_capacity_override: cli
.run
.overseer_channel_capacity_override,
malus_finality_delay: maybe_malus_finality_delay,
hwbench,
},
)
.map(|full| full.task_manager)?;
sc_storage_monitor::StorageMonitorService::try_spawn(
cli.storage_monitor,
database_source,
&task_manager.spawn_essential_handle(),
)?;
Ok(task_manager)
})
}
/// Parses polkadot specific CLI arguments and run the service.
pub fn run() -> Result<()> {
let cli: Cli = Cli::from_args();
#[cfg(feature = "pyroscope")]
let mut pyroscope_agent_maybe = if let Some(ref agent_addr) = cli.run.pyroscope_server {
let address = agent_addr
.to_socket_addrs()
.map_err(Error::AddressResolutionFailure)?
.next()
.ok_or_else(|| Error::AddressResolutionMissing)?;
// The pyroscope agent requires a `http://` prefix, so we just do that.
let agent = pyro::PyroscopeAgent::builder(
"http://".to_owned() + address.to_string().as_str(),
"polkadot".to_owned(),
)
.backend(pprof_backend(PprofConfig::new().sample_rate(113)))
.build()?;
Some(agent.start()?)
} else {
None
};
#[cfg(not(feature = "pyroscope"))]
if cli.run.pyroscope_server.is_some() {
return Err(Error::PyroscopeNotCompiledIn)
}
match &cli.subcommand {
None => run_node_inner(
cli,
service::RealOverseerGen,
None,
polkadot_node_metrics::logger_hook(),
),
Some(Subcommand::BuildSpec(cmd)) => {
let runner = cli.create_runner(cmd)?;
Ok(runner.sync_run(|config| cmd.run(config.chain_spec, config.network))?)
},
Some(Subcommand::CheckBlock(cmd)) => {
let runner = cli.create_runner(cmd).map_err(Error::SubstrateCli)?;
let chain_spec = &runner.config().chain_spec;
set_default_ss58_version(chain_spec);
runner.async_run(|mut config| {
let (client, _, import_queue, task_manager) =
service::new_chain_ops(&mut config, None)?;
Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager))
})
},
Some(Subcommand::ExportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
let chain_spec = &runner.config().chain_spec;
set_default_ss58_version(chain_spec);
Ok(runner.async_run(|mut config| {
let (client, _, _, task_manager) =
service::new_chain_ops(&mut config, None).map_err(Error::PolkadotService)?;
Ok((cmd.run(client, config.database).map_err(Error::SubstrateCli), task_manager))
})?)
},
Some(Subcommand::ExportState(cmd)) => {
let runner = cli.create_runner(cmd)?;
let chain_spec = &runner.config().chain_spec;
set_default_ss58_version(chain_spec);
Ok(runner.async_run(|mut config| {
let (client, _, _, task_manager) = service::new_chain_ops(&mut config, None)?;
Ok((cmd.run(client, config.chain_spec).map_err(Error::SubstrateCli), task_manager))
})?)
},
Some(Subcommand::ImportBlocks(cmd)) => {
let runner = cli.create_runner(cmd)?;
let chain_spec = &runner.config().chain_spec;
set_default_ss58_version(chain_spec);
Ok(runner.async_run(|mut config| {
let (client, _, import_queue, task_manager) =
service::new_chain_ops(&mut config, None)?;
Ok((cmd.run(client, import_queue).map_err(Error::SubstrateCli), task_manager))
})?)
},
Some(Subcommand::PurgeChain(cmd)) => {
let runner = cli.create_runner(cmd)?;
Ok(runner.sync_run(|config| cmd.run(config.database))?)
},
Some(Subcommand::Revert(cmd)) => {
let runner = cli.create_runner(cmd)?;
let chain_spec = &runner.config().chain_spec;
set_default_ss58_version(chain_spec);
Ok(runner.async_run(|mut config| {
let (client, backend, _, task_manager) = service::new_chain_ops(&mut config, None)?;
let aux_revert = Box::new(|client, backend, blocks| {
service::revert_backend(client, backend, blocks, config).map_err(|err| {
match err {
service::Error::Blockchain(err) => err.into(),
// Generic application-specific error.
err => sc_cli::Error::Application(err.into()),
}
})
});
Ok((
cmd.run(client, backend, Some(aux_revert)).map_err(Error::SubstrateCli),
task_manager,
))
})?)
},
Some(Subcommand::Benchmark(cmd)) => {
let runner = cli.create_runner(cmd)?;
let chain_spec = &runner.config().chain_spec;
match cmd {
#[cfg(not(feature = "runtime-benchmarks"))]
BenchmarkCmd::Storage(_) =>
return Err(sc_cli::Error::Input(
"Compile with --features=runtime-benchmarks \
to enable storage benchmarks."
.into(),
)
.into()),
#[cfg(feature = "runtime-benchmarks")]
BenchmarkCmd::Storage(cmd) => runner.sync_run(|mut config| {
let (client, backend, _, _) = service::new_chain_ops(&mut config, None)?;
let db = backend.expose_db();
let storage = backend.expose_storage();
cmd.run(config, client.clone(), db, storage).map_err(Error::SubstrateCli)
}),
BenchmarkCmd::Block(cmd) => runner.sync_run(|mut config| {
let (client, _, _, _) = service::new_chain_ops(&mut config, None)?;
cmd.run(client.clone()).map_err(Error::SubstrateCli)
}),
// These commands are very similar and can be handled in nearly the same way.
BenchmarkCmd::Extrinsic(_) | BenchmarkCmd::Overhead(_) => {
ensure_dev(chain_spec).map_err(Error::Other)?;
runner.sync_run(|mut config| {
let (client, _, _, _) = service::new_chain_ops(&mut config, None)?;
let header = client.header(client.info().genesis_hash).unwrap().unwrap();
let inherent_data = benchmark_inherent_data(header)
.map_err(|e| format!("generating inherent data: {:?}", e))?;
let remark_builder =
RemarkBuilder::new(client.clone(), config.chain_spec.identify_chain());
match cmd {
BenchmarkCmd::Extrinsic(cmd) => {
let tka_builder = TransferKeepAliveBuilder::new(
client.clone(),
Sr25519Keyring::Alice.to_account_id(),
config.chain_spec.identify_chain(),
);
let ext_factory = ExtrinsicFactory(vec![
Box::new(remark_builder),
Box::new(tka_builder),
]);
cmd.run(client.clone(), inherent_data, Vec::new(), &ext_factory)
.map_err(Error::SubstrateCli)
},
BenchmarkCmd::Overhead(cmd) => cmd
.run(
config,
client.clone(),
inherent_data,
Vec::new(),
&remark_builder,
)
.map_err(Error::SubstrateCli),
_ => unreachable!("Ensured by the outside match; qed"),
}
})
},
BenchmarkCmd::Pallet(cmd) => {
set_default_ss58_version(chain_spec);
ensure_dev(chain_spec).map_err(Error::Other)?;
if cfg!(feature = "runtime-benchmarks") {
runner.sync_run(|config| {
cmd.run::<service::Block, ()>(config)
.map_err(|e| Error::SubstrateCli(e))
})
} else {
Err(sc_cli::Error::Input(
"Benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
.into(),
)
.into())
}
},
BenchmarkCmd::Machine(cmd) => runner.sync_run(|config| {
cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone())
.map_err(Error::SubstrateCli)
}),
// NOTE: this allows the Polkadot client to leniently implement
// new benchmark commands.
#[allow(unreachable_patterns)]
_ => Err(Error::CommandNotImplemented),
}
},
Some(Subcommand::HostPerfCheck) => {
let mut builder = sc_cli::LoggerBuilder::new("");
builder.with_colors(true);
builder.init()?;
host_perf_check()
},
Some(Subcommand::Key(cmd)) => Ok(cmd.run(&cli)?),
#[cfg(feature = "try-runtime")]
Some(Subcommand::TryRuntime) => Err(try_runtime_cli::DEPRECATION_NOTICE.to_owned().into()),
#[cfg(not(feature = "try-runtime"))]
Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \
You can enable it with `--features try-runtime`."
.to_owned()
.into()),
Some(Subcommand::ChainInfo(cmd)) => {
let runner = cli.create_runner(cmd)?;
Ok(runner.sync_run(|config| cmd.run::<service::Block>(&config))?)
},
}?;
#[cfg(feature = "pyroscope")]
if let Some(pyroscope_agent) = pyroscope_agent_maybe.take() {
let agent = pyroscope_agent.stop()?;
agent.shutdown();
}
Ok(())
}
+60
View File
@@ -0,0 +1,60 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error(transparent)]
PolkadotService(#[from] service::Error),
#[error(transparent)]
SubstrateCli(#[from] sc_cli::Error),
#[error(transparent)]
SubstrateService(#[from] sc_service::Error),
#[error(transparent)]
SubstrateTracing(#[from] sc_tracing::logging::Error),
#[error(transparent)]
#[cfg(feature = "hostperfcheck")]
PerfCheck(#[from] polkadot_performance_test::PerfCheckError),
#[cfg(not(feature = "pyroscope"))]
#[error("Binary was not compiled with `--feature=pyroscope`")]
PyroscopeNotCompiledIn,
#[cfg(feature = "pyroscope")]
#[error("Failed to connect to pyroscope agent")]
PyroscopeError(#[from] pyro::error::PyroscopeError),
#[error("Failed to resolve provided URL")]
AddressResolutionFailure(#[from] std::io::Error),
#[error("URL did not resolve to anything")]
AddressResolutionMissing,
#[error("Command is not implemented")]
CommandNotImplemented,
#[error(transparent)]
Storage(#[from] sc_storage_monitor::Error),
#[error("Other: {0}")]
Other(String),
#[error("This subcommand is only available when compiled with `{feature}`")]
FeatureNotEnabled { feature: &'static str },
}
+74
View File
@@ -0,0 +1,74 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use log::info;
use polkadot_performance_test::{
measure_erasure_coding, measure_pvf_prepare, PerfCheckError, ERASURE_CODING_N_VALIDATORS,
ERASURE_CODING_TIME_LIMIT, PVF_PREPARE_TIME_LIMIT, VALIDATION_CODE_BOMB_LIMIT,
};
use std::time::Duration;
pub fn host_perf_check() -> Result<(), PerfCheckError> {
let pvf_prepare_time_limit = time_limit_from_baseline(PVF_PREPARE_TIME_LIMIT);
let erasure_coding_time_limit = time_limit_from_baseline(ERASURE_CODING_TIME_LIMIT);
let wasm_code =
polkadot_performance_test::WASM_BINARY.ok_or(PerfCheckError::WasmBinaryMissing)?;
// Decompress the code before running checks.
let code = sp_maybe_compressed_blob::decompress(wasm_code, VALIDATION_CODE_BOMB_LIMIT)
.or(Err(PerfCheckError::CodeDecompressionFailed))?;
info!("Running the performance checks...");
perf_check("PVF-prepare", pvf_prepare_time_limit, || measure_pvf_prepare(code.as_ref()))?;
perf_check("Erasure-coding", erasure_coding_time_limit, || {
measure_erasure_coding(ERASURE_CODING_N_VALIDATORS, code.as_ref())
})?;
Ok(())
}
/// Returns a no-warning threshold for the given time limit.
fn green_threshold(duration: Duration) -> Duration {
duration * 4 / 5
}
/// Returns an extended time limit to be used for the actual check.
fn time_limit_from_baseline(duration: Duration) -> Duration {
duration * 3 / 2
}
fn perf_check(
test_name: &str,
time_limit: Duration,
test: impl Fn() -> Result<Duration, PerfCheckError>,
) -> Result<(), PerfCheckError> {
let elapsed = test()?;
if elapsed < green_threshold(time_limit) {
info!("🟢 {} performance check passed, elapsed: {:?}", test_name, elapsed);
Ok(())
} else if elapsed <= time_limit {
info!(
"🟡 {} performance check passed, {:?} limit almost exceeded, elapsed: {:?}",
test_name, time_limit, elapsed
);
Ok(())
} else {
Err(PerfCheckError::TimeOut { elapsed, limit: time_limit })
}
}
+43
View File
@@ -0,0 +1,43 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Polkadot CLI library.
#![warn(missing_docs)]
#[cfg(feature = "cli")]
mod cli;
#[cfg(feature = "cli")]
mod command;
#[cfg(feature = "cli")]
mod error;
#[cfg(all(feature = "hostperfcheck", build_type = "release"))]
mod host_perf_check;
#[cfg(feature = "service")]
pub use service::{self, Block, CoreApi, IdentifyVariant, ProvideRuntimeApi, TFullClient};
#[cfg(feature = "malus")]
pub use service::overseer::prepared_overseer_builder;
#[cfg(feature = "cli")]
pub use cli::*;
#[cfg(feature = "cli")]
pub use command::*;
#[cfg(feature = "cli")]
pub use sc_cli::{Error, Result};
+24
View File
@@ -0,0 +1,24 @@
[package]
name = "polkadot-core-primitives"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] }
[features]
default = [ "std" ]
std = [
"scale-info/std",
"sp-core/std",
"sp-runtime/std",
"sp-std/std",
"scale-info/std",
"parity-scale-codec/std",
]
+157
View File
@@ -0,0 +1,157 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
#![cfg_attr(not(feature = "std"), no_std)]
//! Core Polkadot types.
//!
//! These core Polkadot types are used by the relay chain and the Parachains.
use parity_scale_codec::{Decode, Encode};
use scale_info::TypeInfo;
use sp_runtime::{
generic,
traits::{IdentifyAccount, Verify},
MultiSignature,
};
pub use sp_runtime::traits::{BlakeTwo256, Hash as HashT};
/// The block number type used by Polkadot.
/// 32-bits will allow for 136 years of blocks assuming 1 block per second.
pub type BlockNumber = u32;
/// An instant or duration in time.
pub type Moment = u64;
/// Alias to type for a signature for a transaction on the relay chain. This allows one of several
/// kinds of underlying crypto to be used, so isn't a fixed size when encoded.
pub type Signature = MultiSignature;
/// Alias to the public key used for this chain, actually a `MultiSigner`. Like the signature, this
/// also isn't a fixed size when encoded, as different cryptos have different size public keys.
pub type AccountPublic = <Signature as Verify>::Signer;
/// Alias to the opaque account ID type for this chain, actually a `AccountId32`. This is always
/// 32 bytes.
pub type AccountId = <AccountPublic as IdentifyAccount>::AccountId;
/// The type for looking up accounts. We don't expect more than 4 billion of them.
pub type AccountIndex = u32;
/// Identifier for a chain. 32-bit should be plenty.
pub type ChainId = u32;
/// A hash of some data used by the relay chain.
pub type Hash = sp_core::H256;
/// Unit type wrapper around [`type@Hash`] that represents a candidate hash.
///
/// This type is produced by [`CandidateReceipt::hash`].
///
/// This type makes it easy to enforce that a hash is a candidate hash on the type level.
#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, Default, PartialOrd, Ord, TypeInfo)]
pub struct CandidateHash(pub Hash);
#[cfg(feature = "std")]
impl std::ops::Deref for CandidateHash {
type Target = Hash;
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[cfg(feature = "std")]
impl std::fmt::Display for CandidateHash {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl sp_std::fmt::Debug for CandidateHash {
fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
write!(f, "{:?}", self.0)
}
}
/// Index of a transaction in the relay chain. 32-bit should be plenty.
pub type Nonce = u32;
/// The balance of an account.
/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a
/// resolution that allows one second's worth of an annualised 50% reward to be paid to a unit
/// holder (`10^11` unit denomination), or `10^18` total atomic units, to grow at 50%/year for
/// 51 years (`10^9` multiplier) for an eventual total of `10^27` units (27 significant decimal
/// figures).
/// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so
/// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow.
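/// As a worked sketch of the headroom: `10^7` units at `10^11` atomic units per unit gives
/// `10^18` atomic units; the `10^9` growth multiplier brings the eventual total to `10^27`
/// (roughly `2^90`), which still leaves room to multiply by a 32-bit factor within `u128`.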
pub type Balance = u128;
/// Header type.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// Block ID.
pub type BlockId = generic::BlockId<Block>;
/// Opaque, encoded, unchecked extrinsic.
pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;
/// The information that goes alongside a `transfer_into_parachain` operation. Entirely opaque, it
/// will generally be used for identifying the reason for the transfer. Typically it will hold the
/// destination account to which the transfer should be credited. If still more information is
/// needed, then this should be a hash with the pre-image presented via an off-chain mechanism on
/// the parachain.
pub type Remark = [u8; 32];
/// A message sent from the relay-chain down to a parachain.
///
/// The size of the message is limited by the `config.max_downward_message_size` parameter.
pub type DownwardMessage = sp_std::vec::Vec<u8>;
/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number
/// when the message was sent.
#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)]
pub struct InboundDownwardMessage<BlockNumber = crate::BlockNumber> {
/// The block number at which these messages were put into the downward message queue.
pub sent_at: BlockNumber,
/// The actual downward message to process.
pub msg: DownwardMessage,
}
/// An HRMP message seen from the perspective of a recipient.
#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)]
pub struct InboundHrmpMessage<BlockNumber = crate::BlockNumber> {
/// The block number at which this message was sent.
/// Specifically, it is the block number at which the candidate that sends this message was
/// enacted.
pub sent_at: BlockNumber,
/// The message payload.
pub data: sp_std::vec::Vec<u8>,
}
/// An HRMP message seen from the perspective of a sender.
#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, Eq, Hash, TypeInfo)]
pub struct OutboundHrmpMessage<Id> {
/// The para that will get this message in its downward message queue.
pub recipient: Id,
/// The message payload.
pub data: sp_std::vec::Vec<u8>,
}
/// `V2` primitives.
pub mod v2 {
pub use super::*;
}
+157
View File
@@ -0,0 +1,157 @@
# Using Containers
The following commands should work whether you use Docker or Podman. In general, Podman is recommended. All commands are "engine neutral", so you can use the container engine of your choice while still being able to copy/paste the commands below.
Let's start by defining Podman as our engine:
```
ENGINE=podman
```
If you prefer to stick with Docker, use:
```
ENGINE=docker
```
## The easiest way
The easiest/fastest option to run Polkadot in Docker is to use the latest release images. These are small images that use the latest official release of the Polkadot binary, pulled from our Debian package.
**_The following examples run on the westend chain and without SSL. They can be used as a quick start to learn how Polkadot needs to be configured. Please find out how to secure your node if you want to operate it on the internet. Do not expose the RPC and WS ports if they are not correctly configured._**
Let's first check the version we have. The first time you run this command, the Polkadot docker image will be downloaded. This takes a bit of time and bandwidth, be patient:
```bash
$ENGINE run --rm -it parity/polkadot:latest --version
```
You can also pass any argument/flag that Polkadot supports:
```bash
$ENGINE run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker"
```
## Examples
Once you are done experimenting and picking the best node name :) you can start Polkadot as a daemon, expose the Polkadot ports, and mount a volume that will keep your blockchain data locally. Make sure that you set the ownership of your local directory to the Polkadot user that is used by the container.
Set user ID 1000 and group ID 1000 by running `chown 1000:1000 /my/local/folder -R` if you use a bind mount.
To start a Polkadot node on the default RPC port 9933 and the default p2p port 30333, use the following command. If you want to connect to RPC port 9933, you must add the Polkadot startup parameter `--rpc-external`.
```bash
$ENGINE run -d -p 30333:30333 -p 9933:9933 \
-v /my/local/folder:/polkadot \
parity/polkadot:latest \
--chain westend --rpc-external --rpc-cors all \
--name "PolkaDocker
```
If you also want to expose the WebSocket port 9944, use the following command:
```bash
$ENGINE run -d -p 30333:30333 -p 9933:9933 -p 9944:9944 \
-v /my/local/folder:/polkadot \
parity/polkadot:latest \
--chain westend --ws-external --rpc-external --rpc-cors all --name "PolkaDocker"
```
## Using Docker compose
You can use the following docker-compose.yml file:
```bash
version: '2'
services:
polkadot:
container_name: polkadot
image: parity/polkadot
ports:
- 30333:30333 # p2p port
- 9933:9933 # rpc port
- 9944:9944 # ws port
- 9615:9615 # Prometheus port
volumes:
- /my/local/folder:/polkadot
command: [
"--name", "PolkaDocker",
"--ws-external",
"--rpc-external",
"--prometheus-external",
"--rpc-cors", "all"
]
```
With the following `docker-compose.yml` you can set up a node and use polkadot-js-apps as the front end on port 80. After starting the node, use a browser and enter your Docker host IP in the URL field: _<http://[YOUR_DOCKER_HOST_IP]>_
```yaml
version: '2'
services:
polkadot:
container_name: polkadot
image: parity/polkadot
ports:
- 30333:30333 # p2p port
- 9933:9933 # rpc port
- 9944:9944 # ws port
- 9615:9615 # Prometheus port
command: [
"--name", "PolkaDocker",
"--ws-external",
"--rpc-external",
"--prometheus-external",
"--rpc-cors", "all"
]
polkadotui:
container_name: polkadotui
image: jacogr/polkadot-js-apps
environment:
- WS_URL=ws://[YOUR_DOCKER_HOST_IP]:9944
ports:
- 80:80
```
## Limiting Resources
Chain syncing will utilize all available memory and CPU power your server has to offer, which can lead to crashes.
If running on a low-resource VPS, use `--memory` and `--cpus` to limit the resources used. E.g., to allow a maximum of 512MB of memory and 50% of 1 CPU, use `--cpus=".5" --memory="512m"`. Read more about limiting a container's resources [here](https://docs.docker.com/config/containers/resource_constraints).
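For example, combining these limits with the daemon command from the earlier examples might look like this (the resource values are illustrative):
```bash
# Cap the container at half a CPU core and 512MB of memory.
$ENGINE run -d --cpus=".5" --memory="512m" \
-v /my/local/folder:/polkadot \
parity/polkadot:latest \
--chain westend --name "PolkaDocker"
```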
## Build your own image
There are 3 options to build a polkadot container image:
- using the builder image
- using the injected "Debian" image
- using the generic injected image
### Builder image
To get up and running with the smallest footprint on your system, you may use an existing Polkadot Container image.
You may also build a polkadot container image yourself (it takes a while...) using the container specs `scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile`.
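If you do build it yourself, a standard build invocation of that spec might look like the following sketch (the tag is arbitrary, and the Dockerfile may require additional build arguments defined in the spec itself):
```bash
$ENGINE build \
-t parity/polkadot:local \
-f scripts/ci/dockerfiles/polkadot/polkadot_builder.Dockerfile .
```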
### Debian injected
The Debian injected image is how the official polkadot container image is produced. It relies on the Debian package that is published upon each release. The Debian injected image is usually available a few minutes after a new release is published.
It has the benefit of relying on the GPG signatures embedded in the Debian package.
### Generic injected
For simple testing purposes, the easiest option, for polkadot as well as arbitrary binaries, is to use the `binary_injected.Dockerfile` container spec. This option is less secure since the injected binary is not checked at all, but it has the benefit of being simple. It requires you to already have a valid `polkadot` binary, compiled for Linux.
This binary is then simply copied inside the `parity/base-bin` image.
## Reporting issues
If you run into issues with Polkadot when using docker, please run the following command
(replace the tag with the appropriate one if you do not use latest):
```bash
$ENGINE run --rm -it parity/polkadot:latest --version
```
This will show you the Polkadot version as well as the git commit ref that was used to build your container.
You can now paste the version information in a [new issue](https://github.com/paritytech/polkadot/issues/new/choose).
+98
View File
@@ -0,0 +1,98 @@
## Notes
### Burn In
Ensure that Parity DevOps has run the new release on Westend, Kusama, and
Polkadot validators for at least 12 hours prior to publishing the release.
### Build Artifacts
Add any necessary assets to the release. They should include:
- Linux binary
- GPG signature of the Linux binary
- SHA256 of binary
- Source code
- Wasm binaries of any runtimes
### Release notes
The release notes should list:
- The priority of the release (i.e., how quickly users should upgrade) - this is
based on the max priority of any *client* changes.
- Which native runtimes and their versions are included
- The proposal hashes of the runtimes as built with
[srtool](https://gitlab.com/chevdor/srtool)
- Any changes in this release that are still awaiting audit
The release notes may also list:
- Free text at the beginning of the notes mentioning anything important
regarding this release
- Notable changes (those labelled with B[1-9]-* labels) separated into sections
### Spec Version
A runtime upgrade must bump the spec number. This may follow a pattern with the
client release (e.g. runtime v12 corresponds to v0.8.12, even if the current
runtime is not v11).
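As a reminder of where this lives: the version constants sit in each runtime's `src/lib.rs`. A minimal sketch with illustrative values (the concrete numbers belong to the release, and imports are omitted):
```rust
// Sketch only: the field values below are illustrative, not the release values.
pub const VERSION: RuntimeVersion = RuntimeVersion {
	spec_name: create_runtime_str!("polkadot"),
	impl_name: create_runtime_str!("parity-polkadot"),
	authoring_version: 0,
	spec_version: 9430, // must be incremented for every runtime release
	impl_version: 0,
	apis: RUNTIME_API_VERSIONS,
	transaction_version: 24, // bump on breaking call-index changes, see "Extrinsic Ordering"
	state_version: 0,
};
```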
### Old Migrations Removed
Any previous `on_runtime_upgrade` functions from old upgrades must be removed
to prevent them from executing a second time. The `on_runtime_upgrade` function
can be found in `runtime/<runtime>/src/lib.rs`.
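A hypothetical sketch of the change (the names are illustrative; the real list lives next to `on_runtime_upgrade` in the runtime):
```rust
// Before: the migration ran as part of the previous upgrade.
// pub type Migrations = (pallet_example::migrations::v2::MigrateToV2<Runtime>,);

// After: the spent migration is removed so it cannot execute a second time.
pub type Migrations = ();
```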
### New Migrations
Ensure that any migrations that are required due to storage or logic changes
are included in the `on_runtime_upgrade` function of the appropriate pallets.
### Extrinsic Ordering
Offline signing libraries depend on a consistent ordering of call indices and
functions. Compare the metadata of the current and new runtimes and ensure that
the `module index, call index` tuples map to the same set of functions. In case
of a breaking change, increase `transaction_version`.
To verify the order has not changed, you may manually start the following [Github Action](https://github.com/paritytech/polkadot/actions/workflows/extrinsic-ordering-check-from-bin.yml). It takes around a minute to run and will produce the report as an artifact that you need to check manually.
The things to look for in the output are lines like:
- `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for `Identity` has changed
- `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets.
- If no indices have changed, every module's line should look something like `[Identity] idx 25 (calls 15)`
Note: Adding new functions to the runtime does not constitute a breaking change
as long as the indexes did not change.
### Proxy Filtering
The runtime contains proxy filters that map proxy types to allowable calls. If
the new runtime contains any new calls, verify that the proxy filters are up to
date to include them.
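For orientation, the filters implement `InstanceFilter` in the runtime, roughly of the following shape (the variants and matched calls here are illustrative, not the real list):
```rust
use frame_support::traits::InstanceFilter;

// Illustrative only: the real `ProxyType` variants and call matches live in the runtime.
impl InstanceFilter<RuntimeCall> for ProxyType {
	fn filter(&self, c: &RuntimeCall) -> bool {
		match self {
			ProxyType::Any => true,
			// Any call newly added to the runtime must be considered here.
			ProxyType::NonTransfer => !matches!(c, RuntimeCall::Balances(..)),
		}
	}
}
```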
### Benchmarks
There are three benchmarking machines reserved for updating the weights at
release-time. To initialise a benchmark run for each production runtime
(westend, kusama, polkadot):
* Go to https://gitlab.parity.io/parity/polkadot/-/pipelines?page=1&scope=branches&ref=master
* Click the link to the last pipeline run for master
* Start each of the manual jobs:
* 'update_westend_weights'
* 'update_polkadot_weights'
* 'update_kusama_weights'
* When these jobs have completed (it takes a few hours), a git PATCH file will
be available to download as an artifact.
* On your local machine, branch off master
* Download the patch file and apply it to your branch with `git apply patchfile.patch`
* Commit the changes to your branch and submit a PR against master
* The weights should be checked (currently manually) to make sure there are no
big outliers (i.e., twice or half the weight).
### Polkadot JS
Ensure that a release of [Polkadot JS API]() contains any new types or
interfaces necessary to interact with the new runtime.
+40
View File
@@ -0,0 +1,40 @@
# Shell completion
The Polkadot CLI command supports shell auto-completion. For this to work, you will need to run the completion script matching your build and system.
Assuming you built a release version using `cargo build --release` and use `bash`, run the following:
```bash
source target/release/completion-scripts/polkadot.bash
```
You can find completion scripts for:
- bash
- fish
- zsh
- elvish
- powershell
To make this change persistent, you can proceed as follows:
## First install
```bash
COMPL_DIR=$HOME/.completion
mkdir -p $COMPL_DIR
cp -f target/release/completion-scripts/polkadot.bash $COMPL_DIR/
echo "source $COMPL_DIR/polkadot.bash" >> $HOME/.bash_profile
source $HOME/.bash_profile
```
## Update
When you build a new version of Polkadot, the following will ensure your auto-completion script matches the current binary:
```bash
COMPL_DIR=$HOME/.completion
mkdir -p $COMPL_DIR
cp -f target/release/completion-scripts/polkadot.bash $COMPL_DIR/
source $HOME/.bash_profile
```
+263
View File
@@ -0,0 +1,263 @@
# Testing
Automated testing is an essential tool to assure correctness.
## Scopes
The testing strategy for polkadot is 4-fold:
### Unit testing (1)
Boring, small scale correctness tests of individual functions.
### Integration tests
There are two variants of integration tests:
#### Subsystem tests (2)
One particular subsystem (subsystem under test) interacts with a
mocked overseer that is made to assert incoming and outgoing messages
of the subsystem under test.
This is largely present today, but the integration test implementation has
evolved with some fragmentation. A `proc-macro`/`macro_rules` approach would allow
for a more consistent implementation and structure.
#### Behavior tests (3)
Launching small scale networks, with multiple adversarial nodes, without any further tooling required.
This should include tests around the thresholds in order to evaluate the error handling once certain
assumed invariants fail.
For this purpose, the implementation is based on `AllSubsystems` and the `proc-macro` `AllSubsystemsGen`.
This assumes a simplistic test runtime.
### Testing at scale (4)
Launching many nodes with configurable network speed and node features in a cluster of nodes.
At this scale, [Simnet][simnet] comes into play: it launches a full cluster of nodes.
The scale is handled by spawning a kubernetes cluster, and the meta description
is covered by [Gurke][Gurke].
Asserts are made using Grafana rules, based on the existing Prometheus metrics. This can
be extended by adding an additional service that translates `jaeger` spans into additional
Prometheus metrics, avoiding further polkadot source changes.
_Behavior tests_ and _testing at scale_ have a naturally soft boundary.
The most significant difference is the presence of a real network and
the number of nodes, since a single host is often not capable of running
multiple nodes at once.
---
## Coverage
Coverage gives a _hint_ of the source lines actually covered by tests and test applications.
The current state of the art is [tarpaulin][tarpaulin], which unfortunately yields a
lot of false negatives: lines that are in fact covered can be marked as uncovered due to a mere linebreak in a statement. This leads to
lower coverage percentages than there actually are.
Since late 2020, Rust has gained [MIR based coverage tooling](
https://blog.rust-lang.org/inside-rust/2020/11/12/source-based-code-coverage.html).
```sh
# setup
rustup component add llvm-tools-preview
cargo install grcov miniserve
export CARGO_INCREMENTAL=0
# wasm is not happy with the instrumentation
export SKIP_BUILD_WASM=true
export BUILD_DUMMY_WASM_BINARY=true
# the actually collected coverage data
export LLVM_PROFILE_FILE="llvmcoveragedata-%p-%m.profraw"
# build wasm without instrumentation
export WASM_TARGET_DIRECTORY=/tmp/wasm
cargo +nightly build
# required rust flags
export RUSTFLAGS="-Zinstrument-coverage"
# assure target dir is clean
rm -r target/{debug,tests}
# run tests to get coverage data
cargo +nightly test --all
# create the *html* report out of all the test binaries
# mostly useful for local inspection
grcov . --binary-path ./target/debug -s . -t html --branch --ignore-not-existing -o ./coverage/
miniserve -r ./coverage
# create a *codecov* compatible report
grcov . --binary-path ./target/debug/ -s . -t lcov --branch --ignore-not-existing --ignore "/*" -o lcov.info
```
The test coverage in `lcov` can then be published to <https://codecov.io>.
```sh
bash <(curl -s https://codecov.io/bash) -f lcov.info
```
or just printed as part of the PR using a github action i.e. [`jest-lcov-reporter`](https://github.com/marketplace/actions/jest-lcov-reporter).
For full examples of how to use [`grcov` with polkadot specifics, see the github repo](https://github.com/mozilla/grcov#coverallscodecov-output).
## Fuzzing
Fuzzing is an approach to verify correctness against arbitrary or partially structured inputs.
Currently implemented fuzzing targets:
* `erasure-coding`
The tooling of choice here is `honggfuzz-rs`, as it allows the _fastest_ coverage according to "some paper", which is a positive feature when run as part of PRs. A minimal target is sketched at the end of this section.
Fuzzing is generally not applicable for data secured by cryptographic hashes or signatures, unless the input
is specifically crafted such that the discarded input percentage stays in an acceptable range.
System level fuzzing is hence simply not feasible due to the amount of state that is required.
Other candidates to implement fuzzing are:
* `rpc`
* ...
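As referenced above, a minimal `honggfuzz-rs` target for the erasure coding crate might look like this sketch (the actual in-tree target may differ; the validator count is arbitrary):
```rust
use honggfuzz::fuzz;

fn main() {
	loop {
		fuzz!(|data: &[u8]| {
			// Arbitrary input must either produce chunks or return a clean error.
			let _ = polkadot_erasure_coding::obtain_chunks(10, &data.to_vec());
		});
	}
}
```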
## Performance metrics
There are various ways of collecting performance metrics.
* timing with `criterion`
* cache hits/misses w/ `iai` harness or `criterion-perf`
* `coz`, a causal profiler
Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit tests.
`coz` is meant for runtime. In our case, the system is far too large to yield a sufficient number of measurements in finite time.
An alternative approach could be to record incoming message streams per subsystem and store dumps of them, which in turn could be replayed repeatedly at an
accelerated speed, with which enough measurements could be obtained to yield
information on which areas would improve the metrics.
This unfortunately will not yield much information: most if not all of the subsystem code is linear, generating one or multiple output messages based on its input, so it is unlikely to produce any useful metrics without mocking a sufficiently large part of the other subsystems. That overlaps with the integration tests above, which are unfortunately not repeatable as of now.
As such the gain for the effort seems low, and this is not pursued at the current time.
## Writing small scope integration tests with preconfigured workers
Requirements:
* spawn nodes with preconfigured behaviors
* allow multiple types of configuration to be specified
* allow extendability via external crates
* ...
---
## Implementation of different behavior strain nodes
### Goals
The main goal is to allow easily creating a test node which
exhibits a certain behavior by utilizing a subset of _wrapped_ or _replaced_ subsystems.
The runtime must not matter at all for these tests and should be simplistic.
The execution must be fast; this mostly means assuring close-to-zero network latency as
well as shortening the block time and epoch times down to a few `100ms` and a few dozen blocks per epoch.
### Approach
#### MVP
A simple, small-scale builder pattern would suffice for a stage-one implementation
that allows replacing individual subsystems.
An alternative would be to harness the existing `AllSubsystems` type
and replace the subsystems as needed.
#### Full `proc-macro` implementation
`Overseer` is a common pattern.
It could be extracted as a generative `proc-macro`.
This would replace the `AllSubsystems` type as well as implicitly create
the `AllMessages` enum as `AllSubsystemsGen` does today.
The implementation is yet to be completed, see the [implementation PR](https://github.com/paritytech/polkadot/pull/2962) for details.
##### Declare an overseer implementation
```rust
struct BehaveMaleficient;
impl OverseerGen for BehaveMaleficient {
fn generate<'a, Spawner, RuntimeClient>(
&self,
args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandler), Error>
where
RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
Spawner: 'static + overseer::gen::Spawner + Clone + Unpin,
{
let spawner = args.spawner.clone();
let leaves = args.leaves.clone();
let runtime_client = args.runtime_client.clone();
let registry = args.registry.clone();
let candidate_validation_config = args.candidate_validation_config.clone();
// modify the subsystem(s) as needed:
let all_subsystems = create_default_subsystems(args)?.
// or spawn an entirely new set
replace_candidate_validation(
// create the filtered subsystem
FilteredSubsystem::new(
CandidateValidationSubsystem::with_config(
candidate_validation_config,
Metrics::register(registry)?,
),
// an implementation of
Skippy::default(),
),
);
Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner)
.map_err(|e| e.into())
// A builder pattern will simplify this further
// WIP https://github.com/paritytech/polkadot/pull/2962
}
}
fn main() -> eyre::Result<()> {
color_eyre::install()?;
let cli = Cli::from_args();
assert_matches::assert_matches!(cli.subcommand, None);
polkadot_cli::run_node(cli, BehaveMaleficient)?;
Ok(())
}
```
[`variant-a`](../node/malus/src/variant-a.rs) is a fully working example.
#### Simnet
Spawn a kubernetes cluster based on a meta description using [Gurke] with the
[Simnet] scripts.
Coordinated attacks of multiple nodes or subsystems must be made possible via
a side-channel, that is out of scope for this document.
The individual node configurations are done as targets with a particular
builder configuration.
#### Behavior tests w/o Simnet
Commonly this will require multiple nodes, and most machines are limited to
running two or three nodes concurrently.
Hence, this is not the common case and is just an implementation _idea_.
```rust
behavior_testcase!{
"TestRuntime" =>
"Alice": <AvailabilityDistribution=DropAll, .. >,
"Bob": <AvailabilityDistribution=DuplicateSend, .. >,
"Charles": Default,
"David": "Charles",
"Eve": "Bob",
}
```
[Gurke]: https://github.com/paritytech/gurke
[simnet]: https://github.com/paritytech/simnet_scripts
+22
View File
@@ -0,0 +1,22 @@
[package]
name = "polkadot-erasure-coding"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
polkadot-primitives = { path = "../primitives" }
polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" }
novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["std", "derive"] }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.31"
[dev-dependencies]
criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support"] }
[[bench]]
name = "scaling_with_validators"
harness = false
+39
View File
@@ -0,0 +1,39 @@
### Run benches
```bash
$ cd erasure-coding # ensure you are in the right directory
$ cargo bench
```
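To run only this benchmark, you can select it by the name declared in the crate's `Cargo.toml`:
```bash
$ cargo bench --bench scaling_with_validators
```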
### `scaling_with_validators`
This benchmark evaluates the performance of constructing the chunks and the erasure root from PoV and
reconstructing the PoV from chunks. You can see the results of running this bench on a 5950x below.
Interestingly, with `10_000` chunks (validators) it is slower than with `50_000` for both construction
and reconstruction.
```
construct/200 time: [93.924 ms 94.525 ms 95.214 ms]
thrpt: [52.513 MiB/s 52.896 MiB/s 53.234 MiB/s]
construct/500 time: [111.25 ms 111.52 ms 111.80 ms]
thrpt: [44.721 MiB/s 44.837 MiB/s 44.946 MiB/s]
construct/1000 time: [117.37 ms 118.28 ms 119.21 ms]
thrpt: [41.941 MiB/s 42.273 MiB/s 42.601 MiB/s]
construct/2000 time: [125.05 ms 125.72 ms 126.38 ms]
thrpt: [39.564 MiB/s 39.772 MiB/s 39.983 MiB/s]
construct/10000 time: [270.46 ms 275.11 ms 279.81 ms]
thrpt: [17.869 MiB/s 18.174 MiB/s 18.487 MiB/s]
construct/50000 time: [205.86 ms 209.66 ms 213.64 ms]
thrpt: [23.404 MiB/s 23.848 MiB/s 24.288 MiB/s]
reconstruct/200 time: [180.73 ms 184.09 ms 187.73 ms]
thrpt: [26.634 MiB/s 27.160 MiB/s 27.666 MiB/s]
reconstruct/500 time: [195.59 ms 198.58 ms 201.76 ms]
thrpt: [24.781 MiB/s 25.179 MiB/s 25.564 MiB/s]
reconstruct/1000 time: [207.92 ms 211.57 ms 215.57 ms]
thrpt: [23.195 MiB/s 23.633 MiB/s 24.048 MiB/s]
reconstruct/2000 time: [218.59 ms 223.68 ms 229.18 ms]
thrpt: [21.817 MiB/s 22.354 MiB/s 22.874 MiB/s]
reconstruct/10000 time: [496.35 ms 505.17 ms 515.42 ms]
thrpt: [9.7008 MiB/s 9.8977 MiB/s 10.074 MiB/s]
reconstruct/50000 time: [276.56 ms 277.53 ms 278.58 ms]
thrpt: [17.948 MiB/s 18.016 MiB/s 18.079 MiB/s]
```
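For orientation, each `construct` iteration above boils down to the following use of the crate's public API (a minimal sketch mirroring the bench source; the helper name `construct_once` is ours):
```rust
use polkadot_primitives::Hash;

// One `construct` iteration: erasure-code the PoV into one chunk per
// validator, then build the merkle root over the chunk hashes.
fn construct_once(n_validators: usize, pov: &Vec<u8>) -> Hash {
	let chunks = polkadot_erasure_coding::obtain_chunks(n_validators, pov).unwrap();
	polkadot_erasure_coding::branches(&chunks).root()
}
```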
@@ -0,0 +1,90 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use polkadot_primitives::Hash;
use std::time::Duration;
fn chunks(n_validators: usize, pov: &Vec<u8>) -> Vec<Vec<u8>> {
polkadot_erasure_coding::obtain_chunks(n_validators, pov).unwrap()
}
fn erasure_root(n_validators: usize, pov: &Vec<u8>) -> Hash {
let chunks = chunks(n_validators, pov);
polkadot_erasure_coding::branches(&chunks).root()
}
fn construct_and_reconstruct_5mb_pov(c: &mut Criterion) {
const N_VALIDATORS: [usize; 6] = [200, 500, 1000, 2000, 10_000, 50_000];
const KB: usize = 1024;
const MB: usize = 1024 * KB;
let pov = vec![0xfe; 5 * MB];
let mut group = c.benchmark_group("construct");
for n_validators in N_VALIDATORS {
let expected_root = erasure_root(n_validators, &pov);
group.throughput(Throughput::Bytes(pov.len() as u64));
group.bench_with_input(
BenchmarkId::from_parameter(n_validators),
&n_validators,
|b, &n| {
b.iter(|| {
let root = erasure_root(n, &pov);
assert_eq!(root, expected_root);
});
},
);
}
group.finish();
let mut group = c.benchmark_group("reconstruct");
for n_validators in N_VALIDATORS {
let all_chunks = chunks(n_validators, &pov);
let mut c: Vec<_> = all_chunks.iter().enumerate().map(|(i, c)| (&c[..], i)).collect();
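// Keep only the last ~third of the chunks: just enough to clear the recovery threshold.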
let last_chunks = c.split_off((c.len() - 1) * 2 / 3);
group.throughput(Throughput::Bytes(pov.len() as u64));
group.bench_with_input(
BenchmarkId::from_parameter(n_validators),
&n_validators,
|b, &n| {
b.iter(|| {
let _pov: Vec<u8> =
polkadot_erasure_coding::reconstruct(n, last_chunks.clone()).unwrap();
});
},
);
}
group.finish();
}
fn criterion_config() -> Criterion {
Criterion::default()
.sample_size(15)
.warm_up_time(Duration::from_millis(200))
.measurement_time(Duration::from_secs(3))
}
criterion_group!(
name = re_construct;
config = criterion_config();
targets = construct_and_reconstruct_5mb_pov,
);
criterion_main!(re_construct);
@@ -0,0 +1,3 @@
hfuzz_target/
hfuzz_workspace/
Cargo.lock
@@ -0,0 +1,21 @@
[package]
name = "erasure_coding_fuzzer"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
publish = false
[dependencies]
polkadot-erasure-coding = { path = ".." }
honggfuzz = "0.5"
polkadot-primitives = { path = "../../primitives" }
primitives = { package = "polkadot-node-primitives", path = "../../node/primitives/" }
[[bin]]
name = "reconstruct"
path = "src/reconstruct.rs"
[[bin]]
name = "round_trip"
path = "src/round_trip.rs"
@@ -0,0 +1,32 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use honggfuzz::fuzz;
use polkadot_erasure_coding::*;
use primitives::AvailableData;
fn main() {
loop {
fuzz!(|data: (usize, Vec<(Vec<u8>, usize)>)| {
let (num_validators, chunk_input) = data;
let reconstructed: Result<AvailableData, _> = reconstruct_v1(
num_validators,
chunk_input.iter().map(|t| (&*t.0, t.1)).collect::<Vec<(&[u8], usize)>>(),
);
println!("reconstructed {:?}", reconstructed);
});
}
}
@@ -0,0 +1,49 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use honggfuzz::fuzz;
use polkadot_erasure_coding::*;
use polkadot_primitives::PersistedValidationData;
use primitives::{AvailableData, BlockData, PoV};
use std::sync::Arc;
fn main() {
loop {
fuzz!(|data: &[u8]| {
let pov_block = PoV { block_data: BlockData(data.iter().cloned().collect()) };
let available_data = AvailableData {
pov: Arc::new(pov_block),
validation_data: PersistedValidationData::default(),
};
let chunks = obtain_chunks_v1(10, &available_data).unwrap();
assert_eq!(chunks.len(), 10);
// any 4 chunks should work.
let reconstructed: AvailableData = reconstruct_v1(
10,
[(&*chunks[1], 1), (&*chunks[4], 4), (&*chunks[6], 6), (&*chunks[9], 9)]
.iter()
.cloned(),
)
.unwrap();
assert_eq!(reconstructed, available_data);
println!("{:?}", reconstructed);
});
}
}
@@ -0,0 +1,419 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! As part of Polkadot's availability system, certain pieces of data
//! for each block are required to be kept available.
//!
//! The way we accomplish this is by erasure coding the data into n pieces
//! and constructing a merkle root of the data.
//!
//! Each of n validators stores their piece of data. We assume `n = 3f + k`, `0 < k ≤ 3`.
//! f is the maximum number of faulty validators in the system.
//! The data is coded so any f+1 chunks can be used to reconstruct the full data.
use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::{AvailableData, Proof};
use polkadot_primitives::{BlakeTwo256, Hash as H256, HashT};
use sp_core::Blake2Hasher;
use sp_trie::{
trie_types::{TrieDBBuilder, TrieDBMutBuilderV0 as TrieDBMutBuilder},
LayoutV0, MemoryDB, Trie, TrieMut, EMPTY_PREFIX,
};
use thiserror::Error;
use novelpoly::{CodeParams, WrappedShard};
// we are limited to the field order of GF(2^16), which is 65536
const MAX_VALIDATORS: usize = novelpoly::f2e16::FIELD_SIZE;
/// Errors in erasure coding.
#[derive(Debug, Clone, PartialEq, Error)]
pub enum Error {
/// Returned when there are too many validators.
#[error("There are too many validators")]
TooManyValidators,
/// Cannot encode something for zero or one validator
#[error("Expected at least 2 validators")]
NotEnoughValidators,
/// Cannot reconstruct: wrong number of validators.
#[error("Validator count mismatches between encoding and decoding")]
WrongValidatorCount,
/// Not enough chunks present.
#[error("Not enough chunks to reconstruct message")]
NotEnoughChunks,
/// Too many chunks present.
#[error("Too many chunks present")]
TooManyChunks,
/// Chunks not of uniform length or the chunks are empty.
#[error("Chunks are not uniform, mismatch in length or are zero sized")]
NonUniformChunks,
/// An uneven byte-length of a shard is not valid for `GF(2^16)` encoding.
#[error("Uneven length is not valid for field GF(2^16)")]
UnevenLength,
/// Chunk index out of bounds.
#[error("Chunk is out of bounds: {chunk_index} not included in 0..{n_validators}")]
ChunkIndexOutOfBounds { chunk_index: usize, n_validators: usize },
/// Bad payload in reconstructed bytes.
#[error("Reconstructed payload invalid")]
BadPayload,
/// Invalid branch proof.
#[error("Invalid branch proof")]
InvalidBranchProof,
/// Branch out of bounds.
#[error("Branch is out of bounds")]
BranchOutOfBounds,
/// Unknown error
#[error("An unknown error has appeared when reconstructing erasure code chunks")]
UnknownReconstruction,
/// Unknown error
#[error("An unknown error has appeared when deriving code parameters from validator count")]
UnknownCodeParam,
}
/// Obtain a threshold of chunks that should be enough to recover the data.
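///
/// For example, with `n_validators = 10` this yields `(10 - 1) / 3 + 1 = 4`.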
pub const fn recovery_threshold(n_validators: usize) -> Result<usize, Error> {
if n_validators > MAX_VALIDATORS {
return Err(Error::TooManyValidators)
}
if n_validators <= 1 {
return Err(Error::NotEnoughValidators)
}
let needed = n_validators.saturating_sub(1) / 3;
Ok(needed + 1)
}
fn code_params(n_validators: usize) -> Result<CodeParams, Error> {
// we need to be able to reconstruct from 1/3 - eps
let n_wanted = n_validators;
let k_wanted = recovery_threshold(n_wanted)?;
if n_wanted > MAX_VALIDATORS as usize {
return Err(Error::TooManyValidators)
}
CodeParams::derive_parameters(n_wanted, k_wanted).map_err(|e| match e {
novelpoly::Error::WantedShardCountTooHigh(_) => Error::TooManyValidators,
novelpoly::Error::WantedShardCountTooLow(_) => Error::NotEnoughValidators,
_ => Error::UnknownCodeParam,
})
}
/// Obtain erasure-coded chunks for v1 `AvailableData`, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks_v1(n_validators: usize, data: &AvailableData) -> Result<Vec<Vec<u8>>, Error> {
obtain_chunks(n_validators, data)
}
/// Obtain erasure-coded chunks, one for each validator.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn obtain_chunks<T: Encode>(n_validators: usize, data: &T) -> Result<Vec<Vec<u8>>, Error> {
let params = code_params(n_validators)?;
let encoded = data.encode();
if encoded.is_empty() {
return Err(Error::BadPayload)
}
let shards = params
.make_encoder()
.encode::<WrappedShard>(&encoded[..])
.expect("Payload non-empty, shard sizes are uniform, and validator numbers checked; qed");
Ok(shards.into_iter().map(|w: WrappedShard| w.into_inner()).collect())
}
/// Reconstruct the v1 available data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct_v1<'a, I: 'a>(n_validators: usize, chunks: I) -> Result<AvailableData, Error>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
reconstruct(n_validators, chunks)
}
/// Reconstruct decodable data from a set of chunks.
///
/// Provide an iterator containing chunk data and the corresponding index.
/// The indices of the present chunks must be indicated. If too few chunks
/// are provided, recovery is not possible.
///
/// Works only up to 65536 validators, and `n_validators` must be non-zero.
pub fn reconstruct<'a, I: 'a, T: Decode>(n_validators: usize, chunks: I) -> Result<T, Error>
where
I: IntoIterator<Item = (&'a [u8], usize)>,
{
let params = code_params(n_validators)?;
let mut received_shards: Vec<Option<WrappedShard>> = vec![None; n_validators];
let mut shard_len = None;
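// Inspect at most `n_validators` chunks; every chunk must share the same non-zero, even length and carry an in-range index.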
for (chunk_data, chunk_idx) in chunks.into_iter().take(n_validators) {
if chunk_idx >= n_validators {
return Err(Error::ChunkIndexOutOfBounds { chunk_index: chunk_idx, n_validators })
}
let shard_len = shard_len.get_or_insert_with(|| chunk_data.len());
if *shard_len % 2 != 0 {
return Err(Error::UnevenLength)
}
if *shard_len != chunk_data.len() || *shard_len == 0 {
return Err(Error::NonUniformChunks)
}
received_shards[chunk_idx] = Some(WrappedShard::new(chunk_data.to_vec()));
}
let res = params.make_encoder().reconstruct(received_shards);
let payload_bytes = match res {
Err(e) => match e {
novelpoly::Error::NeedMoreShards { .. } => return Err(Error::NotEnoughChunks),
novelpoly::Error::ParamterMustBePowerOf2 { .. } => return Err(Error::UnevenLength),
novelpoly::Error::WantedShardCountTooHigh(_) => return Err(Error::TooManyValidators),
novelpoly::Error::WantedShardCountTooLow(_) => return Err(Error::NotEnoughValidators),
novelpoly::Error::PayloadSizeIsZero { .. } => return Err(Error::BadPayload),
novelpoly::Error::InconsistentShardLengths { .. } =>
return Err(Error::NonUniformChunks),
_ => return Err(Error::UnknownReconstruction),
},
Ok(payload_bytes) => payload_bytes,
};
Decode::decode(&mut &payload_bytes[..]).map_err(|_| Error::BadPayload)
}
/// An iterator that yields merkle branches and chunk data for all chunks to
/// be sent to other validators.
pub struct Branches<'a, I> {
trie_storage: MemoryDB<Blake2Hasher>,
root: H256,
chunks: &'a [I],
current_pos: usize,
}
impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
/// Get the trie root.
pub fn root(&self) -> H256 {
self.root
}
}
impl<'a, I: AsRef<[u8]>> Iterator for Branches<'a, I> {
type Item = (Proof, &'a [u8]);
fn next(&mut self) -> Option<Self::Item> {
use sp_trie::Recorder;
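// Look up the current chunk index in the trie with a recorder attached; the nodes visited during the lookup form the merkle proof for that chunk.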
let mut recorder = Recorder::<LayoutV0<Blake2Hasher>>::new();
let res = {
let trie = TrieDBBuilder::new(&self.trie_storage, &self.root)
.with_recorder(&mut recorder)
.build();
(self.current_pos as u32).using_encoded(|s| trie.get(s))
};
match res.expect("all nodes in trie present; qed") {
Some(_) => {
let nodes: Vec<Vec<u8>> = recorder.drain().into_iter().map(|r| r.data).collect();
let chunk = self.chunks.get(self.current_pos).expect(
"there is a one-to-one mapping of chunks to valid merkle branches; qed",
);
self.current_pos += 1;
Proof::try_from(nodes).ok().map(|proof| (proof, chunk.as_ref()))
},
None => None,
}
}
}
/// Construct a trie from chunks of an erasure-coded value. This returns the root hash and an
/// iterator of merkle proofs, one for each validator.
pub fn branches<'a, I: 'a>(chunks: &'a [I]) -> Branches<'a, I>
where
I: AsRef<[u8]>,
{
let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
let mut root = H256::default();
// construct trie mapping each chunk's index to its hash.
{
let mut trie = TrieDBMutBuilder::new(&mut trie_storage, &mut root).build();
for (i, chunk) in chunks.as_ref().iter().enumerate() {
(i as u32).using_encoded(|encoded_index| {
let chunk_hash = BlakeTwo256::hash(chunk.as_ref());
trie.insert(encoded_index, chunk_hash.as_ref())
.expect("a fresh trie stored in memory cannot have errors loading nodes; qed");
})
}
}
Branches { trie_storage, root, chunks, current_pos: 0 }
}
/// Verify a merkle branch, yielding the chunk hash meant to be present at that
/// index.
pub fn branch_hash(root: &H256, branch_nodes: &Proof, index: usize) -> Result<H256, Error> {
let mut trie_storage: MemoryDB<Blake2Hasher> = MemoryDB::default();
for node in branch_nodes.iter() {
(&mut trie_storage as &mut sp_trie::HashDB<_>).insert(EMPTY_PREFIX, node);
}
let trie = TrieDBBuilder::new(&trie_storage, &root).build();
let res = (index as u32).using_encoded(|key| {
trie.get_with(key, |raw_hash: &[u8]| H256::decode(&mut &raw_hash[..]))
});
match res {
Ok(Some(Ok(hash))) => Ok(hash),
Ok(Some(Err(_))) => Err(Error::InvalidBranchProof), // hash failed to decode
Ok(None) => Err(Error::BranchOutOfBounds),
Err(_) => Err(Error::InvalidBranchProof),
}
}
// input for `codec` which draws data from the data shards
struct ShardInput<'a, I> {
remaining_len: usize,
shards: I,
cur_shard: Option<(&'a [u8], usize)>,
}
impl<'a, I: Iterator<Item = &'a [u8]>> parity_scale_codec::Input for ShardInput<'a, I> {
fn remaining_len(&mut self) -> Result<Option<usize>, parity_scale_codec::Error> {
Ok(Some(self.remaining_len))
}
fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> {
let mut read_bytes = 0;
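// Drain the shard currently in progress, pulling the next shard from the iterator whenever the current one is exhausted.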
loop {
if read_bytes == into.len() {
break
}
let cur_shard = self.cur_shard.take().or_else(|| self.shards.next().map(|s| (s, 0)));
let (active_shard, mut in_shard) = match cur_shard {
Some((s, i)) => (s, i),
None => break,
};
if in_shard >= active_shard.len() {
continue
}
let remaining_len_out = into.len() - read_bytes;
let remaining_len_shard = active_shard.len() - in_shard;
let write_len = std::cmp::min(remaining_len_out, remaining_len_shard);
into[read_bytes..][..write_len].copy_from_slice(&active_shard[in_shard..][..write_len]);
in_shard += write_len;
read_bytes += write_len;
self.cur_shard = Some((active_shard, in_shard))
}
self.remaining_len -= read_bytes;
if read_bytes == into.len() {
Ok(())
} else {
Err("slice provided too big for input".into())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use polkadot_node_primitives::{AvailableData, BlockData, PoV};
// In order to adequately compute the number of entries in the Merkle
// trie, we must account for the fixed 16-ary trie structure.
const KEY_INDEX_NIBBLE_SIZE: usize = 4;
#[test]
fn field_order_is_right_size() {
assert_eq!(MAX_VALIDATORS, 65536);
}
#[test]
fn round_trip_works() {
let pov = PoV { block_data: BlockData((0..255).collect()) };
let available_data = AvailableData { pov: pov.into(), validation_data: Default::default() };
let chunks = obtain_chunks(10, &available_data).unwrap();
assert_eq!(chunks.len(), 10);
// any 4 chunks should work.
let reconstructed: AvailableData = reconstruct(
10,
[(&*chunks[1], 1), (&*chunks[4], 4), (&*chunks[6], 6), (&*chunks[9], 9)]
.iter()
.cloned(),
)
.unwrap();
assert_eq!(reconstructed, available_data);
}
#[test]
fn reconstruct_does_not_panic_on_low_validator_count() {
let reconstructed = reconstruct_v1(1, [].iter().cloned());
assert_eq!(reconstructed, Err(Error::NotEnoughValidators));
}
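// The recovery threshold follows `f + 1` where `n = 3f + k`, `0 < k <= 3`
// (see the module docs); a couple of spot checks on the formula.
#[test]
fn recovery_threshold_matches_formula() {
assert_eq!(recovery_threshold(10).unwrap(), 4);
assert_eq!(recovery_threshold(1000).unwrap(), 334);
assert_eq!(recovery_threshold(1), Err(Error::NotEnoughValidators));
assert_eq!(recovery_threshold(MAX_VALIDATORS + 1), Err(Error::TooManyValidators));
}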
}
fn generate_trie_and_generate_proofs(magnitude: u32) {
let n_validators = 2_u32.pow(magnitude) as usize;
let pov = PoV { block_data: BlockData(vec![2; n_validators / KEY_INDEX_NIBBLE_SIZE]) };
let available_data = AvailableData { pov: pov.into(), validation_data: Default::default() };
let chunks = obtain_chunks(magnitude as usize, &available_data).unwrap();
assert_eq!(chunks.len() as u32, magnitude);
let branches = branches(chunks.as_ref());
let root = branches.root();
let proofs: Vec<_> = branches.map(|(proof, _)| proof).collect();
assert_eq!(proofs.len() as u32, magnitude);
for (i, proof) in proofs.into_iter().enumerate() {
let encode = Encode::encode(&proof);
let decode = Decode::decode(&mut &encode[..]).unwrap();
assert_eq!(proof, decode);
assert_eq!(encode, Encode::encode(&decode));
assert_eq!(branch_hash(&root, &proof, i).unwrap(), BlakeTwo256::hash(&chunks[i]));
}
}
#[test]
fn roundtrip_proof_encoding() {
for i in 2..16 {
generate_trie_and_generate_proofs(i);
}
}
}
@@ -0,0 +1,15 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
@@ -0,0 +1,93 @@
# Do I need this?
Polkadot nodes collect and produce Prometheus metrics and logs. These include health, performance and debug
information such as the last finalized block, the height of the chain, and many other deeper implementation details
of the Polkadot/Substrate node subsystems. These are crucial pieces of information that one needs to successfully
monitor the liveliness and performance of a network and its validators.
# How does it work?
Just import the dashboard JSON files from this folder in your Grafana installation. All dashboards are grouped in
folders per category (for example `parachains`). The files have been created by Grafana's export functionality and
follow the data model specified [here](https://grafana.com/docs/grafana/latest/dashboards/json-model/).
We aim to keep the dashboards here in sync with the implementation, except dashboards for development and
testing.
# Contributing
**Your contributions are most welcome!**
Please make sure to follow these design guidelines:
- Add a new entry in this file and describe the use case and key metrics
- Ensure proper names and descriptions for dashboard panels and add relevant documentation when needed.
This is very important as not all users have a similar depth of understanding of the implementation
- Have labels for all axes
- All values have proper units of measurement
- A crisp and clear color scheme is used
# Prerequisites
Before you continue, make sure you have Grafana set up, or otherwise follow this
[guide](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node).
You might also need to [set up Loki](https://grafana.com/go/webinar/loki-getting-started/).
# Alerting
Alerts are currently out of scope for the dashboards, but they can be set up manually or automated
(see [installing and configuring Alert Manager](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node#installing-and-configuring-alertmanager-optional))
# Dashboards
This section is a list of dashboards, their use case as well as the key metrics that are covered.
## Node Versions
Useful for monitoring versions and logs of validator nodes. Includes time series panels that
track node warning and error log rates. These can be further investigated in Grafana Loki.
Requires Loki for log aggregation and querying.
[Dashboard JSON](general/kusama_deployment.json)
## Parachain Status
This dashboard allows you to see at a glance how fast candidates are approved, disputed and
finalized. It was originally designed for observing liveliness after parachain deployment in
Kusama/Polkadot, but can be useful generally in production or testing.
It includes panels covering key subsystems of the parachain node side implementation:
- Backing
- PVF execution
- Approval voting
- Disputes coordinator
- Chain selection
It is important to note that this dashboard applies only to validator nodes. The Prometheus
queries assume the `instance` label value contains the string `validator` only for validator nodes.
[Dashboard JSON](parachains/status.json)
### Key liveliness indicators
- **Relay chain finality lag**. How far finality is behind the current best block. By design,
GRANDPA never finalizes past the last 2 blocks, so this value is always >= 2 blocks.
- **Approval checking finality lag**. The distance (in blocks) between the chain head and the last block
on which Approval voting is happening. The block is generally the highest approved ancestor of the head
block and the metric is computed during relay chain selection.
- **Disputes finality lag**. How far behind the chain head the last approved and non-disputed block is.
This value is always higher than the approval checking lag, as it further restricts finality to only
undisputed chains.
- **PVF preparation and execution time**. Each parachain has its own PVF (parachain validation function):
a wasm blob that is executed by validators during backing, approval checking and disputing. The PVF
preparation time refers to the time it takes for the PVF wasm to be compiled. This step is done once and
the result is cached. PVF execution will use the resulting artifact to execute the PVF for a given candidate.
PVFs are expected to have a limited execution time to ensure there is enough time left for the parachain
block to be included in the relay block.
- **Time to recover and check candidate**. This is part of approval voting and covers the time it takes
to recover the candidate block's available data from other validators, check it (including PVF execution time)
and issue a statement or initiate a dispute.
- **Assignment delay tranches**. Approval voting is designed such that validators assigned to check a specific
candidate are split up into equal delay tranches (0.5 seconds each). All validator checks are ordered by the delay
tranche index. Early tranches of validators have the opportunity to check the candidate first, before later tranches
that act as backups in case of no-shows.
@@ -0,0 +1,928 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"description": "Monitors deployed versions, warnings and errors logged.",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 1,
"id": 88,
"iteration": 1640613477360,
"links": [],
"liveNow": false,
"panels": [
{
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 36,
"title": "Overview",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 13,
"w": 7,
"x": 0,
"y": 1
},
"id": 22,
"options": {
"displayLabels": [
"name",
"percent"
],
"legend": {
"displayMode": "hidden",
"placement": "bottom",
"values": [
"percent"
]
},
"pieType": "donut",
"reduceOptions": {
"calcs": [],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"exemplar": false,
"expr": "sort_desc (count by (version) (polkadot_build_info{chain=\"kusama\", instance=~\".*validator.*\"}))",
"instant": true,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ version }}",
"refId": "A"
}
],
"title": "Versions",
"type": "piechart"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 13,
"w": 7,
"x": 7,
"y": 1
},
"id": 31,
"links": [],
"options": {
"displayLabels": [
"name",
"percent"
],
"legend": {
"displayMode": "hidden",
"placement": "bottom"
},
"pieType": "donut",
"reduceOptions": {
"calcs": [],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"WARN\"} [1h])) by (target)",
"instant": false,
"legendFormat": "{{ target }}",
"range": true,
"refId": "A"
}
],
"title": "Warnings / h / target",
"transformations": [],
"type": "piechart"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 13,
"w": 7,
"x": 14,
"y": 1
},
"id": 33,
"links": [],
"options": {
"displayLabels": [
"name",
"percent"
],
"legend": {
"displayMode": "list",
"placement": "bottom"
},
"pieType": "donut",
"reduceOptions": {
"calcs": [],
"fields": "",
"values": false
},
"tooltip": {
"mode": "single"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"ERROR\"} [1h])) by (target)",
"instant": false,
"legendFormat": "{{ target }}",
"range": true,
"refId": "A"
}
],
"title": "Errors / h / target",
"transformations": [],
"type": "piechart"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 14
},
"id": 16,
"panels": [],
"title": "Validator versions",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"description": "Version information for all nodes.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "continuous-RdYlGr"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "red",
"value": null
},
{
"color": "green",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 14,
"x": 0,
"y": 15
},
"id": 13,
"options": {
"displayMode": "lcd",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showUnfilled": true,
"text": {
"titleSize": 12,
"valueSize": 22
}
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"exemplar": false,
"expr": "sort_desc (count by (version) (polkadot_build_info{chain=\"kusama\", instance=~\".*validator.*\"}))",
"instant": true,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ version }}",
"refId": "A"
}
],
"title": "Versions",
"type": "bargauge"
},
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "auto",
"displayMode": "auto"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Time"
},
"properties": [
{
"id": "custom.width",
"value": 232
}
]
},
{
"matcher": {
"id": "byName",
"options": "name"
},
"properties": [
{
"id": "custom.width",
"value": 220
}
]
}
]
},
"gridPos": {
"h": 12,
"w": 10,
"x": 14,
"y": 15
},
"id": 20,
"options": {
"footer": {
"fields": "",
"reducer": [
"sum"
],
"show": false
},
"frameIndex": 2,
"showHeader": true,
"sortBy": [
{
"desc": false,
"displayName": "version (lastNotNull)"
}
]
},
"pluginVersion": "8.3.3",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"exemplar": false,
"expr": "polkadot_build_info{instance=~\".*validator.*\",chain=\"kusama\"}",
"format": "table",
"instant": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Node versions ",
"transformations": [
{
"id": "groupBy",
"options": {
"fields": {
"name": {
"aggregations": [],
"operation": "groupby"
},
"version": {
"aggregations": [
"lastNotNull"
],
"operation": "aggregate"
}
}
}
}
],
"type": "table"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 27
},
"id": 18,
"panels": [],
"title": "Warnings and errors",
"type": "row"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "hue",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "orange",
"value": 500
},
{
"color": "red",
"value": 1000
}
]
},
"unit": "warnings/h"
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 12,
"x": 0,
"y": 28
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"WARN\"} [1h])) by (target)",
"instant": false,
"legendFormat": "{{target}}",
"range": true,
"refId": "A"
}
],
"title": "All warnings / hour",
"transformations": [],
"type": "timeseries"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "hue",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "#EAB839",
"value": 500
},
{
"color": "red",
"value": 1000
}
]
},
"unit": "warnings/h"
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 12,
"x": 12,
"y": 28
},
"id": 30,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"WARN\", target=\"parachain\"}[1h])) by (subtarget)",
"hide": false,
"instant": false,
"legendFormat": "{{subtarget}}",
"range": true,
"refId": "B"
}
],
"title": "Parachain warnings/hour",
"transformations": [],
"type": "timeseries"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "hue",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": true,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "dark-red",
"value": 3
}
]
},
"unit": "warnings/h"
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 12,
"x": 0,
"y": 40
},
"id": 34,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"ERROR\"} [1h])) by (target)",
"instant": false,
"legendFormat": "{{target}}",
"range": true,
"refId": "A"
}
],
"title": "All errors / hour",
"transformations": [],
"type": "timeseries"
},
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic",
"seriesBy": "last"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "hue",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "line"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "#EAB839",
"value": 5
},
{
"color": "red",
"value": 15
}
]
},
"unit": "warnings/h"
},
"overrides": []
},
"gridPos": {
"h": 12,
"w": 12,
"x": 12,
"y": 40
},
"id": 32,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "multi"
}
},
"pluginVersion": "8.1.3",
"targets": [
{
"datasource": {
"type": "loki",
"uid": "P367D1C7027A603FA"
},
"expr": "sum(count_over_time({host=~\"kusama-validator.*\", level=\"ERROR\", target=\"parachain\"}[1h])) by (subtarget)",
"hide": false,
"instant": false,
"legendFormat": "{{subtarget}}",
"range": true,
"refId": "B"
}
],
"title": "Parachain errors/hour",
"transformations": [],
"type": "timeseries"
}
],
"refresh": "15m",
"schemaVersion": 34,
"style": "dark",
"tags": [
"Kusama",
"Loki",
"Logs"
],
"templating": {
"list": [
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"definition": "polkadot_build_info{chain=\"$chain\"}",
"description": "Version of the node",
"hide": 0,
"includeAll": true,
"label": "Version",
"multi": true,
"name": "version",
"options": [],
"query": {
"query": "polkadot_build_info{chain=\"$chain\"}",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": ".*version=\"(.*?)\".*",
"skipUrlSync": false,
"sort": 5,
"type": "query"
},
{
"current": {
"selected": true,
"text": [
"All"
],
"value": [
"$__all"
]
},
"datasource": {
"type": "prometheus",
"uid": "P5CA6DFE95AABF258"
},
"definition": "polkadot_sync_peers{chain=\"$chain\"}",
"description": "Validator hosts",
"hide": 0,
"includeAll": true,
"label": "Instance",
"multi": true,
"name": "instance",
"options": [],
"query": {
"query": "polkadot_sync_peers{chain=\"$chain\"}",
"refId": "StandardVariableQuery"
},
"refresh": 1,
"regex": ".*instance=\"(.*validator.*)*",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-7d",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Kusama Validators Overview",
"uid": "0i-QjQ82j",
"version": 29,
"weekStart": ""
}
File diff suppressed because it is too large
@@ -0,0 +1,25 @@
[package]
name = "polkadot-node-collation-generation"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
futures = "0.3.21"
gum = { package = "tracing-gum", path = "../gum" }
polkadot-erasure-coding = { path = "../../erasure-coding" }
polkadot-node-primitives = { path = "../primitives" }
polkadot-node-subsystem = { path = "../subsystem" }
polkadot-node-subsystem-util = { path = "../subsystem-util" }
polkadot-primitives = { path = "../../primitives" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" }
thiserror = "1.0.31"
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" }
assert_matches = "1.4.0"
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -0,0 +1,33 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use thiserror::Error;
#[derive(Debug, Error)]
pub enum Error {
#[error(transparent)]
Subsystem(#[from] polkadot_node_subsystem::SubsystemError),
#[error(transparent)]
OneshotRecv(#[from] futures::channel::oneshot::Canceled),
#[error(transparent)]
Runtime(#[from] polkadot_node_subsystem::errors::RuntimeApiError),
#[error(transparent)]
Util(#[from] polkadot_node_subsystem_util::Error),
#[error(transparent)]
Erasure(#[from] polkadot_erasure_coding::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
@@ -0,0 +1,591 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The collation generation subsystem is the interface between polkadot and the collators.
//!
//! # Protocol
//!
//! On every `ActiveLeavesUpdate`:
//!
//! * If there is no collation generation config, ignore.
//! * Otherwise, for each `activated` head in the update:
//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime
//! API.
//! * Use the Runtime API subsystem to fetch the full validation data.
//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with
//! the configuration's `key`.
//! * Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`.
#![deny(missing_docs)]
use futures::{channel::oneshot, future::FutureExt, join, select};
use parity_scale_codec::Encode;
use polkadot_node_primitives::{
AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV,
SubmitCollationParams,
};
use polkadot_node_subsystem::{
messages::{CollationGenerationMessage, CollatorProtocolMessage},
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem,
SubsystemContext, SubsystemError, SubsystemResult,
};
use polkadot_node_subsystem_util::{
request_availability_cores, request_persisted_validation_data,
request_staging_async_backing_params, request_validation_code, request_validation_code_hash,
request_validators,
};
use polkadot_primitives::{
collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt,
CollatorPair, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData,
ValidationCodeHash,
};
use sp_core::crypto::Pair;
use std::sync::Arc;
mod error;
#[cfg(test)]
mod tests;
mod metrics;
use self::metrics::Metrics;
const LOG_TARGET: &'static str = "parachain::collation-generation";
/// Collation Generation Subsystem
pub struct CollationGenerationSubsystem {
config: Option<Arc<CollationGenerationConfig>>,
metrics: Metrics,
}
#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
impl CollationGenerationSubsystem {
/// Create a new instance of the `CollationGenerationSubsystem`.
pub fn new(metrics: Metrics) -> Self {
Self { config: None, metrics }
}
/// Run this subsystem
///
/// Conceptually, this is very simple: it just loops forever.
///
/// - On incoming overseer messages, it starts or stops jobs as appropriate.
/// - On other incoming messages, if they can be converted into `Job::ToJob` and include a hash,
/// then they're forwarded to the appropriate individual job.
/// - On outgoing messages from the jobs, it forwards them to the overseer.
///
/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
/// Otherwise, most are logged and then discarded.
async fn run<Context>(mut self, mut ctx: Context) {
loop {
select! {
incoming = ctx.recv().fuse() => {
if self.handle_incoming::<Context>(incoming, &mut ctx).await {
break;
}
},
}
}
}
// handle an incoming message. return true if we should break afterwards.
// note: this doesn't strictly need to be a separate function; it's more an administrative
// function so that we don't clutter the run loop. It could in principle be inlined directly
// into there. it should hopefully therefore be ok that it's an async function mutably borrowing
// self.
async fn handle_incoming<Context>(
&mut self,
incoming: SubsystemResult<FromOrchestra<<Context as SubsystemContext>::Message>>,
ctx: &mut Context,
) -> bool {
match incoming {
Ok(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
activated,
..
}))) => {
// follow the procedure from the guide
if let Some(config) = &self.config {
let metrics = self.metrics.clone();
if let Err(err) = handle_new_activations(
config.clone(),
activated.into_iter().map(|v| v.hash),
ctx,
metrics,
)
.await
{
gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations");
}
}
false
},
Ok(FromOrchestra::Signal(OverseerSignal::Conclude)) => true,
Ok(FromOrchestra::Communication {
msg: CollationGenerationMessage::Initialize(config),
}) => {
if self.config.is_some() {
gum::error!(target: LOG_TARGET, "double initialization");
} else {
self.config = Some(Arc::new(config));
}
false
},
Ok(FromOrchestra::Communication {
msg: CollationGenerationMessage::SubmitCollation(params),
}) => {
if let Some(config) = &self.config {
if let Err(err) =
handle_submit_collation(params, config, ctx, &self.metrics).await
{
gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation");
}
} else {
gum::error!(target: LOG_TARGET, "Collation submitted before initialization");
}
false
},
Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => false,
Err(err) => {
gum::error!(
target: LOG_TARGET,
err = ?err,
"error receiving message from subsystem context: {:?}",
err
);
true
},
}
}
}
#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)]
impl<Context> CollationGenerationSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = async move {
self.run(ctx).await;
Ok(())
}
.boxed();
SpawnedSubsystem { name: "collation-generation-subsystem", future }
}
}
#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
async fn handle_new_activations<Context>(
config: Arc<CollationGenerationConfig>,
activated: impl IntoIterator<Item = Hash>,
ctx: &mut Context,
metrics: Metrics,
) -> crate::error::Result<()> {
// follow the procedure from the guide:
// https://paritytech.github.io/polkadot/book/node/collators/collation-generation.html
if config.collator.is_none() {
return Ok(())
}
let _overall_timer = metrics.time_new_activations();
for relay_parent in activated {
let _relay_parent_timer = metrics.time_new_activations_relay_parent();
let (availability_cores, validators, async_backing_params) = join!(
request_availability_cores(relay_parent, ctx.sender()).await,
request_validators(relay_parent, ctx.sender()).await,
request_staging_async_backing_params(relay_parent, ctx.sender()).await,
);
let availability_cores = availability_cores??;
let n_validators = validators??.len();
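// A runtime without the staging API simply yields `None` here, i.e. async backing is treated as disabled.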
let async_backing_params = async_backing_params?.ok();
for (core_idx, core) in availability_cores.into_iter().enumerate() {
let _availability_core_timer = metrics.time_new_activations_availability_core();
let (scheduled_core, assumption) = match core {
CoreState::Scheduled(scheduled_core) =>
(scheduled_core, OccupiedCoreAssumption::Free),
CoreState::Occupied(occupied_core) => match async_backing_params {
Some(params) if params.max_candidate_depth >= 1 => {
// maximum candidate depth when building on top of a block
// pending availability is necessarily 1 - the depth of the
// pending block is 0 so the child has depth 1.
// TODO [now]: this assumes that next up == current.
// in practice we should only set `OccupiedCoreAssumption::Included`
// when the candidate occupying the core is also of the same para.
if let Some(scheduled) = occupied_core.next_up_on_available {
(scheduled, OccupiedCoreAssumption::Included)
} else {
continue
}
},
_ => {
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
"core is occupied. Keep going.",
);
continue
},
},
CoreState::Free => {
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
"core is free. Keep going.",
);
continue
},
};
if scheduled_core.para_id != config.para_id {
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
our_para = %config.para_id,
their_para = %scheduled_core.para_id,
"core is not assigned to our para. Keep going.",
);
continue
}
// we get validation data and validation code synchronously for each core instead of
// within the subtask loop, because we have only a single mutable handle to the
// context, so the work can't really be distributed
let validation_data = match request_persisted_validation_data(
relay_parent,
scheduled_core.para_id,
assumption,
ctx.sender(),
)
.await
.await??
{
Some(v) => v,
None => {
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
our_para = %config.para_id,
their_para = %scheduled_core.para_id,
"validation data is not available",
);
continue
},
};
let validation_code_hash = match obtain_validation_code_hash_with_assumption(
relay_parent,
scheduled_core.para_id,
assumption,
ctx.sender(),
)
.await?
{
Some(v) => v,
None => {
gum::trace!(
target: LOG_TARGET,
core_idx = %core_idx,
relay_parent = ?relay_parent,
our_para = %config.para_id,
their_para = %scheduled_core.para_id,
"validation code hash is not found.",
);
continue
},
};
let task_config = config.clone();
let metrics = metrics.clone();
let mut task_sender = ctx.sender().clone();
ctx.spawn(
"collation-builder",
Box::pin(async move {
let collator_fn = match task_config.collator.as_ref() {
Some(x) => x,
None => return,
};
let (collation, result_sender) =
match collator_fn(relay_parent, &validation_data).await {
Some(collation) => collation.into_inner(),
None => {
gum::debug!(
target: LOG_TARGET,
para_id = %scheduled_core.para_id,
"collator returned no collation on collate",
);
return
},
};
construct_and_distribute_receipt(
PreparedCollation {
collation,
para_id: scheduled_core.para_id,
relay_parent,
validation_data,
validation_code_hash,
n_validators,
},
task_config.key.clone(),
&mut task_sender,
result_sender,
&metrics,
)
.await;
}),
)?;
}
}
Ok(())
}
#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
async fn handle_submit_collation<Context>(
params: SubmitCollationParams,
config: &CollationGenerationConfig,
ctx: &mut Context,
metrics: &Metrics,
) -> crate::error::Result<()> {
let _timer = metrics.time_submit_collation();
let SubmitCollationParams {
relay_parent,
collation,
parent_head,
validation_code_hash,
result_sender,
} = params;
let validators = request_validators(relay_parent, ctx.sender()).await.await??;
let n_validators = validators.len();
// We need to swap the parent-head data, but all other fields here will be correct.
let mut validation_data = match request_persisted_validation_data(
relay_parent,
config.para_id,
OccupiedCoreAssumption::TimedOut,
ctx.sender(),
)
.await
.await??
{
Some(v) => v,
None => {
gum::debug!(
target: LOG_TARGET,
relay_parent = ?relay_parent,
our_para = %config.para_id,
"No validation data for para - does it exist at this relay-parent?",
);
return Ok(())
},
};
validation_data.parent_head = parent_head;
let collation = PreparedCollation {
collation,
relay_parent,
para_id: config.para_id,
validation_data,
validation_code_hash,
n_validators,
};
construct_and_distribute_receipt(
collation,
config.key.clone(),
ctx.sender(),
result_sender,
metrics,
)
.await;
Ok(())
}
struct PreparedCollation {
collation: Collation,
para_id: ParaId,
relay_parent: Hash,
validation_data: PersistedValidationData,
validation_code_hash: ValidationCodeHash,
n_validators: usize,
}
/// Takes a prepared collation, along with its context, and produces a candidate receipt
/// which is distributed to validators.
async fn construct_and_distribute_receipt(
collation: PreparedCollation,
key: CollatorPair,
sender: &mut impl overseer::CollationGenerationSenderTrait,
result_sender: Option<oneshot::Sender<CollationSecondedSignal>>,
metrics: &Metrics,
) {
let PreparedCollation {
collation,
para_id,
relay_parent,
validation_data,
validation_code_hash,
n_validators,
} = collation;
let persisted_validation_data_hash = validation_data.hash();
let parent_head_data_hash = validation_data.parent_head.hash();
// Apply compression to the block data.
let pov = {
let pov = collation.proof_of_validity.into_compressed();
let encoded_size = pov.encoded_size();
// As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures
// that honest collators never produce a PoV which is uncompressed.
//
// As such, honest collators never produce an uncompressed PoV which starts with
// a compression magic number, which would lead validators to reject the collation.
if encoded_size > validation_data.max_pov_size as usize {
gum::debug!(
target: LOG_TARGET,
para_id = %para_id,
size = encoded_size,
max_size = validation_data.max_pov_size,
"PoV exceeded maximum size"
);
return
}
pov
};
let pov_hash = pov.hash();
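// The collator's signature commits to the relay parent, para id, validation data hash, PoV hash and validation code hash.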
let signature_payload = collator_signature_payload(
&relay_parent,
&para_id,
&persisted_validation_data_hash,
&pov_hash,
&validation_code_hash,
);
let erasure_root = match erasure_root(n_validators, validation_data, pov.clone()) {
Ok(erasure_root) => erasure_root,
Err(err) => {
gum::error!(
target: LOG_TARGET,
para_id = %para_id,
err = ?err,
"failed to calculate erasure root",
);
return
},
};
let commitments = CandidateCommitments {
upward_messages: collation.upward_messages,
horizontal_messages: collation.horizontal_messages,
new_validation_code: collation.new_validation_code,
head_data: collation.head_data,
processed_downward_messages: collation.processed_downward_messages,
hrmp_watermark: collation.hrmp_watermark,
};
let ccr = CandidateReceipt {
commitments_hash: commitments.hash(),
descriptor: CandidateDescriptor {
signature: key.sign(&signature_payload),
para_id,
relay_parent,
collator: key.public(),
persisted_validation_data_hash,
pov_hash,
erasure_root,
para_head: commitments.head_data.hash(),
validation_code_hash,
},
};
gum::debug!(
target: LOG_TARGET,
candidate_hash = ?ccr.hash(),
?pov_hash,
?relay_parent,
para_id = %para_id,
"candidate is generated",
);
metrics.on_collation_generated();
sender
.send_message(CollatorProtocolMessage::DistributeCollation(
ccr,
parent_head_data_hash,
pov,
result_sender,
))
.await;
}
async fn obtain_validation_code_hash_with_assumption(
relay_parent: Hash,
para_id: ParaId,
assumption: OccupiedCoreAssumption,
sender: &mut impl overseer::CollationGenerationSenderTrait,
) -> crate::error::Result<Option<ValidationCodeHash>> {
match request_validation_code_hash(relay_parent, para_id, assumption, sender)
.await
.await?
{
Ok(Some(v)) => Ok(Some(v)),
Ok(None) => Ok(None),
Err(RuntimeApiError::NotSupported { .. }) => {
match request_validation_code(relay_parent, para_id, assumption, sender).await.await? {
Ok(Some(v)) => Ok(Some(v.hash())),
Ok(None) => Ok(None),
Err(e) => {
// We assume that the `validation_code` API is always available, so any error
// is unexpected.
Err(e.into())
},
}
},
Err(e @ RuntimeApiError::Execution { .. }) => Err(e.into()),
}
}
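// Erasure-code the available data into per-validator chunks and return the merkle root of the chunk trie (see `polkadot-erasure-coding`).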
fn erasure_root(
n_validators: usize,
persisted_validation: PersistedValidationData,
pov: PoV,
) -> crate::error::Result<Hash> {
let available_data =
AvailableData { validation_data: persisted_validation, pov: Arc::new(pov) };
let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
Ok(polkadot_erasure_coding::branches(&chunks).root())
}
@@ -0,0 +1,117 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use polkadot_node_subsystem_util::metrics::{self, prometheus};
#[derive(Clone)]
pub(crate) struct MetricsInner {
pub(crate) collations_generated_total: prometheus::Counter<prometheus::U64>,
pub(crate) new_activations_overall: prometheus::Histogram,
pub(crate) new_activations_per_relay_parent: prometheus::Histogram,
pub(crate) new_activations_per_availability_core: prometheus::Histogram,
pub(crate) submit_collation: prometheus::Histogram,
}
/// `CollationGenerationSubsystem` metrics.
#[derive(Default, Clone)]
pub struct Metrics(pub(crate) Option<MetricsInner>);
impl Metrics {
pub fn on_collation_generated(&self) {
if let Some(metrics) = &self.0 {
metrics.collations_generated_total.inc();
}
}
/// Provide a timer for new activations which updates on drop.
pub fn time_new_activations(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.new_activations_overall.start_timer())
}
/// Provide a timer per relay parents which updates on drop.
pub fn time_new_activations_relay_parent(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0
.as_ref()
.map(|metrics| metrics.new_activations_per_relay_parent.start_timer())
}
/// Provide a timer per availability core which updates on drop.
pub fn time_new_activations_availability_core(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0
.as_ref()
.map(|metrics| metrics.new_activations_per_availability_core.start_timer())
}
/// Provide a timer for submitting a collation which updates on drop.
pub fn time_submit_collation(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.submit_collation.start_timer())
}
}
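// A brief usage sketch (hypothetical caller, not upstream code): each timer
// above is a guard that records the elapsed time into its histogram when
// dropped, so timing a scope amounts to holding the guard alive.
fn timed_scope_example(metrics: &Metrics) {
	// `None` when metrics were never registered; the guard is simply absent.
	let _timer = metrics.time_new_activations();
	// ... the work being measured ...
} // `_timer` drops here and observes the elapsed duration.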
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
collations_generated_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_collations_generated_total",
"Number of collations generated."
)?,
registry,
)?,
new_activations_overall: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_collation_generation_new_activations",
"Time spent within fn handle_new_activations",
)
)?,
registry,
)?,
new_activations_per_relay_parent: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_collation_generation_per_relay_parent",
"Time spent handling a particular relay parent within fn handle_new_activations"
)
)?,
registry,
)?,
new_activations_per_availability_core: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_collation_generation_per_availability_core",
"Time spent handling a particular availability core for a relay parent in fn handle_new_activations",
)
)?,
registry,
)?,
submit_collation: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_collation_generation_submit_collation",
"Time spent preparing and submitting a collation to the network protocol",
)
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
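// A minimal wiring sketch (hypothetical caller): register the subsystem
// metrics against a freshly created registry, assuming `Registry::new` is
// re-exported alongside the items already used above.
fn register_fresh() -> Result<Metrics, prometheus::PrometheusError> {
	let registry = prometheus::Registry::new();
	<Metrics as metrics::Metrics>::try_register(&registry)
}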
@@ -0,0 +1,636 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use assert_matches::assert_matches;
use futures::{
lock::Mutex,
task::{Context as FuturesContext, Poll},
Future,
};
use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompressedPoV, PoV};
use polkadot_node_subsystem::{
errors::RuntimeApiError,
messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
};
use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle};
use polkadot_node_subsystem_util::TimeoutExt;
use polkadot_primitives::{
CollatorPair, HeadData, Id as ParaId, PersistedValidationData, ScheduledCore, ValidationCode,
};
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
use std::pin::Pin;
use test_helpers::{dummy_hash, dummy_head_data, dummy_validator};
type VirtualOverseer = TestSubsystemContextHandle<CollationGenerationMessage>;
fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(VirtualOverseer) -> T) {
let pool = sp_core::testing::TaskExecutor::new();
let (context, virtual_overseer) =
polkadot_node_subsystem_test_helpers::make_subsystem_context(pool);
let subsystem = async move {
let subsystem = crate::CollationGenerationSubsystem::new(Metrics::default());
subsystem.run(context).await;
};
let test_fut = test(virtual_overseer);
futures::pin_mut!(test_fut);
futures::executor::block_on(futures::future::join(
async move {
let mut virtual_overseer = test_fut.await;
// Ensure we have handled all responses.
if let Ok(Some(msg)) = virtual_overseer.rx.try_next() {
panic!("Did not handle all responses: {:?}", msg);
}
// Conclude.
virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
},
subsystem,
));
}
fn test_collation() -> Collation {
Collation {
upward_messages: Default::default(),
horizontal_messages: Default::default(),
new_validation_code: None,
head_data: dummy_head_data(),
proof_of_validity: MaybeCompressedPoV::Raw(PoV { block_data: BlockData(Vec::new()) }),
processed_downward_messages: 0_u32,
hrmp_watermark: 0_u32.into(),
}
}
fn test_collation_compressed() -> Collation {
let mut collation = test_collation();
let compressed = collation.proof_of_validity.clone().into_compressed();
collation.proof_of_validity = MaybeCompressedPoV::Compressed(compressed);
collation
}
fn test_validation_data() -> PersistedValidationData {
let mut persisted_validation_data = PersistedValidationData::default();
persisted_validation_data.max_pov_size = 1024;
persisted_validation_data
}
// A stand-in for a `Box<dyn Future<Output = Option<CollationResult>> + Unpin + Send>`.
struct TestCollator;
impl Future for TestCollator {
type Output = Option<CollationResult>;
fn poll(self: Pin<&mut Self>, _cx: &mut FuturesContext) -> Poll<Self::Output> {
Poll::Ready(Some(CollationResult { collation: test_collation(), result_sender: None }))
}
}
impl Unpin for TestCollator {}
async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000);
overseer
.recv()
.timeout(TIMEOUT)
.await
.expect(&format!("{:?} is long enough to receive messages", TIMEOUT))
}
fn test_config<Id: Into<ParaId>>(para_id: Id) -> CollationGenerationConfig {
CollationGenerationConfig {
key: CollatorPair::generate().0,
collator: Some(Box::new(|_: Hash, _vd: &PersistedValidationData| TestCollator.boxed())),
para_id: para_id.into(),
}
}
fn test_config_no_collator<Id: Into<ParaId>>(para_id: Id) -> CollationGenerationConfig {
CollationGenerationConfig {
key: CollatorPair::generate().0,
collator: None,
para_id: para_id.into(),
}
}
fn scheduled_core_for<Id: Into<ParaId>>(para_id: Id) -> ScheduledCore {
ScheduledCore { para_id: para_id.into(), collator: None }
}
#[test]
fn requests_availability_per_relay_parent() {
let activated_hashes: Vec<Hash> =
vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()];
let requested_availability_cores = Arc::new(Mutex::new(Vec::new()));
let overseer_requested_availability_cores = requested_availability_cores.clone();
let overseer = |mut handle: TestSubsystemContextHandle<CollationGenerationMessage>| async move {
loop {
match handle.try_recv().await {
None => break,
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx)))) => {
overseer_requested_availability_cores.lock().await.push(hash);
tx.send(Ok(vec![])).unwrap();
}
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => {
tx.send(Ok(vec![dummy_validator(); 3])).unwrap();
}
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::StagingAsyncBackingParams(
tx,
),
))) => {
tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter" })).unwrap();
},
Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg),
}
}
};
let subsystem_activated_hashes = activated_hashes.clone();
subsystem_test_harness(overseer, |mut ctx| async move {
handle_new_activations(
Arc::new(test_config(123u32)),
subsystem_activated_hashes,
&mut ctx,
Metrics(None),
)
.await
.unwrap();
});
let mut requested_availability_cores = Arc::try_unwrap(requested_availability_cores)
.expect("overseer should have shut down by now")
.into_inner();
requested_availability_cores.sort();
assert_eq!(requested_availability_cores, activated_hashes);
}
#[test]
fn requests_validation_data_for_scheduled_matches() {
let activated_hashes: Vec<Hash> = vec![
Hash::repeat_byte(1),
Hash::repeat_byte(4),
Hash::repeat_byte(9),
Hash::repeat_byte(16),
];
let requested_validation_data = Arc::new(Mutex::new(Vec::new()));
let overseer_requested_validation_data = requested_validation_data.clone();
let overseer = |mut handle: TestSubsystemContextHandle<CollationGenerationMessage>| async move {
loop {
match handle.try_recv().await {
None => break,
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
hash,
RuntimeApiRequest::AvailabilityCores(tx),
))) => {
tx.send(Ok(vec![
CoreState::Free,
// this is weird, see explanation below
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 4) as u32,
)),
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 5) as u32,
)),
]))
.unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
hash,
RuntimeApiRequest::PersistedValidationData(
_para_id,
_occupied_core_assumption,
tx,
),
))) => {
overseer_requested_validation_data.lock().await.push(hash);
tx.send(Ok(None)).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::Validators(tx),
))) => {
tx.send(Ok(vec![dummy_validator(); 3])).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::StagingAsyncBackingParams(tx),
))) => {
tx.send(Err(RuntimeApiError::NotSupported {
runtime_api_name: "doesnt_matter",
}))
.unwrap();
},
Some(msg) => {
panic!("didn't expect any other overseer requests; got {:?}", msg)
},
}
}
};
subsystem_test_harness(overseer, |mut ctx| async move {
handle_new_activations(
Arc::new(test_config(16)),
activated_hashes,
&mut ctx,
Metrics(None),
)
.await
.unwrap();
});
let requested_validation_data = Arc::try_unwrap(requested_validation_data)
.expect("overseer should have shut down by now")
.into_inner();
	// the only requested validation data should be for the all-4s hash:
	// each activated hash generates two scheduled cores, one with its first byte * 4 and
	// one with its first byte * 5. Given that the test configuration has a `para_id` of
	// 16, there's only one way to get that value: from the hash of repeated 4s.
assert_eq!(requested_validation_data, vec![[4; 32].into()]);
}
#[test]
fn sends_distribute_collation_message() {
let activated_hashes: Vec<Hash> = vec![
Hash::repeat_byte(1),
Hash::repeat_byte(4),
Hash::repeat_byte(9),
Hash::repeat_byte(16),
];
	// an empty vec doesn't allocate on the heap, so it's fine to throw it away
let to_collator_protocol = Arc::new(Mutex::new(Vec::new()));
let inner_to_collator_protocol = to_collator_protocol.clone();
let overseer = |mut handle: TestSubsystemContextHandle<CollationGenerationMessage>| async move {
loop {
match handle.try_recv().await {
None => break,
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
hash,
RuntimeApiRequest::AvailabilityCores(tx),
))) => {
tx.send(Ok(vec![
CoreState::Free,
							// scheduled cores derive their para ids from the hash; see the
							// explanation in the previous test
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 4) as u32,
)),
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 5) as u32,
)),
]))
.unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::PersistedValidationData(
_para_id,
_occupied_core_assumption,
tx,
),
))) => {
tx.send(Ok(Some(test_validation_data()))).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::Validators(tx),
))) => {
tx.send(Ok(vec![dummy_validator(); 3])).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::ValidationCodeHash(
_para_id,
OccupiedCoreAssumption::Free,
tx,
),
))) => {
tx.send(Ok(Some(ValidationCode(vec![1, 2, 3]).hash()))).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::StagingAsyncBackingParams(tx),
))) => {
tx.send(Err(RuntimeApiError::NotSupported {
runtime_api_name: "doesnt_matter",
}))
.unwrap();
},
Some(msg @ AllMessages::CollatorProtocol(_)) => {
inner_to_collator_protocol.lock().await.push(msg);
},
Some(msg) => {
panic!("didn't expect any other overseer requests; got {:?}", msg)
},
}
}
};
let config = Arc::new(test_config(16));
let subsystem_config = config.clone();
subsystem_test_harness(overseer, |mut ctx| async move {
handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None))
.await
.unwrap();
});
let mut to_collator_protocol = Arc::try_unwrap(to_collator_protocol)
.expect("subsystem should have shut down by now")
.into_inner();
// we expect a single message to be sent, containing a candidate receipt.
// we don't care too much about the `commitments_hash` right now, but let's ensure that we've
// calculated the correct descriptor
let expect_pov_hash = test_collation_compressed().proof_of_validity.into_compressed().hash();
let expect_validation_data_hash = test_validation_data().hash();
let expect_relay_parent = Hash::repeat_byte(4);
let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash();
let expect_payload = collator_signature_payload(
&expect_relay_parent,
&config.para_id,
&expect_validation_data_hash,
&expect_pov_hash,
&expect_validation_code_hash,
);
let expect_descriptor = CandidateDescriptor {
signature: config.key.sign(&expect_payload),
para_id: config.para_id,
relay_parent: expect_relay_parent,
collator: config.key.public(),
persisted_validation_data_hash: expect_validation_data_hash,
pov_hash: expect_pov_hash,
erasure_root: dummy_hash(), // this isn't something we're checking right now
para_head: test_collation().head_data.hash(),
validation_code_hash: expect_validation_code_hash,
};
assert_eq!(to_collator_protocol.len(), 1);
match AllMessages::from(to_collator_protocol.pop().unwrap()) {
AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation(
CandidateReceipt { descriptor, .. },
_pov,
..,
)) => {
// signature generation is non-deterministic, so we can't just assert that the
// expected descriptor is correct. What we can do is validate that the produced
// descriptor has a valid signature, then just copy in the generated signature
// and check the rest of the fields for equality.
assert!(CollatorPair::verify(
&descriptor.signature,
&collator_signature_payload(
&descriptor.relay_parent,
&descriptor.para_id,
&descriptor.persisted_validation_data_hash,
&descriptor.pov_hash,
&descriptor.validation_code_hash,
)
.as_ref(),
&descriptor.collator,
));
let expect_descriptor = {
let mut expect_descriptor = expect_descriptor;
expect_descriptor.signature = descriptor.signature.clone();
expect_descriptor.erasure_root = descriptor.erasure_root;
expect_descriptor
};
assert_eq!(descriptor, expect_descriptor);
},
_ => panic!("received wrong message type"),
}
}
#[test]
fn fallback_when_no_validation_code_hash_api() {
// This is a variant of the above test, but with the validation code hash API disabled.
let activated_hashes: Vec<Hash> = vec![
Hash::repeat_byte(1),
Hash::repeat_byte(4),
Hash::repeat_byte(9),
Hash::repeat_byte(16),
];
	// an empty vec doesn't allocate on the heap, so it's fine to throw it away
let to_collator_protocol = Arc::new(Mutex::new(Vec::new()));
let inner_to_collator_protocol = to_collator_protocol.clone();
let overseer = |mut handle: TestSubsystemContextHandle<CollationGenerationMessage>| async move {
loop {
match handle.try_recv().await {
None => break,
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
hash,
RuntimeApiRequest::AvailabilityCores(tx),
))) => {
tx.send(Ok(vec![
CoreState::Free,
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 4) as u32,
)),
CoreState::Scheduled(scheduled_core_for(
(hash.as_fixed_bytes()[0] * 5) as u32,
)),
]))
.unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::PersistedValidationData(
_para_id,
_occupied_core_assumption,
tx,
),
))) => {
tx.send(Ok(Some(test_validation_data()))).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::Validators(tx),
))) => {
tx.send(Ok(vec![dummy_validator(); 3])).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::ValidationCodeHash(
_para_id,
OccupiedCoreAssumption::Free,
tx,
),
))) => {
tx.send(Err(RuntimeApiError::NotSupported {
runtime_api_name: "validation_code_hash",
}))
.unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::ValidationCode(_para_id, OccupiedCoreAssumption::Free, tx),
))) => {
tx.send(Ok(Some(ValidationCode(vec![1, 2, 3])))).unwrap();
},
Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(
_hash,
RuntimeApiRequest::StagingAsyncBackingParams(tx),
))) => {
tx.send(Err(RuntimeApiError::NotSupported {
runtime_api_name: "doesnt_matter",
}))
.unwrap();
},
Some(msg @ AllMessages::CollatorProtocol(_)) => {
inner_to_collator_protocol.lock().await.push(msg);
},
Some(msg) => {
panic!("didn't expect any other overseer requests; got {:?}", msg)
},
}
}
};
let config = Arc::new(test_config(16u32));
let subsystem_config = config.clone();
subsystem_test_harness(overseer, |mut ctx| async move {
handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None))
.await
.unwrap();
});
let to_collator_protocol = Arc::try_unwrap(to_collator_protocol)
.expect("subsystem should have shut down by now")
.into_inner();
let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash();
assert_eq!(to_collator_protocol.len(), 1);
match &to_collator_protocol[0] {
AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation(
CandidateReceipt { descriptor, .. },
_pov,
..,
)) => {
assert_eq!(expect_validation_code_hash, descriptor.validation_code_hash);
},
_ => panic!("received wrong message type"),
}
}
#[test]
fn submit_collation_is_no_op_before_initialization() {
test_harness(|mut virtual_overseer| async move {
virtual_overseer
.send(FromOrchestra::Communication {
msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
relay_parent: Hash::repeat_byte(0),
collation: test_collation(),
parent_head: vec![1, 2, 3].into(),
validation_code_hash: Hash::repeat_byte(1).into(),
result_sender: None,
}),
})
.await;
virtual_overseer
});
}
#[test]
fn submit_collation_leads_to_distribution() {
let relay_parent = Hash::repeat_byte(0);
let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
let parent_head = HeadData::from(vec![1, 2, 3]);
let para_id = ParaId::from(5);
let expected_pvd = PersistedValidationData {
parent_head: parent_head.clone(),
relay_parent_number: 10,
relay_parent_storage_root: Hash::repeat_byte(1),
max_pov_size: 1024,
};
test_harness(|mut virtual_overseer| async move {
virtual_overseer
.send(FromOrchestra::Communication {
msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)),
})
.await;
virtual_overseer
.send(FromOrchestra::Communication {
msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
relay_parent,
collation: test_collation(),
parent_head: vec![1, 2, 3].into(),
validation_code_hash,
result_sender: None,
}),
})
.await;
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::Validators(tx))) => {
assert_eq!(rp, relay_parent);
let _ = tx.send(Ok(vec![
Sr25519Keyring::Alice.public().into(),
Sr25519Keyring::Bob.public().into(),
Sr25519Keyring::Charlie.public().into(),
]));
}
);
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => {
assert_eq!(rp, relay_parent);
assert_eq!(id, para_id);
assert_eq!(a, OccupiedCoreAssumption::TimedOut);
// Candidate receipt should be constructed with the real parent head.
let mut pvd = expected_pvd.clone();
pvd.parent_head = vec![4, 5, 6].into();
let _ = tx.send(Ok(Some(pvd)));
}
);
assert_matches!(
overseer_recv(&mut virtual_overseer).await,
AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation(
ccr,
parent_head_data_hash,
..
)) => {
assert_eq!(parent_head_data_hash, parent_head.hash());
assert_eq!(ccr.descriptor().persisted_validation_data_hash, expected_pvd.hash());
assert_eq!(ccr.descriptor().para_head, dummy_head_data().hash());
assert_eq!(ccr.descriptor().validation_code_hash, validation_code_hash);
}
);
virtual_overseer
});
}
+1
View File
@@ -0,0 +1 @@
This folder contains core subsystems, each with its own crate.
@@ -0,0 +1,45 @@
[package]
name = "polkadot-node-core-approval-voting"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
futures = "0.3.21"
futures-timer = "3.0.2"
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
gum = { package = "tracing-gum", path = "../../gum" }
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
lru = "0.11.0"
merlin = "2.0"
schnorrkel = "0.9.1"
kvdb = "0.13.0"
derive_more = "0.99.17"
thiserror = "1.0.31"
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-overseer = { path = "../../overseer" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-jaeger = { path = "../../jaeger" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = ["full_crypto"] }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
[dev-dependencies]
async-trait = "0.1.57"
parking_lot = "0.12.0"
rand_core = "0.5.1" # should match schnorrkel
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
assert_matches = "1.4.0"
kvdb-memorydb = "0.13.0"
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
File diff suppressed because it is too large
@@ -0,0 +1,33 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Approval DB accessors and writers for on-disk persisted approval storage
//! data.
//!
//! We persist data to disk although it is not intended to be used across runs of the
//! program. This is because under medium to long periods of finality stalling, for whatever
//! reason that may be, the amount of data we'd need to keep would be potentially too large
//! for memory.
//!
//! With tens or hundreds of parachains, hundreds of validators, and parablocks
//! in every relay chain block, there can be a humongous amount of information to reference
//! at any given time.
//!
//! As such, we provide a function from this module to clear the database on start-up.
//! In the future, we may use a temporary DB which doesn't need to be wiped, but for the
//! time being we share the same DB with the rest of Substrate.
pub mod v1;
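// A minimal sketch of the start-up wipe described above (hypothetical; the
// real helper lives elsewhere and may differ), assuming kvdb's
// `DBTransaction::delete_prefix` is exposed through the database adapter.
// Every key written by `v1` shares the `"Approvals_"` prefix.
pub fn clear_approval_data_sketch(
	db: &dyn polkadot_node_subsystem_util::database::Database,
	col_approval_data: u32,
) -> std::io::Result<()> {
	let mut tx = polkadot_node_subsystem_util::database::DBTransaction::new();
	// Drop every approval-voting key in one atomic transaction.
	tx.delete_prefix(col_approval_data, b"Approvals_");
	db.write(tx)
}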
@@ -0,0 +1,351 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Version 1 of the DB schema.
//!
//! Note that the version here differs from the actual version of the parachains
//! database (check `CURRENT_VERSION` in `node/service/src/parachains_db/upgrade.rs`).
//! The code in this module implements the way approval voting works with
//! its data in the database. Any breaking changes here will still
//! require a db migration (check `node/service/src/parachains_db/upgrade.rs`).
use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche};
use polkadot_node_subsystem::{SubsystemError, SubsystemResult};
use polkadot_node_subsystem_util::database::{DBTransaction, Database};
use polkadot_primitives::{
BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex,
ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use std::{collections::BTreeMap, sync::Arc};
use crate::{
backend::{Backend, BackendWriteOp},
persisted_entries,
};
const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks";
#[cfg(test)]
pub mod tests;
/// `DbBackend` is a concrete implementation of the higher-level `Backend` trait.
pub struct DbBackend {
inner: Arc<dyn Database>,
config: Config,
}
impl DbBackend {
/// Create a new [`DbBackend`] with the supplied key-value store and
/// config.
pub fn new(db: Arc<dyn Database>, config: Config) -> Self {
DbBackend { inner: db, config }
}
}
impl Backend for DbBackend {
fn load_block_entry(
&self,
block_hash: &Hash,
) -> SubsystemResult<Option<persisted_entries::BlockEntry>> {
load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into))
}
fn load_candidate_entry(
&self,
candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<persisted_entries::CandidateEntry>> {
load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into))
}
fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult<Vec<Hash>> {
load_blocks_at_height(&*self.inner, &self.config, block_height)
}
fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>> {
load_all_blocks(&*self.inner, &self.config)
}
fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>> {
load_stored_blocks(&*self.inner, &self.config)
}
/// Atomically write the list of operations, with later operations taking precedence over prior.
fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
where
I: IntoIterator<Item = BackendWriteOp>,
{
let mut tx = DBTransaction::new();
for op in ops {
match op {
BackendWriteOp::WriteStoredBlockRange(stored_block_range) => {
tx.put_vec(
self.config.col_approval_data,
&STORED_BLOCKS_KEY,
stored_block_range.encode(),
);
},
BackendWriteOp::DeleteStoredBlockRange => {
tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY);
},
BackendWriteOp::WriteBlocksAtHeight(h, blocks) => {
tx.put_vec(
self.config.col_approval_data,
&blocks_at_height_key(h),
blocks.encode(),
);
},
BackendWriteOp::DeleteBlocksAtHeight(h) => {
tx.delete(self.config.col_approval_data, &blocks_at_height_key(h));
},
BackendWriteOp::WriteBlockEntry(block_entry) => {
let block_entry: BlockEntry = block_entry.into();
tx.put_vec(
self.config.col_approval_data,
&block_entry_key(&block_entry.block_hash),
block_entry.encode(),
);
},
BackendWriteOp::DeleteBlockEntry(hash) => {
tx.delete(self.config.col_approval_data, &block_entry_key(&hash));
},
BackendWriteOp::WriteCandidateEntry(candidate_entry) => {
let candidate_entry: CandidateEntry = candidate_entry.into();
tx.put_vec(
self.config.col_approval_data,
&candidate_entry_key(&candidate_entry.candidate.hash()),
candidate_entry.encode(),
);
},
BackendWriteOp::DeleteCandidateEntry(candidate_hash) => {
tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash));
},
}
}
self.inner.write(tx).map_err(|e| e.into())
}
}
/// A half-open range, `earliest..last`, of block numbers stored within the DB.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber);
// slot_duration * 2 + DelayTranche gives the number of delay tranches since the
// unix epoch.
#[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)]
pub struct Tick(u64);
/// Convenience type definition
pub type Bitfield = BitVec<u8, BitOrderLsb0>;
/// The database config.
#[derive(Debug, Clone, Copy)]
pub struct Config {
/// The column family in the database where data is stored.
pub col_approval_data: u32,
}
/// Details pertaining to our assignment on a block.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct OurAssignment {
pub cert: AssignmentCert,
pub tranche: DelayTranche,
pub validator_index: ValidatorIndex,
// Whether the assignment has been triggered already.
pub triggered: bool,
}
/// Metadata regarding a specific tranche of assignments for a specific candidate.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct TrancheEntry {
pub tranche: DelayTranche,
// Assigned validators, and the instant we received their assignment, rounded
// to the nearest tick.
pub assignments: Vec<(ValidatorIndex, Tick)>,
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
pub tranches: Vec<TrancheEntry>,
pub backing_group: GroupIndex,
pub our_assignment: Option<OurAssignment>,
pub our_approval_sig: Option<ValidatorSignature>,
// `n_validators` bits.
pub assignments: Bitfield,
pub approved: bool,
}
/// Metadata regarding approval of a particular candidate.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct CandidateEntry {
pub candidate: CandidateReceipt,
pub session: SessionIndex,
// Assignments are based on blocks, so we need to track assignments separately
// based on the block we are looking at.
pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
pub approvals: Bitfield,
}
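// An illustrative lookup (not part of the module): assignments are tracked per
// relay block, so approval state for a candidate is always read in the context
// of one specific block hash.
fn approval_entry_for<'a>(
	entry: &'a CandidateEntry,
	block_hash: &Hash,
) -> Option<&'a ApprovalEntry> {
	entry.block_assignments.get(block_hash)
}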
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
#[derive(Encode, Decode, Debug, Clone, PartialEq)]
pub struct BlockEntry {
pub block_hash: Hash,
pub block_number: BlockNumber,
pub parent_hash: Hash,
pub session: SessionIndex,
pub slot: Slot,
/// Random bytes derived from the VRF submitted within the block by the block
/// author as a credential and used as input to approval assignment criteria.
pub relay_vrf_story: [u8; 32],
// The candidates included as-of this block and the index of the core they are
// leaving. Sorted ascending by core index.
pub candidates: Vec<(CoreIndex, CandidateHash)>,
// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
// The i'th bit is `true` iff the candidate has been approved in the context of this
// block. The block can be considered approved if the bitfield has all bits set to `true`.
pub approved_bitfield: Bitfield,
pub children: Vec<Hash>,
}
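// An illustrative helper (not part of the module) for the bitfield semantics
// documented above: a block is approved once every candidate's bit is set.
fn block_is_fully_approved(entry: &BlockEntry) -> bool {
	entry.approved_bitfield.all()
}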
impl From<crate::Tick> for Tick {
fn from(tick: crate::Tick) -> Tick {
Tick(tick)
}
}
impl From<Tick> for crate::Tick {
fn from(tick: Tick) -> crate::Tick {
tick.0
}
}
/// Errors while accessing things from the DB.
#[derive(Debug, derive_more::From, derive_more::Display)]
pub enum Error {
Io(std::io::Error),
InvalidDecoding(parity_scale_codec::Error),
}
impl std::error::Error for Error {}
/// Result alias for DB errors.
pub type Result<T> = std::result::Result<T, Error>;
pub(crate) fn load_decode<D: Decode>(
store: &dyn Database,
col_approval_data: u32,
key: &[u8],
) -> Result<Option<D>> {
match store.get(col_approval_data, key)? {
None => Ok(None),
Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into),
}
}
/// The key a given block entry is stored under.
pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] {
const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck";
let mut key = [0u8; 14 + 32];
key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX);
key[14..][..32].copy_from_slice(block_hash.as_ref());
key
}
/// The key a given candidate entry is stored under.
pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] {
const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand";
let mut key = [0u8; 14 + 32];
key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX);
key[14..][..32].copy_from_slice(candidate_hash.0.as_ref());
key
}
/// The key a set of block hashes corresponding to a block number is stored under.
pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] {
const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at";
let mut key = [0u8; 12 + 4];
key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX);
block_number.using_encoded(|s| key[12..16].copy_from_slice(s));
key
}
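// A quick layout check (illustrative, not one of the original tests): each key
// is a fixed ASCII prefix followed by the SCALE encoding of its payload; for a
// `u32` block number that is simply four little-endian bytes.
#[test]
fn blocks_at_height_key_layout() {
	let key = blocks_at_height_key(1);
	assert_eq!(&key[..12], b"Approvals_at");
	assert_eq!(&key[12..], &1u32.to_le_bytes());
}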
/// Return all blocks which have entries in the DB, ascending by height.
pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult<Vec<Hash>> {
let mut hashes = Vec::new();
if let Some(stored_blocks) = load_stored_blocks(store, config)? {
for height in stored_blocks.0..stored_blocks.1 {
let blocks = load_blocks_at_height(store, config, &height)?;
hashes.extend(blocks);
}
}
Ok(hashes)
}
/// Load the stored-blocks key from the state.
pub fn load_stored_blocks(
store: &dyn Database,
config: &Config,
) -> SubsystemResult<Option<StoredBlockRange>> {
load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY)
.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
/// Load a blocks-at-height entry for a given block number.
pub fn load_blocks_at_height(
store: &dyn Database,
config: &Config,
block_number: &BlockNumber,
) -> SubsystemResult<Vec<Hash>> {
load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number))
.map(|x| x.unwrap_or_default())
.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
/// Load a block entry from the aux store.
pub fn load_block_entry(
store: &dyn Database,
config: &Config,
block_hash: &Hash,
) -> SubsystemResult<Option<BlockEntry>> {
load_decode(store, config.col_approval_data, &block_entry_key(block_hash))
.map(|u: Option<BlockEntry>| u.map(|v| v.into()))
.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
/// Load a candidate entry from the aux store.
pub fn load_candidate_entry(
store: &dyn Database,
config: &Config,
candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<CandidateEntry>> {
load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash))
.map(|u: Option<CandidateEntry>| u.map(|v| v.into()))
.map_err(|e| SubsystemError::with_origin("approval-voting", e))
}
@@ -0,0 +1,569 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the aux-schema of approval voting.
use super::{DbBackend, StoredBlockRange, *};
use crate::{
backend::{Backend, OverlayedBackend},
ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo},
};
use polkadot_node_subsystem_util::database::Database;
use polkadot_primitives::Id as ParaId;
use std::{collections::HashMap, sync::Arc};
use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash};
const DATA_COL: u32 = 0;
const NUM_COLUMNS: u32 = 1;
const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL };
fn make_db() -> (DbBackend, Arc<dyn Database>) {
let db = kvdb_memorydb::create(NUM_COLUMNS);
let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]);
let db_writer: Arc<dyn Database> = Arc::new(db);
(DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer)
}
fn make_bitvec(len: usize) -> BitVec<u8, BitOrderLsb0> {
bitvec::bitvec![u8, BitOrderLsb0; 0; len]
}
fn make_block_entry(
block_hash: Hash,
parent_hash: Hash,
block_number: BlockNumber,
candidates: Vec<(CoreIndex, CandidateHash)>,
) -> BlockEntry {
BlockEntry {
block_hash,
parent_hash,
block_number,
session: 1,
slot: Slot::from(1),
relay_vrf_story: [0u8; 32],
approved_bitfield: make_bitvec(candidates.len()),
candidates,
children: Vec::new(),
}
}
fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt {
let mut c = dummy_candidate_receipt(dummy_hash());
c.descriptor.para_id = para_id;
c.descriptor.relay_parent = relay_parent;
c
}
#[test]
fn read_write() {
let (mut db, store) = make_db();
let hash_a = Hash::repeat_byte(1);
let hash_b = Hash::repeat_byte(2);
let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash();
let range = StoredBlockRange(10, 20);
let at_height = vec![hash_a, hash_b];
let block_entry =
make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]);
let candidate_entry = CandidateEntry {
candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None),
session: 5,
block_assignments: vec![(
hash_a,
ApprovalEntry {
tranches: Vec::new(),
backing_group: GroupIndex(1),
our_assignment: None,
our_approval_sig: None,
assignments: Default::default(),
approved: false,
},
)]
.into_iter()
.collect(),
approvals: Default::default(),
};
let mut overlay_db = OverlayedBackend::new(&db);
overlay_db.write_stored_block_range(range.clone());
overlay_db.write_blocks_at_height(1, at_height.clone());
overlay_db.write_block_entry(block_entry.clone().into());
overlay_db.write_candidate_entry(candidate_entry.clone().into());
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range));
assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height);
assert_eq!(
load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(),
Some(block_entry.into())
);
assert_eq!(
load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(),
Some(candidate_entry.into()),
);
let mut overlay_db = OverlayedBackend::new(&db);
overlay_db.delete_blocks_at_height(1);
overlay_db.delete_block_entry(&hash_a);
overlay_db.delete_candidate_entry(&candidate_hash);
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty());
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none());
assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash)
.unwrap()
.is_none());
}
#[test]
fn add_block_entry_works() {
let (mut db, store) = make_db();
let parent_hash = Hash::repeat_byte(1);
let block_hash_a = Hash::repeat_byte(2);
let block_hash_b = Hash::repeat_byte(69);
let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash);
let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash);
let candidate_hash_a = candidate_receipt_a.hash();
let candidate_hash_b = candidate_receipt_b.hash();
let block_number = 10;
let block_entry_a = make_block_entry(
block_hash_a,
parent_hash,
block_number,
vec![(CoreIndex(0), candidate_hash_a)],
);
let block_entry_b = make_block_entry(
block_hash_b,
parent_hash,
block_number,
vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)],
);
let n_validators = 10;
let mut new_candidate_info = HashMap::new();
new_candidate_info
.insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None));
let mut overlay_db = OverlayedBackend::new(&db);
add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| {
new_candidate_info.get(h).map(|x| x.clone())
})
.unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
new_candidate_info
.insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None));
let mut overlay_db = OverlayedBackend::new(&db);
add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| {
new_candidate_info.get(h).map(|x| x.clone())
})
.unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert_eq!(
load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
Some(block_entry_a.into())
);
assert_eq!(
load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
Some(block_entry_b.into())
);
let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a)
.unwrap()
.unwrap();
assert_eq!(
candidate_entry_a.block_assignments.keys().collect::<Vec<_>>(),
vec![&block_hash_a, &block_hash_b]
);
let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b)
.unwrap()
.unwrap();
assert_eq!(candidate_entry_b.block_assignments.keys().collect::<Vec<_>>(), vec![&block_hash_b]);
}
#[test]
fn add_block_entry_adds_child() {
let (mut db, store) = make_db();
let parent_hash = Hash::repeat_byte(1);
let block_hash_a = Hash::repeat_byte(2);
let block_hash_b = Hash::repeat_byte(69);
let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new());
let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new());
let n_validators = 10;
let mut overlay_db = OverlayedBackend::new(&db);
add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
block_entry_a.children.push(block_hash_b);
assert_eq!(
load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(),
Some(block_entry_a.into())
);
assert_eq!(
load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(),
Some(block_entry_b.into())
);
}
#[test]
fn canonicalize_works() {
let (mut db, store) = make_db();
// -> B1 -> C1 -> D1
// A -> B2 -> C2 -> D2
//
	// We'll canonicalize C1. Everything except D1 should disappear.
//
// Candidates:
// Cand1 in B2
// Cand2 in C2
// Cand3 in C2 and D1
// Cand4 in D1
// Cand5 in D2
// Only Cand3 and Cand4 should remain after canonicalize.
let n_validators = 10;
let mut overlay_db = OverlayedBackend::new(&db);
overlay_db.write_stored_block_range(StoredBlockRange(1, 5));
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
let genesis = Hash::repeat_byte(0);
let block_hash_a = Hash::repeat_byte(1);
let block_hash_b1 = Hash::repeat_byte(2);
let block_hash_b2 = Hash::repeat_byte(3);
let block_hash_c1 = Hash::repeat_byte(4);
let block_hash_c2 = Hash::repeat_byte(5);
let block_hash_d1 = Hash::repeat_byte(6);
let block_hash_d2 = Hash::repeat_byte(7);
let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis);
let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a);
let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a);
let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1);
let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1);
let cand_hash_1 = candidate_receipt_genesis.hash();
let cand_hash_2 = candidate_receipt_a.hash();
let cand_hash_3 = candidate_receipt_b.hash();
let cand_hash_4 = candidate_receipt_b1.hash();
let cand_hash_5 = candidate_receipt_c1.hash();
let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new());
let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new());
let block_entry_b2 =
make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]);
let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new());
let block_entry_c2 = make_block_entry(
block_hash_c2,
block_hash_b2,
3,
vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)],
);
let block_entry_d1 = make_block_entry(
block_hash_d1,
block_hash_c1,
4,
vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)],
);
let block_entry_d2 =
make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]);
let candidate_info = {
let mut candidate_info = HashMap::new();
candidate_info.insert(
cand_hash_1,
NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None),
);
candidate_info
.insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None));
candidate_info
.insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None));
candidate_info
.insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None));
candidate_info
.insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None));
candidate_info
};
// now insert all the blocks.
let blocks = vec![
block_entry_a.clone(),
block_entry_b1.clone(),
block_entry_b2.clone(),
block_entry_c1.clone(),
block_entry_c2.clone(),
block_entry_d1.clone(),
block_entry_d2.clone(),
];
let mut overlay_db = OverlayedBackend::new(&db);
for block_entry in blocks {
add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
candidate_info.get(h).map(|x| x.clone())
})
.unwrap();
}
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
let check_candidates_in_store = |expected: Vec<(CandidateHash, Option<Vec<_>>)>| {
for (c_hash, in_blocks) in expected {
let (entry, in_blocks) = match in_blocks {
None => {
assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash)
.unwrap()
.is_none());
continue
},
Some(i) => (
load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(),
i,
),
};
assert_eq!(entry.block_assignments.len(), in_blocks.len());
for x in in_blocks {
assert!(entry.block_assignments.contains_key(&x));
}
}
};
let check_blocks_in_store = |expected: Vec<(Hash, Option<Vec<_>>)>| {
for (hash, with_candidates) in expected {
let (entry, with_candidates) = match with_candidates {
None => {
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash)
.unwrap()
.is_none());
continue
},
Some(i) =>
(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i),
};
assert_eq!(entry.candidates.len(), with_candidates.len());
for x in with_candidates {
assert!(entry.candidates.iter().any(|(_, c)| c == &x));
}
}
};
check_candidates_in_store(vec![
(cand_hash_1, Some(vec![block_hash_b2])),
(cand_hash_2, Some(vec![block_hash_c2])),
(cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])),
(cand_hash_4, Some(vec![block_hash_d1])),
(cand_hash_5, Some(vec![block_hash_d2])),
]);
check_blocks_in_store(vec![
(block_hash_a, Some(vec![])),
(block_hash_b1, Some(vec![])),
(block_hash_b2, Some(vec![cand_hash_1])),
(block_hash_c1, Some(vec![])),
(block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])),
(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
(block_hash_d2, Some(vec![cand_hash_5])),
]);
let mut overlay_db = OverlayedBackend::new(&db);
canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert_eq!(
load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(),
StoredBlockRange(4, 5)
);
check_candidates_in_store(vec![
(cand_hash_1, None),
(cand_hash_2, None),
(cand_hash_3, Some(vec![block_hash_d1])),
(cand_hash_4, Some(vec![block_hash_d1])),
(cand_hash_5, None),
]);
check_blocks_in_store(vec![
(block_hash_a, None),
(block_hash_b1, None),
(block_hash_b2, None),
(block_hash_c1, None),
(block_hash_c2, None),
(block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])),
(block_hash_d2, None),
]);
}
#[test]
fn force_approve_works() {
let (mut db, store) = make_db();
let n_validators = 10;
let mut overlay_db = OverlayedBackend::new(&db);
overlay_db.write_stored_block_range(StoredBlockRange(1, 4));
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
let candidate_hash = CandidateHash(Hash::repeat_byte(42));
let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)];
let candidate_info = {
let mut candidate_info = HashMap::new();
candidate_info.insert(
candidate_hash,
NewCandidateInfo::new(
make_candidate(ParaId::from(1_u32), Default::default()),
GroupIndex(1),
None,
),
);
candidate_info
};
let block_hash_a = Hash::repeat_byte(1); // 1
let block_hash_b = Hash::repeat_byte(2);
let block_hash_c = Hash::repeat_byte(3);
let block_hash_d = Hash::repeat_byte(4); // 4
let block_entry_a =
make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone());
let block_entry_b =
make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone());
let block_entry_c =
make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone());
let block_entry_d =
make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone());
let blocks = vec![
block_entry_a.clone(),
block_entry_b.clone(),
block_entry_c.clone(),
block_entry_d.clone(),
];
let mut overlay_db = OverlayedBackend::new(&db);
for block_entry in blocks {
add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| {
candidate_info.get(h).map(|x| x.clone())
})
.unwrap();
}
let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,)
.unwrap()
.unwrap()
.approved_bitfield
.all());
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,)
.unwrap()
.unwrap()
.approved_bitfield
.all());
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,)
.unwrap()
.unwrap()
.approved_bitfield
.not_any());
assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,)
.unwrap()
.unwrap()
.approved_bitfield
.not_any());
assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]);
}
#[test]
fn load_all_blocks_works() {
let (mut db, store) = make_db();
let parent_hash = Hash::repeat_byte(1);
let block_hash_a = Hash::repeat_byte(2);
let block_hash_b = Hash::repeat_byte(69);
let block_hash_c = Hash::repeat_byte(42);
let block_number = 10;
let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]);
let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]);
let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]);
let n_validators = 10;
let mut overlay_db = OverlayedBackend::new(&db);
add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap();
// add C before B to test sorting.
add_block_entry(&mut overlay_db, block_entry_c.clone().into(), n_validators, |_| None).unwrap();
add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap();
let write_ops = overlay_db.into_write_ops();
db.write(write_ops).unwrap();
assert_eq!(
load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(),
vec![block_hash_a, block_hash_b, block_hash_c],
)
}
@@ -0,0 +1,221 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! An abstraction over storage used by the approval voting subsystem.
//!
//! This provides both a [`Backend`] trait and an [`OverlayedBackend`]
//! struct which allows in-memory changes to be applied on top of a
//! [`Backend`], maintaining consistency between queries and temporary writes,
//! before any commit to the underlying storage is made.
use polkadot_node_subsystem::SubsystemResult;
use polkadot_primitives::{BlockNumber, CandidateHash, Hash};
use std::collections::HashMap;
use super::{
approval_db::v1::StoredBlockRange,
persisted_entries::{BlockEntry, CandidateEntry},
};
#[derive(Debug)]
pub enum BackendWriteOp {
WriteStoredBlockRange(StoredBlockRange),
WriteBlocksAtHeight(BlockNumber, Vec<Hash>),
WriteBlockEntry(BlockEntry),
WriteCandidateEntry(CandidateEntry),
DeleteStoredBlockRange,
DeleteBlocksAtHeight(BlockNumber),
DeleteBlockEntry(Hash),
DeleteCandidateEntry(CandidateHash),
}
/// An abstraction over backend storage for the logic of this subsystem.
pub trait Backend {
/// Load a block entry from the DB.
fn load_block_entry(&self, hash: &Hash) -> SubsystemResult<Option<BlockEntry>>;
/// Load a candidate entry from the DB.
fn load_candidate_entry(
&self,
candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<CandidateEntry>>;
/// Load all blocks at a specific height.
fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult<Vec<Hash>>;
	/// Load all blocks from the DB.
fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>>;
	/// Load the stored block range from the DB.
fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>>;
/// Atomically write the list of operations, with later operations taking precedence over prior.
fn write<I>(&mut self, ops: I) -> SubsystemResult<()>
where
I: IntoIterator<Item = BackendWriteOp>;
}
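// A condensed usage sketch of the overlay pattern from the module docs,
// mirroring the approval-db tests elsewhere in this import: stage changes
// against a read-only borrow, then flush them to the backend as one atomic
// batch. Hypothetical helper; the `BlockEntry` is assumed to be built elsewhere.
fn flush_block_entry<B: Backend>(db: &mut B, entry: BlockEntry) -> SubsystemResult<()> {
	let mut overlay = OverlayedBackend::new(&*db);
	overlay.write_block_entry(entry);
	// Consuming the overlay releases the borrow and yields the batched ops.
	let write_ops = overlay.into_write_ops();
	db.write(write_ops)
}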
// Status of block range in the `OverlayedBackend`.
#[derive(PartialEq)]
enum BlockRangeStatus {
// Value has not been modified.
NotModified,
// Value has been deleted
Deleted,
// Value has been updated.
Inserted(StoredBlockRange),
}
/// An in-memory overlay over the backend.
///
/// This maintains read-only access to the underlying backend, but can be
/// converted into a set of write operations which will, when written to
/// the underlying backend, give the same view as the state of the overlay.
pub struct OverlayedBackend<'a, B: 'a> {
inner: &'a B,
	// Status of the stored block range; see `BlockRangeStatus` above.
stored_block_range: BlockRangeStatus,
// `None` means 'deleted', missing means query inner.
blocks_at_height: HashMap<BlockNumber, Option<Vec<Hash>>>,
// `None` means 'deleted', missing means query inner.
block_entries: HashMap<Hash, Option<BlockEntry>>,
// `None` means 'deleted', missing means query inner.
candidate_entries: HashMap<CandidateHash, Option<CandidateEntry>>,
}
impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> {
pub fn new(backend: &'a B) -> Self {
OverlayedBackend {
inner: backend,
stored_block_range: BlockRangeStatus::NotModified,
blocks_at_height: HashMap::new(),
block_entries: HashMap::new(),
candidate_entries: HashMap::new(),
}
}
pub fn is_empty(&self) -> bool {
self.block_entries.is_empty() &&
self.candidate_entries.is_empty() &&
self.blocks_at_height.is_empty() &&
self.stored_block_range == BlockRangeStatus::NotModified
}
pub fn load_all_blocks(&self) -> SubsystemResult<Vec<Hash>> {
let mut hashes = Vec::new();
if let Some(stored_blocks) = self.load_stored_blocks()? {
for height in stored_blocks.0..stored_blocks.1 {
hashes.extend(self.load_blocks_at_height(&height)?);
}
}
Ok(hashes)
}
pub fn load_stored_blocks(&self) -> SubsystemResult<Option<StoredBlockRange>> {
match self.stored_block_range {
BlockRangeStatus::Inserted(ref value) => Ok(Some(value.clone())),
BlockRangeStatus::Deleted => Ok(None),
BlockRangeStatus::NotModified => self.inner.load_stored_blocks(),
}
}
pub fn load_blocks_at_height(&self, height: &BlockNumber) -> SubsystemResult<Vec<Hash>> {
if let Some(val) = self.blocks_at_height.get(&height) {
return Ok(val.clone().unwrap_or_default())
}
self.inner.load_blocks_at_height(height)
}
pub fn load_block_entry(&self, hash: &Hash) -> SubsystemResult<Option<BlockEntry>> {
if let Some(val) = self.block_entries.get(&hash) {
return Ok(val.clone())
}
self.inner.load_block_entry(hash)
}
pub fn load_candidate_entry(
&self,
candidate_hash: &CandidateHash,
) -> SubsystemResult<Option<CandidateEntry>> {
if let Some(val) = self.candidate_entries.get(&candidate_hash) {
return Ok(val.clone())
}
self.inner.load_candidate_entry(candidate_hash)
}
pub fn write_stored_block_range(&mut self, range: StoredBlockRange) {
self.stored_block_range = BlockRangeStatus::Inserted(range);
}
pub fn delete_stored_block_range(&mut self) {
self.stored_block_range = BlockRangeStatus::Deleted;
}
pub fn write_blocks_at_height(&mut self, height: BlockNumber, blocks: Vec<Hash>) {
self.blocks_at_height.insert(height, Some(blocks));
}
pub fn delete_blocks_at_height(&mut self, height: BlockNumber) {
self.blocks_at_height.insert(height, None);
}
pub fn write_block_entry(&mut self, entry: BlockEntry) {
self.block_entries.insert(entry.block_hash(), Some(entry));
}
pub fn delete_block_entry(&mut self, hash: &Hash) {
self.block_entries.insert(*hash, None);
}
pub fn write_candidate_entry(&mut self, entry: CandidateEntry) {
self.candidate_entries.insert(entry.candidate_receipt().hash(), Some(entry));
}
pub fn delete_candidate_entry(&mut self, hash: &CandidateHash) {
self.candidate_entries.insert(*hash, None);
}
/// Transform this backend into a set of write-ops to be written to the
/// inner backend.
pub fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> {
let blocks_at_height_ops = self.blocks_at_height.into_iter().map(|(h, v)| match v {
Some(v) => BackendWriteOp::WriteBlocksAtHeight(h, v),
None => BackendWriteOp::DeleteBlocksAtHeight(h),
});
let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v {
Some(v) => BackendWriteOp::WriteBlockEntry(v),
None => BackendWriteOp::DeleteBlockEntry(h),
});
let candidate_entry_ops = self.candidate_entries.into_iter().map(|(h, v)| match v {
Some(v) => BackendWriteOp::WriteCandidateEntry(v),
None => BackendWriteOp::DeleteCandidateEntry(h),
});
let stored_block_range_ops = match self.stored_block_range {
BlockRangeStatus::Inserted(val) => Some(BackendWriteOp::WriteStoredBlockRange(val)),
BlockRangeStatus::Deleted => Some(BackendWriteOp::DeleteStoredBlockRange),
BlockRangeStatus::NotModified => None,
};
stored_block_range_ops
.into_iter()
.chain(blocks_at_height_ops)
.chain(block_entry_ops)
.chain(candidate_entry_ops)
}
}
@@ -0,0 +1,879 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Assignment criteria VRF generation and checking.
use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::approval::{
self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory,
};
use polkadot_primitives::{
AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, IndexedVec, SessionInfo,
ValidatorIndex,
};
use sc_keystore::LocalKeystore;
use sp_application_crypto::ByteArray;
use merlin::Transcript;
use schnorrkel::vrf::VRFInOut;
use std::collections::{hash_map::Entry, HashMap};
use super::LOG_TARGET;
/// Details pertaining to our assignment on a block.
#[derive(Debug, Clone, Encode, Decode, PartialEq)]
pub struct OurAssignment {
cert: AssignmentCert,
tranche: DelayTranche,
validator_index: ValidatorIndex,
// Whether the assignment has been triggered already.
triggered: bool,
}
impl OurAssignment {
pub(crate) fn cert(&self) -> &AssignmentCert {
&self.cert
}
pub(crate) fn tranche(&self) -> DelayTranche {
self.tranche
}
pub(crate) fn validator_index(&self) -> ValidatorIndex {
self.validator_index
}
pub(crate) fn triggered(&self) -> bool {
self.triggered
}
pub(crate) fn mark_triggered(&mut self) {
self.triggered = true;
}
}
impl From<crate::approval_db::v1::OurAssignment> for OurAssignment {
fn from(entry: crate::approval_db::v1::OurAssignment) -> Self {
OurAssignment {
cert: entry.cert,
tranche: entry.tranche,
validator_index: entry.validator_index,
triggered: entry.triggered,
}
}
}
impl From<OurAssignment> for crate::approval_db::v1::OurAssignment {
fn from(entry: OurAssignment) -> Self {
Self {
cert: entry.cert,
tranche: entry.tranche,
validator_index: entry.validator_index,
triggered: entry.triggered,
}
}
}
fn relay_vrf_modulo_transcript(relay_vrf_story: RelayVRFStory, sample: u32) -> Transcript {
// combine the relay VRF story with a sample number.
let mut t = Transcript::new(approval_types::RELAY_VRF_MODULO_CONTEXT);
t.append_message(b"RC-VRF", &relay_vrf_story.0);
sample.using_encoded(|s| t.append_message(b"sample", s));
t
}
fn relay_vrf_modulo_core(vrf_in_out: &VRFInOut, n_cores: u32) -> CoreIndex {
let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::CORE_RANDOMNESS_CONTEXT);
// interpret as little-endian u32.
let random_core = u32::from_le_bytes(bytes) % n_cores;
CoreIndex(random_core)
}
fn relay_vrf_delay_transcript(relay_vrf_story: RelayVRFStory, core_index: CoreIndex) -> Transcript {
let mut t = Transcript::new(approval_types::RELAY_VRF_DELAY_CONTEXT);
t.append_message(b"RC-VRF", &relay_vrf_story.0);
core_index.0.using_encoded(|s| t.append_message(b"core", s));
t
}
fn relay_vrf_delay_tranche(
vrf_in_out: &VRFInOut,
num_delay_tranches: u32,
zeroth_delay_tranche_width: u32,
) -> DelayTranche {
let bytes: [u8; 4] = vrf_in_out.make_bytes(approval_types::TRANCHE_RANDOMNESS_CONTEXT);
// interpret as little-endian u32 and reduce by the number of tranches.
let wide_tranche =
u32::from_le_bytes(bytes) % (num_delay_tranches + zeroth_delay_tranche_width);
// Consolidate early results to tranche zero so tranche zero is extra wide.
wide_tranche.saturating_sub(zeroth_delay_tranche_width)
}
fn assigned_core_transcript(core_index: CoreIndex) -> Transcript {
let mut t = Transcript::new(approval_types::ASSIGNED_CORE_CONTEXT);
core_index.0.using_encoded(|s| t.append_message(b"core", s));
t
}
/// Information about the context in which assignments are being produced.
#[derive(Clone)]
pub(crate) struct Config {
/// The assignment public keys for validators.
assignment_keys: Vec<AssignmentId>,
/// The groups of validators assigned to each core.
validator_groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
/// The number of availability cores used by the protocol during this session.
n_cores: u32,
/// The zeroth delay tranche width.
zeroth_delay_tranche_width: u32,
/// The number of samples we do of `relay_vrf_modulo`.
relay_vrf_modulo_samples: u32,
/// The number of delay tranches in total.
n_delay_tranches: u32,
}
impl<'a> From<&'a SessionInfo> for Config {
fn from(s: &'a SessionInfo) -> Self {
Config {
assignment_keys: s.assignment_keys.clone(),
validator_groups: s.validator_groups.clone(),
n_cores: s.n_cores,
zeroth_delay_tranche_width: s.zeroth_delay_tranche_width,
relay_vrf_modulo_samples: s.relay_vrf_modulo_samples,
n_delay_tranches: s.n_delay_tranches,
}
}
}
/// A trait for producing and checking assignments. Used for mocking in tests.
pub(crate) trait AssignmentCriteria {
fn compute_assignments(
&self,
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
) -> HashMap<CoreIndex, OurAssignment>;
fn check_assignment_cert(
&self,
claimed_core_index: CoreIndex,
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
assignment: &AssignmentCert,
backing_group: GroupIndex,
) -> Result<DelayTranche, InvalidAssignment>;
}
pub(crate) struct RealAssignmentCriteria;
impl AssignmentCriteria for RealAssignmentCriteria {
fn compute_assignments(
&self,
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: Vec<(CandidateHash, CoreIndex, GroupIndex)>,
) -> HashMap<CoreIndex, OurAssignment> {
compute_assignments(keystore, relay_vrf_story, config, leaving_cores)
}
fn check_assignment_cert(
&self,
claimed_core_index: CoreIndex,
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
assignment: &AssignmentCert,
backing_group: GroupIndex,
) -> Result<DelayTranche, InvalidAssignment> {
check_assignment_cert(
claimed_core_index,
validator_index,
config,
relay_vrf_story,
assignment,
backing_group,
)
}
}
/// Compute the assignments for a given block. Returns a map containing all assignments to cores in
/// the block. If more than one assignment targets the given core, only the earliest assignment is
/// kept.
///
/// The `leaving_cores` parameter indicates all cores within the block where a candidate was
/// included, as well as the index of the group that backed each of those candidates.
///
/// The current description of the protocol assigns every validator to check every core, but at
/// different times. The idea is that most assignments are never triggered and fall by the wayside.
///
/// This will not assign to anything the local validator was part of the backing group for.
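///
/// # Example
///
/// An illustrative call, assuming a populated `LocalKeystore` and a `Config`
/// built from the current `SessionInfo` (identifiers here are hypothetical):
///
/// ```ignore
/// let assignments = compute_assignments(
///     &keystore,
///     relay_vrf_story,
///     &config,
///     vec![(candidate_hash, CoreIndex(0), GroupIndex(1))],
/// );
/// // One entry per core we self-assigned to check.
/// for (core, assignment) in &assignments {
///     println!("core {:?} assigned at tranche {}", core, assignment.tranche());
/// }
/// ```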
pub(crate) fn compute_assignments(
keystore: &LocalKeystore,
relay_vrf_story: RelayVRFStory,
config: &Config,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex, GroupIndex)> + Clone,
) -> HashMap<CoreIndex, OurAssignment> {
if config.n_cores == 0 ||
config.assignment_keys.is_empty() ||
config.validator_groups.is_empty()
{
gum::trace!(
target: LOG_TARGET,
n_cores = config.n_cores,
has_assignment_keys = !config.assignment_keys.is_empty(),
has_validator_groups = !config.validator_groups.is_empty(),
"Not producing assignments because config is degenerate",
);
return HashMap::new()
}
let (index, assignments_key): (ValidatorIndex, AssignmentPair) = {
let key = config.assignment_keys.iter().enumerate().find_map(|(i, p)| {
match keystore.key_pair(p) {
Ok(Some(pair)) => Some((ValidatorIndex(i as _), pair)),
Ok(None) => None,
Err(sc_keystore::Error::Unavailable) => None,
Err(sc_keystore::Error::Io(e)) if e.kind() == std::io::ErrorKind::NotFound => None,
Err(e) => {
gum::warn!(target: LOG_TARGET, "Encountered keystore error: {:?}", e);
None
},
}
});
match key {
None => {
gum::trace!(target: LOG_TARGET, "No assignment key");
return HashMap::new()
},
Some(k) => k,
}
};
// Ignore any cores where the assigned group is our own.
let leaving_cores = leaving_cores
.into_iter()
.filter(|(_, _, g)| !is_in_backing_group(&config.validator_groups, index, *g))
.map(|(c_hash, core, _)| (c_hash, core))
.collect::<Vec<_>>();
gum::trace!(
target: LOG_TARGET,
assignable_cores = leaving_cores.len(),
"Assigning to candidates from different backing groups"
);
let assignments_key: &sp_application_crypto::sr25519::Pair = assignments_key.as_ref();
let assignments_key: &schnorrkel::Keypair = assignments_key.as_ref();
let mut assignments = HashMap::new();
// First run `RelayVRFModulo` for each sample.
compute_relay_vrf_modulo_assignments(
&assignments_key,
index,
config,
relay_vrf_story.clone(),
leaving_cores.iter().cloned(),
&mut assignments,
);
// Then run `RelayVRFDelay` once for the whole block.
compute_relay_vrf_delay_assignments(
&assignments_key,
index,
config,
relay_vrf_story,
leaving_cores,
&mut assignments,
);
assignments
}
fn compute_relay_vrf_modulo_assignments(
assignments_key: &schnorrkel::Keypair,
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex)> + Clone,
assignments: &mut HashMap<CoreIndex, OurAssignment>,
) {
for rvm_sample in 0..config.relay_vrf_modulo_samples {
let mut core = CoreIndex::default();
let maybe_assignment = {
// Extra scope to ensure borrowing instead of moving core
// into closure.
let core = &mut core;
assignments_key.vrf_sign_extra_after_check(
relay_vrf_modulo_transcript(relay_vrf_story.clone(), rvm_sample),
|vrf_in_out| {
*core = relay_vrf_modulo_core(&vrf_in_out, config.n_cores);
if let Some((candidate_hash, _)) =
leaving_cores.clone().into_iter().find(|(_, c)| c == core)
{
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
?validator_index,
tranche = 0,
"RelayVRFModulo Assignment."
);
Some(assigned_core_transcript(*core))
} else {
None
}
},
)
};
if let Some((vrf_in_out, vrf_proof, _)) = maybe_assignment {
// Sanity: `core` is always initialized to non-default here, as the closure above
// has been executed.
let cert = AssignmentCert {
kind: AssignmentCertKind::RelayVRFModulo { sample: rvm_sample },
vrf: approval_types::VrfSignature {
output: approval_types::VrfOutput(vrf_in_out.to_output()),
proof: approval_types::VrfProof(vrf_proof),
},
};
// All assignments of type RelayVRFModulo have tranche 0.
assignments.entry(core).or_insert(OurAssignment {
cert,
tranche: 0,
validator_index,
triggered: false,
});
}
}
}
fn compute_relay_vrf_delay_assignments(
assignments_key: &schnorrkel::Keypair,
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
leaving_cores: impl IntoIterator<Item = (CandidateHash, CoreIndex)>,
assignments: &mut HashMap<CoreIndex, OurAssignment>,
) {
for (candidate_hash, core) in leaving_cores {
let (vrf_in_out, vrf_proof, _) =
assignments_key.vrf_sign(relay_vrf_delay_transcript(relay_vrf_story.clone(), core));
let tranche = relay_vrf_delay_tranche(
&vrf_in_out,
config.n_delay_tranches,
config.zeroth_delay_tranche_width,
);
let cert = AssignmentCert {
kind: AssignmentCertKind::RelayVRFDelay { core_index: core },
vrf: approval_types::VrfSignature {
output: approval_types::VrfOutput(vrf_in_out.to_output()),
proof: approval_types::VrfProof(vrf_proof),
},
};
let our_assignment = OurAssignment { cert, tranche, validator_index, triggered: false };
let used = match assignments.entry(core) {
Entry::Vacant(e) => {
let _ = e.insert(our_assignment);
true
},
Entry::Occupied(mut e) =>
if e.get().tranche > our_assignment.tranche {
e.insert(our_assignment);
true
} else {
false
},
};
if used {
gum::trace!(
target: LOG_TARGET,
?candidate_hash,
?core,
?validator_index,
tranche,
"RelayVRFDelay Assignment",
);
}
}
}
/// Assignment invalid.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct InvalidAssignment(pub(crate) InvalidAssignmentReason);
impl std::fmt::Display for InvalidAssignment {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Invalid Assignment: {:?}", self.0)
}
}
impl std::error::Error for InvalidAssignment {}
/// Failure conditions when checking an assignment cert.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum InvalidAssignmentReason {
ValidatorIndexOutOfBounds,
SampleOutOfBounds,
CoreIndexOutOfBounds,
InvalidAssignmentKey,
IsInBackingGroup,
VRFModuloCoreIndexMismatch,
VRFModuloOutputMismatch,
VRFDelayCoreIndexMismatch,
VRFDelayOutputMismatch,
}
/// Checks the crypto of an assignment cert. Failure conditions:
/// * Validator index out of bounds
/// * VRF signature check fails
/// * VRF output doesn't match assigned core
/// * Core is not covered by extra data in signature
/// * Core index out of bounds
/// * Sample is out of bounds
/// * Validator is present in backing group.
///
/// This function does not check whether the core is actually a valid assignment or not. That should
/// be done outside the scope of this function.
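///
/// A minimal sketch of a round-trip check on an assignment we produced
/// ourselves (identifiers here are hypothetical):
///
/// ```ignore
/// let tranche = check_assignment_cert(
///     core,
///     our_assignment.validator_index(),
///     &config,
///     relay_vrf_story,
///     our_assignment.cert(),
///     backing_group,
/// )?;
/// ```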
pub(crate) fn check_assignment_cert(
claimed_core_index: CoreIndex,
validator_index: ValidatorIndex,
config: &Config,
relay_vrf_story: RelayVRFStory,
assignment: &AssignmentCert,
backing_group: GroupIndex,
) -> Result<DelayTranche, InvalidAssignment> {
use InvalidAssignmentReason as Reason;
let validator_public = config
.assignment_keys
.get(validator_index.0 as usize)
.ok_or(InvalidAssignment(Reason::ValidatorIndexOutOfBounds))?;
let public = schnorrkel::PublicKey::from_bytes(validator_public.as_slice())
.map_err(|_| InvalidAssignment(Reason::InvalidAssignmentKey))?;
if claimed_core_index.0 >= config.n_cores {
return Err(InvalidAssignment(Reason::CoreIndexOutOfBounds))
}
	// Check that the validator was not part of the backing group for the candidate.
let is_in_backing =
is_in_backing_group(&config.validator_groups, validator_index, backing_group);
if is_in_backing {
return Err(InvalidAssignment(Reason::IsInBackingGroup))
}
let vrf_signature = &assignment.vrf;
match assignment.kind {
AssignmentCertKind::RelayVRFModulo { sample } => {
if sample >= config.relay_vrf_modulo_samples {
return Err(InvalidAssignment(Reason::SampleOutOfBounds))
}
let (vrf_in_out, _) = public
.vrf_verify_extra(
relay_vrf_modulo_transcript(relay_vrf_story, sample),
&vrf_signature.output.0,
&vrf_signature.proof.0,
assigned_core_transcript(claimed_core_index),
)
.map_err(|_| InvalidAssignment(Reason::VRFModuloOutputMismatch))?;
// ensure that the `vrf_in_out` actually gives us the claimed core.
if relay_vrf_modulo_core(&vrf_in_out, config.n_cores) == claimed_core_index {
Ok(0)
} else {
Err(InvalidAssignment(Reason::VRFModuloCoreIndexMismatch))
}
},
AssignmentCertKind::RelayVRFDelay { core_index } => {
if core_index != claimed_core_index {
return Err(InvalidAssignment(Reason::VRFDelayCoreIndexMismatch))
}
let (vrf_in_out, _) = public
.vrf_verify(
relay_vrf_delay_transcript(relay_vrf_story, core_index),
&vrf_signature.output.0,
&vrf_signature.proof.0,
)
.map_err(|_| InvalidAssignment(Reason::VRFDelayOutputMismatch))?;
Ok(relay_vrf_delay_tranche(
&vrf_in_out,
config.n_delay_tranches,
config.zeroth_delay_tranche_width,
))
},
}
}
fn is_in_backing_group(
validator_groups: &IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
validator: ValidatorIndex,
group: GroupIndex,
) -> bool {
validator_groups.get(group).map_or(false, |g| g.contains(&validator))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::import::tests::garbage_vrf_signature;
use polkadot_primitives::{Hash, ASSIGNMENT_KEY_TYPE_ID};
use sp_application_crypto::sr25519;
use sp_core::crypto::Pair as PairT;
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
use sp_keystore::Keystore;
// sets up a keystore with the given keyring accounts.
fn make_keystore(accounts: &[Sr25519Keyring]) -> LocalKeystore {
let store = LocalKeystore::in_memory();
for s in accounts.iter().copied().map(|k| k.to_seed()) {
store.sr25519_generate_new(ASSIGNMENT_KEY_TYPE_ID, Some(s.as_str())).unwrap();
}
store
}
fn assignment_keys(accounts: &[Sr25519Keyring]) -> Vec<AssignmentId> {
assignment_keys_plus_random(accounts, 0)
}
fn assignment_keys_plus_random(
accounts: &[Sr25519Keyring],
random: usize,
) -> Vec<AssignmentId> {
let gen_random =
(0..random).map(|_| AssignmentId::from(sr25519::Pair::generate().0.public()));
accounts
.iter()
.map(|k| AssignmentId::from(k.public()))
.chain(gen_random)
.collect()
}
fn basic_groups(
n_validators: usize,
n_groups: usize,
) -> IndexedVec<GroupIndex, Vec<ValidatorIndex>> {
let size = n_validators / n_groups;
let big_groups = n_validators % n_groups;
let scraps = n_groups * size;
(0..n_groups)
.map(|i| {
(i * size..(i + 1) * size)
.chain(if i < big_groups { Some(scraps + i) } else { None })
.map(|j| ValidatorIndex(j as _))
.collect::<Vec<_>>()
})
.collect()
}
#[test]
fn assignments_produced_for_non_backing() {
let keystore = make_keystore(&[Sr25519Keyring::Alice]);
let c_a = CandidateHash(Hash::repeat_byte(0));
let c_b = CandidateHash(Hash::repeat_byte(1));
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
relay_vrf_story,
&Config {
assignment_keys: assignment_keys(&[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
]),
validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(vec![
vec![ValidatorIndex(0)],
vec![ValidatorIndex(1), ValidatorIndex(2)],
]),
n_cores: 2,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
},
vec![(c_a, CoreIndex(0), GroupIndex(1)), (c_b, CoreIndex(1), GroupIndex(0))],
);
		// Note that Alice is in group 0, which was the backing group for core 1.
// Alice should have self-assigned to check core 0 but not 1.
assert_eq!(assignments.len(), 1);
assert!(assignments.get(&CoreIndex(0)).is_some());
}
#[test]
fn assign_to_nonzero_core() {
let keystore = make_keystore(&[Sr25519Keyring::Alice]);
let c_a = CandidateHash(Hash::repeat_byte(0));
let c_b = CandidateHash(Hash::repeat_byte(1));
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
relay_vrf_story,
&Config {
assignment_keys: assignment_keys(&[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
]),
validator_groups: IndexedVec::<GroupIndex, Vec<ValidatorIndex>>::from(vec![
vec![ValidatorIndex(0)],
vec![ValidatorIndex(1), ValidatorIndex(2)],
]),
n_cores: 2,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
},
vec![(c_a, CoreIndex(0), GroupIndex(0)), (c_b, CoreIndex(1), GroupIndex(1))],
);
assert_eq!(assignments.len(), 1);
assert!(assignments.get(&CoreIndex(1)).is_some());
}
#[test]
fn succeeds_empty_for_0_cores() {
let keystore = make_keystore(&[Sr25519Keyring::Alice]);
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
relay_vrf_story,
&Config {
assignment_keys: assignment_keys(&[
Sr25519Keyring::Alice,
Sr25519Keyring::Bob,
Sr25519Keyring::Charlie,
]),
validator_groups: Default::default(),
n_cores: 0,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
},
vec![],
);
assert!(assignments.is_empty());
}
struct MutatedAssignment {
core: CoreIndex,
cert: AssignmentCert,
group: GroupIndex,
own_group: GroupIndex,
val_index: ValidatorIndex,
config: Config,
}
// This fails if the closure requests to skip everything.
fn check_mutated_assignments(
n_validators: usize,
n_cores: usize,
rotation_offset: usize,
f: impl Fn(&mut MutatedAssignment) -> Option<bool>, // None = skip
) {
let keystore = make_keystore(&[Sr25519Keyring::Alice]);
let group_for_core = |i| GroupIndex(((i + rotation_offset) % n_cores) as _);
let config = Config {
assignment_keys: assignment_keys_plus_random(
&[Sr25519Keyring::Alice],
n_validators - 1,
),
validator_groups: basic_groups(n_validators, n_cores),
n_cores: n_cores as u32,
zeroth_delay_tranche_width: 10,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 40,
};
let relay_vrf_story = RelayVRFStory([42u8; 32]);
let assignments = compute_assignments(
&keystore,
relay_vrf_story.clone(),
&config,
(0..n_cores)
.map(|i| {
(
CandidateHash(Hash::repeat_byte(i as u8)),
CoreIndex(i as u32),
group_for_core(i),
)
})
.collect::<Vec<_>>(),
);
let mut counted = 0;
for (core, assignment) in assignments {
let mut mutated = MutatedAssignment {
core,
group: group_for_core(core.0 as _),
cert: assignment.cert,
own_group: GroupIndex(0),
val_index: ValidatorIndex(0),
config: config.clone(),
};
let expected = match f(&mut mutated) {
None => continue,
Some(e) => e,
};
counted += 1;
let is_good = check_assignment_cert(
mutated.core,
mutated.val_index,
&mutated.config,
relay_vrf_story.clone(),
&mutated.cert,
mutated.group,
)
.is_ok();
assert_eq!(expected, is_good)
}
assert!(counted > 0);
}
#[test]
fn computed_assignments_pass_checks() {
check_mutated_assignments(200, 100, 25, |_| Some(true));
}
#[test]
fn check_rejects_claimed_core_out_of_bounds() {
check_mutated_assignments(200, 100, 25, |m| {
m.core.0 += 100;
Some(false)
});
}
#[test]
fn check_rejects_in_backing_group() {
check_mutated_assignments(200, 100, 25, |m| {
m.group = m.own_group;
Some(false)
});
}
#[test]
fn check_rejects_nonexistent_key() {
check_mutated_assignments(200, 100, 25, |m| {
m.val_index.0 += 200;
Some(false)
});
}
#[test]
fn check_rejects_delay_bad_vrf() {
check_mutated_assignments(40, 10, 8, |m| {
match m.cert.kind.clone() {
AssignmentCertKind::RelayVRFDelay { .. } => {
m.cert.vrf = garbage_vrf_signature();
Some(false)
},
_ => None, // skip everything else.
}
});
}
#[test]
fn check_rejects_modulo_bad_vrf() {
check_mutated_assignments(200, 100, 25, |m| {
match m.cert.kind.clone() {
AssignmentCertKind::RelayVRFModulo { .. } => {
m.cert.vrf = garbage_vrf_signature();
Some(false)
},
_ => None, // skip everything else.
}
});
}
#[test]
fn check_rejects_modulo_sample_out_of_bounds() {
check_mutated_assignments(200, 100, 25, |m| {
match m.cert.kind.clone() {
AssignmentCertKind::RelayVRFModulo { sample } => {
m.config.relay_vrf_modulo_samples = sample;
Some(false)
},
_ => None, // skip everything else.
}
});
}
#[test]
fn check_rejects_delay_claimed_core_wrong() {
check_mutated_assignments(200, 100, 25, |m| {
match m.cert.kind.clone() {
AssignmentCertKind::RelayVRFDelay { .. } => {
m.core = CoreIndex((m.core.0 + 1) % 100);
Some(false)
},
_ => None, // skip everything else.
}
});
}
#[test]
fn check_rejects_modulo_core_wrong() {
check_mutated_assignments(200, 100, 25, |m| {
match m.cert.kind.clone() {
AssignmentCertKind::RelayVRFModulo { .. } => {
m.core = CoreIndex((m.core.0 + 1) % 100);
Some(false)
},
_ => None, // skip everything else.
}
});
}
}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,426 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Middleware interface that leverages low-level database operations
//! to provide a clean API for processing block and candidate imports.
use polkadot_node_subsystem::{SubsystemError, SubsystemResult};
use bitvec::order::Lsb0 as BitOrderLsb0;
use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupIndex, Hash};
use std::collections::{hash_map::Entry, BTreeMap, HashMap};
use super::{
approval_db::v1::{OurAssignment, StoredBlockRange},
backend::{Backend, OverlayedBackend},
persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry},
LOG_TARGET,
};
/// Information about a new candidate necessary to instantiate the requisite
/// candidate and approval entries.
#[derive(Clone)]
pub struct NewCandidateInfo {
candidate: CandidateReceipt,
backing_group: GroupIndex,
our_assignment: Option<OurAssignment>,
}
impl NewCandidateInfo {
/// Convenience constructor
pub fn new(
candidate: CandidateReceipt,
backing_group: GroupIndex,
our_assignment: Option<OurAssignment>,
) -> Self {
Self { candidate, backing_group, our_assignment }
}
}
fn visit_and_remove_block_entry(
block_hash: Hash,
overlayed_db: &mut OverlayedBackend<'_, impl Backend>,
visited_candidates: &mut HashMap<CandidateHash, CandidateEntry>,
) -> SubsystemResult<Vec<Hash>> {
let block_entry = match overlayed_db.load_block_entry(&block_hash)? {
None => return Ok(Vec::new()),
Some(b) => b,
};
overlayed_db.delete_block_entry(&block_hash);
for (_, candidate_hash) in block_entry.candidates() {
let candidate = match visited_candidates.entry(*candidate_hash) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
e.insert(match overlayed_db.load_candidate_entry(candidate_hash)? {
None => continue, // Should not happen except for corrupt DB
Some(c) => c,
})
},
};
candidate.block_assignments.remove(&block_hash);
}
Ok(block_entry.children)
}
/// Canonicalize some particular block, pruning everything before it and
/// pruning any competing branches at the same height.
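///
/// A minimal sketch of intended use on finality (identifiers here are
/// hypothetical):
///
/// ```ignore
/// // Prune everything below the finalized block, plus competing forks at its height.
/// canonicalize(&mut overlay_db, finalized_number, finalized_hash)?;
/// db.write(overlay_db.into_write_ops())?;
/// ```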
pub fn canonicalize(
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
canon_number: BlockNumber,
canon_hash: Hash,
) -> SubsystemResult<()> {
let range = match overlay_db.load_stored_blocks()? {
None => return Ok(()),
Some(range) if range.0 >= canon_number => return Ok(()),
Some(range) => range,
};
// Storing all candidates in memory is potentially heavy, but should be fine
// as long as finality doesn't stall for a long while. We could optimize this
// by keeping only the metadata about which blocks reference each candidate.
let mut visited_candidates = HashMap::new();
// All the block heights we visited but didn't necessarily delete everything from.
let mut visited_heights = HashMap::new();
// First visit everything before the height.
for i in range.0..canon_number {
let at_height = overlay_db.load_blocks_at_height(&i)?;
overlay_db.delete_blocks_at_height(i);
for b in at_height {
let _ = visit_and_remove_block_entry(b, overlay_db, &mut visited_candidates)?;
}
}
// Then visit everything at the height.
let pruned_branches = {
let at_height = overlay_db.load_blocks_at_height(&canon_number)?;
overlay_db.delete_blocks_at_height(canon_number);
// Note that while there may be branches descending from blocks at earlier heights,
// we have already covered them by removing everything at earlier heights.
let mut pruned_branches = Vec::new();
for b in at_height {
let children = visit_and_remove_block_entry(b, overlay_db, &mut visited_candidates)?;
if b != canon_hash {
pruned_branches.extend(children);
}
}
pruned_branches
};
// Follow all children of non-canonicalized blocks.
{
let mut frontier: Vec<(BlockNumber, Hash)> =
pruned_branches.into_iter().map(|h| (canon_number + 1, h)).collect();
while let Some((height, next_child)) = frontier.pop() {
let children =
visit_and_remove_block_entry(next_child, overlay_db, &mut visited_candidates)?;
// extend the frontier of branches to include the given height.
frontier.extend(children.into_iter().map(|h| (height + 1, h)));
// visit the at-height key for this deleted block's height.
let at_height = match visited_heights.entry(height) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => e.insert(overlay_db.load_blocks_at_height(&height)?),
};
if let Some(i) = at_height.iter().position(|x| x == &next_child) {
at_height.remove(i);
}
}
}
// Update all `CandidateEntry`s, deleting all those which now have empty `block_assignments`.
for (candidate_hash, candidate) in visited_candidates.into_iter() {
if candidate.block_assignments.is_empty() {
overlay_db.delete_candidate_entry(&candidate_hash);
} else {
overlay_db.write_candidate_entry(candidate);
}
}
// Update all blocks-at-height keys, deleting all those which now have empty
// `block_assignments`.
for (h, at) in visited_heights.into_iter() {
if at.is_empty() {
overlay_db.delete_blocks_at_height(h);
} else {
overlay_db.write_blocks_at_height(h, at);
}
}
	// Due to fork pruning, this range might actually extend above our highest stored
	// block if a relatively short fork is canonicalized.
// TODO https://github.com/paritytech/polkadot/issues/3389
let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2));
overlay_db.write_stored_block_range(new_range);
Ok(())
}
/// Record a new block entry.
///
/// This will update the blocks-at-height mapping, the stored block range, if necessary,
/// and add block and candidate entries. It will also add approval entries to existing
/// candidate entries and add this as a child of any block entry corresponding to the
/// parent hash.
///
/// Has no effect if there is already an entry for the block or `candidate_info` returns
/// `None` for any of the candidates referenced by the block entry. In these cases,
/// no information about the new candidates is written.
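///
/// A minimal sketch of supplying candidate info through the closure, assuming a
/// lookup table `included: HashMap<CandidateHash, (CandidateReceipt, GroupIndex)>`
/// (hypothetical):
///
/// ```ignore
/// let entries = add_block_entry(&mut overlay, block_entry, n_validators, |c_hash| {
///     included
///         .get(c_hash)
///         .map(|(receipt, group)| NewCandidateInfo::new(receipt.clone(), *group, None))
/// })?;
/// ```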
pub fn add_block_entry(
store: &mut OverlayedBackend<'_, impl Backend>,
entry: BlockEntry,
n_validators: usize,
candidate_info: impl Fn(&CandidateHash) -> Option<NewCandidateInfo>,
) -> SubsystemResult<Vec<(CandidateHash, CandidateEntry)>> {
let session = entry.session();
let parent_hash = entry.parent_hash();
let number = entry.block_number();
// Update the stored block range.
{
let new_range = match store.load_stored_blocks()? {
None => Some(StoredBlockRange(number, number + 1)),
Some(range) if range.1 <= number => Some(StoredBlockRange(range.0, number + 1)),
Some(_) => None,
};
new_range.map(|n| store.write_stored_block_range(n));
};
// Update the blocks at height meta key.
{
let mut blocks_at_height = store.load_blocks_at_height(&number)?;
if blocks_at_height.contains(&entry.block_hash()) {
// seems we already have a block entry for this block. nothing to do here.
return Ok(Vec::new())
}
blocks_at_height.push(entry.block_hash());
store.write_blocks_at_height(number, blocks_at_height)
};
let mut candidate_entries = Vec::with_capacity(entry.candidates().len());
// read and write all updated entries.
{
for (_, candidate_hash) in entry.candidates() {
let NewCandidateInfo { candidate, backing_group, our_assignment } =
match candidate_info(candidate_hash) {
None => return Ok(Vec::new()),
Some(info) => info,
};
let mut candidate_entry =
store.load_candidate_entry(&candidate_hash)?.unwrap_or_else(move || {
CandidateEntry {
candidate,
session,
block_assignments: BTreeMap::new(),
approvals: bitvec::bitvec![u8, BitOrderLsb0; 0; n_validators],
}
});
candidate_entry.block_assignments.insert(
entry.block_hash(),
ApprovalEntry::new(
Vec::new(),
backing_group,
our_assignment.map(|v| v.into()),
None,
bitvec::bitvec![u8, BitOrderLsb0; 0; n_validators],
false,
),
);
store.write_candidate_entry(candidate_entry.clone());
candidate_entries.push((*candidate_hash, candidate_entry));
}
};
// Update the child index for the parent.
store.load_block_entry(&parent_hash)?.map(|mut e| {
e.children.push(entry.block_hash());
store.write_block_entry(e);
});
// Put the new block entry in.
store.write_block_entry(entry);
Ok(candidate_entries)
}
/// Forcibly approve all candidates included up to the given relay-chain height in the indicated
/// chain.
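///
/// A minimal sketch (identifiers here are hypothetical):
///
/// ```ignore
/// // Mark all ancestors of `head_hash` up to height 100 as fully approved.
/// let approved_hashes = force_approve(&mut overlay, head_hash, 100)?;
/// ```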
pub fn force_approve(
store: &mut OverlayedBackend<'_, impl Backend>,
chain_head: Hash,
up_to: BlockNumber,
) -> SubsystemResult<Vec<Hash>> {
#[derive(PartialEq, Eq)]
enum State {
WalkTo,
Approving,
}
let mut approved_hashes = Vec::new();
let mut cur_hash = chain_head;
let mut state = State::WalkTo;
let mut cur_block_number: BlockNumber = 0;
// iterate back to the `up_to` block, and then iterate backwards until all blocks
// are updated.
while let Some(mut entry) = store.load_block_entry(&cur_hash)? {
cur_block_number = entry.block_number();
if cur_block_number <= up_to {
if state == State::WalkTo {
gum::debug!(
target: LOG_TARGET,
block_hash = ?chain_head,
?cur_hash,
?cur_block_number,
"Start forced approval from block",
);
}
state = State::Approving;
}
cur_hash = entry.parent_hash();
match state {
State::WalkTo => {},
State::Approving => {
entry.approved_bitfield.iter_mut().for_each(|mut b| *b = true);
approved_hashes.push(entry.block_hash());
store.write_block_entry(entry);
},
}
}
if state == State::WalkTo {
gum::warn!(
target: LOG_TARGET,
?chain_head,
?cur_hash,
?cur_block_number,
?up_to,
"Missing block in the chain, cannot start force approval"
);
}
Ok(approved_hashes)
}
/// Revert to the block corresponding to the specified `hash`.
/// The operation is not allowed for blocks older than the last finalized one.
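///
/// A minimal sketch (identifiers here are hypothetical):
///
/// ```ignore
/// // Drop every block above `revert_hash` from the stored block DAG.
/// revert_to(&mut overlay, revert_hash)?;
/// ```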
pub fn revert_to(
overlay: &mut OverlayedBackend<'_, impl Backend>,
hash: Hash,
) -> SubsystemResult<()> {
let mut stored_range = overlay.load_stored_blocks()?.ok_or_else(|| {
SubsystemError::Context("no available blocks to infer revert point height".to_string())
})?;
let (children, children_height) = match overlay.load_block_entry(&hash)? {
Some(mut entry) => {
let children_height = entry.block_number() + 1;
let children = std::mem::take(&mut entry.children);
// Write revert point block entry without the children.
overlay.write_block_entry(entry);
(children, children_height)
},
None => {
let children_height = stored_range.0;
let children = overlay.load_blocks_at_height(&children_height)?;
let child_entry = children
.first()
.and_then(|hash| overlay.load_block_entry(hash).ok())
.flatten()
.ok_or_else(|| {
SubsystemError::Context("lookup failure for first block".to_string())
})?;
// The parent is expected to be the revert point
if child_entry.parent_hash() != hash {
return Err(SubsystemError::Context(
"revert below last finalized block or corrupted storage".to_string(),
))
}
(children, children_height)
},
};
let mut stack: Vec<_> = children.into_iter().map(|h| (h, children_height)).collect();
let mut range_end = stored_range.1;
while let Some((hash, number)) = stack.pop() {
let mut blocks_at_height = overlay.load_blocks_at_height(&number)?;
blocks_at_height.retain(|h| h != &hash);
// Check if we need to update the range top
if blocks_at_height.is_empty() && number < range_end {
range_end = number;
}
overlay.write_blocks_at_height(number, blocks_at_height);
if let Some(entry) = overlay.load_block_entry(&hash)? {
overlay.delete_block_entry(&hash);
			// Clean up the candidate entries by removing any reference to the
			// removed block. If a candidate entry's `block_assignments` becomes
			// empty, remove the entry.
for (_, candidate_hash) in entry.candidates() {
if let Some(mut candidate_entry) = overlay.load_candidate_entry(candidate_hash)? {
candidate_entry.block_assignments.remove(&hash);
if candidate_entry.block_assignments.is_empty() {
overlay.delete_candidate_entry(candidate_hash);
} else {
overlay.write_candidate_entry(candidate_entry);
}
}
}
stack.extend(entry.children.into_iter().map(|h| (h, number + 1)));
}
}
	// Check if our modifications to the DAG have reduced the range top.
if range_end != stored_range.1 {
if stored_range.0 < range_end {
stored_range.1 = range_end;
overlay.write_stored_block_range(stored_range);
} else {
overlay.delete_stored_block_range();
}
}
Ok(())
}
@@ -0,0 +1,447 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Entries pertaining to approval which need to be persisted.
//!
//! The actual persisting of data is handled by the `approval_db` module.
//! Within that context, things are plain-old-data. Within this module,
//! data and logic are intertwined.
use polkadot_node_primitives::approval::{AssignmentCert, DelayTranche, RelayVRFStory};
use polkadot_primitives::{
BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex,
ValidatorIndex, ValidatorSignature,
};
use sp_consensus_slots::Slot;
use bitvec::{order::Lsb0 as BitOrderLsb0, slice::BitSlice, vec::BitVec};
use std::collections::BTreeMap;
use super::{criteria::OurAssignment, time::Tick};
/// Metadata regarding a specific tranche of assignments for a specific candidate.
#[derive(Debug, Clone, PartialEq)]
pub struct TrancheEntry {
tranche: DelayTranche,
// Assigned validators, and the instant we received their assignment, rounded
// to the nearest tick.
assignments: Vec<(ValidatorIndex, Tick)>,
}
impl TrancheEntry {
/// Get the tranche of this entry.
pub fn tranche(&self) -> DelayTranche {
self.tranche
}
/// Get the assignments for this entry.
pub fn assignments(&self) -> &[(ValidatorIndex, Tick)] {
&self.assignments
}
}
impl From<crate::approval_db::v1::TrancheEntry> for TrancheEntry {
fn from(entry: crate::approval_db::v1::TrancheEntry) -> Self {
TrancheEntry {
tranche: entry.tranche,
assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(),
}
}
}
impl From<TrancheEntry> for crate::approval_db::v1::TrancheEntry {
fn from(entry: TrancheEntry) -> Self {
Self {
tranche: entry.tranche,
assignments: entry.assignments.into_iter().map(|(v, t)| (v, t.into())).collect(),
}
}
}
/// Metadata regarding approval of a particular candidate within the context of some
/// particular block.
#[derive(Debug, Clone, PartialEq)]
pub struct ApprovalEntry {
tranches: Vec<TrancheEntry>,
backing_group: GroupIndex,
our_assignment: Option<OurAssignment>,
our_approval_sig: Option<ValidatorSignature>,
// `n_validators` bits.
assignments: BitVec<u8, BitOrderLsb0>,
approved: bool,
}
impl ApprovalEntry {
/// Convenience constructor
pub fn new(
tranches: Vec<TrancheEntry>,
backing_group: GroupIndex,
our_assignment: Option<OurAssignment>,
our_approval_sig: Option<ValidatorSignature>,
// `n_validators` bits.
assignments: BitVec<u8, BitOrderLsb0>,
approved: bool,
) -> Self {
Self { tranches, backing_group, our_assignment, our_approval_sig, assignments, approved }
}
// Access our assignment for this approval entry.
pub fn our_assignment(&self) -> Option<&OurAssignment> {
self.our_assignment.as_ref()
}
// Note that our assignment is triggered. No-op if already triggered.
pub fn trigger_our_assignment(
&mut self,
tick_now: Tick,
) -> Option<(AssignmentCert, ValidatorIndex, DelayTranche)> {
let our = self.our_assignment.as_mut().and_then(|a| {
if a.triggered() {
return None
}
a.mark_triggered();
Some(a.clone())
});
our.map(|a| {
self.import_assignment(a.tranche(), a.validator_index(), tick_now);
(a.cert().clone(), a.validator_index(), a.tranche())
})
}
/// Import our local approval vote signature for this candidate.
pub fn import_approval_sig(&mut self, approval_sig: ValidatorSignature) {
self.our_approval_sig = Some(approval_sig);
}
/// Whether a validator is already assigned.
pub fn is_assigned(&self, validator_index: ValidatorIndex) -> bool {
self.assignments.get(validator_index.0 as usize).map(|b| *b).unwrap_or(false)
}
/// Import an assignment. No-op if already assigned on the same tranche.
pub fn import_assignment(
&mut self,
tranche: DelayTranche,
validator_index: ValidatorIndex,
tick_now: Tick,
) {
// linear search probably faster than binary. not many tranches typically.
let idx = match self.tranches.iter().position(|t| t.tranche >= tranche) {
Some(pos) => {
if self.tranches[pos].tranche > tranche {
self.tranches.insert(pos, TrancheEntry { tranche, assignments: Vec::new() });
}
pos
},
None => {
self.tranches.push(TrancheEntry { tranche, assignments: Vec::new() });
self.tranches.len() - 1
},
};
self.tranches[idx].assignments.push((validator_index, tick_now));
self.assignments.set(validator_index.0 as _, true);
}
// Produce a bitvec indicating the assignments of all validators up to and
// including `tranche`.
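	// For example, if tranches 0, 2, and 5 have assignments and `tranche == 2`,
	// the result is the union of the bits from tranches 0 and 2 only.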
pub fn assignments_up_to(&self, tranche: DelayTranche) -> BitVec<u8, BitOrderLsb0> {
self.tranches.iter().take_while(|e| e.tranche <= tranche).fold(
bitvec::bitvec![u8, BitOrderLsb0; 0; self.assignments.len()],
|mut a, e| {
for &(v, _) in &e.assignments {
a.set(v.0 as _, true);
}
a
},
)
}
/// Whether the approval entry is approved
pub fn is_approved(&self) -> bool {
self.approved
}
/// Mark the approval entry as approved.
pub fn mark_approved(&mut self) {
self.approved = true;
}
/// Access the tranches.
pub fn tranches(&self) -> &[TrancheEntry] {
&self.tranches
}
/// Get the number of validators in this approval entry.
pub fn n_validators(&self) -> usize {
self.assignments.len()
}
/// Get the number of assignments by validators, including the local validator.
pub fn n_assignments(&self) -> usize {
self.assignments.count_ones()
}
/// Get the backing group index of the approval entry.
pub fn backing_group(&self) -> GroupIndex {
self.backing_group
}
/// Get the assignment cert & approval signature.
///
/// The approval signature will only be `Some` if the assignment is too.
pub fn local_statements(&self) -> (Option<OurAssignment>, Option<ValidatorSignature>) {
let approval_sig = self.our_approval_sig.clone();
if let Some(our_assignment) = self.our_assignment.as_ref().filter(|a| a.triggered()) {
(Some(our_assignment.clone()), approval_sig)
} else {
(None, None)
}
}
}
impl From<crate::approval_db::v1::ApprovalEntry> for ApprovalEntry {
fn from(entry: crate::approval_db::v1::ApprovalEntry) -> Self {
ApprovalEntry {
tranches: entry.tranches.into_iter().map(Into::into).collect(),
backing_group: entry.backing_group,
our_assignment: entry.our_assignment.map(Into::into),
our_approval_sig: entry.our_approval_sig.map(Into::into),
assignments: entry.assignments,
approved: entry.approved,
}
}
}
impl From<ApprovalEntry> for crate::approval_db::v1::ApprovalEntry {
fn from(entry: ApprovalEntry) -> Self {
Self {
tranches: entry.tranches.into_iter().map(Into::into).collect(),
backing_group: entry.backing_group,
our_assignment: entry.our_assignment.map(Into::into),
our_approval_sig: entry.our_approval_sig.map(Into::into),
assignments: entry.assignments,
approved: entry.approved,
}
}
}
/// Metadata regarding approval of a particular candidate.
#[derive(Debug, Clone, PartialEq)]
pub struct CandidateEntry {
pub candidate: CandidateReceipt,
pub session: SessionIndex,
// Assignments are based on blocks, so we need to track assignments separately
// based on the block we are looking at.
pub block_assignments: BTreeMap<Hash, ApprovalEntry>,
pub approvals: BitVec<u8, BitOrderLsb0>,
}
impl CandidateEntry {
/// Access the bit-vec of approvals.
pub fn approvals(&self) -> &BitSlice<u8, BitOrderLsb0> {
&self.approvals
}
/// Note that a given validator has approved. Return the previous approval state.
pub fn mark_approval(&mut self, validator: ValidatorIndex) -> bool {
let prev = self.has_approved(validator);
self.approvals.set(validator.0 as usize, true);
prev
}
/// Query whether a given validator has approved the candidate.
pub fn has_approved(&self, validator: ValidatorIndex) -> bool {
self.approvals.get(validator.0 as usize).map(|b| *b).unwrap_or(false)
}
/// Get the candidate receipt.
pub fn candidate_receipt(&self) -> &CandidateReceipt {
&self.candidate
}
/// Get the approval entry, mutably, for this candidate under a specific block.
pub fn approval_entry_mut(&mut self, block_hash: &Hash) -> Option<&mut ApprovalEntry> {
self.block_assignments.get_mut(block_hash)
}
/// Get the approval entry for this candidate under a specific block.
pub fn approval_entry(&self, block_hash: &Hash) -> Option<&ApprovalEntry> {
self.block_assignments.get(block_hash)
}
}
impl From<crate::approval_db::v1::CandidateEntry> for CandidateEntry {
fn from(entry: crate::approval_db::v1::CandidateEntry) -> Self {
CandidateEntry {
candidate: entry.candidate,
session: entry.session,
block_assignments: entry
.block_assignments
.into_iter()
.map(|(h, ae)| (h, ae.into()))
.collect(),
approvals: entry.approvals,
}
}
}
impl From<CandidateEntry> for crate::approval_db::v1::CandidateEntry {
fn from(entry: CandidateEntry) -> Self {
Self {
candidate: entry.candidate,
session: entry.session,
block_assignments: entry
.block_assignments
.into_iter()
.map(|(h, ae)| (h, ae.into()))
.collect(),
approvals: entry.approvals,
}
}
}
/// Metadata regarding approval of a particular block, by way of approval of the
/// candidates contained within it.
#[derive(Debug, Clone, PartialEq)]
pub struct BlockEntry {
block_hash: Hash,
parent_hash: Hash,
block_number: BlockNumber,
session: SessionIndex,
slot: Slot,
relay_vrf_story: RelayVRFStory,
// The candidates included as-of this block and the index of the core they are
// leaving. Sorted ascending by core index.
candidates: Vec<(CoreIndex, CandidateHash)>,
// A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`.
// The i'th bit is `true` iff the candidate has been approved in the context of this
// block. The block can be considered approved if the bitfield has all bits set to `true`.
pub approved_bitfield: BitVec<u8, BitOrderLsb0>,
pub children: Vec<Hash>,
}
impl BlockEntry {
/// Mark a candidate as fully approved in the bitfield.
pub fn mark_approved_by_hash(&mut self, candidate_hash: &CandidateHash) {
if let Some(p) = self.candidates.iter().position(|(_, h)| h == candidate_hash) {
self.approved_bitfield.set(p, true);
}
}
/// Whether a candidate is approved in the bitfield.
pub fn is_candidate_approved(&self, candidate_hash: &CandidateHash) -> bool {
self.candidates
.iter()
.position(|(_, h)| h == candidate_hash)
.and_then(|p| self.approved_bitfield.get(p).map(|b| *b))
.unwrap_or(false)
}
/// Whether the block entry is fully approved.
pub fn is_fully_approved(&self) -> bool {
self.approved_bitfield.all()
}
/// Iterate over all unapproved candidates.
pub fn unapproved_candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ {
self.approved_bitfield.iter().enumerate().filter_map(move |(i, a)| {
if !*a {
Some(self.candidates[i].1)
} else {
None
}
})
}
/// Get the slot of the block.
pub fn slot(&self) -> Slot {
self.slot
}
/// Get the relay-vrf-story of the block.
pub fn relay_vrf_story(&self) -> RelayVRFStory {
self.relay_vrf_story.clone()
}
/// Get the session index of the block.
pub fn session(&self) -> SessionIndex {
self.session
}
/// Get the i'th candidate.
pub fn candidate(&self, i: usize) -> Option<&(CoreIndex, CandidateHash)> {
self.candidates.get(i)
}
/// Access the underlying candidates as a slice.
pub fn candidates(&self) -> &[(CoreIndex, CandidateHash)] {
&self.candidates
}
/// Access the block number of the block entry.
pub fn block_number(&self) -> BlockNumber {
self.block_number
}
/// Access the block hash of the block entry.
pub fn block_hash(&self) -> Hash {
self.block_hash
}
/// Access the parent hash of the block entry.
pub fn parent_hash(&self) -> Hash {
self.parent_hash
}
}
impl From<crate::approval_db::v1::BlockEntry> for BlockEntry {
fn from(entry: crate::approval_db::v1::BlockEntry) -> Self {
BlockEntry {
block_hash: entry.block_hash,
parent_hash: entry.parent_hash,
block_number: entry.block_number,
session: entry.session,
slot: entry.slot,
relay_vrf_story: RelayVRFStory(entry.relay_vrf_story),
candidates: entry.candidates,
approved_bitfield: entry.approved_bitfield,
children: entry.children,
}
}
}
impl From<BlockEntry> for crate::approval_db::v1::BlockEntry {
fn from(entry: BlockEntry) -> Self {
Self {
block_hash: entry.block_hash,
parent_hash: entry.parent_hash,
block_number: entry.block_number,
session: entry.session,
slot: entry.slot,
relay_vrf_story: entry.relay_vrf_story.0,
candidates: entry.candidates,
approved_bitfield: entry.approved_bitfield,
children: entry.children,
}
}
}
File diff suppressed because it is too large
@@ -0,0 +1,90 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! Time utilities for approval voting.
use futures::prelude::*;
use polkadot_node_primitives::approval::DelayTranche;
use sp_consensus_slots::Slot;
use std::{
pin::Pin,
time::{Duration, SystemTime},
};
const TICK_DURATION_MILLIS: u64 = 500;
/// A base unit of time, starting from the Unix epoch, split into half-second intervals.
pub(crate) type Tick = u64;
/// A clock which allows querying of the current tick as well as
/// waiting for a tick to be reached.
pub(crate) trait Clock {
/// Yields the current tick.
fn tick_now(&self) -> Tick;
/// Yields a future which concludes when the given tick is reached.
fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
}
/// Extension methods for clocks.
pub(crate) trait ClockExt {
fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche;
}
impl<C: Clock + ?Sized> ClockExt for C {
fn tranche_now(&self, slot_duration_millis: u64, base_slot: Slot) -> DelayTranche {
self.tick_now()
.saturating_sub(slot_number_to_tick(slot_duration_millis, base_slot)) as u32
}
}
/// A clock which uses the actual underlying system clock.
pub(crate) struct SystemClock;
impl Clock for SystemClock {
/// Yields the current tick.
fn tick_now(&self) -> Tick {
match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
Err(_) => 0,
Ok(d) => d.as_millis() as u64 / TICK_DURATION_MILLIS,
}
}
/// Yields a future which concludes when the given tick is reached.
fn wait(&self, tick: Tick) -> Pin<Box<dyn Future<Output = ()> + Send>> {
let fut = async move {
let now = SystemTime::now();
let tick_onset = tick_to_time(tick);
if now < tick_onset {
				if let Ok(until) = tick_onset.duration_since(now) {
futures_timer::Delay::new(until).await;
}
}
};
Box::pin(fut)
}
}
fn tick_to_time(tick: Tick) -> SystemTime {
SystemTime::UNIX_EPOCH + Duration::from_millis(TICK_DURATION_MILLIS * tick)
}
/// Assumes `slot_duration_millis` is evenly divisible by the tick duration.
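///
/// ```ignore
/// // Worked example: with 6-second slots there are 6000 / 500 = 12 ticks per
/// // slot, so slot 10 begins at tick 120.
/// assert_eq!(slot_number_to_tick(6_000, Slot::from(10)), 120);
/// ```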
pub(crate) fn slot_number_to_tick(slot_duration_millis: u64, slot: Slot) -> Tick {
let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS;
u64::from(slot) * ticks_per_slot
}
+37
@@ -0,0 +1,37 @@
[package]
name = "polkadot-node-core-av-store"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
futures = "0.3.21"
futures-timer = "3.0.2"
kvdb = "0.13.0"
thiserror = "1.0.31"
gum = { package = "tracing-gum", path = "../../gum" }
bitvec = "1.0.0"
parity-scale-codec = { version = "3.6.1", features = ["derive"] }
erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
polkadot-node-subsystem = {path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-overseer = { path = "../../overseer" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
polkadot-node-jaeger = { path = "../../jaeger" }
[dev-dependencies]
log = "0.4.17"
env_logger = "0.9.0"
assert_matches = "1.4.0"
kvdb-memorydb = "0.13.0"
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
parking_lot = "0.12.0"
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
File diff suppressed because it is too large
+158
@@ -0,0 +1,158 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use polkadot_node_subsystem_util::metrics::{self, prometheus};
#[derive(Clone)]
pub(crate) struct MetricsInner {
received_availability_chunks_total: prometheus::Counter<prometheus::U64>,
pruning: prometheus::Histogram,
process_block_finalized: prometheus::Histogram,
block_activated: prometheus::Histogram,
process_message: prometheus::Histogram,
store_available_data: prometheus::Histogram,
store_chunk: prometheus::Histogram,
get_chunk: prometheus::Histogram,
}
/// Availability metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
pub(crate) fn on_chunks_received(&self, count: usize) {
if let Some(metrics) = &self.0 {
// `usize` is assumed to fit into `u64` on all supported platforms;
// fall back to adding 0 rather than panicking if it does not.
let by = u64::try_from(count).unwrap_or_default();
metrics.received_availability_chunks_total.inc_by(by);
}
}
/// Provide a timer for `prune_all` which observes on drop.
pub(crate) fn time_pruning(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.pruning.start_timer())
}
/// Provide a timer for `process_block_finalized` which observes on drop.
pub(crate) fn time_process_block_finalized(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.process_block_finalized.start_timer())
}
/// Provide a timer for `block_activated` which observes on drop.
pub(crate) fn time_block_activated(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.block_activated.start_timer())
}
/// Provide a timer for `process_message` which observes on drop.
pub(crate) fn time_process_message(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.process_message.start_timer())
}
/// Provide a timer for `store_available_data` which observes on drop.
pub(crate) fn time_store_available_data(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.store_available_data.start_timer())
}
/// Provide a timer for `store_chunk` which observes on drop.
pub(crate) fn time_store_chunk(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.store_chunk.start_timer())
}
/// Provide a timer for `get_chunk` which observes on drop.
pub(crate) fn time_get_chunk(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.get_chunk.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
received_availability_chunks_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_received_availability_chunks_total",
"Number of availability chunks received.",
)?,
registry,
)?,
pruning: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_pruning",
"Time spent within `av_store::prune_all`",
))?,
registry,
)?,
process_block_finalized: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_process_block_finalized",
"Time spent within `av_store::process_block_finalized`",
))?,
registry,
)?,
block_activated: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_block_activated",
"Time spent within `av_store::process_block_activated`",
))?,
registry,
)?,
process_message: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_process_message",
"Time spent within `av_store::process_message`",
))?,
registry,
)?,
store_available_data: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_store_available_data",
"Time spent within `av_store::store_available_data`",
))?,
registry,
)?,
store_chunk: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_store_chunk",
"Time spent within `av_store::store_chunk`",
))?,
registry,
)?,
get_chunk: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_av_store_get_chunk",
"Time spent fetching requested chunks.`",
)
.buckets(vec![
0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25,
0.5, 1.0, 2.5, 5.0, 10.0,
]),
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
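// A hypothetical usage sketch of the wrapper above: until `try_register`
// succeeds, the inner `Option` is `None` and every call is a cheap no-op;
// afterwards each timer observes its histogram when dropped.
#[allow(dead_code)]
fn prune_with_timing_sketch(metrics: &Metrics) {
let _timer = metrics.time_pruning(); // `None` => nothing is recorded
// ... perform the pruning work here ...
} // `_timer` dropped: elapsed time is observed into the `pruning` histogram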
File diff suppressed because it is too large Load Diff
+31
View File
@@ -0,0 +1,31 @@
[package]
name = "polkadot-node-core-backing"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
futures = "0.3.21"
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" }
statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" }
bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] }
gum = { package = "tracing-gum", path = "../../gum" }
thiserror = "1.0.31"
fatality = "0.0.6"
[dev-dependencies]
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
futures = { version = "0.3.21", features = ["thread-pool"] }
assert_matches = "1.4.0"
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
+123
View File
@@ -0,0 +1,123 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use fatality::Nested;
use futures::channel::{mpsc, oneshot};
use polkadot_node_subsystem::{
messages::{StoreAvailableDataError, ValidationFailed},
RuntimeApiError, SubsystemError,
};
use polkadot_node_subsystem_util::{runtime, Error as UtilError};
use polkadot_primitives::{BackedCandidate, ValidationCodeHash};
use crate::LOG_TARGET;
pub type Result<T> = std::result::Result<T, Error>;
pub type FatalResult<T> = std::result::Result<T, FatalError>;
/// Errors that can occur in candidate backing.
#[allow(missing_docs)]
#[fatality::fatality(splitable)]
pub enum Error {
#[fatal]
#[error("Failed to spawn background task")]
FailedToSpawnBackgroundTask,
#[fatal(forward)]
#[error("Error while accessing runtime information")]
Runtime(#[from] runtime::Error),
#[fatal]
#[error(transparent)]
BackgroundValidationMpsc(#[from] mpsc::SendError),
#[error("Candidate is not found")]
CandidateNotFound,
#[error("Signature is invalid")]
InvalidSignature,
#[error("Failed to send candidates {0:?}")]
Send(Vec<BackedCandidate>),
#[error("FetchPoV failed")]
FetchPoV,
#[error("Fetching validation code by hash failed {0:?}, {1:?}")]
FetchValidationCode(ValidationCodeHash, RuntimeApiError),
#[error("Fetching Runtime API version failed {0:?}")]
FetchRuntimeApiVersion(RuntimeApiError),
#[error("No validation code {0:?}")]
NoValidationCode(ValidationCodeHash),
#[error("Candidate rejected by prospective parachains subsystem")]
RejectedByProspectiveParachains,
#[error("ValidateFromExhaustive channel closed before receipt")]
ValidateFromExhaustive(#[source] oneshot::Canceled),
#[error("StoreAvailableData channel closed before receipt")]
StoreAvailableDataChannel(#[source] oneshot::Canceled),
#[error("RuntimeAPISubsystem channel closed before receipt")]
RuntimeApiUnavailable(#[source] oneshot::Canceled),
#[error("a channel was closed before receipt in try_join!")]
JoinMultiple(#[source] oneshot::Canceled),
#[error("Obtaining erasure chunks failed")]
ObtainErasureChunks(#[from] erasure_coding::Error),
#[error(transparent)]
ValidationFailed(#[from] ValidationFailed),
#[error(transparent)]
UtilError(#[from] UtilError),
#[error(transparent)]
SubsystemError(#[from] SubsystemError),
#[fatal]
#[error(transparent)]
OverseerExited(SubsystemError),
#[error("Availability store error")]
StoreAvailableData(#[source] StoreAvailableDataError),
}
/// Utility for consuming top-level errors and logging them.
///
/// We almost always want to continue on error; this function consumes
/// top-level errors by simply logging them.
pub fn log_error(result: Result<()>) -> std::result::Result<(), FatalError> {
match result.into_nested()? {
Ok(()) => Ok(()),
Err(jfyi) => {
jfyi.log();
Ok(())
},
}
}
impl JfyiError {
/// Log a `JfyiError`.
pub fn log(self) {
gum::debug!(target: LOG_TARGET, error = ?self);
}
}
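// A hypothetical call-site sketch for `log_error`: in a subsystem main loop,
// JFYI errors are logged and swallowed, while fatal errors propagate via `?`
// and end the loop. `process_next_message` is an assumed helper.
#[allow(dead_code)]
fn run_loop_sketch(mut process_next_message: impl FnMut() -> Result<()>) -> FatalResult<()> {
loop {
// Fatal errors exit the loop via `?`; JFYI errors are only logged and skipped.
log_error(process_next_message())?;
}
}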
File diff suppressed because it is too large Load Diff
+107
View File
@@ -0,0 +1,107 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use polkadot_node_subsystem_util::metrics::{self, prometheus};
#[derive(Clone)]
pub(crate) struct MetricsInner {
pub(crate) signed_statements_total: prometheus::Counter<prometheus::U64>,
pub(crate) candidates_seconded_total: prometheus::Counter<prometheus::U64>,
pub(crate) process_second: prometheus::Histogram,
pub(crate) process_statement: prometheus::Histogram,
pub(crate) get_backed_candidates: prometheus::Histogram,
}
/// Candidate backing metrics.
#[derive(Default, Clone)]
pub struct Metrics(pub(crate) Option<MetricsInner>);
impl Metrics {
pub fn on_statement_signed(&self) {
if let Some(metrics) = &self.0 {
metrics.signed_statements_total.inc();
}
}
pub fn on_candidate_seconded(&self) {
if let Some(metrics) = &self.0 {
metrics.candidates_seconded_total.inc();
}
}
/// Provide a timer for handling `CandidateBackingMessage::Second` which observes on drop.
pub fn time_process_second(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.process_second.start_timer())
}
/// Provide a timer for handling `CandidateBackingMessage::Statement` which observes on drop.
pub fn time_process_statement(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.process_statement.start_timer())
}
/// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes
/// on drop.
pub fn time_get_backed_candidates(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.get_backed_candidates.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
signed_statements_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_candidate_backing_signed_statements_total",
"Number of statements signed.",
)?,
registry,
)?,
candidates_seconded_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_candidate_backing_candidates_seconded_total",
"Number of candidates seconded.",
)?,
registry,
)?,
process_second: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_backing_process_second",
"Time spent within `candidate_backing::process_second`",
))?,
registry,
)?,
process_statement: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_backing_process_statement",
"Time spent within `candidate_backing::process_statement`",
))?,
registry,
)?,
get_backed_candidates: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_backing_get_backed_candidates",
"Time spent within `candidate_backing::get_backed_candidates`",
))?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,20 @@
[package]
name = "polkadot-node-core-bitfield-signing"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
futures = "0.3.21"
gum = { package = "tracing-gum", path = "../../gum" }
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
wasm-timer = "0.2.5"
thiserror = "1.0.31"
[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
@@ -0,0 +1,334 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The bitfield signing subsystem produces `SignedAvailabilityBitfield`s once per block.
#![deny(unused_crate_dependencies)]
#![warn(missing_docs)]
#![recursion_limit = "256"]
use futures::{
channel::{mpsc, oneshot},
future,
lock::Mutex,
FutureExt,
};
use polkadot_node_subsystem::{
errors::RuntimeApiError,
jaeger,
messages::{
AvailabilityStoreMessage, BitfieldDistributionMessage, RuntimeApiMessage, RuntimeApiRequest,
},
overseer, ActivatedLeaf, FromOrchestra, LeafStatus, OverseerSignal, PerLeafSpan,
SpawnedSubsystem, SubsystemError, SubsystemResult, SubsystemSender,
};
use polkadot_node_subsystem_util::{self as util, Validator};
use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex};
use sp_keystore::{Error as KeystoreError, KeystorePtr};
use std::{collections::HashMap, iter::FromIterator, time::Duration};
use wasm_timer::{Delay, Instant};
mod metrics;
use self::metrics::Metrics;
#[cfg(test)]
mod tests;
/// Delay between starting a bitfield signing job and its attempting to create a bitfield.
const SPAWNED_TASK_DELAY: Duration = Duration::from_millis(1500);
const LOG_TARGET: &str = "parachain::bitfield-signing";
// TODO: use `fatality` (https://github.com/paritytech/polkadot/issues/5540).
/// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
Util(#[from] util::Error),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error(transparent)]
Oneshot(#[from] oneshot::Canceled),
#[error(transparent)]
MpscSend(#[from] mpsc::SendError),
#[error(transparent)]
Runtime(#[from] RuntimeApiError),
#[error("Keystore failed: {0:?}")]
Keystore(KeystoreError),
}
/// If there is a candidate pending availability, query the Availability Store
/// for whether we have the availability chunk for our validator index.
async fn get_core_availability(
core: &CoreState,
validator_idx: ValidatorIndex,
sender: &Mutex<&mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>>,
span: &jaeger::Span,
) -> Result<bool, Error> {
if let CoreState::Occupied(core) = core {
let _span = span.child("query-chunk-availability");
let (tx, rx) = oneshot::channel();
sender
.lock()
.await
.send_message(
AvailabilityStoreMessage::QueryChunkAvailability(
core.candidate_hash,
validator_idx,
tx,
)
.into(),
)
.await;
let res = rx.await.map_err(Into::into);
gum::trace!(
target: LOG_TARGET,
para_id = %core.para_id(),
availability = ?res,
?core.candidate_hash,
"Candidate availability",
);
res
} else {
Ok(false)
}
}
/// Delegates to the v1 runtime API.
async fn get_availability_cores(
relay_parent: Hash,
sender: &mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>,
) -> Result<Vec<CoreState>, Error> {
let (tx, rx) = oneshot::channel();
sender
.send_message(
RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::AvailabilityCores(tx))
.into(),
)
.await;
match rx.await {
Ok(Ok(out)) => Ok(out),
Ok(Err(runtime_err)) => Err(runtime_err.into()),
Err(err) => Err(err.into()),
}
}
/// - get the list of core states from the runtime
/// - for each core, concurrently determine chunk availability (see `get_core_availability`)
/// - return the bitfield if there were no errors at any point in this process
///   (otherwise the bitfield would be prone to false negatives)
async fn construct_availability_bitfield(
relay_parent: Hash,
span: &jaeger::Span,
validator_idx: ValidatorIndex,
sender: &mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>,
) -> Result<AvailabilityBitfield, Error> {
// get the set of availability cores from the runtime
let availability_cores = {
let _span = span.child("get-availability-cores");
get_availability_cores(relay_parent, sender).await?
};
// Wrap the sender in a Mutex to share it between the futures.
//
// We use a `Mutex` here rather than cloning the sender inside each future,
// because every clone of the sender increases the channel's capacity by one
// for the clone's lifetime.
let sender = Mutex::new(sender);
// Handle all cores concurrently
// `try_join_all` returns all results in the same order as the input futures.
let results = future::try_join_all(
availability_cores
.iter()
.map(|core| get_core_availability(core, validator_idx, &sender, span)),
)
.await?;
let core_bits = FromIterator::from_iter(results);
gum::debug!(
target: LOG_TARGET,
?relay_parent,
"Signing Bitfield for {core_count} cores: {core_bits}",
core_count = availability_cores.len(),
core_bits = core_bits,
);
Ok(AvailabilityBitfield(core_bits))
}
/// The bitfield signing subsystem.
pub struct BitfieldSigningSubsystem {
keystore: KeystorePtr,
metrics: Metrics,
}
impl BitfieldSigningSubsystem {
/// Create a new instance of the `BitfieldSigningSubsystem`.
pub fn new(keystore: KeystorePtr, metrics: Metrics) -> Self {
Self { keystore, metrics }
}
}
#[overseer::subsystem(BitfieldSigning, error=SubsystemError, prefix=self::overseer)]
impl<Context> BitfieldSigningSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = async move {
run(ctx, self.keystore, self.metrics)
.await
.map_err(|e| SubsystemError::with_origin("bitfield-signing", e))
}
.boxed();
SpawnedSubsystem { name: "bitfield-signing-subsystem", future }
}
}
#[overseer::contextbounds(BitfieldSigning, prefix = self::overseer)]
async fn run<Context>(
mut ctx: Context,
keystore: KeystorePtr,
metrics: Metrics,
) -> SubsystemResult<()> {
// Track spawned jobs per active leaf.
let mut running = HashMap::<Hash, future::AbortHandle>::new();
loop {
match ctx.recv().await? {
FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
// Abort jobs for deactivated leaves.
for leaf in &update.deactivated {
if let Some(handle) = running.remove(leaf) {
handle.abort();
}
}
if let Some(leaf) = update.activated {
let sender = ctx.sender().clone();
let leaf_hash = leaf.hash;
let (fut, handle) = future::abortable(handle_active_leaves_update(
sender,
leaf,
keystore.clone(),
metrics.clone(),
));
running.insert(leaf_hash, handle);
ctx.spawn("bitfield-signing-job", fut.map(drop).boxed())?;
}
},
FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {},
FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
FromOrchestra::Communication { .. } => {},
}
}
}
async fn handle_active_leaves_update<Sender>(
mut sender: Sender,
leaf: ActivatedLeaf,
keystore: KeystorePtr,
metrics: Metrics,
) -> Result<(), Error>
where
Sender: overseer::BitfieldSigningSenderTrait,
{
if let LeafStatus::Stale = leaf.status {
gum::debug!(
target: LOG_TARGET,
relay_parent = ?leaf.hash,
block_number = ?leaf.number,
"Skip bitfield signing for stale leaf"
);
return Ok(())
}
let span = PerLeafSpan::new(leaf.span, "bitfield-signing");
let span_delay = span.child("delay");
let wait_until = Instant::now() + SPAWNED_TASK_DELAY;
// Now do all the work we can before waiting for the availability store.
// If we're not a validator, we can simply succeed without doing anything.
let validator = match Validator::new(leaf.hash, keystore.clone(), &mut sender).await {
Ok(validator) => validator,
Err(util::Error::NotAValidator) => return Ok(()),
Err(err) => return Err(Error::Util(err)),
};
// wait a bit before doing anything else
Delay::new_at(wait_until).await?;
// This timer is not started at the head of the function because we don't
// want to include `SPAWNED_TASK_DELAY` in the measurement.
let _timer = metrics.time_run();
drop(span_delay);
let span_availability = span.child("availability");
let bitfield = match construct_availability_bitfield(
leaf.hash,
&span_availability,
validator.index(),
&mut sender,
)
.await
{
Err(Error::Runtime(runtime_err)) => {
// Don't take down the node on runtime API errors.
gum::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error");
return Ok(())
},
Err(err) => return Err(err),
Ok(bitfield) => bitfield,
};
drop(span_availability);
let span_signing = span.child("signing");
let signed_bitfield =
match validator.sign(keystore, bitfield).map_err(|e| Error::Keystore(e))? {
Some(b) => b,
None => {
gum::error!(
target: LOG_TARGET,
"Key was found at construction, but while signing it could not be found.",
);
return Ok(())
},
};
metrics.on_bitfield_signed();
drop(span_signing);
let _span_gossip = span.child("gossip");
sender
.send_message(BitfieldDistributionMessage::DistributeBitfield(leaf.hash, signed_bitfield))
.await;
Ok(())
}
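// A minimal, self-contained sketch (with stand-in names) of the two patterns
// used in `construct_availability_bitfield` above: share one sender-like
// resource across concurrent futures via a futures-aware `Mutex` instead of
// cloning it, then collect the ordered boolean answers into a bitfield. It
// assumes the `bitvec` crate, which backs `AvailabilityBitfield`.
#[cfg(test)]
mod bitfield_assembly_sketch {
use bitvec::prelude::*;
use futures::{executor::block_on, future, lock::Mutex};
async fn query(core: u32, shared: &Mutex<()>) -> Result<bool, ()> {
let _sender = shared.lock().await; // stand-in for the shared subsystem sender
Ok(core % 2 == 1) // stand-in for a chunk-availability answer
}
#[test]
fn assemble_bitfield() {
let shared = Mutex::new(());
let results =
block_on(future::try_join_all((0u32..3).map(|core| query(core, &shared)))).unwrap();
// `try_join_all` preserves input order, so bit `i` answers for core `i`.
let bits: BitVec<u8, Lsb0> = results.into_iter().collect();
assert_eq!(bits, bitvec![u8, Lsb0; 0, 1, 0]);
}
}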
@@ -0,0 +1,68 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use polkadot_node_subsystem_util::metrics::{self, prometheus};
#[derive(Clone)]
pub(crate) struct MetricsInner {
pub(crate) bitfields_signed_total: prometheus::Counter<prometheus::U64>,
pub(crate) run: prometheus::Histogram,
}
/// Bitfield signing metrics.
#[derive(Default, Clone)]
pub struct Metrics(pub(crate) Option<MetricsInner>);
impl Metrics {
pub fn on_bitfield_signed(&self) {
if let Some(metrics) = &self.0 {
metrics.bitfields_signed_total.inc();
}
}
/// Provide a timer for `run` which observes on drop.
pub fn time_run(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.run.start_timer())
}
}
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
bitfields_signed_total: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_bitfields_signed_total",
"Number of bitfields signed.",
)?,
registry,
)?,
run: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_bitfield_signing_run",
"Time spent within `bitfield_signing::run`",
)
.buckets(vec![
0.000625, 0.00125, 0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.1, 0.25,
0.5, 1.0, 2.5, 5.0, 10.0,
]),
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
@@ -0,0 +1,85 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use futures::{executor::block_on, pin_mut, StreamExt};
use polkadot_node_subsystem::messages::AllMessages;
use polkadot_primitives::{CandidateHash, OccupiedCore};
use test_helpers::dummy_candidate_descriptor;
fn occupied_core(para_id: u32, candidate_hash: CandidateHash) -> CoreState {
CoreState::Occupied(OccupiedCore {
group_responsible: para_id.into(),
next_up_on_available: None,
occupied_since: 100_u32,
time_out_at: 200_u32,
next_up_on_time_out: None,
availability: Default::default(),
candidate_hash,
candidate_descriptor: dummy_candidate_descriptor(Hash::zero()),
})
}
#[test]
fn construct_availability_bitfield_works() {
block_on(async move {
let relay_parent = Hash::default();
let validator_index = ValidatorIndex(1u32);
let (mut sender, mut receiver) = polkadot_node_subsystem_test_helpers::sender_receiver();
let future = construct_availability_bitfield(
relay_parent,
&jaeger::Span::Disabled,
validator_index,
&mut sender,
)
.fuse();
pin_mut!(future);
let hash_a = CandidateHash(Hash::repeat_byte(1));
let hash_b = CandidateHash(Hash::repeat_byte(2));
loop {
futures::select! {
m = receiver.next() => match m.unwrap() {
AllMessages::RuntimeApi(
RuntimeApiMessage::Request(rp, RuntimeApiRequest::AvailabilityCores(tx)),
) => {
assert_eq!(relay_parent, rp);
tx.send(Ok(vec![CoreState::Free, occupied_core(1, hash_a), occupied_core(2, hash_b)])).unwrap();
}
AllMessages::AvailabilityStore(
AvailabilityStoreMessage::QueryChunkAvailability(c_hash, vidx, tx),
) => {
assert_eq!(validator_index, vidx);
tx.send(c_hash == hash_a).unwrap();
},
o => panic!("Unknown message: {:?}", o),
},
r = future => match r {
Ok(r) => {
assert!(!r.0.get(0).unwrap());
assert!(r.0.get(1).unwrap());
assert!(!r.0.get(2).unwrap());
break
},
Err(e) => panic!("Failed: {:?}", e),
},
}
}
});
}
@@ -0,0 +1,35 @@
[package]
name = "polkadot-node-core-candidate-validation"
description = "Polkadot crate that implements the Candidate Validation subsystem. Handles requests to validate candidates according to a PVF."
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
async-trait = "0.1.57"
futures = "0.3.21"
futures-timer = "3.0.2"
gum = { package = "tracing-gum", path = "../../gum" }
sp-maybe-compressed-blob = { git = "https://github.com/paritytech/substrate", branch = "master" }
parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] }
polkadot-primitives = { path = "../../../primitives" }
polkadot-parachain = { path = "../../../parachain" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
polkadot-node-metrics = { path = "../../metrics" }
polkadot-overseer = { path = "../../overseer" }
[target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies]
polkadot-node-core-pvf = { path = "../pvf" }
[dev-dependencies]
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
futures = { version = "0.3.21", features = ["thread-pool"] }
assert_matches = "1.4.0"
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
@@ -0,0 +1,903 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
//! The Candidate Validation subsystem.
//!
//! This handles incoming requests from other subsystems to validate candidates
//! according to a validation function. This delegates validation to an underlying
//! pool of processes used for execution of the Wasm.
#![deny(unused_crate_dependencies, unused_results)]
#![warn(missing_docs)]
use polkadot_node_core_pvf::{
InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError,
PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost,
};
use polkadot_node_primitives::{
BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT,
};
use polkadot_node_subsystem::{
errors::RuntimeApiError,
messages::{
CandidateValidationMessage, PreCheckOutcome, RuntimeApiMessage, RuntimeApiRequest,
ValidationFailed,
},
overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult,
SubsystemSender,
};
use polkadot_node_subsystem_util::executor_params_at_relay_parent;
use polkadot_parachain::primitives::{ValidationParams, ValidationResult as WasmValidationResult};
use polkadot_primitives::{
CandidateCommitments, CandidateDescriptor, CandidateReceipt, ExecutorParams, Hash,
OccupiedCoreAssumption, PersistedValidationData, PvfExecTimeoutKind, PvfPrepTimeoutKind,
ValidationCode, ValidationCodeHash,
};
use parity_scale_codec::Encode;
use futures::{channel::oneshot, prelude::*};
use std::{
path::PathBuf,
sync::Arc,
time::{Duration, Instant},
};
use async_trait::async_trait;
mod metrics;
use self::metrics::Metrics;
#[cfg(test)]
mod tests;
const LOG_TARGET: &str = "parachain::candidate-validation";
/// The amount of time to wait before retrying after a retry-able backing validation error. We use a
/// lower value for the backing case, to fit within the lower backing timeout.
#[cfg(not(test))]
const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500);
#[cfg(test)]
const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200);
/// The amount of time to wait before retrying after a retry-able approval validation error. We use
/// a higher value for the approval case since we have more time, and if we wait longer it is more
/// likely that transient conditions will resolve.
#[cfg(not(test))]
const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_secs(3);
#[cfg(test)]
const PVF_APPROVAL_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200);
// Default PVF timeouts. Must never be changed! Use executor environment parameters in
// `session_info` pallet to adjust them. See also `PvfTimeoutKind` docs.
const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60);
const DEFAULT_LENIENT_PREPARATION_TIMEOUT: Duration = Duration::from_secs(360);
const DEFAULT_BACKING_EXECUTION_TIMEOUT: Duration = Duration::from_secs(2);
const DEFAULT_APPROVAL_EXECUTION_TIMEOUT: Duration = Duration::from_secs(12);
/// Configuration for the candidate validation subsystem
#[derive(Clone)]
pub struct Config {
/// The path where candidate validation can store compiled artifacts for PVFs.
pub artifacts_cache_path: PathBuf,
/// The version of the node. `None` can be passed to skip the version check (only for tests).
pub node_version: Option<String>,
/// Path to the preparation worker binary
pub prep_worker_path: PathBuf,
/// Path to the execution worker binary
pub exec_worker_path: PathBuf,
}
/// The candidate validation subsystem.
pub struct CandidateValidationSubsystem {
#[allow(missing_docs)]
pub metrics: Metrics,
#[allow(missing_docs)]
pub pvf_metrics: polkadot_node_core_pvf::Metrics,
config: Option<Config>,
}
impl CandidateValidationSubsystem {
/// Create a new `CandidateValidationSubsystem` with the given configuration and
/// metrics.
pub fn with_config(
config: Option<Config>,
metrics: Metrics,
pvf_metrics: polkadot_node_core_pvf::Metrics,
) -> Self {
CandidateValidationSubsystem { config, metrics, pvf_metrics }
}
}
#[overseer::subsystem(CandidateValidation, error=SubsystemError, prefix=self::overseer)]
impl<Context> CandidateValidationSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
if let Some(config) = self.config {
let future = run(ctx, self.metrics, self.pvf_metrics, config)
.map_err(|e| SubsystemError::with_origin("candidate-validation", e))
.boxed();
SpawnedSubsystem { name: "candidate-validation-subsystem", future }
} else {
polkadot_overseer::DummySubsystem.start(ctx)
}
}
}
#[overseer::contextbounds(CandidateValidation, prefix = self::overseer)]
async fn run<Context>(
mut ctx: Context,
metrics: Metrics,
pvf_metrics: polkadot_node_core_pvf::Metrics,
Config { artifacts_cache_path, node_version, prep_worker_path, exec_worker_path }: Config,
) -> SubsystemResult<()> {
let (validation_host, task) = polkadot_node_core_pvf::start(
polkadot_node_core_pvf::Config::new(
artifacts_cache_path,
node_version,
prep_worker_path,
exec_worker_path,
),
pvf_metrics,
);
ctx.spawn_blocking("pvf-validation-host", task.boxed())?;
loop {
match ctx.recv().await? {
FromOrchestra::Signal(OverseerSignal::ActiveLeaves(_)) => {},
FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {},
FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
FromOrchestra::Communication { msg } => match msg {
CandidateValidationMessage::ValidateFromChainState(
candidate_receipt,
pov,
timeout,
response_sender,
) => {
let bg = {
let mut sender = ctx.sender().clone();
let metrics = metrics.clone();
let validation_host = validation_host.clone();
async move {
let _timer = metrics.time_validate_from_chain_state();
let res = validate_from_chain_state(
&mut sender,
validation_host,
candidate_receipt,
pov,
timeout,
&metrics,
)
.await;
metrics.on_validation_event(&res);
let _ = response_sender.send(res);
}
};
ctx.spawn("validate-from-chain-state", bg.boxed())?;
},
CandidateValidationMessage::ValidateFromExhaustive(
persisted_validation_data,
validation_code,
candidate_receipt,
pov,
timeout,
response_sender,
) => {
let bg = {
let mut sender = ctx.sender().clone();
let metrics = metrics.clone();
let validation_host = validation_host.clone();
async move {
let _timer = metrics.time_validate_from_exhaustive();
let res = validate_candidate_exhaustive(
&mut sender,
validation_host,
persisted_validation_data,
validation_code,
candidate_receipt,
pov,
timeout,
&metrics,
)
.await;
metrics.on_validation_event(&res);
let _ = response_sender.send(res);
}
};
ctx.spawn("validate-from-exhaustive", bg.boxed())?;
},
CandidateValidationMessage::PreCheck(
relay_parent,
validation_code_hash,
response_sender,
) => {
let bg = {
let mut sender = ctx.sender().clone();
let validation_host = validation_host.clone();
async move {
let precheck_result = precheck_pvf(
&mut sender,
validation_host,
relay_parent,
validation_code_hash,
)
.await;
let _ = response_sender.send(precheck_result);
}
};
ctx.spawn("candidate-validation-pre-check", bg.boxed())?;
},
},
}
}
}
struct RuntimeRequestFailed;
async fn runtime_api_request<T, Sender>(
sender: &mut Sender,
relay_parent: Hash,
request: RuntimeApiRequest,
receiver: oneshot::Receiver<Result<T, RuntimeApiError>>,
) -> Result<T, RuntimeRequestFailed>
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
sender
.send_message(RuntimeApiMessage::Request(relay_parent, request).into())
.await;
receiver
.await
.map_err(|_| {
gum::debug!(target: LOG_TARGET, ?relay_parent, "Runtime API request dropped");
RuntimeRequestFailed
})
.and_then(|res| {
res.map_err(|e| {
gum::debug!(
target: LOG_TARGET,
?relay_parent,
err = ?e,
"Runtime API request internal error"
);
RuntimeRequestFailed
})
})
}
async fn request_validation_code_by_hash<Sender>(
sender: &mut Sender,
relay_parent: Hash,
validation_code_hash: ValidationCodeHash,
) -> Result<Option<ValidationCode>, RuntimeRequestFailed>
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
let (tx, rx) = oneshot::channel();
runtime_api_request(
sender,
relay_parent,
RuntimeApiRequest::ValidationCodeByHash(validation_code_hash, tx),
rx,
)
.await
}
async fn precheck_pvf<Sender>(
sender: &mut Sender,
mut validation_backend: impl ValidationBackend,
relay_parent: Hash,
validation_code_hash: ValidationCodeHash,
) -> PreCheckOutcome
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
let validation_code =
match request_validation_code_by_hash(sender, relay_parent, validation_code_hash).await {
Ok(Some(code)) => code,
_ => {
// The reason this is `Failed` and not `Invalid` is that during pre-checking
// voting the relay-chain is assumed to pin the code. If the code is actually
// missing, we issue `Failed`, since that looks more like a bug.
gum::warn!(
target: LOG_TARGET,
?relay_parent,
?validation_code_hash,
"precheck: requested validation code is not found on-chain!",
);
return PreCheckOutcome::Failed
},
};
let executor_params =
if let Ok(executor_params) = executor_params_at_relay_parent(relay_parent, sender).await {
gum::debug!(
target: LOG_TARGET,
?relay_parent,
?validation_code_hash,
"precheck: acquired executor params for the session: {:?}",
executor_params,
);
executor_params
} else {
gum::warn!(
target: LOG_TARGET,
?relay_parent,
?validation_code_hash,
"precheck: failed to acquire executor params for the session, thus voting against.",
);
return PreCheckOutcome::Invalid
};
let timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Precheck);
let pvf = match sp_maybe_compressed_blob::decompress(
&validation_code.0,
VALIDATION_CODE_BOMB_LIMIT,
) {
Ok(code) => PvfPrepData::from_code(
code.into_owned(),
executor_params,
timeout,
PrepareJobKind::Prechecking,
),
Err(e) => {
gum::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code");
return PreCheckOutcome::Invalid
},
};
match validation_backend.precheck_pvf(pvf).await {
Ok(_) => PreCheckOutcome::Valid,
Err(prepare_err) =>
if prepare_err.is_deterministic() {
PreCheckOutcome::Invalid
} else {
PreCheckOutcome::Failed
},
}
}
#[derive(Debug)]
enum AssumptionCheckOutcome {
Matches(PersistedValidationData, ValidationCode),
DoesNotMatch,
BadRequest,
}
async fn check_assumption_validation_data<Sender>(
sender: &mut Sender,
descriptor: &CandidateDescriptor,
assumption: OccupiedCoreAssumption,
) -> AssumptionCheckOutcome
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
let validation_data = {
let (tx, rx) = oneshot::channel();
let d = runtime_api_request(
sender,
descriptor.relay_parent,
RuntimeApiRequest::PersistedValidationData(descriptor.para_id, assumption, tx),
rx,
)
.await;
match d {
Ok(None) | Err(RuntimeRequestFailed) => return AssumptionCheckOutcome::BadRequest,
Ok(Some(d)) => d,
}
};
let persisted_validation_data_hash = validation_data.hash();
if descriptor.persisted_validation_data_hash == persisted_validation_data_hash {
let (code_tx, code_rx) = oneshot::channel();
let validation_code = runtime_api_request(
sender,
descriptor.relay_parent,
RuntimeApiRequest::ValidationCode(descriptor.para_id, assumption, code_tx),
code_rx,
)
.await;
match validation_code {
Ok(None) | Err(RuntimeRequestFailed) => AssumptionCheckOutcome::BadRequest,
Ok(Some(v)) => AssumptionCheckOutcome::Matches(validation_data, v),
}
} else {
AssumptionCheckOutcome::DoesNotMatch
}
}
async fn find_assumed_validation_data<Sender>(
sender: &mut Sender,
descriptor: &CandidateDescriptor,
) -> AssumptionCheckOutcome
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
// The candidate descriptor has a `persisted_validation_data_hash` which corresponds to
// one of up to two possible values that we can derive from the state of the
// relay-parent. We can fetch these values by getting the persisted validation data
// based on the different `OccupiedCoreAssumption`s.
const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[
OccupiedCoreAssumption::Included,
OccupiedCoreAssumption::TimedOut,
// `TimedOut` and `Free` both don't perform any speculation and therefore should be the
// same for our purposes here. In other words, if `TimedOut` matched then the `Free` must
// be matched as well.
];
// Consider running these checks in parallel to reduce validation latency.
for assumption in ASSUMPTIONS {
let outcome = check_assumption_validation_data(sender, descriptor, *assumption).await;
match outcome {
AssumptionCheckOutcome::Matches(_, _) => return outcome,
AssumptionCheckOutcome::BadRequest => return outcome,
AssumptionCheckOutcome::DoesNotMatch => continue,
}
}
AssumptionCheckOutcome::DoesNotMatch
}
/// Returns validation data for a given candidate.
pub async fn find_validation_data<Sender>(
sender: &mut Sender,
descriptor: &CandidateDescriptor,
) -> Result<Option<(PersistedValidationData, ValidationCode)>, ValidationFailed>
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
match find_assumed_validation_data(sender, &descriptor).await {
AssumptionCheckOutcome::Matches(validation_data, validation_code) =>
Ok(Some((validation_data, validation_code))),
AssumptionCheckOutcome::DoesNotMatch => {
// If neither the assumption that the occupied core has the para included
// nor the assumption that the occupied core timed out is valid, then the
// `persisted_validation_data_hash` in the descriptor is not based on the
// relay parent and is thus invalid.
Ok(None)
},
AssumptionCheckOutcome::BadRequest =>
Err(ValidationFailed("Assumption Check: Bad request".into())),
}
}
async fn validate_from_chain_state<Sender>(
sender: &mut Sender,
validation_host: ValidationHost,
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
exec_timeout_kind: PvfExecTimeoutKind,
metrics: &Metrics,
) -> Result<ValidationResult, ValidationFailed>
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
let mut new_sender = sender.clone();
let (validation_data, validation_code) =
match find_validation_data(&mut new_sender, &candidate_receipt.descriptor).await? {
Some((validation_data, validation_code)) => (validation_data, validation_code),
None => return Ok(ValidationResult::Invalid(InvalidCandidate::BadParent)),
};
let validation_result = validate_candidate_exhaustive(
sender,
validation_host,
validation_data,
validation_code,
candidate_receipt.clone(),
pov,
exec_timeout_kind,
metrics,
)
.await;
if let Ok(ValidationResult::Valid(ref outputs, _)) = validation_result {
let (tx, rx) = oneshot::channel();
match runtime_api_request(
sender,
candidate_receipt.descriptor.relay_parent,
RuntimeApiRequest::CheckValidationOutputs(
candidate_receipt.descriptor.para_id,
outputs.clone(),
tx,
),
rx,
)
.await
{
Ok(true) => {},
Ok(false) => return Ok(ValidationResult::Invalid(InvalidCandidate::InvalidOutputs)),
Err(RuntimeRequestFailed) =>
return Err(ValidationFailed("Check Validation Outputs: Bad request".into())),
}
}
validation_result
}
async fn validate_candidate_exhaustive<Sender>(
sender: &mut Sender,
mut validation_backend: impl ValidationBackend + Send,
persisted_validation_data: PersistedValidationData,
validation_code: ValidationCode,
candidate_receipt: CandidateReceipt,
pov: Arc<PoV>,
exec_timeout_kind: PvfExecTimeoutKind,
metrics: &Metrics,
) -> Result<ValidationResult, ValidationFailed>
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
let _timer = metrics.time_validate_candidate_exhaustive();
let validation_code_hash = validation_code.hash();
let para_id = candidate_receipt.descriptor.para_id;
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
?para_id,
"About to validate a candidate.",
);
if let Err(e) = perform_basic_checks(
&candidate_receipt.descriptor,
persisted_validation_data.max_pov_size,
&pov,
&validation_code_hash,
) {
gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (basic checks)");
return Ok(ValidationResult::Invalid(e))
}
let raw_validation_code = match sp_maybe_compressed_blob::decompress(
&validation_code.0,
VALIDATION_CODE_BOMB_LIMIT,
) {
Ok(code) => code,
Err(e) => {
gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (validation code)");
// Code already passed pre-checking; if decompression fails now, this most
// likely means some local corruption happened.
return Err(ValidationFailed("Code decompression failed".to_string()))
},
};
metrics.observe_code_size(raw_validation_code.len());
metrics.observe_pov_size(pov.block_data.0.len(), true);
let raw_block_data =
match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) {
Ok(block_data) => BlockData(block_data.to_vec()),
Err(e) => {
gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (PoV code)");
// If the PoV is invalid, the candidate certainly is.
return Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))
},
};
metrics.observe_pov_size(raw_block_data.0.len(), false);
let params = ValidationParams {
parent_head: persisted_validation_data.parent_head.clone(),
block_data: raw_block_data,
relay_parent_number: persisted_validation_data.relay_parent_number,
relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root,
};
let executor_params = if let Ok(executor_params) =
executor_params_at_relay_parent(candidate_receipt.descriptor.relay_parent, sender).await
{
gum::debug!(
target: LOG_TARGET,
?validation_code_hash,
?para_id,
"Acquired executor params for the session: {:?}",
executor_params,
);
executor_params
} else {
gum::warn!(
target: LOG_TARGET,
?validation_code_hash,
?para_id,
"Failed to acquire executor params for the session",
);
return Ok(ValidationResult::Invalid(InvalidCandidate::BadParent))
};
let result = validation_backend
.validate_candidate_with_retry(
raw_validation_code.to_vec(),
pvf_exec_timeout(&executor_params, exec_timeout_kind),
exec_timeout_kind,
params,
executor_params,
)
.await;
if let Err(ref error) = result {
gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate");
}
match result {
Err(ValidationError::InternalError(e)) => {
gum::warn!(
target: LOG_TARGET,
?para_id,
?e,
"An internal error occurred during validation, will abstain from voting",
);
Err(ValidationFailed(e.to_string()))
},
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) =>
Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)),
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedError(e))) =>
Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))),
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) =>
Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(
"ambiguous worker death".to_string(),
))),
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(err))) =>
Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))),
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => {
// In principle, if preparation of the `WASM` fails, the current candidate cannot
// be the reason for that, so we can't say whether it is invalid or not. In
// addition, with pre-checking enabled only valid runtimes should ever get
// enacted, so we can be reasonably sure that this is some local problem on the
// current node. However, as this particular error *seems* to indicate a
// deterministic error, we raise a warning.
gum::warn!(
target: LOG_TARGET,
?para_id,
?e,
"Deterministic error occurred during preparation (should have been ruled out by pre-checking phase)",
);
Err(ValidationFailed(e))
},
Ok(res) =>
if res.head_data.hash() != candidate_receipt.descriptor.para_head {
gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (para_head)");
Ok(ValidationResult::Invalid(InvalidCandidate::ParaHeadHashMismatch))
} else {
let outputs = CandidateCommitments {
head_data: res.head_data,
upward_messages: res.upward_messages,
horizontal_messages: res.horizontal_messages,
new_validation_code: res.new_validation_code,
processed_downward_messages: res.processed_downward_messages,
hrmp_watermark: res.hrmp_watermark,
};
if candidate_receipt.commitments_hash != outputs.hash() {
gum::info!(
target: LOG_TARGET,
?para_id,
"Invalid candidate (commitments hash)"
);
// If validation produced a new set of commitments, we treat the candidate as
// invalid.
Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))
} else {
Ok(ValidationResult::Valid(outputs, persisted_validation_data))
}
},
}
}
#[async_trait]
trait ValidationBackend {
/// Tries executing a PVF a single time (no retries).
async fn validate_candidate(
&mut self,
pvf: PvfPrepData,
exec_timeout: Duration,
encoded_params: Vec<u8>,
) -> Result<WasmValidationResult, ValidationError>;
/// Tries executing a PVF. Will retry (once per retry-able error kind) if an
/// error is encountered that may have been transient.
///
/// NOTE: Should retry only on errors that are a result of execution itself, and not of
/// preparation.
async fn validate_candidate_with_retry(
&mut self,
raw_validation_code: Vec<u8>,
exec_timeout: Duration,
exec_timeout_kind: PvfExecTimeoutKind,
params: ValidationParams,
executor_params: ExecutorParams,
) -> Result<WasmValidationResult, ValidationError> {
let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Lenient);
// Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap.
let pvf = PvfPrepData::from_code(
raw_validation_code,
executor_params,
prep_timeout,
PrepareJobKind::Compilation,
);
// We keep track of the total time that has passed and stop retrying if we are taking too
// long.
let total_time_start = Instant::now();
let mut validation_result =
self.validate_candidate(pvf.clone(), exec_timeout, params.encode()).await;
if validation_result.is_ok() {
return validation_result
}
let retry_delay = match exec_timeout_kind {
PvfExecTimeoutKind::Backing => PVF_BACKING_EXECUTION_RETRY_DELAY,
PvfExecTimeoutKind::Approval => PVF_APPROVAL_EXECUTION_RETRY_DELAY,
};
// Allow limited retries for each kind of error.
let mut num_internal_retries_left = 1;
let mut num_awd_retries_left = 1;
let mut num_panic_retries_left = 1;
loop {
// Stop retrying if we exceeded the timeout.
if total_time_start.elapsed() + retry_delay > exec_timeout {
break
}
match validation_result {
Err(ValidationError::InvalidCandidate(
WasmInvalidCandidate::AmbiguousWorkerDeath,
)) if num_awd_retries_left > 0 => num_awd_retries_left -= 1,
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(_)))
if num_panic_retries_left > 0 =>
num_panic_retries_left -= 1,
Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 =>
num_internal_retries_left -= 1,
_ => break,
}
// If we got a possibly transient error, retry once after a brief delay, on the
// assumption that the conditions that caused this error may have resolved on their own.
{
// Wait a brief delay before retrying.
futures_timer::Delay::new(retry_delay).await;
let new_timeout = exec_timeout.saturating_sub(total_time_start.elapsed());
gum::warn!(
target: LOG_TARGET,
?pvf,
?new_timeout,
"Re-trying failed candidate validation due to possible transient error: {:?}",
validation_result
);
// Encode the params again when re-trying. We expect the retry case to be relatively
// rare, and we want to avoid unconditionally cloning data.
validation_result =
self.validate_candidate(pvf.clone(), new_timeout, params.encode()).await;
}
}
validation_result
}
async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<PrepareStats, PrepareError>;
}
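// A minimal sketch of the retry-budget pattern implemented by
// `validate_candidate_with_retry` above, with hypothetical error kinds: each
// transient-looking failure class gets one retry, and retrying stops once the
// overall time budget would be exceeded.
#[allow(dead_code)]
fn run_with_retries_sketch(
mut attempt: impl FnMut() -> Result<(), SketchFailure>,
budget: Duration,
) -> Result<(), SketchFailure> {
let start = Instant::now();
let (mut transient_left, mut panic_left) = (1u32, 1u32); // one retry per kind
loop {
match attempt() {
Ok(()) => return Ok(()),
Err(SketchFailure::Transient) if transient_left > 0 && start.elapsed() < budget =>
transient_left -= 1,
Err(SketchFailure::Panic) if panic_left > 0 && start.elapsed() < budget =>
panic_left -= 1,
// Deterministic failure, or the retry budget is exhausted.
Err(e) => return Err(e),
}
}
}
#[derive(Debug)]
#[allow(dead_code)]
enum SketchFailure {
Transient,
Panic,
Deterministic,
}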
#[async_trait]
impl ValidationBackend for ValidationHost {
/// Tries executing a PVF a single time (no retries).
async fn validate_candidate(
&mut self,
pvf: PvfPrepData,
exec_timeout: Duration,
encoded_params: Vec<u8>,
) -> Result<WasmValidationResult, ValidationError> {
let priority = polkadot_node_core_pvf::Priority::Normal;
let (tx, rx) = oneshot::channel();
if let Err(err) = self.execute_pvf(pvf, exec_timeout, encoded_params, priority, tx).await {
return Err(InternalValidationError::HostCommunication(format!(
"cannot send pvf to the validation host, it might have shut down: {:?}",
err
))
.into())
}
rx.await.map_err(|_| {
ValidationError::from(InternalValidationError::HostCommunication(
"validation was cancelled".into(),
))
})?
}
async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<PrepareStats, PrepareError> {
let (tx, rx) = oneshot::channel();
if let Err(err) = self.precheck_pvf(pvf, tx).await {
// Return an IO error if there was an error communicating with the host.
return Err(PrepareError::IoErr(err))
}
rx.await.map_err(|err| PrepareError::IoErr(err.to_string()))?
}
}
/// Performs basic checks of a candidate. Provide the encoded PoV block. Returns
/// `Ok` if the checks pass, `Err` otherwise.
fn perform_basic_checks(
candidate: &CandidateDescriptor,
max_pov_size: u32,
pov: &PoV,
validation_code_hash: &ValidationCodeHash,
) -> Result<(), InvalidCandidate> {
let pov_hash = pov.hash();
let encoded_pov_size = pov.encoded_size();
if encoded_pov_size > max_pov_size as usize {
return Err(InvalidCandidate::ParamsTooLarge(encoded_pov_size as u64))
}
if pov_hash != candidate.pov_hash {
return Err(InvalidCandidate::PoVHashMismatch)
}
if *validation_code_hash != candidate.validation_code_hash {
return Err(InvalidCandidate::CodeHashMismatch)
}
if let Err(()) = candidate.check_collator_signature() {
return Err(InvalidCandidate::BadSignature)
}
Ok(())
}
fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepTimeoutKind) -> Duration {
if let Some(timeout) = executor_params.pvf_prep_timeout(kind) {
return timeout
}
match kind {
PvfPrepTimeoutKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT,
PvfPrepTimeoutKind::Lenient => DEFAULT_LENIENT_PREPARATION_TIMEOUT,
}
}
fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecTimeoutKind) -> Duration {
if let Some(timeout) = executor_params.pvf_exec_timeout(kind) {
return timeout
}
match kind {
PvfExecTimeoutKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT,
PvfExecTimeoutKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT,
}
}
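// Both helpers above follow the same fallback rule: an executor-parameter
// override from the session takes precedence, and the hard-coded defaults
// apply only when no override is present. A one-line sketch of that rule:
#[allow(dead_code)]
fn effective_timeout_sketch(on_chain: Option<Duration>, default: Duration) -> Duration {
on_chain.unwrap_or(default)
}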
@@ -0,0 +1,154 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.
// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
use super::{ValidationFailed, ValidationResult};
use polkadot_node_metrics::metrics::{self, prometheus};
#[derive(Clone)]
pub(crate) struct MetricsInner {
pub(crate) validation_requests: prometheus::CounterVec<prometheus::U64>,
pub(crate) validate_from_chain_state: prometheus::Histogram,
pub(crate) validate_from_exhaustive: prometheus::Histogram,
pub(crate) validate_candidate_exhaustive: prometheus::Histogram,
pub(crate) pov_size: prometheus::HistogramVec,
pub(crate) code_size: prometheus::Histogram,
}
/// Candidate validation metrics.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
	/// Observe the outcome of a validation request.
	pub fn on_validation_event(&self, event: &Result<ValidationResult, ValidationFailed>) {
if let Some(metrics) = &self.0 {
match event {
Ok(ValidationResult::Valid(_, _)) => {
metrics.validation_requests.with_label_values(&["valid"]).inc();
},
Ok(ValidationResult::Invalid(_)) => {
metrics.validation_requests.with_label_values(&["invalid"]).inc();
},
Err(_) => {
metrics.validation_requests.with_label_values(&["validation failure"]).inc();
},
}
}
}
/// Provide a timer for `validate_from_chain_state` which observes on drop.
pub fn time_validate_from_chain_state(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.validate_from_chain_state.start_timer())
}
/// Provide a timer for `validate_from_exhaustive` which observes on drop.
pub fn time_validate_from_exhaustive(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0.as_ref().map(|metrics| metrics.validate_from_exhaustive.start_timer())
}
/// Provide a timer for `validate_candidate_exhaustive` which observes on drop.
pub fn time_validate_candidate_exhaustive(
&self,
) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
self.0
.as_ref()
.map(|metrics| metrics.validate_candidate_exhaustive.start_timer())
}
	/// Observe the size of the decompressed validation code.
	pub fn observe_code_size(&self, code_size: usize) {
if let Some(metrics) = &self.0 {
metrics.code_size.observe(code_size as f64);
}
}
	/// Observe the size of a candidate's PoV, labelled by whether it is compressed.
	pub fn observe_pov_size(&self, pov_size: usize, compressed: bool) {
if let Some(metrics) = &self.0 {
metrics
.pov_size
.with_label_values(&[if compressed { "true" } else { "false" }])
.observe(pov_size as f64);
}
}
}
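// A minimal usage sketch of the drop-to-observe timer pattern above: the
// returned `HistogramTimer` records the elapsed time into the histogram when it
// goes out of scope, and is `None` (a no-op) when metrics are not registered.
#[allow(dead_code)]
fn timer_usage_sketch(metrics: &Metrics) {
	let _timer = metrics.time_validate_from_chain_state();
	// ... the validation work being measured would happen here ...
	// `_timer` is dropped at the end of this scope, observing the duration.
}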
impl metrics::Metrics for Metrics {
fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
validation_requests: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_validation_requests_total",
"Number of validation requests served.",
),
&["validity"],
)?,
registry,
)?,
validate_from_chain_state: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_validation_validate_from_chain_state",
"Time spent within `candidate_validation::validate_from_chain_state`",
))?,
registry,
)?,
validate_from_exhaustive: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_validation_validate_from_exhaustive",
"Time spent within `candidate_validation::validate_from_exhaustive`",
))?,
registry,
)?,
validate_candidate_exhaustive: prometheus::register(
prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_validation_validate_candidate_exhaustive",
"Time spent within `candidate_validation::validate_candidate_exhaustive`",
))?,
registry,
)?,
pov_size: prometheus::register(
prometheus::HistogramVec::new(
prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_validation_pov_size",
"The compressed and decompressed size of the proof of validity of a candidate",
)
.buckets(
prometheus::exponential_buckets(16384.0, 2.0, 10)
.expect("arguments are always valid; qed"),
),
&["compressed"],
)?,
registry,
)?,
code_size: prometheus::register(
prometheus::Histogram::with_opts(
prometheus::HistogramOpts::new(
"polkadot_parachain_candidate_validation_code_size",
"The size of the decompressed WASM validation blob used for checking a candidate",
)
.buckets(
prometheus::exponential_buckets(16384.0, 2.0, 10)
.expect("arguments are always valid; qed"),
),
)?,
registry,
)?,
};
Ok(Metrics(Some(metrics)))
}
}
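// A minimal registration sketch, assuming a fresh local `prometheus::Registry`
// (in the node this wiring is done by the subsystem infrastructure):
#[cfg(test)]
mod registration_sketch {
	use super::*;

	#[test]
	fn registers_on_a_fresh_registry() {
		let registry = prometheus::Registry::new();
		assert!(<Metrics as metrics::Metrics>::try_register(&registry).is_ok());
	}
}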