Squashed 'bridges/' changes from b2099c5..23dda62 (#3369)

23dda62 Rococo <> Wococo messages relay (#1030)
bcde21d Update the wasm builder to substrate master (#1029)
a8318ce Make target signer optional when sending message. (#1018)
f8602e1 Fix insufficient balance when send message. (#1020)
d95c0a7 greedy relayer don't need message dispatch to be prepaid if dispatch is supposed to be paid at the target chain (#1016)
ad5876f Update types. (#1027)
116cbbc CI: fix starting the pipeline (#1022)
7e0fadd Add temporary `canary` job (#1019)
6787091 Update types to contain dispatch_fee_payment (#1017)
03f79ad Allow Root to assume SourceAccount. (#1011)
372d019 Return dispatch_fee_payment from message details RPC (#1014)
604eb1c Relay basic single-bit message dispatch results back to the source chain (#935)
bf52fff Use plain source_queue view when selecting nonces for delivery (#1010)
fc5cf7d pay dispatch fee at target chain (#911)
1e35477 Bump Substrate to `286d7ce` (#1006)
7ad07b3 Add --only-mandatory-headers mode (#1004)
5351dc9 Messages relayer operating mode (#995)
9bc29a7 Rococo <> Wococo relayer balance guard (#998)
bc17341 rename messages_dispatch_weight -> message_details (#996)
95be244 Bump Rococo and Wococo spec versions (#999)
c35567b Move ChainWithBalances::NativeBalance -> Chain::Balance (#990)
1bfece1 Fix some nits (#988)
334ea0f Increase pause before starting relays again (#989)
7fb8248 Fix clippy in test code (#993)
d60ae50 fix clippy issues (#991)
75ca813 Make sure GRANDPA shares state with RPC. (#987)
da2a38a Bump Substrate (#986)
5a9862f Update submit finality proof weight formula (#981)
69df513 Flag for rejecting all outbound messages (#982)
14d0506 Add script to setup bench machine. (#984)
e74e8ab Move CI from GitHub Actions to GitLab (#814)
c5ca5dd Custom justification verification (#979)
643f10d Always run on-demand headers relay in complex relay (#975)
a35b0ef Add JSON type definitions for Rococo<>Wococo bridge (#977)
0eb83f2 Update cargo.deny (#980)
e1d1f4c Bump Rococo/Wococo spec_version (#976)
deac90d increase pause before starting relays (#974)
68d6d79 Revert to use InspectCmd, bump substrate `6bef4f4` (#966)
66e1508 Avoid hashing headers twice in verify_justification (#973)
a31844f Bump `environmental` dependency (#972)
2a4c29a in auto-relays keep trying to connect to nodes until connection is established (#971)
0e767b3 removed stray file (#969)
b9545dc Serve multiple lanes with single complex relay instance (#964)
73419f4 Correct type error (#968)
bac256f Start finality relay spec-version guards for Rococo <> Wococo finality relays (#965)
bfd7037 pass source and target chain ids to account_ownership_proof (#963)
8436073 Upstream changes from Polkadot repo (#961)
e58d851 Increase account endowment amount (#960)

git-subtree-dir: bridges
git-subtree-split: 23dda6248236b27f20d76cbedc30e189cc6f736c
This commit is contained in:
Svyatoslav Nikolsky
2021-06-25 16:45:02 +03:00
committed by GitHub
parent 022e8bc11c
commit feefc34567
167 changed files with 7023 additions and 3239 deletions
+7 -3
View File
@@ -711,6 +711,7 @@ dependencies = [
name = "bp-header-chain"
version = "0.1.0"
dependencies = [
"assert_matches",
"bp-test-utils",
"finality-grandpa",
"frame-support",
@@ -726,10 +727,13 @@ dependencies = [
name = "bp-messages"
version = "0.1.0"
dependencies = [
"bitvec",
"bp-runtime",
"frame-support",
"frame-system",
"impl-trait-for-tuples",
"parity-scale-codec",
"serde",
"sp-std",
]
@@ -754,11 +758,12 @@ dependencies = [
name = "bp-rococo"
version = "0.1.0"
dependencies = [
"bp-header-chain",
"bp-messages",
"bp-polkadot-core",
"bp-runtime",
"frame-support",
"parity-scale-codec",
"smallvec 1.6.1",
"sp-api",
"sp-runtime",
"sp-std",
@@ -799,15 +804,14 @@ dependencies = [
name = "bp-wococo"
version = "0.1.0"
dependencies = [
"bp-header-chain",
"bp-messages",
"bp-polkadot-core",
"bp-rococo",
"bp-runtime",
"parity-scale-codec",
"sp-api",
"sp-runtime",
"sp-std",
"sp-version",
]
[[package]]
+1
View File
@@ -12,6 +12,7 @@ args
aren
async
Best/MS
benchmarking/MS
BlockId
BFT/M
bitfield/MS
+2
View File
@@ -2,6 +2,8 @@
lang = "en_US"
search_dirs = ["."]
extra_dictionaries = ["lingua.dic"]
skip_os_lookups = true
use_builtin = true
[hunspell.quirks]
# `Type`'s
-39
View File
@@ -1,39 +0,0 @@
name: Cargo deny
on:
pull_request:
schedule:
- cron: '0 0 * * *'
push:
branches:
- master
tags:
- v*
paths-ignore:
- '**.md'
- diagrams/*
- docs/*
jobs:
cargo-deny:
runs-on: ubuntu-latest
strategy:
matrix:
checks:
- advisories
- bans licenses sources
# Prevent sudden announcement of a new advisory from failing CI:
continue-on-error: ${{ matrix.checks == 'advisories' }}
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Cargo deny
uses: EmbarkStudios/cargo-deny-action@v1
with:
command: check ${{ matrix.checks }}
-65
View File
@@ -1,65 +0,0 @@
name: Check style
on:
pull_request:
push:
branches:
- master
tags:
- v*
paths-ignore:
- '**.md'
- diagrams/*
- docs/*
schedule: # Weekly build
- cron: '0 0 * * 0'
jobs:
## Check stage
check-fmt:
name: Check RustFmt
runs-on: ubuntu-latest
env:
RUST_BACKTRACE: full
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Add rustfmt
run: rustup component add rustfmt
- name: rust-fmt check
uses: actions-rs/cargo@master
with:
command: fmt
args: --all -- --check
check-spellcheck:
name: Check For Spelling and/or Grammar Mistakes
runs-on: ubuntu-latest
env:
RUST_BACKTRACE: full
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Add cargo-spellcheck
run: cargo install cargo-spellcheck
- name: Run spellcheck
run: cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs")
-76
View File
@@ -1,76 +0,0 @@
name: Publish Dependencies to Docker hub
on:
push:
tags:
- v*
paths-ignore:
- '**.md'
- diagrams/*
- docs/*
schedule: # Weekly build
- cron: '0 0 * * 0'
jobs:
## Publish to Docker hub
publish:
name: Publishing
runs-on: ubuntu-latest
container:
image: docker:git
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@v2
with:
fetch-depth: 5
submodules: recursive
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=paritytech/bridge-dependencies
VERSION=latest
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
fi
TAGS=${DOCKER_IMAGE}:${VERSION}
TAGS=$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8}
echo ::set-output name=TAGS::${TAGS}
echo ::set-output name=DATE::$(date +%d-%m-%Y)
- name: Build and push
uses: docker/build-push-action@v2
with:
file: deployments/BridgeDeps.Dockerfile
push: true
cache-from: type=registry,ref=paritytech/bridge-dependencies:latest
cache-to: type=inline
tags: ${{ steps.prep.outputs.TAGS }}
labels: |
org.opencontainers.image.title=bridge-dependencies
org.opencontainers.image.description=bridge-dependencies - component of Parity Bridges Common
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.url=https://github.com/paritytech/parity-bridges-common
org.opencontainers.image.documentation=https://github.com/paritytech/parity-bridges-common/README.md
org.opencontainers.image.created=${{ steps.prep.outputs.DATE }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.authors=devops-team@parity.io
org.opencontainers.image.vendor=Parity Technologies
org.opencontainers.image.licenses=GPL-3.0 License
-93
View File
@@ -1,93 +0,0 @@
name: Publish images to Docker hub
on:
push:
tags:
- v*
paths-ignore:
- '**.md'
- diagrams/*
- docs/*
schedule: # Nightly build
- cron: '0 1 * * *'
jobs:
## Publish to Docker hub
publish:
name: Publishing
strategy:
matrix:
project:
- rialto-bridge-node
- millau-bridge-node
- ethereum-poa-relay
- substrate-relay
include:
- project: rialto-bridge-node
healthcheck: http://localhost:9933/health
- project: millau-bridge-node
healthcheck: http://localhost:9933/health
- project: ethereum-poa-relay
healthcheck: http://localhost:9616/metrics
- project: substrate-relay
healthcheck: http://localhost:9616/metrics
runs-on: ubuntu-latest
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@v2
with:
fetch-depth: 5
submodules: recursive
- name: Prepare
id: prep
run: |
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
fi
TAGS="${VERSION} sha-${GITHUB_SHA::8} latest"
echo ::set-output name=TAGS::${VERSION}
echo ::set-output name=TAGS::${TAGS}
echo ::set-output name=DATE::$(date +%d-%m-%Y)
- name: Workaround rootless build
run: |
sudo apt-get install fuse-overlayfs
mkdir -vp ~/.config/containers
printf "[storage.options]\nmount_program=\"/usr/bin/fuse-overlayfs\"" > ~/.config/containers/storage.conf
- name: Build image for ${{ matrix.project }}
uses: redhat-actions/buildah-build@v2.2
with:
image: ${{ matrix.project }}
tags: ${{ steps.prep.outputs.TAGS }}
dockerfiles: ./Dockerfile
build-args: |
PROJECT=${{ matrix.project }}
HEALTH=${{ matrix.healthcheck }}
VCS_REF=sha-${GITHUB_SHA::8}
BUILD_DATE=${{ steps.prep.outputs.DATE }}
VERSION=${{ steps.prep.outputs.VERSION }}
- name: Push ${{ matrix.project }} image to docker.io
id: push-to-dockerhub
uses: redhat-actions/push-to-registry@v2.1.1
with:
registry: docker.io/paritytech
image: ${{ matrix.project }}
tags: ${{ steps.prep.outputs.TAGS }}
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Check the image
run: |
echo "New image has been pushed to ${{ steps.push-to-dockerhub.outputs.registry-path }}"
-175
View File
@@ -1,175 +0,0 @@
name: Compilation and Testing Suite
on:
pull_request:
push:
branches:
- master
tags:
- v*
paths-ignore:
- '**.md'
- diagrams/*
- docs/*
schedule: # Weekly build
- cron: '0 0 * * 0'
jobs:
## Check Stage
check-test:
name: Check and test
strategy:
matrix:
toolchain:
- stable
#- beta
- nightly-2021-04-10
runs-on: ubuntu-latest
env:
RUST_BACKTRACE: full
NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc.
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Install Toolchain
run: rustup toolchain add $NIGHTLY
- name: Add WASM Utilities
run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY
- name: Checking rust-${{ matrix.toolchain }}
uses: actions-rs/cargo@master
with:
command: check
toolchain: ${{ matrix.toolchain }}
args: --all --verbose
## Test Stage
- name: Testing rust-${{ matrix.toolchain }}
uses: actions-rs/cargo@master
if: matrix.toolchain == 'stable'
with:
command: test
toolchain: ${{ matrix.toolchain }}
args: --all --verbose
## Check Node Benchmarks
- name: Check Rialto benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }}
uses: actions-rs/cargo@master
with:
command: check
toolchain: ${{ matrix.toolchain }}
args: -p rialto-runtime --features runtime-benchmarks --verbose
- name: Check Millau benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }}
uses: actions-rs/cargo@master
with:
command: check
toolchain: ${{ matrix.toolchain }}
args: -p millau-runtime --features runtime-benchmarks --verbose
## Build Stage
build:
name: Build
strategy:
matrix:
toolchain:
- stable
#- beta
- nightly-2021-04-10
runs-on: ubuntu-latest
env:
RUST_BACKTRACE: full
NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc.
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Install Toolchain
run: rustup toolchain add $NIGHTLY
- name: Add WASM Utilities
run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY
- name: Building rust-${{ matrix.toolchain }}
uses: actions-rs/cargo@master
if: github.ref == 'refs/heads/master'
with:
command: build
toolchain: ${{ matrix.toolchain }}
args: --all --verbose
- name: Prepare artifacts
if: github.ref == 'refs/heads/master'
run: |
mkdir -p ./artifacts;
mv -v target/debug/rialto-bridge-node ./artifacts/;
mv -v target/debug/millau-bridge-node ./artifacts/;
mv -v target/debug/ethereum-poa-relay ./artifacts/;
mv -v target/debug/substrate-relay ./artifacts/;
shell: bash
- name: Upload artifacts
if: github.ref == 'refs/heads/master'
uses: actions/upload-artifact@v1
with:
name: ${{ matrix.toolchain }}.zip
path: artifacts/
## Linting Stage
clippy:
name: Clippy
runs-on: ubuntu-latest
env:
RUST_BACKTRACE: full
NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc.
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.4.1
with:
access_token: ${{ github.token }}
- name: Checkout sources & submodules
uses: actions/checkout@master
with:
fetch-depth: 5
submodules: recursive
- name: Install Toolchain
run: rustup toolchain add $NIGHTLY
- name: Add WASM Utilities
run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY
- name: Add clippy
run: rustup component add clippy --toolchain $NIGHTLY
- name: Rust Cache
uses: Swatinem/rust-cache@v1.2.0
- name: Clippy
uses: actions-rs/cargo@master
with:
command: clippy
toolchain: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc.
args: --all-targets -- -D warnings
+276
View File
@@ -0,0 +1,276 @@
stages:
- lint
- check
- test
- build
- publish
workflow:
rules:
- if: $CI_COMMIT_TAG
- if: $CI_COMMIT_BRANCH
variables: &default-vars
GIT_STRATEGY: fetch
GIT_DEPTH: 100
CARGO_INCREMENTAL: 0
ARCH: "x86_64"
CI_IMAGE: "paritytech/bridges-ci:production"
RUST_BACKTRACE: full
default:
cache: {}
.collect-artifacts: &collect-artifacts
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
when: on_success
expire_in: 7 days
paths:
- artifacts/
.kubernetes-build: &kubernetes-build
tags:
- kubernetes-parity-build
interruptible: true
.docker-env: &docker-env
image: "${CI_IMAGE}"
before_script:
- rustup show
- cargo --version
- rustup +nightly show
- cargo +nightly --version
- sccache -s
retry:
max: 2
when:
- runner_system_failure
- unknown_failure
- api_failure
interruptible: true
tags:
- linux-docker
.test-refs: &test-refs
rules:
# FIXME: This is the cause why pipelines wouldn't start. The problem might be in our custom
# mirroring. This should be investigated further, but for now let's have the working
# pipeline.
# - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH
# changes:
# - '**.md'
# - diagrams/*
# - docs/*
# when: never
- if: $CI_PIPELINE_SOURCE == "pipeline"
- if: $CI_PIPELINE_SOURCE == "web"
- if: $CI_PIPELINE_SOURCE == "schedule"
- if: $CI_COMMIT_REF_NAME == "master"
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
.build-refs: &build-refs
rules:
# won't run on the CI image update pipeline
- if: $CI_PIPELINE_SOURCE == "pipeline"
when: never
- if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1
# there are two types of nightly pipelines:
# 1. this one is triggered by the schedule with $PIPELINE == "nightly", it's for releasing.
# this job runs only on nightly pipeline with the mentioned variable, against `master` branch
- if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly"
.nightly-test: &nightly-test
rules:
# 2. another is triggered by scripts repo $CI_PIPELINE_SOURCE == "pipeline" it's for the CI image
# update, it also runs all the nightly checks.
- if: $CI_PIPELINE_SOURCE == "pipeline"
#### stage: lint
clippy-nightly:
stage: lint
<<: *docker-env
<<: *test-refs
variables:
RUSTFLAGS: "-D warnings"
script:
- cargo +nightly clippy --all-targets
# FIXME: remove when all the warns are fixed
allow_failure: true
fmt:
stage: lint
<<: *docker-env
<<: *test-refs
script:
- cargo fmt --all -- --check
spellcheck:
stage: lint
<<: *docker-env
<<: *test-refs
script:
- cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs")
#### stage: check
check:
stage: check
<<: *docker-env
<<: *test-refs
script: &check-script
- time cargo check --verbose --workspace
# Check Rialto benchmarks runtime
- time cargo check -p rialto-runtime --features runtime-benchmarks --verbose
# Check Millau benchmarks runtime
- time cargo check -p millau-runtime --features runtime-benchmarks --verbose
check-nightly:
stage: check
<<: *docker-env
<<: *nightly-test
script:
- rustup default nightly
- *check-script
#### stage: test
test:
stage: test
<<: *docker-env
<<: *test-refs
script: &test-script
- time cargo test --verbose --workspace
test-nightly:
stage: test
<<: *docker-env
<<: *nightly-test
script:
- rustup default nightly
- *test-script
deny:
stage: test
<<: *docker-env
<<: *nightly-test
<<: *collect-artifacts
script:
- cargo deny check advisories --hide-inclusion-graph
- cargo deny check bans sources --hide-inclusion-graph
after_script:
- mkdir -p ./artifacts
- echo "___Complete logs can be found in the artifacts___"
- cargo deny check advisories 2> advisories.log
- cargo deny check bans sources 2> bans_sources.log
# this job is allowed to fail, only licenses check is important
allow_failure: true
deny-licenses:
stage: test
<<: *docker-env
<<: *test-refs
<<: *collect-artifacts
script:
- cargo deny check licenses --hide-inclusion-graph
after_script:
- mkdir -p ./artifacts
- echo "___Complete logs can be found in the artifacts___"
- cargo deny check licenses 2> licenses.log
#### stage: build
build:
stage: build
<<: *docker-env
<<: *build-refs
<<: *collect-artifacts
# master
script: &build-script
- time cargo build --release --verbose --workspace
after_script:
# Prepare artifacts
- mkdir -p ./artifacts
- strip ./target/release/rialto-bridge-node
- mv -v ./target/release/rialto-bridge-node ./artifacts/
- strip ./target/release/millau-bridge-node
- mv -v ./target/release/millau-bridge-node ./artifacts/
- strip ./target/release/ethereum-poa-relay
- mv -v ./target/release/ethereum-poa-relay ./artifacts/
- strip ./target/release/substrate-relay
- mv -v ./target/release/substrate-relay ./artifacts/
- mv -v ./deployments/local-scripts/bridge-entrypoint.sh ./artifacts/
- mv -v ./ci.Dockerfile ./artifacts/
build-nightly:
stage: build
<<: *docker-env
<<: *collect-artifacts
<<: *nightly-test
script:
- rustup default nightly
- *build-script
#### stage: publish
.build-push-image: &build-push-image
<<: *kubernetes-build
image: quay.io/buildah/stable
<<: *build-refs
variables: &image-variables
GIT_STRATEGY: none
DOCKERFILE: ci.Dockerfile
IMAGE_NAME: docker.io/paritytech/$CI_JOB_NAME
needs:
- job: build
artifacts: true
before_script: &check-versions
- if [[ "${CI_COMMIT_TAG}" ]]; then
VERSION=${CI_COMMIT_TAG};
elif [[ "${CI_COMMIT_REF_NAME}" ]]; then
VERSION=$(echo ${CI_COMMIT_REF_NAME} | sed -r 's#/+#-#g');
fi
- echo "Effective tags = ${VERSION} sha-${CI_COMMIT_SHORT_SHA} latest"
script:
- test "${Docker_Hub_User_Parity}" -a "${Docker_Hub_Pass_Parity}" ||
( echo "no docker credentials provided"; exit 1 )
- cd ./artifacts
- buildah bud
--format=docker
--build-arg VCS_REF="${CI_COMMIT_SHORT_SHA}"
--build-arg BUILD_DATE="$(date +%d-%m-%Y)"
--build-arg PROJECT="${CI_JOB_NAME}"
--build-arg VERSION="${VERSION}"
--tag "${IMAGE_NAME}:${VERSION}"
--tag "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}"
--tag "${IMAGE_NAME}:latest"
--file "${DOCKERFILE}" .
# The job will success only on the protected branch
- echo "$Docker_Hub_Pass_Parity" |
buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io
- buildah info
- buildah push --format=v2s2 "${IMAGE_NAME}:${VERSION}"
- buildah push --format=v2s2 "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}"
- buildah push --format=v2s2 "${IMAGE_NAME}:latest"
after_script:
- env REGISTRY_AUTH_FILE= buildah logout "$IMAGE_NAME"
rialto-bridge-node:
stage: publish
<<: *build-push-image
millau-bridge-node:
stage: publish
<<: *build-push-image
ethereum-poa-relay:
stage: publish
<<: *build-push-image
substrate-relay:
stage: publish
<<: *build-push-image
# FIXME: publish binaries
+523 -419
View File
File diff suppressed because it is too large Load Diff
+6 -6
View File
@@ -8,14 +8,14 @@
#
# See the `deployments/README.md` for all the available `PROJECT` values.
FROM paritytech/bridge-dependencies as builder
FROM paritytech/bridges-ci:latest as builder
WORKDIR /parity-bridges-common
COPY . .
ARG PROJECT=ethereum-poa-relay
RUN cargo build --release --verbose -p ${PROJECT}
RUN strip ./target/release/${PROJECT}
RUN cargo build --release --verbose -p ${PROJECT} && \
strip ./target/release/${PROJECT}
# In this final stage we copy over the final binary and do some checks
# to make sure that everything looks good.
@@ -27,9 +27,9 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN set -eux; \
apt-get update && \
apt-get install -y curl ca-certificates && \
apt-get install -y --no-install-recommends libssl-dev && \
update-ca-certificates && \
apt-get install -y --no-install-recommends \
curl ca-certificates libssl-dev && \
update-ca-certificates && \
groupadd -g 1000 user && \
useradd -u 1000 -g user -s /bin/sh -m user && \
# apt clean up
+3 -2
View File
@@ -24,7 +24,7 @@ Substrate chains or Ethereum Proof-of-Authority chains.
To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web
Assembly (WASM) runtime for the node. You can configure the WASM support as so:
```
```bash
rustup install nightly
rustup target add wasm32-unknown-unknown --toolchain nightly
```
@@ -94,7 +94,7 @@ the `relays` which are used to pass messages between chains.
│ └── ...
├── relays // Application for sending headers and messages between chains
│ └── ...
└── scripts // Useful development and maintenence scripts
└── scripts // Useful development and maintenance scripts
```
## Running the Bridge
@@ -103,6 +103,7 @@ To run the Bridge you need to be able to connect the bridge relay node to the RP
on each side of the bridge (source and target chain).
There are 3 ways to run the bridge, described below:
- building & running from source,
- building or using Docker images for each individual component,
- running a Docker Compose setup (recommended).
+1 -1
View File
@@ -51,7 +51,7 @@ sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "mast
substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
[build-dependencies]
substrate-build-script-utils = "3.0.0"
substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
@@ -72,7 +72,7 @@ impl Alternative {
"tokenDecimals": 9,
"tokenSymbol": "MLAU",
"bridgeIds": {
"Rialto": bp_runtime::RIALTO_BRIDGE_INSTANCE,
"Rialto": bp_runtime::RIALTO_CHAIN_ID,
}
})
.as_object()
@@ -144,12 +144,21 @@ impl Alternative {
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Alice"),
)),
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Bob"),
)),
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Charlie"),
)),
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Dave"),
)),
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Eve"),
)),
derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
)),
],
true,
)
@@ -180,7 +189,7 @@ fn testnet_genesis(
changes_trie_config: Default::default(),
},
balances: BalancesConfig {
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 40)).collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
},
aura: AuraConfig {
authorities: Vec::new(),
+1 -1
View File
@@ -63,7 +63,7 @@ pub enum Subcommand {
Revert(sc_cli::RevertCmd),
/// Inspect blocks or extrinsics.
Inspect(node_inspect::cli::InspectKeyCmd),
Inspect(node_inspect::cli::InspectCmd),
/// Benchmark runtime pallets.
Benchmark(frame_benchmarking_cli::BenchmarkCmd),
+58 -50
View File
@@ -33,14 +33,13 @@ use sc_client_api::{ExecutorProvider, RemoteBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sp_consensus::SlotData;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
// Our native executor instance.
native_executor_instance!(
@@ -65,12 +64,7 @@ pub fn new_partial(
sp_consensus::DefaultImportQueue<Block, FullClient>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
sc_consensus_aura::AuraBlockImport<
Block,
FullClient,
sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
AuraPair,
>,
sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
),
@@ -80,6 +74,7 @@ pub fn new_partial(
if config.keystore_remote.is_some() {
return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
}
let telemetry = config
.telemetry_endpoints
.clone()
@@ -92,7 +87,7 @@ pub fn new_partial(
.transpose()?;
let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
&config,
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let client = Arc::new(client);
@@ -108,7 +103,7 @@ pub fn new_partial(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_handle(),
task_manager.spawn_essential_handle(),
client.clone(),
);
@@ -119,14 +114,11 @@ pub fn new_partial(
telemetry.as_ref().map(|x| x.handle()),
)?;
let aura_block_import =
sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
block_import: aura_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import)),
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
@@ -153,7 +145,7 @@ pub fn new_partial(
keystore_container,
select_chain,
transaction_pool,
other: (aura_block_import, grandpa_link, telemetry),
other: (grandpa_block_import, grandpa_link, telemetry),
})
}
@@ -194,16 +186,15 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
.extra_sets
.push(sc_finality_grandpa::grandpa_peers_set_config());
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: None,
block_announce_validator_builder: None,
})?;
let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: None,
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
@@ -215,6 +206,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
let rpc_extensions_builder = {
use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
@@ -230,7 +222,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let justification_stream = grandpa_link.justification_stream();
let shared_authority_set = grandpa_link.shared_authority_set().clone();
let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
let shared_voter_state = shared_voter_state.clone();
let finality_proof_provider =
GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
@@ -266,7 +258,6 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
on_demand: None,
remote_blockchain: None,
backend,
network_status_sinks,
system_rpc_tx,
config,
telemetry: telemetry.as_mut(),
@@ -286,7 +277,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let raw_slot_duration = slot_duration.slot_duration();
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
slot_duration,
client,
select_chain,
@@ -307,7 +298,9 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
keystore: keystore_container.sync_keystore(),
can_author_with,
sync_oracle: network.clone(),
justification_sync_link: network.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
max_block_proposal_slot_portion: None,
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
@@ -331,7 +324,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
name: Some(name),
observer_enabled: false,
keystore,
is_authority: role.is_authority(),
local_role: role,
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
@@ -348,7 +341,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
network,
voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
shared_voter_state,
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
@@ -397,24 +390,22 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
config.transaction_pool.clone(),
config.prometheus_registry(),
task_manager.spawn_handle(),
task_manager.spawn_essential_handle(),
client.clone(),
on_demand.clone(),
));
let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain,
telemetry.as_ref().map(|x| x.handle()),
)?;
let aura_block_import =
sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
block_import: aura_block_import,
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import)),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
@@ -434,21 +425,40 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
}
let enable_grandpa = !config.disable_grandpa;
if enable_grandpa {
let name = config.network.node_name.clone();
let config = sc_finality_grandpa::Config {
gossip_duration: std::time::Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore: None,
local_role: config.role.clone(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
task_manager.spawn_handle().spawn_blocking(
"grandpa-observer",
sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
);
}
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
remote_blockchain: Some(backend.remote_blockchain()),
transaction_pool,
@@ -460,12 +470,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
keystore: keystore_container.sync_keystore(),
backend,
network,
network_status_sinks,
system_rpc_tx,
telemetry: telemetry.as_mut(),
})?;
network_starter.start_network();
Ok(task_manager)
}
@@ -56,7 +56,7 @@ sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" ,
sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = ["std"]
+20 -7
View File
@@ -252,6 +252,7 @@ parameter_types! {
// For weight estimation, we assume that the most locks on an individual account will be 50.
// This number may need to be adjusted in the future if this assumption no longer holds true.
pub const MaxLocks: u32 = 50;
pub const MaxReserves: u32 = 50;
}
impl pallet_balances::Config for Runtime {
@@ -265,6 +266,8 @@ impl pallet_balances::Config for Runtime {
// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
type WeightInfo = ();
type MaxLocks = MaxLocks;
type MaxReserves = MaxReserves;
type ReserveIdentifier = [u8; 8];
}
parameter_types! {
@@ -381,6 +384,7 @@ impl pallet_bridge_messages::Config<WithRialtoMessagesInstance> for Runtime {
GetDeliveryConfirmationTransactionFee,
RootAccountForPayments,
>;
type OnDeliveryConfirmed = ();
type SourceHeaderChain = crate::rialto_messages::Rialto;
type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch;
@@ -600,17 +604,23 @@ impl_runtime_apis! {
).ok()
}
fn messages_dispatch_weight(
fn message_details(
lane: bp_messages::LaneId,
begin: bp_messages::MessageNonce,
end: bp_messages::MessageNonce,
) -> Vec<(bp_messages::MessageNonce, Weight, u32)> {
) -> Vec<bp_messages::MessageDetails<Balance>> {
(begin..=end).filter_map(|nonce| {
let encoded_payload = BridgeRialtoMessages::outbound_message_payload(lane, nonce)?;
let message_data = BridgeRialtoMessages::outbound_message_data(lane, nonce)?;
let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode(
&mut &encoded_payload[..]
&mut &message_data.payload[..]
).ok()?;
Some((nonce, decoded_payload.weight, encoded_payload.len() as _))
Some(bp_messages::MessageDetails {
nonce,
dispatch_weight: decoded_payload.weight,
size: message_data.payload.len() as _,
delivery_and_dispatch_fee: message_data.fee,
dispatch_fee_payment: decoded_payload.dispatch_fee_payment,
})
})
.collect()
}
@@ -644,7 +654,7 @@ impl_runtime_apis! {
/// The byte vector returned by this function should be signed with a Rialto account private key.
/// This way, the owner of `millau_account_id` on Millau proves that the Rialto account private key
/// is also under his control.
pub fn rialto_account_ownership_digest<Call, AccountId, SpecVersion>(
pub fn millau_to_rialto_account_ownership_digest<Call, AccountId, SpecVersion>(
rialto_call: &Call,
millau_account_id: AccountId,
rialto_spec_version: SpecVersion,
@@ -658,7 +668,8 @@ where
rialto_call,
millau_account_id,
rialto_spec_version,
bp_runtime::MILLAU_BRIDGE_INSTANCE,
bp_runtime::MILLAU_CHAIN_ID,
bp_runtime::RIALTO_CHAIN_ID,
)
}
@@ -676,6 +687,7 @@ mod tests {
bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT,
bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT,
bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT,
);
let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add(
@@ -691,6 +703,7 @@ mod tests {
let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
)
.unwrap_or(u32::MAX);
pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights>(
@@ -23,7 +23,7 @@ use bp_messages::{
target_chain::{ProvedMessages, SourceHeaderChain},
InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter,
};
use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE};
use bp_runtime::{ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID};
use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction};
use codec::{Decode, Encode};
use frame_support::{
@@ -52,7 +52,7 @@ pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifie
pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload<WithRialtoMessageBridge>;
/// Encoded Millau Call as it comes from Rialto.
pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<WithRialtoMessageBridge>;
pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
/// Messages proof for Rialto -> Millau messages.
type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof<bp_rialto::Hash>;
@@ -64,6 +64,7 @@ type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesD
pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch<
WithRialtoMessageBridge,
crate::Runtime,
pallet_balances::Pallet<Runtime>,
pallet_bridge_dispatch::DefaultInstance,
>;
@@ -72,12 +73,13 @@ pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDi
pub struct WithRialtoMessageBridge;
impl MessageBridge for WithRialtoMessageBridge {
const INSTANCE: InstanceId = RIALTO_BRIDGE_INSTANCE;
const RELAYER_FEE_PERCENT: u32 = 10;
const THIS_CHAIN_ID: ChainId = MILLAU_CHAIN_ID;
const BRIDGED_CHAIN_ID: ChainId = RIALTO_CHAIN_ID;
type ThisChain = Millau;
type BridgedChain = Rialto;
type BridgedMessagesInstance = crate::WithRialtoMessagesInstance;
fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance {
bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance))
@@ -96,8 +98,6 @@ impl messages::ChainWithMessages for Millau {
type Signature = bp_millau::Signature;
type Weight = Weight;
type Balance = bp_millau::Balance;
type MessagesInstance = crate::WithRialtoMessagesInstance;
}
impl messages::ThisChainWithMessages for Millau {
@@ -112,9 +112,12 @@ impl messages::ThisChainWithMessages for Millau {
}
fn estimate_delivery_confirmation_transaction() -> MessageTransaction<Weight> {
let inbound_data_size =
InboundLaneData::<bp_millau::AccountId>::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1)
.unwrap_or(u32::MAX);
let inbound_data_size = InboundLaneData::<bp_millau::AccountId>::encoded_size_hint(
bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
1,
1,
)
.unwrap_or(u32::MAX);
MessageTransaction {
dispatch_weight: bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
@@ -147,8 +150,6 @@ impl messages::ChainWithMessages for Rialto {
type Signature = bp_rialto::Signature;
type Weight = Weight;
type Balance = bp_rialto::Balance;
type MessagesInstance = pallet_bridge_messages::DefaultInstance;
}
impl messages::BridgedChainWithMessages for Rialto {
@@ -170,6 +171,7 @@ impl messages::BridgedChainWithMessages for Rialto {
fn estimate_delivery_transaction(
message_payload: &[u8],
include_pay_dispatch_fee_cost: bool,
message_dispatch_weight: Weight,
) -> MessageTransaction<Weight> {
let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX);
@@ -180,6 +182,11 @@ impl messages::BridgedChainWithMessages for Rialto {
dispatch_weight: extra_bytes_in_payload
.saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT)
.saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT)
.saturating_sub(if include_pay_dispatch_fee_cost {
0
} else {
bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT
})
.saturating_add(message_dispatch_weight),
size: message_payload_len
.saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE)
+1 -1
View File
@@ -52,7 +52,7 @@ sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "mast
substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
[build-dependencies]
substrate-build-script-utils = "3.0.0"
substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
@@ -73,7 +73,7 @@ impl Alternative {
"tokenDecimals": 9,
"tokenSymbol": "RLT",
"bridgeIds": {
"Millau": bp_runtime::MILLAU_BRIDGE_INSTANCE,
"Millau": bp_runtime::MILLAU_CHAIN_ID,
}
})
.as_object()
@@ -142,12 +142,21 @@ impl Alternative {
rialto_runtime::Runtime,
pallet_bridge_messages::DefaultInstance,
>::relayer_fund_account_id(),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Alice"),
)),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Bob"),
)),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Charlie"),
)),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Dave"),
)),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Eve"),
)),
derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
)),
@@ -181,7 +190,7 @@ fn testnet_genesis(
changes_trie_config: Default::default(),
},
balances: BalancesConfig {
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 40)).collect(),
balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
},
aura: AuraConfig {
authorities: Vec::new(),
+1 -1
View File
@@ -63,7 +63,7 @@ pub enum Subcommand {
Revert(sc_cli::RevertCmd),
/// Inspect blocks or extrinsics.
Inspect(node_inspect::cli::InspectKeyCmd),
Inspect(node_inspect::cli::InspectCmd),
/// Benchmark runtime pallets.
Benchmark(frame_benchmarking_cli::BenchmarkCmd),
+61 -50
View File
@@ -28,19 +28,20 @@
// =====================================================================================
// =====================================================================================
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use rialto_runtime::{self, opaque::Block, RuntimeApi};
use sc_client_api::{ExecutorProvider, RemoteBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sc_finality_grandpa::SharedVoterState;
use sc_keystore::LocalKeystore;
use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sp_consensus::SlotData;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use std::sync::Arc;
use std::time::Duration;
use std::{sync::Arc, time::Duration};
// Our native executor instance.
native_executor_instance!(
@@ -65,12 +66,7 @@ pub fn new_partial(
sp_consensus::DefaultImportQueue<Block, FullClient>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
sc_consensus_aura::AuraBlockImport<
Block,
FullClient,
sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
AuraPair,
>,
sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
),
@@ -93,7 +89,7 @@ pub fn new_partial(
.transpose()?;
let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
&config,
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let client = Arc::new(client);
@@ -109,7 +105,7 @@ pub fn new_partial(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_handle(),
task_manager.spawn_essential_handle(),
client.clone(),
);
@@ -120,14 +116,11 @@ pub fn new_partial(
telemetry.as_ref().map(|x| x.handle()),
)?;
let aura_block_import =
sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
block_import: aura_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import)),
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
@@ -154,7 +147,7 @@ pub fn new_partial(
keystore_container,
select_chain,
transaction_pool,
other: (aura_block_import, grandpa_link, telemetry),
other: (grandpa_block_import, grandpa_link, telemetry),
})
}
@@ -195,16 +188,15 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
.extra_sets
.push(sc_finality_grandpa::grandpa_peers_set_config());
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: None,
block_announce_validator_builder: None,
})?;
let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: None,
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
@@ -217,6 +209,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
let rpc_extensions_builder = {
use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
@@ -231,7 +225,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let justification_stream = grandpa_link.justification_stream();
let shared_authority_set = grandpa_link.shared_authority_set().clone();
let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
let shared_voter_state = shared_voter_state.clone();
let finality_proof_provider =
GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
@@ -268,7 +262,6 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
on_demand: None,
remote_blockchain: None,
backend,
network_status_sinks,
system_rpc_tx,
config,
telemetry: telemetry.as_mut(),
@@ -287,7 +280,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let raw_slot_duration = slot_duration.slot_duration();
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
slot_duration,
client,
select_chain,
@@ -308,7 +302,9 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
keystore: keystore_container.sync_keystore(),
can_author_with,
sync_oracle: network.clone(),
justification_sync_link: network.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
max_block_proposal_slot_portion: None,
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
@@ -332,7 +328,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
name: Some(name),
observer_enabled: false,
keystore,
is_authority: role.is_authority(),
local_role: role,
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
@@ -349,7 +345,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
network,
voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
shared_voter_state,
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
@@ -398,24 +394,22 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
config.transaction_pool.clone(),
config.prometheus_registry(),
task_manager.spawn_handle(),
task_manager.spawn_essential_handle(),
client.clone(),
on_demand.clone(),
));
let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain,
telemetry.as_ref().map(|x| x.handle()),
)?;
let aura_block_import =
sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone());
let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
block_import: aura_block_import,
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import)),
client: client.clone(),
create_inherent_data_providers: move |_, ()| async move {
@@ -435,21 +429,40 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
}
let enable_grandpa = !config.disable_grandpa;
if enable_grandpa {
let name = config.network.node_name.clone();
let config = sc_finality_grandpa::Config {
gossip_duration: std::time::Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore: None,
local_role: config.role.clone(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
task_manager.spawn_handle().spawn_blocking(
"grandpa-observer",
sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
);
}
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
remote_blockchain: Some(backend.remote_blockchain()),
transaction_pool,
@@ -461,12 +474,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
keystore: keystore_container.sync_keystore(),
backend,
network,
network_status_sinks,
system_rpc_tx,
telemetry: telemetry.as_mut(),
})?;
network_starter.start_network();
Ok(task_manager)
}
@@ -68,7 +68,7 @@ sp-version = { git = "https://github.com/paritytech/substrate", branch = "master
libsecp256k1 = { version = "0.3.4", features = ["hmac"] }
[build-dependencies]
substrate-wasm-builder = "3.0.0"
substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
[features]
default = ["std"]
+51 -20
View File
@@ -359,6 +359,7 @@ parameter_types! {
// For weight estimation, we assume that the most locks on an individual account will be 50.
// This number may need to be adjusted in the future if this assumption no longer holds true.
pub const MaxLocks: u32 = 50;
pub const MaxReserves: u32 = 50;
}
impl pallet_balances::Config for Runtime {
@@ -372,6 +373,8 @@ impl pallet_balances::Config for Runtime {
// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
type WeightInfo = ();
type MaxLocks = MaxLocks;
type MaxReserves = MaxReserves;
type ReserveIdentifier = [u8; 8];
}
parameter_types! {
@@ -488,6 +491,7 @@ impl pallet_bridge_messages::Config<WithMillauMessagesInstance> for Runtime {
GetDeliveryConfirmationTransactionFee,
RootAccountForPayments,
>;
type OnDeliveryConfirmed = ();
type SourceHeaderChain = crate::millau_messages::Millau;
type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch;
@@ -751,17 +755,23 @@ impl_runtime_apis! {
).ok()
}
fn messages_dispatch_weight(
fn message_details(
lane: bp_messages::LaneId,
begin: bp_messages::MessageNonce,
end: bp_messages::MessageNonce,
) -> Vec<(bp_messages::MessageNonce, Weight, u32)> {
) -> Vec<bp_messages::MessageDetails<Balance>> {
(begin..=end).filter_map(|nonce| {
let encoded_payload = BridgeMillauMessages::outbound_message_payload(lane, nonce)?;
let message_data = BridgeMillauMessages::outbound_message_data(lane, nonce)?;
let decoded_payload = millau_messages::ToMillauMessagePayload::decode(
&mut &encoded_payload[..]
&mut &message_data.payload[..]
).ok()?;
Some((nonce, decoded_payload.weight, encoded_payload.len() as _))
Some(bp_messages::MessageDetails {
nonce,
dispatch_weight: decoded_payload.weight,
size: message_data.payload.len() as _,
delivery_and_dispatch_fee: message_data.fee,
dispatch_fee_payment: decoded_payload.dispatch_fee_payment,
})
})
.collect()
}
@@ -853,6 +863,7 @@ impl_runtime_apis! {
}
use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge};
use bp_runtime::messages::DispatchFeePayment;
use bridge_runtime_common::messages;
use pallet_bridge_messages::benchmarking::{
Pallet as MessagesBench,
@@ -896,6 +907,7 @@ impl_runtime_apis! {
weight: params.size as _,
origin: dispatch_origin,
call: message_payload,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
};
(message, pallet_bridge_messages::benchmarking::MESSAGE_FEE.into())
}
@@ -903,16 +915,16 @@ impl_runtime_apis! {
fn prepare_message_proof(
params: MessageProofParams,
) -> (millau_messages::FromMillauMessagesProof, Weight) {
use crate::millau_messages::{Millau, WithMillauMessageBridge};
use crate::millau_messages::WithMillauMessageBridge;
use bp_messages::MessageKey;
use bridge_runtime_common::{
messages::ChainWithMessages,
messages::MessageBridge,
messages_benchmarking::{ed25519_sign, prepare_message_proof},
};
use codec::Encode;
use frame_support::weights::GetDispatchInfo;
use pallet_bridge_messages::storage_keys;
use sp_runtime::traits::Header;
use sp_runtime::traits::{Header, IdentifyAccount};
let remark = match params.size {
MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _],
@@ -925,20 +937,26 @@ impl_runtime_apis! {
let (rialto_raw_public, rialto_raw_signature) = ed25519_sign(
&call,
&millau_account_id,
VERSION.spec_version,
bp_runtime::MILLAU_CHAIN_ID,
bp_runtime::RIALTO_CHAIN_ID,
);
let rialto_public = MultiSigner::Ed25519(sp_core::ed25519::Public::from_raw(rialto_raw_public));
let rialto_signature = MultiSignature::Ed25519(sp_core::ed25519::Signature::from_raw(
rialto_raw_signature,
));
if params.dispatch_fee_payment == DispatchFeePayment::AtTargetChain {
Self::endow_account(&rialto_public.clone().into_account());
}
let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::<
Runtime,
<Millau as ChainWithMessages>::MessagesInstance,
<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
>(
&message_key.lane_id, message_key.nonce,
).0;
let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::<
<Millau as ChainWithMessages>::MessagesInstance,
<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
>(
&lane_id,
).0;
@@ -951,6 +969,7 @@ impl_runtime_apis! {
Default::default(),
);
let dispatch_fee_payment = params.dispatch_fee_payment.clone();
prepare_message_proof::<WithMillauMessageBridge, bp_millau::Hasher, Runtime, (), _, _, _>(
params,
make_millau_message_key,
@@ -969,6 +988,7 @@ impl_runtime_apis! {
rialto_public,
rialto_signature,
),
dispatch_fee_payment,
call: call.encode(),
}.encode(),
)
@@ -977,18 +997,14 @@ impl_runtime_apis! {
fn prepare_message_delivery_proof(
params: MessageDeliveryProofParams<Self::AccountId>,
) -> millau_messages::ToMillauMessagesDeliveryProof {
use crate::millau_messages::{Millau, WithMillauMessageBridge};
use bridge_runtime_common::{
messages::ChainWithMessages,
messages_benchmarking::prepare_message_delivery_proof,
};
use crate::millau_messages::WithMillauMessageBridge;
use bridge_runtime_common::{messages_benchmarking::prepare_message_delivery_proof};
use sp_runtime::traits::Header;
prepare_message_delivery_proof::<WithMillauMessageBridge, bp_millau::Hasher, Runtime, (), _, _>(
params,
|lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::<
Runtime,
<Millau as ChainWithMessages>::MessagesInstance,
<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
>(
&lane_id,
).0,
@@ -1001,6 +1017,18 @@ impl_runtime_apis! {
),
)
}
fn is_message_dispatched(nonce: bp_messages::MessageNonce) -> bool {
frame_system::Pallet::<Runtime>::events()
.into_iter()
.map(|event_record| event_record.event)
.any(|event| matches!(
event,
Event::BridgeDispatch(pallet_bridge_dispatch::Event::<Runtime, _>::MessageDispatched(
_, ([0, 0, 0, 0], nonce_from_event), _,
)) if nonce_from_event == nonce
))
}
}
add_benchmark!(
@@ -1028,7 +1056,7 @@ impl_runtime_apis! {
/// The byte vector returned by this function should be signed with a Millau account private key.
/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key
/// is also under his control.
pub fn millau_account_ownership_digest<Call, AccountId, SpecVersion>(
pub fn rialto_to_millau_account_ownership_digest<Call, AccountId, SpecVersion>(
millau_call: &Call,
rialto_account_id: AccountId,
millau_spec_version: SpecVersion,
@@ -1042,7 +1070,8 @@ where
millau_call,
rialto_account_id,
millau_spec_version,
bp_runtime::RIALTO_BRIDGE_INSTANCE,
bp_runtime::RIALTO_CHAIN_ID,
bp_runtime::MILLAU_CHAIN_ID,
)
}
@@ -1095,6 +1124,7 @@ mod tests {
bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT,
bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT,
bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT,
);
let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add(
@@ -1110,6 +1140,7 @@ mod tests {
let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
)
.unwrap_or(u32::MAX);
pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights>(
@@ -23,7 +23,7 @@ use bp_messages::{
target_chain::{ProvedMessages, SourceHeaderChain},
InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter,
};
use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE};
use bp_runtime::{ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID};
use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction};
use codec::{Decode, Encode};
use frame_support::{
@@ -52,12 +52,13 @@ pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifie
pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload<WithMillauMessageBridge>;
/// Encoded Rialto Call as it comes from Millau.
pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<WithMillauMessageBridge>;
pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
/// Call-dispatch based message dispatch for Millau -> Rialto messages.
pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch<
WithMillauMessageBridge,
crate::Runtime,
pallet_balances::Pallet<Runtime>,
pallet_bridge_dispatch::DefaultInstance,
>;
@@ -72,12 +73,13 @@ pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessa
pub struct WithMillauMessageBridge;
impl MessageBridge for WithMillauMessageBridge {
const INSTANCE: InstanceId = MILLAU_BRIDGE_INSTANCE;
const RELAYER_FEE_PERCENT: u32 = 10;
const THIS_CHAIN_ID: ChainId = RIALTO_CHAIN_ID;
const BRIDGED_CHAIN_ID: ChainId = MILLAU_CHAIN_ID;
type ThisChain = Rialto;
type BridgedChain = Millau;
type BridgedMessagesInstance = crate::WithMillauMessagesInstance;
fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance {
bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance))
@@ -96,8 +98,6 @@ impl messages::ChainWithMessages for Rialto {
type Signature = bp_rialto::Signature;
type Weight = Weight;
type Balance = bp_rialto::Balance;
type MessagesInstance = crate::WithMillauMessagesInstance;
}
impl messages::ThisChainWithMessages for Rialto {
@@ -112,9 +112,12 @@ impl messages::ThisChainWithMessages for Rialto {
}
fn estimate_delivery_confirmation_transaction() -> MessageTransaction<Weight> {
let inbound_data_size =
InboundLaneData::<bp_rialto::AccountId>::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1)
.unwrap_or(u32::MAX);
let inbound_data_size = InboundLaneData::<bp_rialto::AccountId>::encoded_size_hint(
bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
1,
1,
)
.unwrap_or(u32::MAX);
MessageTransaction {
dispatch_weight: bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
@@ -147,8 +150,6 @@ impl messages::ChainWithMessages for Millau {
type Signature = bp_millau::Signature;
type Weight = Weight;
type Balance = bp_millau::Balance;
type MessagesInstance = pallet_bridge_messages::DefaultInstance;
}
impl messages::BridgedChainWithMessages for Millau {
@@ -170,6 +171,7 @@ impl messages::BridgedChainWithMessages for Millau {
fn estimate_delivery_transaction(
message_payload: &[u8],
include_pay_dispatch_fee_cost: bool,
message_dispatch_weight: Weight,
) -> MessageTransaction<Weight> {
let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX);
@@ -180,6 +182,11 @@ impl messages::BridgedChainWithMessages for Millau {
dispatch_weight: extra_bytes_in_payload
.saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT)
.saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT)
.saturating_sub(if include_pay_dispatch_fee_cost {
0
} else {
bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT
})
.saturating_add(message_dispatch_weight),
size: message_payload_len
.saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE)
@@ -256,3 +263,87 @@ impl MessagesParameter for RialtoToMillauMessagesParameter {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{AccountId, Call, ExistentialDeposit, Runtime, SystemCall, SystemConfig, VERSION};
use bp_message_dispatch::CallOrigin;
use bp_messages::{
target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch},
MessageKey,
};
use bp_runtime::{derive_account_id, messages::DispatchFeePayment, SourceAccount};
use bridge_runtime_common::messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload};
use frame_support::{
traits::Currency,
weights::{GetDispatchInfo, WeightToFeePolynomial},
};
use sp_runtime::traits::Convert;
#[test]
fn transfer_happens_when_dispatch_fee_is_paid_at_target_chain() {
// this test actually belongs to the `bridge-runtime-common` crate, but there we have no
// mock runtime. Making another one there just for this test, given that both crates
// live n single repo is an overkill
let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::<Runtime>().unwrap().into();
ext.execute_with(|| {
let bridge = MILLAU_CHAIN_ID;
let call: Call = SystemCall::remark(vec![]).into();
let dispatch_weight = call.get_dispatch_info().weight;
let dispatch_fee = <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&dispatch_weight);
assert!(dispatch_fee > 0);
// create relayer account with minimal balance
let relayer_account: AccountId = [1u8; 32].into();
let initial_amount = ExistentialDeposit::get();
let _ = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
&relayer_account,
initial_amount,
);
// create dispatch account with minimal balance + dispatch fee
let dispatch_account = derive_account_id::<<Runtime as pallet_bridge_dispatch::Config>::SourceChainAccountId>(
bridge,
SourceAccount::Root,
);
let dispatch_account =
<Runtime as pallet_bridge_dispatch::Config>::AccountIdConverter::convert(dispatch_account);
let _ = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
&dispatch_account,
initial_amount + dispatch_fee,
);
// dispatch message with intention to pay dispatch fee at the target chain
FromMillauMessageDispatch::dispatch(
&relayer_account,
DispatchMessage {
key: MessageKey {
lane_id: Default::default(),
nonce: 0,
},
data: DispatchMessageData {
payload: Ok(FromBridgedChainMessagePayload::<WithMillauMessageBridge> {
spec_version: VERSION.spec_version,
weight: dispatch_weight,
origin: CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
call: FromBridgedChainEncodedMessageCall::new(call.encode()),
}),
fee: 1,
},
},
);
// ensure that fee has been transferred from dispatch to relayer account
assert_eq!(
<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&relayer_account),
initial_amount + dispatch_fee,
);
assert_eq!(
<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&dispatch_account),
initial_amount,
);
});
}
}
@@ -24,6 +24,7 @@ pallet-bridge-messages = { path = "../../modules/messages", default-features = f
# Substrate dependencies
frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
@@ -42,6 +43,7 @@ std = [
"pallet-bridge-dispatch/std",
"pallet-bridge-grandpa/std",
"pallet-bridge-messages/std",
"pallet-transaction-payment/std",
"sp-core/std",
"sp-runtime/std",
"sp-state-machine/std",
@@ -102,7 +102,9 @@ This trait represents this chain from bridge point of view. Let's review every m
have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight.
- `MessageBridge::estimate_delivery_transaction`: you will need to return estimated dispatch weight and
size of the delivery transaction that delivers a given message to the target chain.
size of the delivery transaction that delivers a given message to the target chain. The transaction
weight must or must not include the weight of pay-dispatch-fee operation, depending on the value
of `include_pay_dispatch_fee_cost` argument.
- `MessageBridge::transaction_payment`: you'll need to return fee that the submitter
must pay for given transaction on bridged chain. The best case is when you have the same conversion
@@ -26,9 +26,16 @@ use bp_messages::{
target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages},
InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData,
};
use bp_runtime::{InstanceId, Size, StorageProofChecker};
use bp_runtime::{
messages::{DispatchFeePayment, MessageDispatchResult},
ChainId, Size, StorageProofChecker,
};
use codec::{Decode, Encode};
use frame_support::{traits::Instance, weights::Weight, RuntimeDebug};
use frame_support::{
traits::{Currency, ExistenceRequirement, Instance},
weights::{Weight, WeightToFeePolynomial},
RuntimeDebug,
};
use hash_db::Hasher;
use sp_runtime::{
traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul},
@@ -39,16 +46,20 @@ use sp_trie::StorageProof;
/// Bidirectional message bridge.
pub trait MessageBridge {
/// Instance id of this bridge.
const INSTANCE: InstanceId;
/// Relayer interest (in percents).
const RELAYER_FEE_PERCENT: u32;
/// Identifier of this chain.
const THIS_CHAIN_ID: ChainId;
/// Identifier of the Bridged chain.
const BRIDGED_CHAIN_ID: ChainId;
/// This chain in context of message bridge.
type ThisChain: ThisChainWithMessages;
/// Bridged chain in context of message bridge.
type BridgedChain: BridgedChainWithMessages;
/// Instance of the `pallet-bridge-messages` pallet at the Bridged chain.
type BridgedMessagesInstance: Instance;
/// Convert Bridged chain balance into This chain balance.
fn bridged_balance_to_this_balance(bridged_balance: BalanceOf<BridgedChain<Self>>) -> BalanceOf<ThisChain<Self>>;
@@ -71,9 +82,6 @@ pub trait ChainWithMessages {
type Weight: From<frame_support::weights::Weight> + PartialOrd;
/// Type of balances that is used on the chain.
type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From<u32> + Copy;
/// Instance of the `pallet-bridge-messages` pallet.
type MessagesInstance: Instance;
}
/// Message related transaction parameters estimation.
@@ -124,6 +132,7 @@ pub trait BridgedChainWithMessages: ChainWithMessages {
/// Estimate size and weight of single message delivery transaction at the Bridged chain.
fn estimate_delivery_transaction(
message_payload: &[u8],
include_pay_dispatch_fee_cost: bool,
message_dispatch_weight: WeightOf<Self>,
) -> MessageTransaction<WeightOf<Self>>;
@@ -139,7 +148,6 @@ pub(crate) type SignerOf<C> = <C as ChainWithMessages>::Signer;
pub(crate) type SignatureOf<C> = <C as ChainWithMessages>::Signature;
pub(crate) type WeightOf<C> = <C as ChainWithMessages>::Weight;
pub(crate) type BalanceOf<C> = <C as ChainWithMessages>::Balance;
pub(crate) type MessagesInstanceOf<C> = <C as ChainWithMessages>::MessagesInstance;
pub(crate) type CallOf<C> = <C as ThisChainWithMessages>::Call;
@@ -326,8 +334,19 @@ pub mod source {
relayer_fee_percent: u32,
) -> Result<BalanceOf<ThisChain<B>>, &'static str> {
// the fee (in Bridged tokens) of all transactions that are made on the Bridged chain
let delivery_transaction =
BridgedChain::<B>::estimate_delivery_transaction(&payload.call, payload.weight.into());
//
// if we're going to pay dispatch fee at the target chain, then we don't include weight
// of the message dispatch in the delivery transaction cost
let pay_dispatch_fee_at_target_chain = payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
let delivery_transaction = BridgedChain::<B>::estimate_delivery_transaction(
&payload.call,
pay_dispatch_fee_at_target_chain,
if pay_dispatch_fee_at_target_chain {
0.into()
} else {
payload.weight.into()
},
);
let delivery_transaction_fee = BridgedChain::<B>::transaction_payment(delivery_transaction);
// the fee (in This tokens) of all transactions that are made on This chain
@@ -357,7 +376,6 @@ pub mod source {
) -> Result<ParsedMessagesDeliveryProofFromBridgedChain<B>, &'static str>
where
ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
ThisRuntime: pallet_bridge_messages::Config<MessagesInstanceOf<BridgedChain<B>>>,
HashOf<BridgedChain<B>>:
Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
{
@@ -372,10 +390,8 @@ pub mod source {
|storage| {
// Messages delivery proof is just proof of single storage key read => any error
// is fatal.
let storage_inbound_lane_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::<
ThisRuntime,
MessagesInstanceOf<BridgedChain<B>>,
>(&lane);
let storage_inbound_lane_data_key =
pallet_bridge_messages::storage_keys::inbound_lane_data_key::<B::BridgedMessagesInstance>(&lane);
let raw_inbound_lane_data = storage
.read_value(storage_inbound_lane_data_key.0.as_ref())
.map_err(|_| "Failed to read inbound lane state from storage proof")?
@@ -406,7 +422,7 @@ pub mod target {
AccountIdOf<BridgedChain<B>>,
SignerOf<ThisChain<B>>,
SignatureOf<ThisChain<B>>,
FromBridgedChainEncodedMessageCall<B>,
FromBridgedChainEncodedMessageCall<CallOf<ThisChain<B>>>,
>;
/// Messages proof from bridged chain:
@@ -444,33 +460,51 @@ pub mod target {
/// Our Call is opaque (`Vec<u8>`) for Bridged chain. So it is encoded, prefixed with
/// vector length. Custom decode implementation here is exactly to deal with this.
#[derive(Decode, Encode, RuntimeDebug, PartialEq)]
pub struct FromBridgedChainEncodedMessageCall<B> {
pub(crate) encoded_call: Vec<u8>,
pub(crate) _marker: PhantomData<B>,
pub struct FromBridgedChainEncodedMessageCall<DecodedCall> {
encoded_call: Vec<u8>,
_marker: PhantomData<DecodedCall>,
}
impl<B: MessageBridge> From<FromBridgedChainEncodedMessageCall<B>> for Result<CallOf<ThisChain<B>>, ()> {
fn from(encoded_call: FromBridgedChainEncodedMessageCall<B>) -> Self {
CallOf::<ThisChain<B>>::decode(&mut &encoded_call.encoded_call[..]).map_err(drop)
impl<DecodedCall> FromBridgedChainEncodedMessageCall<DecodedCall> {
/// Create encoded call.
pub fn new(encoded_call: Vec<u8>) -> Self {
FromBridgedChainEncodedMessageCall {
encoded_call,
_marker: PhantomData::default(),
}
}
}
impl<DecodedCall: Decode> From<FromBridgedChainEncodedMessageCall<DecodedCall>> for Result<DecodedCall, ()> {
fn from(encoded_call: FromBridgedChainEncodedMessageCall<DecodedCall>) -> Self {
DecodedCall::decode(&mut &encoded_call.encoded_call[..]).map_err(drop)
}
}
/// Dispatching Bridged -> This chain messages.
#[derive(RuntimeDebug, Clone, Copy)]
pub struct FromBridgedChainMessageDispatch<B, ThisRuntime, ThisDispatchInstance> {
_marker: PhantomData<(B, ThisRuntime, ThisDispatchInstance)>,
pub struct FromBridgedChainMessageDispatch<B, ThisRuntime, ThisCurrency, ThisDispatchInstance> {
_marker: PhantomData<(B, ThisRuntime, ThisCurrency, ThisDispatchInstance)>,
}
impl<B: MessageBridge, ThisRuntime, ThisDispatchInstance>
MessageDispatch<<BridgedChain<B> as ChainWithMessages>::Balance>
for FromBridgedChainMessageDispatch<B, ThisRuntime, ThisDispatchInstance>
impl<B: MessageBridge, ThisRuntime, ThisCurrency, ThisDispatchInstance>
MessageDispatch<AccountIdOf<ThisChain<B>>, BalanceOf<BridgedChain<B>>>
for FromBridgedChainMessageDispatch<B, ThisRuntime, ThisCurrency, ThisDispatchInstance>
where
ThisDispatchInstance: frame_support::traits::Instance,
ThisRuntime: pallet_bridge_dispatch::Config<ThisDispatchInstance, MessageId = (LaneId, MessageNonce)>,
<ThisRuntime as pallet_bridge_dispatch::Config<ThisDispatchInstance>>::Event:
From<pallet_bridge_dispatch::RawEvent<(LaneId, MessageNonce), ThisDispatchInstance>>,
pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>:
bp_message_dispatch::MessageDispatch<(LaneId, MessageNonce), Message = FromBridgedChainMessagePayload<B>>,
ThisRuntime: pallet_bridge_dispatch::Config<ThisDispatchInstance, MessageId = (LaneId, MessageNonce)>
+ pallet_transaction_payment::Config,
<ThisRuntime as pallet_transaction_payment::Config>::OnChargeTransaction:
pallet_transaction_payment::OnChargeTransaction<ThisRuntime, Balance = BalanceOf<ThisChain<B>>>,
ThisCurrency: Currency<AccountIdOf<ThisChain<B>>, Balance = BalanceOf<ThisChain<B>>>,
<ThisRuntime as pallet_bridge_dispatch::Config<ThisDispatchInstance>>::Event: From<
pallet_bridge_dispatch::RawEvent<(LaneId, MessageNonce), AccountIdOf<ThisChain<B>>, ThisDispatchInstance>,
>,
pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>: bp_message_dispatch::MessageDispatch<
AccountIdOf<ThisChain<B>>,
(LaneId, MessageNonce),
Message = FromBridgedChainMessagePayload<B>,
>,
{
type DispatchPayload = FromBridgedChainMessagePayload<B>;
@@ -480,13 +514,26 @@ pub mod target {
message.data.payload.as_ref().map(|payload| payload.weight).unwrap_or(0)
}
fn dispatch(message: DispatchMessage<Self::DispatchPayload, BalanceOf<BridgedChain<B>>>) {
fn dispatch(
relayer_account: &AccountIdOf<ThisChain<B>>,
message: DispatchMessage<Self::DispatchPayload, BalanceOf<BridgedChain<B>>>,
) -> MessageDispatchResult {
let message_id = (message.key.lane_id, message.key.nonce);
pallet_bridge_dispatch::Pallet::<ThisRuntime, ThisDispatchInstance>::dispatch(
B::INSTANCE,
B::BRIDGED_CHAIN_ID,
B::THIS_CHAIN_ID,
message_id,
message.data.payload.map_err(drop),
);
|dispatch_origin, dispatch_weight| {
ThisCurrency::transfer(
dispatch_origin,
relayer_account,
ThisRuntime::WeightToFee::calc(&dispatch_weight),
ExistenceRequirement::AllowDeath,
)
.map_err(drop)
},
)
}
}
@@ -511,7 +558,7 @@ pub mod target {
) -> Result<ProvedMessages<Message<BalanceOf<BridgedChain<B>>>>, &'static str>
where
ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
ThisRuntime: pallet_bridge_messages::Config<MessagesInstanceOf<BridgedChain<B>>>,
ThisRuntime: pallet_bridge_messages::Config<B::BridgedMessagesInstance>,
HashOf<BridgedChain<B>>:
Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
{
@@ -524,7 +571,7 @@ pub mod target {
StorageProof::new(bridged_storage_proof),
|storage_adapter| storage_adapter,
)
.map(|storage| StorageProofCheckerAdapter::<_, B, ThisRuntime> {
.map(|storage| StorageProofCheckerAdapter::<_, B> {
storage,
_dummy: Default::default(),
})
@@ -564,31 +611,29 @@ pub mod target {
fn read_raw_message(&self, message_key: &MessageKey) -> Option<Vec<u8>>;
}
struct StorageProofCheckerAdapter<H: Hasher, B, ThisRuntime> {
struct StorageProofCheckerAdapter<H: Hasher, B> {
storage: StorageProofChecker<H>,
_dummy: sp_std::marker::PhantomData<(B, ThisRuntime)>,
_dummy: sp_std::marker::PhantomData<B>,
}
impl<H, B, ThisRuntime> MessageProofParser for StorageProofCheckerAdapter<H, B, ThisRuntime>
impl<H, B> MessageProofParser for StorageProofCheckerAdapter<H, B>
where
H: Hasher,
B: MessageBridge,
ThisRuntime: pallet_bridge_messages::Config<MessagesInstanceOf<BridgedChain<B>>>,
{
fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option<Vec<u8>> {
let storage_outbound_lane_data_key = pallet_bridge_messages::storage_keys::outbound_lane_data_key::<
MessagesInstanceOf<BridgedChain<B>>,
>(lane_id);
let storage_outbound_lane_data_key =
pallet_bridge_messages::storage_keys::outbound_lane_data_key::<B::BridgedMessagesInstance>(lane_id);
self.storage
.read_value(storage_outbound_lane_data_key.0.as_ref())
.ok()?
}
fn read_raw_message(&self, message_key: &MessageKey) -> Option<Vec<u8>> {
let storage_message_key = pallet_bridge_messages::storage_keys::message_key::<
ThisRuntime,
MessagesInstanceOf<BridgedChain<B>>,
>(&message_key.lane_id, message_key.nonce);
let storage_message_key = pallet_bridge_messages::storage_keys::message_key::<B::BridgedMessagesInstance>(
&message_key.lane_id,
message_key.nonce,
);
self.storage.read_value(storage_message_key.0.as_ref()).ok()?
}
}
@@ -692,11 +737,13 @@ mod tests {
struct OnThisChainBridge;
impl MessageBridge for OnThisChainBridge {
const INSTANCE: InstanceId = *b"this";
const RELAYER_FEE_PERCENT: u32 = 10;
const THIS_CHAIN_ID: ChainId = *b"this";
const BRIDGED_CHAIN_ID: ChainId = *b"brdg";
type ThisChain = ThisChain;
type BridgedChain = BridgedChain;
type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance;
fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance {
ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32)
@@ -708,11 +755,13 @@ mod tests {
struct OnBridgedChainBridge;
impl MessageBridge for OnBridgedChainBridge {
const INSTANCE: InstanceId = *b"brdg";
const RELAYER_FEE_PERCENT: u32 = 20;
const THIS_CHAIN_ID: ChainId = *b"brdg";
const BRIDGED_CHAIN_ID: ChainId = *b"this";
type ThisChain = BridgedChain;
type BridgedChain = ThisChain;
type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance;
fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance {
unreachable!()
@@ -815,8 +864,6 @@ mod tests {
type Signature = ThisChainSignature;
type Weight = frame_support::weights::Weight;
type Balance = ThisChainBalance;
type MessagesInstance = pallet_bridge_messages::DefaultInstance;
}
impl ThisChainWithMessages for ThisChain {
@@ -853,6 +900,7 @@ mod tests {
fn estimate_delivery_transaction(
_message_payload: &[u8],
_include_pay_dispatch_fee_cost: bool,
_message_dispatch_weight: WeightOf<Self>,
) -> MessageTransaction<WeightOf<Self>> {
unreachable!()
@@ -872,8 +920,6 @@ mod tests {
type Signature = BridgedChainSignature;
type Weight = frame_support::weights::Weight;
type Balance = BridgedChainBalance;
type MessagesInstance = pallet_bridge_messages::DefaultInstance;
}
impl ThisChainWithMessages for BridgedChain {
@@ -908,6 +954,7 @@ mod tests {
fn estimate_delivery_transaction(
_message_payload: &[u8],
_include_pay_dispatch_fee_cost: bool,
message_dispatch_weight: WeightOf<Self>,
) -> MessageTransaction<WeightOf<Self>> {
MessageTransaction {
@@ -932,6 +979,7 @@ mod tests {
spec_version: 1,
weight: 100,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
call: ThisChainCall::Transfer.encode(),
}
.encode();
@@ -946,10 +994,10 @@ mod tests {
spec_version: 1,
weight: 100,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
call: target::FromBridgedChainEncodedMessageCall::<OnThisChainBridge> {
encoded_call: ThisChainCall::Transfer.encode(),
_marker: PhantomData::default(),
},
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
call: target::FromBridgedChainEncodedMessageCall::<ThisChainCall>::new(
ThisChainCall::Transfer.encode(),
),
}
);
assert_eq!(Ok(ThisChainCall::Transfer), message_on_this_chain.call.into());
@@ -963,6 +1011,7 @@ mod tests {
spec_version: 1,
weight: 100,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![42],
}
}
@@ -983,12 +1032,27 @@ mod tests {
Ok(ThisChainBalance(EXPECTED_MINIMAL_FEE)),
);
// let's check if estimation is less than hardcoded, if dispatch is paid at target chain
let mut payload_with_pay_on_target = regular_outbound_message_payload();
payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
let fee_at_source = source::estimate_message_dispatch_and_delivery_fee::<OnThisChainBridge>(
&payload_with_pay_on_target,
OnThisChainBridge::RELAYER_FEE_PERCENT,
)
.expect("estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message");
assert!(
fee_at_source < EXPECTED_MINIMAL_FEE.into(),
"Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}",
fee_at_source,
EXPECTED_MINIMAL_FEE,
);
// and now check that the verifier checks the fee
assert_eq!(
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Root,
&ThisChainBalance(1),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
),
@@ -998,7 +1062,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Root,
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
)
@@ -1013,6 +1077,7 @@ mod tests {
spec_version: 1,
weight: 100,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![42],
};
@@ -1021,7 +1086,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Signed(ThisChainAccountId(0)),
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
),
@@ -1031,7 +1096,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::None,
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
),
@@ -1041,7 +1106,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Root,
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
)
@@ -1056,6 +1121,7 @@ mod tests {
spec_version: 1,
weight: 100,
origin: bp_message_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)),
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![42],
};
@@ -1064,7 +1130,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Signed(ThisChainAccountId(0)),
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
),
@@ -1074,7 +1140,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Signed(ThisChainAccountId(1)),
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&test_lane_outbound_data(),
&payload,
)
@@ -1102,7 +1168,7 @@ mod tests {
source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
&Sender::Root,
&ThisChainBalance(1_000_000),
&TEST_LANE_ID,
TEST_LANE_ID,
&OutboundLaneData {
latest_received_nonce: 100,
latest_generated_nonce: 100 + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + 1,
@@ -1123,6 +1189,7 @@ mod tests {
spec_version: 1,
weight: 5,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![1, 2, 3, 4, 5, 6],
},)
.is_err()
@@ -1138,6 +1205,7 @@ mod tests {
spec_version: 1,
weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![1, 2, 3, 4, 5, 6],
},)
.is_err()
@@ -1153,6 +1221,7 @@ mod tests {
spec_version: 1,
weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as usize + 1],
},)
.is_err()
@@ -1168,6 +1237,7 @@ mod tests {
spec_version: 1,
weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
origin: bp_message_dispatch::CallOrigin::SourceRoot,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as _],
},),
Ok(()),
@@ -25,6 +25,7 @@ use crate::messages::{
};
use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload};
use bp_runtime::ChainId;
use codec::Encode;
use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH};
use frame_support::weights::Weight;
@@ -37,7 +38,13 @@ use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder
/// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`.
///
/// Returns public key of the signer and the signature itself.
pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) -> ([u8; 32], [u8; 64]) {
pub fn ed25519_sign(
target_call: &impl Encode,
source_account_id: &impl Encode,
target_spec_version: u32,
source_chain_id: ChainId,
target_chain_id: ChainId,
) -> ([u8; 32], [u8; 64]) {
// key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html)
let target_secret = SecretKey::from_bytes(&[
157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050,
@@ -51,9 +58,13 @@ pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode)
target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes());
let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid");
let mut signature_message = Vec::new();
target_call.encode_to(&mut signature_message);
source_account_id.encode_to(&mut signature_message);
let signature_message = pallet_bridge_dispatch::account_ownership_digest(
target_call,
source_account_id,
target_spec_version,
source_chain_id,
target_chain_id,
);
let target_origin_signature = target_pair
.try_sign(&signature_message)
.expect("Ed25519 try_sign should not fail in benchmarks");
+53
View File
@@ -0,0 +1,53 @@
# This file is a "runtime" part from a builder-pattern in Dockerfile, it's used in CI.
# The only different part is that the compilation happens externally,
# so COPY has a different source.
FROM ubuntu:20.04
# show backtraces
ENV RUST_BACKTRACE 1
ENV DEBIAN_FRONTEND=noninteractive
RUN set -eux; \
apt-get update && \
apt-get install -y --no-install-recommends \
curl ca-certificates libssl-dev && \
update-ca-certificates && \
groupadd -g 1000 user && \
useradd -u 1000 -g user -s /bin/sh -m user && \
# apt clean up
apt-get autoremove -y && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# switch to non-root user
USER user
WORKDIR /home/user
ARG PROJECT=ethereum-poa-relay
COPY --chown=user:user ./${PROJECT} ./
COPY --chown=user:user ./bridge-entrypoint.sh ./
# check if executable works in this container
RUN ./${PROJECT} --version
ENV PROJECT=$PROJECT
ENTRYPOINT ["/home/user/bridge-entrypoint.sh"]
# metadata
ARG VCS_REF=master
ARG BUILD_DATE=""
ARG VERSION=""
LABEL org.opencontainers.image.title="${PROJECT}" \
org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \
org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \
org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \
org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \
org.opencontainers.image.created="${BUILD_DATE}" \
org.opencontainers.image.version="${VERSION}" \
org.opencontainers.image.revision="${VCS_REF}" \
org.opencontainers.image.authors="devops-team@parity.io" \
org.opencontainers.image.vendor="Parity Technologies" \
org.opencontainers.image.licenses="GPL-3.0 License"
+17 -6
View File
@@ -53,12 +53,22 @@ ignore = [
"RUSTSEC-2020-0146",
# yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice.
"RUSTSEC-2018-0006",
# We need to wait until Substrate updates their `wasmtime` dependency to fix this.
# TODO: See issue #676: https://github.com/paritytech/parity-bridges-common/issues/676
"RUSTSEC-2021-0013",
# We need to wait until Substrate updates their `hyper` dependency to fix this.
# TODO: See issue #710: https://github.com/paritytech/parity-bridges-common/issues/681
"RUSTSEC-2021-0020",
# Comes from wasmtime via Substrate: 'cranelift-codegen'
"RUSTSEC-2021-0067",
# Comes from libp2p via Substrate: 'aes-soft', 'aesni', 'block-cipher', 'stream-cipher'
"RUSTSEC-2021-0060",
"RUSTSEC-2021-0059",
"RUSTSEC-2020-0057",
"RUSTSEC-2021-0064",
# Comes from jsonrpc via Substrate: 'failure', 'net2', 'lock_api'
"RUSTSEC-2020-0036",
"RUSTSEC-2020-0077",
"RUSTSEC-2019-0036",
"RUSTSEC-2020-0070",
# Comes from honggfuzz via storage-proof-fuzzer: 'memmap'
"RUSTSEC-2020-0077",
# Comes from time: 'stweb' (will be fixed in upcoming time 0.3)
"RUSTSEC-2020-0056"
]
# Threshold for security vulnerabilities, any vulnerability with a CVSS score
# lower than the range specified will be ignored. Note that ignored advisories
@@ -134,6 +144,7 @@ license-files = [
# Each entry is a crate relative path, and the (opaque) hash of its contents
{ path = "LICENSE", hash = 0xbd0eed23 }
]
[[licenses.clarify]]
name = "webpki"
expression = "ISC"
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://poa-node-arthur:8545/api/health
curl -v http://poa-node-bertha:8545/api/health
curl -v http://poa-node-carlos:8545/api/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 10
sleep 20
curl -v http://rialto-node-bob:9933/health
curl -v http://poa-node-bertha:8545/api/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://poa-node-arthur:8545/api/health
curl -v http://poa-node-bertha:8545/api/health
curl -v http://poa-node-carlos:8545/api/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://millau-node-bob:9933/health
curl -v http://rialto-node-bob:9933/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://millau-node-bob:9933/health
curl -v http://rialto-node-bob:9933/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://millau-node-alice:9933/health
curl -v http://rialto-node-alice:9933/health
@@ -1,7 +1,7 @@
#!/bin/bash
set -xeu
sleep 3
sleep 20
curl -v http://millau-node-alice:9933/health
curl -v https://westend-rpc.polkadot.io:443/health
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Run an instance of the Rococo -> Wococo header sync.
#
# Right now this relies on local Wococo and Rococo networks
# running (which include `pallet-bridge-grandpa` in their
# runtimes), but in the future it could use use public RPC nodes.
set -xeu
RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWococo \
--source-host 127.0.0.1 \
--source-port 9955 \
--target-host 127.0.0.1 \
--target-port 9944 \
--target-signer //Alice
RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWococo \
--source-host 127.0.0.1 \
--source-port 9955 \
--target-host 127.0.0.1 \
--target-port 9944 \
--target-signer //Bob \
--prometheus-host=0.0.0.0 \
@@ -0,0 +1,24 @@
#!/bin/bash
#
# Run an instance of the Wococo -> Rococo header sync.
#
# Right now this relies on local Wococo and Rococo networks
# running (which include `pallet-bridge-grandpa` in their
# runtimes), but in the future it could use use public RPC nodes.
set -xeu
RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WococoToRococo \
--source-host 127.0.0.1 \
--source-port 9944 \
--target-host 127.0.0.1 \
--target-port 9955 \
--target-signer //Alice
RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WococoToRococo \
--source-host 127.0.0.1 \
--source-port 9944 \
--target-host 127.0.0.1 \
--target-port 9955 \
--target-signer //Charlie \
--prometheus-host=0.0.0.0 \
@@ -1,14 +0,0 @@
#!/bin/bash
# Run a development instance of the Rococo Substrate bridge node.
# To override the default port just export ROCOCO_PORT=9966
#
# Note: This script will not work out of the box with the bridges
# repo since it relies on a Polkadot binary.
ROCOCO_BOB_PORT="${ROCOCO_BOB_PORT:-9966}"
RUST_LOG=runtime=trace,runtime::bridge=trace \
./target/debug/polkadot --chain=rococo-local --bob --tmp \
--rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \
--port 33055 --rpc-port 9935 --ws-port $ROCOCO_BOB_PORT \
@@ -9,6 +9,6 @@
ROCOCO_PORT="${ROCOCO_PORT:-9955}"
RUST_LOG=runtime=trace,runtime::bridge=trace \
./target/debug/polkadot --chain=rococo-local --alice --tmp \
./target/debug/polkadot --chain=rococo-dev --alice --tmp \
--rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \
--port 33044 --rpc-port 9934 --ws-port $ROCOCO_PORT \
@@ -0,0 +1,14 @@
#!/bin/bash
# Run a development instance of the Wococo Substrate bridge node.
# To override the default port just export WOCOCO_PORT=9955
#
# Note: This script will not work out of the box with the bridges
# repo since it relies on a Polkadot binary.
WOCOCO_PORT="${WOCOCO_PORT:-9944}"
RUST_LOG=runtime=trace,runtime::bridge=trace \
./target/debug/polkadot --chain=wococo-dev --alice --tmp \
--rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \
--port 33033 --rpc-port 9933 --ws-port $WOCOCO_PORT \
+18 -2
View File
@@ -67,7 +67,7 @@
"set_id": "SetId"
},
"Id": "[u8; 4]",
"InstanceId": "Id",
"ChainId": "Id",
"LaneId": "Id",
"MessageNonce": "u64",
"MessageId": "(Id, u64)",
@@ -77,9 +77,18 @@
},
"InboundRelayer": "AccountId",
"InboundLaneData": {
"relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>",
"relayers": "Vec<UnrewardedRelayer>",
"last_confirmed_nonce": "MessageNonce"
},
"UnrewardedRelayer": {
"relayer": "RelayerId",
"messages": "DeliveredMessages"
},
"DeliveredMessages": {
"begin": "MessageNonce",
"end": "MessageNonce",
"dispatch_results": "BitVec"
},
"OutboundLaneData": {
"latest_generated_nonce": "MessageNonce",
"latest_received_nonce": "MessageNonce",
@@ -96,6 +105,7 @@
"spec_version": "SpecVersion",
"weight": "Weight",
"origin": "CallOrigin",
"dispatch_fee_payment": "DispatchFeePayment",
"call": "BridgedOpaqueCall"
},
"CallOrigin": {
@@ -105,6 +115,12 @@
"SourceAccount": "SourceAccountId"
}
},
"DispatchFeePayment": {
"_enum": {
"AtSourceChain": "()",
"AtTargetChain": "()"
}
},
"MultiSigner": {
"_enum": {
"Ed25519": "H256",
+18 -2
View File
@@ -67,7 +67,7 @@
"set_id": "SetId"
},
"Id": "[u8; 4]",
"InstanceId": "Id",
"ChainId": "Id",
"LaneId": "Id",
"MessageNonce": "u64",
"MessageId": "(Id, u64)",
@@ -77,9 +77,18 @@
},
"InboundRelayer": "AccountId",
"InboundLaneData": {
"relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>",
"relayers": "Vec<UnrewardedRelayer>",
"last_confirmed_nonce": "MessageNonce"
},
"UnrewardedRelayer": {
"relayer": "RelayerId",
"messages": "DeliveredMessages"
},
"DeliveredMessages": {
"begin": "MessageNonce",
"end": "MessageNonce",
"dispatch_results": "BitVec"
},
"OutboundLaneData": {
"latest_generated_nonce": "MessageNonce",
"latest_received_nonce": "MessageNonce",
@@ -96,6 +105,7 @@
"spec_version": "SpecVersion",
"weight": "Weight",
"origin": "CallOrigin",
"dispatch_fee_payment": "DispatchFeePayment",
"call": "BridgedOpaqueCall"
},
"CallOrigin": {
@@ -105,6 +115,12 @@
"SourceAccount": "SourceAccountId"
}
},
"DispatchFeePayment": {
"_enum": {
"AtSourceChain": "()",
"AtTargetChain": "()"
}
},
"MultiSigner": {
"_enum": {
"Ed25519": "H256",
@@ -0,0 +1,147 @@
{
"--1": "Rococo Types",
"RococoBalance": "u128",
"RococoBlockHash": "H256",
"RococoBlockNumber": "u32",
"RococoHeader": "Header",
"--2": "Wococo Types",
"WococoBalance": "RococoBalance",
"WococoBlockHash": "RococoBlockHash",
"WococoBlockNumber": "RococoBlockNumber",
"WococoHeader": "RococoHeader",
"--3": "Common types",
"Address": "AccountId",
"LookupSource": "AccountId",
"AccountSigner": "MultiSigner",
"SpecVersion": "u32",
"RelayerId": "AccountId",
"SourceAccountId": "AccountId",
"ImportedHeader": {
"header": "BridgedHeader",
"requires_justification": "bool",
"is_finalized": "bool",
"signal_hash": "Option<BridgedBlockHash>"
},
"AuthoritySet": {
"authorities": "AuthorityList",
"set_id": "SetId"
},
"Id": "[u8; 4]",
"ChainId": "Id",
"LaneId": "Id",
"MessageNonce": "u64",
"MessageId": "(Id, u64)",
"MessageKey": {
"lane_id": "LaneId",
"nonce:": "MessageNonce"
},
"InboundRelayer": "AccountId",
"InboundLaneData": {
"relayers": "Vec<UnrewardedRelayer>",
"last_confirmed_nonce": "MessageNonce"
},
"UnrewardedRelayer": {
"relayer": "RelayerId",
"messages": "DeliveredMessages"
},
"DeliveredMessages": {
"begin": "MessageNonce",
"end": "MessageNonce",
"dispatch_results": "BitVec"
},
"OutboundLaneData": {
"latest_generated_nonce": "MessageNonce",
"latest_received_nonce": "MessageNonce",
"oldest_unpruned_nonce": "MessageNonce"
},
"MessageData": {
"payload": "MessagePayload",
"fee": "Fee"
},
"MessagePayload": "Vec<u8>",
"BridgedOpaqueCall": "Vec<u8>",
"OutboundMessageFee": "Fee",
"OutboundPayload": {
"spec_version": "SpecVersion",
"weight": "Weight",
"origin": "CallOrigin",
"dispatch_fee_payment": "DispatchFeePayment",
"call": "BridgedOpaqueCall"
},
"CallOrigin": {
"_enum": {
"SourceRoot": "()",
"TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)",
"SourceAccount": "SourceAccountId"
}
},
"DispatchFeePayment": {
"_enum": {
"AtSourceChain": "()",
"AtTargetChain": "()"
}
},
"MultiSigner": {
"_enum": {
"Ed25519": "H256",
"Sr25519": "H256",
"Ecdsa": "[u8;33]"
}
},
"MessagesProofOf": {
"bridged_header_hash": "BridgedBlockHash",
"storage_proof": "Vec<StorageProofItem>",
"lane": "LaneId",
"nonces_start": "MessageNonce",
"nonces_end": "MessageNonce"
},
"StorageProofItem": "Vec<u8>",
"MessagesDeliveryProofOf": {
"bridged_header_hash": "BridgedBlockHash",
"storage_proof": "Vec<StorageProofItem>",
"lane": "LaneId"
},
"UnrewardedRelayersState": {
"unrewarded_relayer_entries": "MessageNonce",
"messages_in_oldest_entry": "MessageNonce",
"total_messages": "MessageNonce"
},
"AncestryProof": "()",
"MessageFeeData": {
"lane_id": "LaneId",
"payload": "OutboundPayload"
},
"Precommit": {
"target_hash": "BridgedBlockHash",
"target_number": "BridgedBlockNumber"
},
"AuthoritySignature": "[u8;64]",
"AuthorityId": "[u8;32]",
"SignedPrecommit": {
"precommit": "Precommit",
"signature": "AuthoritySignature",
"id": "AuthorityId"
},
"Commit": {
"target_hash": "BridgedBlockHash",
"target_number": "BridgedBlockNumber",
"precommits": "Vec<SignedPrecommit>"
},
"GrandpaJustification": {
"round": "u64",
"commit": "Commit",
"votes_ancestries": "Vec<BridgedHeader>"
},
"Fee": "RococoBalance",
"Balance": "RococoBalance",
"BlockHash": "RococoBlockHash",
"BlockNumber": "RococoBlockNumber",
"BridgedBlockHash": "WococoBlockHash",
"BridgedBlockNumber": "WococoBlockNumber",
"BridgedHeader": "WococoHeader",
"Parameter": {
"_enum": {
"RococoToWococoConversionRate": "u128"
}
}
}
@@ -0,0 +1,148 @@
{
"--1": "Rococo Types",
"RococoBalance": "u128",
"RococoBlockHash": "H256",
"RococoBlockNumber": "u32",
"RococoHeader": "Header",
"--2": "Wococo Types",
"WococoBalance": "RococoBalance",
"WococoBlockHash": "RococoBlockHash",
"WococoBlockNumber": "RococoBlockNumber",
"WococoHeader": "RococoHeader",
"--3": "Common types",
"Address": "AccountId",
"LookupSource": "AccountId",
"AccountSigner": "MultiSigner",
"SpecVersion": "u32",
"RelayerId": "AccountId",
"SourceAccountId": "AccountId",
"ImportedHeader": {
"header": "BridgedHeader",
"requires_justification": "bool",
"is_finalized": "bool",
"signal_hash": "Option<BridgedBlockHash>"
},
"AuthoritySet": {
"authorities": "AuthorityList",
"set_id": "SetId"
},
"Id": "[u8; 4]",
"ChainId": "Id",
"LaneId": "Id",
"MessageNonce": "u64",
"MessageId": "(Id, u64)",
"MessageKey": {
"lane_id": "LaneId",
"nonce:": "MessageNonce"
},
"InboundRelayer": "AccountId",
"InboundLaneData": {
"relayers": "Vec<UnrewardedRelayer>",
"last_confirmed_nonce": "MessageNonce"
},
"UnrewardedRelayer": {
"relayer": "RelayerId",
"messages": "DeliveredMessages"
},
"DeliveredMessages": {
"begin": "MessageNonce",
"end": "MessageNonce",
"dispatch_results": "BitVec"
},
"OutboundLaneData": {
"latest_generated_nonce": "MessageNonce",
"latest_received_nonce": "MessageNonce",
"oldest_unpruned_nonce": "MessageNonce"
},
"MessageData": {
"payload": "MessagePayload",
"fee": "Fee"
},
"MessagePayload": "Vec<u8>",
"BridgedOpaqueCall": "Vec<u8>",
"OutboundMessageFee": "Fee",
"OutboundPayload": {
"spec_version": "SpecVersion",
"weight": "Weight",
"origin": "CallOrigin",
"dispatch_fee_payment": "DispatchFeePayment",
"call": "BridgedOpaqueCall"
},
"CallOrigin": {
"_enum": {
"SourceRoot": "()",
"TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)",
"SourceAccount": "SourceAccountId"
}
},
"DispatchFeePayment": {
"_enum": {
"AtSourceChain": "()",
"AtTargetChain": "()"
}
},
"MultiSigner": {
"_enum": {
"Ed25519": "H256",
"Sr25519": "H256",
"Ecdsa": "[u8;33]"
}
},
"MessagesProofOf": {
"bridged_header_hash": "BridgedBlockHash",
"storage_proof": "Vec<StorageProofItem>",
"lane": "LaneId",
"nonces_start": "MessageNonce",
"nonces_end": "MessageNonce"
},
"StorageProofItem": "Vec<u8>",
"MessagesDeliveryProofOf": {
"bridged_header_hash": "BridgedBlockHash",
"storage_proof": "Vec<StorageProofItem>",
"lane": "LaneId"
},
"UnrewardedRelayersState": {
"unrewarded_relayer_entries": "MessageNonce",
"messages_in_oldest_entry": "MessageNonce",
"total_messages": "MessageNonce"
},
"AncestryProof": "()",
"MessageFeeData": {
"lane_id": "LaneId",
"payload": "OutboundPayload"
},
"Precommit": {
"target_hash": "BridgedBlockHash",
"target_number": "BridgedBlockNumber"
},
"AuthoritySignature": "[u8;64]",
"AuthorityId": "[u8;32]",
"SignedPrecommit": {
"precommit": "Precommit",
"signature": "AuthoritySignature",
"id": "AuthorityId"
},
"Commit": {
"target_hash": "BridgedBlockHash",
"target_number": "BridgedBlockNumber",
"precommits": "Vec<SignedPrecommit>"
},
"GrandpaJustification": {
"round": "u64",
"commit": "Commit",
"votes_ancestries": "Vec<BridgedHeader>"
},
"Fee": "WococoBalance",
"Balance": "WococoBalance",
"Hash": "WococoBlockHash",
"BlockHash": "WococoBlockHash",
"BlockNumber": "WococoBlockNumber",
"BridgedBlockHash": "RococoBlockHash",
"BridgedBlockNumber": "RococoBlockNumber",
"BridgedHeader": "RococoHeader",
"Parameter": {
"_enum": {
"WococoToRococoConversionRate": "u128"
}
}
}
+13 -6
View File
@@ -2,14 +2,21 @@
# The script generates JSON type definition files in `./deployment` directory to be used for
# JS clients.
# Both networks have a lot of common types, so to avoid duplication we merge `common.json` file with
# chain-specific definitions in `rialto|millau.json`.
#
# It works by creating definitions for each side of the different bridge pairs we support
# (Rialto<>Millau and Rococo<>Wococo at the moment).
#
# To avoid duplication each bridge pair has a JSON file with common definitions, as well as a
# general JSON file with common definitions regardless of the bridge pair. These files are then
# merged with chain-specific type definitions.
set -exu
set -eux
# Make sure we are in the right dir.
cd $(dirname $(realpath $0))
# Create rialto and millau types.
jq -s '.[0] * .[1]' common.json rialto.json > ../types-rialto.json
jq -s '.[0] * .[1]' common.json millau.json > ../types-millau.json
# Create types for our supported bridge pairs (Rialto<>Millau, Rococo<>Wococo)
jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json rialto.json > ../types-rialto.json
jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json millau.json > ../types-millau.json
jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json rococo.json > ../types-rococo.json
jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json wococo.json > ../types-wococo.json
+18 -52
View File
@@ -1,54 +1,4 @@
{
"--1": "Millau Types",
"MillauBalance": "u64",
"MillauBlockHash": "H512",
"MillauBlockNumber": "u64",
"MillauHeader": {
"parent_Hash": "MillauBlockHash",
"number": "Compact<MillauBlockNumber>",
"state_root": "MillauBlockHash",
"extrinsics_root": "MillauBlockHash",
"digest": "MillauDigest"
},
"MillauDigest": {
"logs": "Vec<MillauDigestItem>"
},
"MillauDigestItem": {
"_enum": {
"Other": "Vec<u8>",
"AuthoritiesChange": "Vec<AuthorityId>",
"ChangesTrieRoot": "MillauBlockHash",
"SealV0": "SealV0",
"Consensus": "Consensus",
"Seal": "Seal",
"PreRuntime": "PreRuntime"
}
},
"--2": "Rialto Types",
"RialtoBalance": "u128",
"RialtoBlockHash": "H256",
"RialtoBlockNumber": "u32",
"RialtoHeader": {
"parent_Hash": "RialtoBlockHash",
"number": "Compact<RialtoBlockNumber>",
"state_root": "RialtoBlockHash",
"extrinsics_root": "RialtoBlockHash",
"digest": "RialtoDigest"
},
"RialtoDigest": {
"logs": "Vec<RialtoDigestItem>"
},
"RialtoDigestItem": {
"_enum": {
"Other": "Vec<u8>",
"AuthoritiesChange": "Vec<AuthorityId>",
"ChangesTrieRoot": "RialtoBlockHash",
"SealV0": "SealV0",
"Consensus": "Consensus",
"Seal": "Seal",
"PreRuntime": "PreRuntime"
}
},
"--3": "Common types",
"Address": "AccountId",
"LookupSource": "AccountId",
@@ -67,7 +17,7 @@
"set_id": "SetId"
},
"Id": "[u8; 4]",
"InstanceId": "Id",
"ChainId": "Id",
"LaneId": "Id",
"MessageNonce": "u64",
"MessageId": "(Id, u64)",
@@ -77,9 +27,18 @@
},
"InboundRelayer": "AccountId",
"InboundLaneData": {
"relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>",
"relayers": "Vec<UnrewardedRelayer>",
"last_confirmed_nonce": "MessageNonce"
},
"UnrewardedRelayer": {
"relayer": "RelayerId",
"messages": "DeliveredMessages"
},
"DeliveredMessages": {
"begin": "MessageNonce",
"end": "MessageNonce",
"dispatch_results": "BitVec"
},
"OutboundLaneData": {
"latest_generated_nonce": "MessageNonce",
"latest_received_nonce": "MessageNonce",
@@ -96,6 +55,7 @@
"spec_version": "SpecVersion",
"weight": "Weight",
"origin": "CallOrigin",
"dispatch_fee_payment": "DispatchFeePayment",
"call": "BridgedOpaqueCall"
},
"CallOrigin": {
@@ -105,6 +65,12 @@
"SourceAccount": "SourceAccountId"
}
},
"DispatchFeePayment": {
"_enum": {
"AtSourceChain": "()",
"AtTargetChain": "()"
}
},
"MultiSigner": {
"_enum": {
"Ed25519": "H256",
@@ -12,5 +12,4 @@
"MillauToRialtoConversionRate": "u128"
}
}
}
@@ -0,0 +1,52 @@
{
"--1": "Millau Types",
"MillauBalance": "u64",
"MillauBlockHash": "H512",
"MillauBlockNumber": "u64",
"MillauHeader": {
"parent_Hash": "MillauBlockHash",
"number": "Compact<MillauBlockNumber>",
"state_root": "MillauBlockHash",
"extrinsics_root": "MillauBlockHash",
"digest": "MillauDigest"
},
"MillauDigest": {
"logs": "Vec<MillauDigestItem>"
},
"MillauDigestItem": {
"_enum": {
"Other": "Vec<u8>",
"AuthoritiesChange": "Vec<AuthorityId>",
"ChangesTrieRoot": "MillauBlockHash",
"SealV0": "SealV0",
"Consensus": "Consensus",
"Seal": "Seal",
"PreRuntime": "PreRuntime"
}
},
"--2": "Rialto Types",
"RialtoBalance": "u128",
"RialtoBlockHash": "H256",
"RialtoBlockNumber": "u32",
"RialtoHeader": {
"parent_Hash": "RialtoBlockHash",
"number": "Compact<RialtoBlockNumber>",
"state_root": "RialtoBlockHash",
"extrinsics_root": "RialtoBlockHash",
"digest": "RialtoDigest"
},
"RialtoDigest": {
"logs": "Vec<RialtoDigestItem>"
},
"RialtoDigestItem": {
"_enum": {
"Other": "Vec<u8>",
"AuthoritiesChange": "Vec<AuthorityId>",
"ChangesTrieRoot": "RialtoBlockHash",
"SealV0": "SealV0",
"Consensus": "Consensus",
"Seal": "Seal",
"PreRuntime": "PreRuntime"
}
}
}
@@ -0,0 +1,12 @@
{
"--1": "Rococo Types",
"RococoBalance": "u128",
"RococoBlockHash": "H256",
"RococoBlockNumber": "u32",
"RococoHeader": "Header",
"--2": "Wococo Types",
"WococoBalance": "RococoBalance",
"WococoBlockHash": "RococoBlockHash",
"WococoBlockNumber": "RococoBlockNumber",
"WococoHeader": "RococoHeader"
}
@@ -0,0 +1,14 @@
{
"Fee": "RococoBalance",
"Balance": "RococoBalance",
"BlockHash": "RococoBlockHash",
"BlockNumber": "RococoBlockNumber",
"BridgedBlockHash": "WococoBlockHash",
"BridgedBlockNumber": "WococoBlockNumber",
"BridgedHeader": "WococoHeader",
"Parameter": {
"_enum": {
"RococoToWococoConversionRate": "u128"
}
}
}
@@ -0,0 +1,15 @@
{
"Fee": "WococoBalance",
"Balance": "WococoBalance",
"Hash": "WococoBlockHash",
"BlockHash": "WococoBlockHash",
"BlockNumber": "WococoBlockNumber",
"BridgedBlockHash": "RococoBlockHash",
"BridgedBlockNumber": "RococoBlockNumber",
"BridgedHeader": "RococoHeader",
"Parameter": {
"_enum": {
"WococoToRococoConversionRate": "u128"
}
}
}
@@ -8,8 +8,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
codec = { package = "parity-scale-codec", version = "1.3.1" }
finality-grandpa = "0.12.3"
codec = { package = "parity-scale-codec", version = "2.0.0" }
finality-grandpa = "0.14.0"
hash-db = "0.15.2"
honggfuzz = "0.5.54"
log = "0.4.0"
@@ -44,6 +44,8 @@ module events set:
weight, the dispatch is rejected. Keep in mind, that even if post-dispatch weight will be less
than specified, the submitter still have to declare (and pay for) the maximal possible weight
(that is the pre-dispatch weight);
- `MessageDispatchPaymentFailed` event is emitted if the message submitter has selected to pay
dispatch fee at the target chain, but has failed to do that;
- `MessageDispatched` event is emitted if the message has passed all checks and we have actually
dispatched it. The dispatch may still fail, though - that's why we are including the dispatch
result in the event payload.
+264 -84
View File
@@ -23,9 +23,15 @@
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
// Generated by `decl_event!`
#![allow(clippy::unused_unit)]
use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion, Weight};
use bp_runtime::{derive_account_id, InstanceId, SourceAccount};
use bp_runtime::{
derive_account_id,
messages::{DispatchFeePayment, MessageDispatchResult},
ChainId, SourceAccount,
};
use codec::{Decode, Encode};
use frame_support::{
decl_event, decl_module, decl_storage,
@@ -87,24 +93,27 @@ decl_storage! {
decl_event!(
pub enum Event<T, I = DefaultInstance> where
<T as Config<I>>::MessageId
<T as Config<I>>::MessageId,
AccountId = <T as frame_system::Config>::AccountId,
{
/// Message has been rejected before reaching dispatch.
MessageRejected(InstanceId, MessageId),
MessageRejected(ChainId, MessageId),
/// Message has been rejected by dispatcher because of spec version mismatch.
/// Last two arguments are: expected and passed spec version.
MessageVersionSpecMismatch(InstanceId, MessageId, SpecVersion, SpecVersion),
MessageVersionSpecMismatch(ChainId, MessageId, SpecVersion, SpecVersion),
/// Message has been rejected by dispatcher because of weight mismatch.
/// Last two arguments are: expected and passed call weight.
MessageWeightMismatch(InstanceId, MessageId, Weight, Weight),
MessageWeightMismatch(ChainId, MessageId, Weight, Weight),
/// Message signature mismatch.
MessageSignatureMismatch(InstanceId, MessageId),
/// Message has been dispatched with given result.
MessageDispatched(InstanceId, MessageId, DispatchResult),
MessageSignatureMismatch(ChainId, MessageId),
/// We have failed to decode Call from the message.
MessageCallDecodeFailed(InstanceId, MessageId),
MessageCallDecodeFailed(ChainId, MessageId),
/// The call from the message has been rejected by the call filter.
MessageCallRejected(InstanceId, MessageId),
MessageCallRejected(ChainId, MessageId),
/// The origin account has failed to pay fee for dispatching the message.
MessageDispatchPaymentFailed(ChainId, MessageId, AccountId, Weight),
/// Message has been dispatched with given result.
MessageDispatched(ChainId, MessageId, DispatchResult),
/// Phantom member, never used. Needed to handle multiple pallet instances.
_Dummy(PhantomData<I>),
}
@@ -118,7 +127,7 @@ decl_module! {
}
}
impl<T: Config<I>, I: Instance> MessageDispatch<T::MessageId> for Pallet<T, I> {
impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for Pallet<T, I> {
type Message =
MessagePayload<T::SourceChainAccountId, T::TargetChainAccountPublic, T::TargetChainSignature, T::EncodedCall>;
@@ -126,77 +135,108 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::MessageId> for Pallet<T, I> {
message.weight
}
fn dispatch(bridge: InstanceId, id: T::MessageId, message: Result<Self::Message, ()>) {
fn dispatch<P: FnOnce(&T::AccountId, Weight) -> Result<(), ()>>(
source_chain: ChainId,
target_chain: ChainId,
id: T::MessageId,
message: Result<Self::Message, ()>,
pay_dispatch_fee: P,
) -> MessageDispatchResult {
// emit special even if message has been rejected by external component
let message = match message {
Ok(message) => message,
Err(_) => {
log::trace!(target: "runtime::bridge-dispatch", "Message {:?}/{:?}: rejected before actual dispatch", bridge, id);
Self::deposit_event(RawEvent::MessageRejected(bridge, id));
return;
log::trace!(
target: "runtime::bridge-dispatch",
"Message {:?}/{:?}: rejected before actual dispatch",
source_chain,
id,
);
Self::deposit_event(RawEvent::MessageRejected(source_chain, id));
return MessageDispatchResult {
dispatch_result: false,
unspent_weight: 0,
dispatch_fee_paid_during_dispatch: false,
};
}
};
// verify spec version
// (we want it to be the same, because otherwise we may decode Call improperly)
let mut dispatch_result = MessageDispatchResult {
dispatch_result: false,
unspent_weight: message.weight,
dispatch_fee_paid_during_dispatch: false,
};
let expected_version = <T as frame_system::Config>::Version::get().spec_version;
if message.spec_version != expected_version {
log::trace!(
"Message {:?}/{:?}: spec_version mismatch. Expected {:?}, got {:?}",
bridge,
source_chain,
id,
expected_version,
message.spec_version,
);
Self::deposit_event(RawEvent::MessageVersionSpecMismatch(
bridge,
source_chain,
id,
expected_version,
message.spec_version,
));
return;
return dispatch_result;
}
// now that we have spec version checked, let's decode the call
let call = match message.call.into() {
Ok(call) => call,
Err(_) => {
log::trace!(target: "runtime::bridge-dispatch", "Failed to decode Call from message {:?}/{:?}", bridge, id,);
Self::deposit_event(RawEvent::MessageCallDecodeFailed(bridge, id));
return;
log::trace!(
target: "runtime::bridge-dispatch",
"Failed to decode Call from message {:?}/{:?}",
source_chain,
id,
);
Self::deposit_event(RawEvent::MessageCallDecodeFailed(source_chain, id));
return dispatch_result;
}
};
// prepare dispatch origin
let origin_account = match message.origin {
CallOrigin::SourceRoot => {
let hex_id = derive_account_id::<T::SourceChainAccountId>(bridge, SourceAccount::Root);
let hex_id = derive_account_id::<T::SourceChainAccountId>(source_chain, SourceAccount::Root);
let target_id = T::AccountIdConverter::convert(hex_id);
log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id);
target_id
}
CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => {
let digest = account_ownership_digest(&call, source_account_id, message.spec_version, bridge);
let digest = account_ownership_digest(
&call,
source_account_id,
message.spec_version,
source_chain,
target_chain,
);
let target_account = target_public.into_account();
if !target_signature.verify(&digest[..], &target_account) {
log::trace!(
target: "runtime::bridge-dispatch",
"Message {:?}/{:?}: origin proof is invalid. Expected account: {:?} from signature: {:?}",
bridge,
source_chain,
id,
target_account,
target_signature,
);
Self::deposit_event(RawEvent::MessageSignatureMismatch(bridge, id));
return;
Self::deposit_event(RawEvent::MessageSignatureMismatch(source_chain, id));
return dispatch_result;
}
log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account);
target_account
}
CallOrigin::SourceAccount(source_account_id) => {
let hex_id = derive_account_id(bridge, SourceAccount::Account(source_account_id));
let hex_id = derive_account_id(source_chain, SourceAccount::Account(source_account_id));
let target_id = T::AccountIdConverter::convert(hex_id);
log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id);
target_id
@@ -208,12 +248,12 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::MessageId> for Pallet<T, I> {
log::trace!(
target: "runtime::bridge-dispatch",
"Message {:?}/{:?}: the call ({:?}) is rejected by filter",
bridge,
source_chain,
id,
call,
);
Self::deposit_event(RawEvent::MessageCallRejected(bridge, id));
return;
Self::deposit_event(RawEvent::MessageCallRejected(source_chain, id));
return dispatch_result;
}
// verify weight
@@ -225,41 +265,67 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::MessageId> for Pallet<T, I> {
log::trace!(
target: "runtime::bridge-dispatch",
"Message {:?}/{:?}: passed weight is too low. Expected at least {:?}, got {:?}",
bridge,
source_chain,
id,
expected_weight,
message.weight,
);
Self::deposit_event(RawEvent::MessageWeightMismatch(
bridge,
source_chain,
id,
expected_weight,
message.weight,
));
return;
return dispatch_result;
}
// pay dispatch fee right before dispatch
let pay_dispatch_fee_at_target_chain = message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
if pay_dispatch_fee_at_target_chain && pay_dispatch_fee(&origin_account, message.weight).is_err() {
log::trace!(
target: "runtime::bridge-dispatch",
"Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}",
source_chain,
id,
message.weight,
);
Self::deposit_event(RawEvent::MessageDispatchPaymentFailed(
source_chain,
id,
origin_account,
message.weight,
));
return dispatch_result;
}
dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain;
// finally dispatch message
let origin = RawOrigin::Signed(origin_account).into();
log::trace!(target: "runtime::bridge-dispatch", "Message being dispatched is: {:.4096?}", &call);
let dispatch_result = call.dispatch(origin);
let actual_call_weight = extract_actual_weight(&dispatch_result, &dispatch_info);
let result = call.dispatch(origin);
let actual_call_weight = extract_actual_weight(&result, &dispatch_info);
dispatch_result.dispatch_result = result.is_ok();
dispatch_result.unspent_weight = message.weight.saturating_sub(actual_call_weight);
log::trace!(
target: "runtime::bridge-dispatch",
"Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}",
bridge,
"Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}. Call dispatch result: {:?}",
source_chain,
id,
actual_call_weight,
dispatch_result.unspent_weight,
message.weight,
dispatch_result,
result,
);
Self::deposit_event(RawEvent::MessageDispatched(
bridge,
source_chain,
id,
dispatch_result.map(drop).map_err(|e| e.error),
result.map(drop).map_err(|e| e.error),
));
dispatch_result
}
}
@@ -290,7 +356,7 @@ where
}
CallOrigin::SourceAccount(ref source_account_id) => {
ensure!(
sender_origin == &RawOrigin::Signed(source_account_id.clone()),
sender_origin == &RawOrigin::Signed(source_account_id.clone()) || sender_origin == &RawOrigin::Root,
BadOrigin
);
Ok(Some(source_account_id.clone()))
@@ -303,23 +369,24 @@ where
/// The byte vector returned by this function will be signed with a target chain account
/// private key. This way, the owner of `source_account_id` on the source chain proves that
/// the target chain account private key is also under his control.
pub fn account_ownership_digest<Call, AccountId, SpecVersion, BridgeId>(
pub fn account_ownership_digest<Call, AccountId, SpecVersion>(
call: &Call,
source_account_id: AccountId,
target_spec_version: SpecVersion,
source_instance_id: BridgeId,
source_chain_id: ChainId,
target_chain_id: ChainId,
) -> Vec<u8>
where
Call: Encode,
AccountId: Encode,
SpecVersion: Encode,
BridgeId: Encode,
{
let mut proof = Vec::new();
call.encode_to(&mut proof);
source_account_id.encode_to(&mut proof);
target_spec_version.encode_to(&mut proof);
source_instance_id.encode_to(&mut proof);
source_chain_id.encode_to(&mut proof);
target_chain_id.encode_to(&mut proof);
proof
}
@@ -342,6 +409,9 @@ mod tests {
type AccountId = u64;
type MessageId = [u8; 4];
const SOURCE_CHAIN_ID: ChainId = *b"srce";
const TARGET_CHAIN_ID: ChainId = *b"trgt";
#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)]
pub struct TestAccountPublic(AccountId);
@@ -463,31 +533,32 @@ mod tests {
fn prepare_message(
origin: CallOrigin<AccountId, TestAccountPublic, TestSignature>,
call: Call,
) -> <Pallet<TestRuntime> as MessageDispatch<<TestRuntime as Config>::MessageId>>::Message {
) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
MessagePayload {
spec_version: TEST_SPEC_VERSION,
weight: TEST_WEIGHT,
origin,
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
call: EncodedCall(call.encode()),
}
}
fn prepare_root_message(
call: Call,
) -> <Pallet<TestRuntime> as MessageDispatch<<TestRuntime as Config>::MessageId>>::Message {
) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
prepare_message(CallOrigin::SourceRoot, call)
}
fn prepare_target_message(
call: Call,
) -> <Pallet<TestRuntime> as MessageDispatch<<TestRuntime as Config>::MessageId>>::Message {
) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1));
prepare_message(origin, call)
}
fn prepare_source_message(
call: Call,
) -> <Pallet<TestRuntime> as MessageDispatch<<TestRuntime as Config>::MessageId>>::Message {
) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
let origin = CallOrigin::SourceAccount(1);
prepare_message(origin, call)
}
@@ -495,23 +566,25 @@ mod tests {
#[test]
fn should_fail_on_spec_version_mismatch() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
const BAD_SPEC_VERSION: SpecVersion = 99;
let mut message =
prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
let weight = message.weight;
message.spec_version = BAD_SPEC_VERSION;
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert_eq!(result.unspent_weight, weight);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageVersionSpecMismatch(
bridge,
SOURCE_CHAIN_ID,
id,
TEST_SPEC_VERSION,
BAD_SPEC_VERSION
@@ -525,21 +598,25 @@ mod tests {
#[test]
fn should_fail_on_weight_mismatch() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
let mut message =
prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
message.weight = 0;
message.weight = 7;
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert_eq!(result.unspent_weight, 7);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageWeightMismatch(
bridge, id, 1345000, 0,
SOURCE_CHAIN_ID,
id,
1038000,
7,
)),
topics: vec![],
}],
@@ -550,7 +627,6 @@ mod tests {
#[test]
fn should_fail_on_signature_mismatch() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99));
@@ -558,16 +634,20 @@ mod tests {
call_origin,
Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])),
);
let weight = message.weight;
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert_eq!(result.unspent_weight, weight);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageSignatureMismatch(
bridge, id
SOURCE_CHAIN_ID,
id
)),
topics: vec![],
}],
@@ -578,17 +658,19 @@ mod tests {
#[test]
fn should_emit_event_for_rejected_messages() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Err(()));
Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Err(()), |_, _| unreachable!());
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageRejected(bridge, id)),
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageRejected(
SOURCE_CHAIN_ID,
id
)),
topics: vec![],
}],
);
@@ -598,22 +680,25 @@ mod tests {
#[test]
fn should_fail_on_call_decode() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
let mut message =
prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
let weight = message.weight;
message.call.0 = vec![];
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert_eq!(result.unspent_weight, weight);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallDecodeFailed(
bridge, id
SOURCE_CHAIN_ID,
id
)),
topics: vec![],
}],
@@ -624,7 +709,6 @@ mod tests {
#[test]
fn should_emit_event_for_rejected_calls() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
let call = Call::System(<frame_system::Call<TestRuntime>>::fill_block(Perbill::from_percent(75)));
@@ -633,13 +717,18 @@ mod tests {
message.weight = weight;
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert_eq!(result.unspent_weight, weight);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallRejected(bridge, id)),
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallRejected(
SOURCE_CHAIN_ID,
id
)),
topics: vec![],
}],
);
@@ -647,21 +736,113 @@ mod tests {
}
#[test]
fn should_dispatch_bridge_message_from_root_origin() {
fn should_emit_event_for_unpaid_calls() {
new_test_ext().execute_with(|| {
let bridge = b"ethb".to_owned();
let id = [0; 4];
let message = prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
let mut message =
prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
let weight = message.weight;
message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Err(()));
assert_eq!(result.unspent_weight, weight);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatchPaymentFailed(
SOURCE_CHAIN_ID,
id,
AccountIdConverter::convert(derive_account_id::<AccountId>(
SOURCE_CHAIN_ID,
SourceAccount::Root
)),
TEST_WEIGHT,
)),
topics: vec![],
}],
);
});
}
#[test]
fn should_dispatch_calls_paid_at_target_chain() {
new_test_ext().execute_with(|| {
let id = [0; 4];
let mut message =
prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
System::set_block_number(1);
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Ok(()));
assert!(result.dispatch_fee_paid_during_dispatch);
assert!(result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatched(
bridge,
SOURCE_CHAIN_ID,
id,
Ok(())
)),
topics: vec![],
}],
);
});
}
#[test]
fn should_return_dispatch_failed_flag_if_dispatch_happened_but_failed() {
new_test_ext().execute_with(|| {
let id = [0; 4];
let call = Call::System(<frame_system::Call<TestRuntime>>::set_heap_pages(1));
let message = prepare_target_message(call);
System::set_block_number(1);
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert!(!result.dispatch_fee_paid_during_dispatch);
assert!(!result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatched(
SOURCE_CHAIN_ID,
id,
Err(sp_runtime::DispatchError::BadOrigin)
)),
topics: vec![],
}],
);
})
}
#[test]
fn should_dispatch_bridge_message_from_root_origin() {
new_test_ext().execute_with(|| {
let id = [0; 4];
let message = prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
System::set_block_number(1);
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert!(!result.dispatch_fee_paid_during_dispatch);
assert!(result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatched(
SOURCE_CHAIN_ID,
id,
Ok(())
)),
@@ -675,20 +856,21 @@ mod tests {
fn should_dispatch_bridge_message_from_target_origin() {
new_test_ext().execute_with(|| {
let id = [0; 4];
let bridge = b"ethb".to_owned();
let call = Call::System(<frame_system::Call<TestRuntime>>::remark(vec![]));
let message = prepare_target_message(call);
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert!(!result.dispatch_fee_paid_during_dispatch);
assert!(result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatched(
bridge,
SOURCE_CHAIN_ID,
id,
Ok(())
)),
@@ -702,20 +884,21 @@ mod tests {
fn should_dispatch_bridge_message_from_source_origin() {
new_test_ext().execute_with(|| {
let id = [0; 4];
let bridge = b"ethb".to_owned();
let call = Call::System(<frame_system::Call<TestRuntime>>::remark(vec![]));
let message = prepare_source_message(call);
System::set_block_number(1);
Dispatch::dispatch(bridge, id, Ok(message));
let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
assert!(!result.dispatch_fee_paid_during_dispatch);
assert!(result.dispatch_result);
assert_eq!(
System::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatched(
bridge,
SOURCE_CHAIN_ID,
id,
Ok(())
)),
@@ -782,10 +965,7 @@ mod tests {
Err(BadOrigin)
));
// If we try and send the message from Root, it is also rejected
assert!(matches!(
verify_message_origin(&RawOrigin::Root, &message),
Err(BadOrigin)
));
// The Root account is allowed to assume any expected origin account
assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1))));
}
}
@@ -151,7 +151,7 @@ pub fn verify_substrate_finality_proof(
let best_set = best_set?;
let verify_result = sc_finality_grandpa::GrandpaJustification::<Block>::decode_and_verify_finalizes(
&raw_finality_proof,
raw_finality_proof,
(finality_target_hash, finality_target_number),
best_set_id,
&best_set,
+63 -83
View File
@@ -1381,15 +1381,12 @@ pub(crate) mod tests {
fn verify_transaction_finalized_works_for_best_finalized_header() {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
),
true,
);
assert!(verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
));
});
}
@@ -1400,15 +1397,12 @@ pub(crate) mod tests {
insert_header(&mut storage, example_header_parent());
insert_header(&mut storage, example_header());
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(
&storage,
example_header_parent().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
),
true,
);
assert!(verify_transaction_finalized(
&storage,
example_header_parent().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
));
});
}
@@ -1416,10 +1410,12 @@ pub(crate) mod tests {
fn verify_transaction_finalized_rejects_proof_with_missing_tx() {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header().compute_hash(),
1,
&[],
),);
});
}
@@ -1427,10 +1423,12 @@ pub(crate) mod tests {
fn verify_transaction_finalized_rejects_unknown_header() {
run_test(TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header().compute_hash(),
1,
&[],
));
});
}
@@ -1440,15 +1438,12 @@ pub(crate) mod tests {
let mut storage = BridgeStorage::<TestRuntime>::new();
insert_header(&mut storage, example_header_parent());
insert_header(&mut storage, example_header());
assert_eq!(
verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(true))],
));
});
}
@@ -1464,15 +1459,12 @@ pub(crate) mod tests {
insert_header(&mut storage, example_header());
insert_header(&mut storage, finalized_header_sibling);
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(
&storage,
finalized_header_sibling_hash,
0,
&[(example_tx(), example_tx_receipt(true))],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
finalized_header_sibling_hash,
0,
&[(example_tx(), example_tx_receipt(true))],
));
});
}
@@ -1488,15 +1480,12 @@ pub(crate) mod tests {
insert_header(&mut storage, finalized_header_uncle);
insert_header(&mut storage, example_header());
storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0);
assert_eq!(
verify_transaction_finalized(
&storage,
finalized_header_uncle_hash,
0,
&[(example_tx(), example_tx_receipt(true))],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
finalized_header_uncle_hash,
0,
&[(example_tx(), example_tx_receipt(true))],
));
});
}
@@ -1504,18 +1493,15 @@ pub(crate) mod tests {
fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[
(example_tx(), example_tx_receipt(true)),
(example_tx(), example_tx_receipt(true))
],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[
(example_tx(), example_tx_receipt(true)),
(example_tx(), example_tx_receipt(true))
],
));
});
}
@@ -1523,15 +1509,12 @@ pub(crate) mod tests {
fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() {
run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), vec![42])],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header().compute_hash(),
0,
&[(example_tx(), vec![42])],
));
});
}
@@ -1539,15 +1522,12 @@ pub(crate) mod tests {
fn verify_transaction_finalized_rejects_failed_transaction() {
run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
assert_eq!(
verify_transaction_finalized(
&storage,
example_header_with_failed_receipt().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(false))],
),
false,
);
assert!(!verify_transaction_finalized(
&storage,
example_header_with_failed_receipt().compute_hash(),
0,
&[(example_tx(), example_tx_receipt(false))],
));
});
}
}
@@ -214,7 +214,7 @@ impl HeaderBuilder {
/// Helper function for getting a genesis header which has been signed by an authority.
pub fn build_genesis_header(author: &SecretKey) -> AuraHeader {
let genesis = HeaderBuilder::genesis();
genesis.header.sign_by(&author)
genesis.header.sign_by(author)
}
/// Helper function for building a custom child header which has been signed by an authority.
@@ -222,7 +222,7 @@ pub fn build_custom_header<F>(author: &SecretKey, previous: &AuraHeader, customi
where
F: FnOnce(AuraHeader) -> AuraHeader,
{
let new_header = HeaderBuilder::with_parent(&previous);
let new_header = HeaderBuilder::with_parent(previous);
let custom_header = customize_header(new_header.header);
custom_header.sign_by(author)
}
@@ -396,7 +396,7 @@ mod tests {
fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result<ImportContext<AccountId>, Error> {
run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| {
let storage = BridgeStorage::<TestRuntime>::new();
verify_aura_header(&storage, &config, None, header, &ConstChainTime::default())
verify_aura_header(&storage, config, None, header, &ConstChainTime::default())
})
}
@@ -787,7 +787,7 @@ mod tests {
fn pool_verifies_future_block_number() {
// when header is too far from the future
assert_eq!(
default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(&validators), None),),
default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(validators), None),),
Err(Error::UnsignedTooFarInTheFuture),
);
}
@@ -800,7 +800,7 @@ mod tests {
default_accept_into_pool(|validators| (
HeaderBuilder::with_parent_number(3)
.step(GENESIS_STEP + 3)
.sign_by_set(&validators),
.sign_by_set(validators),
None,
),),
Err(Error::DoubleVote),
@@ -38,24 +38,18 @@
//!
//! Note that the worst case scenario here would be a justification where each validator has it's
//! own fork which is `SESSION_LENGTH` blocks long.
//!
//! As far as benchmarking results go, the only benchmark that should be used in
//! `pallet-bridge-grandpa` to annotate weights is the `submit_finality_proof` one. The others are
//! looking at the effects of specific code paths and do not actually reflect the overall worst case
//! scenario.
use crate::*;
use bp_test_utils::{
accounts, authority_list, make_justification_for_header, test_keyring, JustificationGeneratorParams, ALICE,
TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID,
accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID,
};
use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller};
use frame_support::traits::Get;
use frame_system::RawOrigin;
use sp_finality_grandpa::AuthorityId;
use sp_runtime::traits::Zero;
use sp_std::{vec, vec::Vec};
use sp_std::vec::Vec;
// The maximum number of vote ancestries to include in a justification.
//
@@ -75,81 +69,46 @@ fn header_number<T: Config<I>, I: 'static, N: From<u32>>() -> N {
(T::HeadersToKeep::get() + 1).into()
}
/// Prepare header and its justification to submit using `submit_finality_proof`.
fn prepare_benchmark_data<T: Config<I>, I: 'static>(
precommits: u32,
ancestors: u32,
) -> (BridgedHeader<T, I>, GrandpaJustification<BridgedHeader<T, I>>) {
let authority_list = accounts(precommits as u16)
.iter()
.map(|id| (AuthorityId::from(*id), 1))
.collect::<Vec<_>>();
let init_data = InitializationData {
header: bp_test_utils::test_header(Zero::zero()),
authority_list,
set_id: TEST_GRANDPA_SET_ID,
is_halted: false,
};
bootstrap_bridge::<T, I>(init_data);
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let params = JustificationGeneratorParams {
header: header.clone(),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: accounts(precommits as u16).iter().map(|k| (*k, 1)).collect::<Vec<_>>(),
ancestors,
forks: 1,
};
let justification = make_justification_for_header(params);
(header, justification)
}
benchmarks_instance_pallet! {
// This is the "gold standard" benchmark for this extrinsic, and it's what should be used to
// annotate the weight in the pallet.
//
// The other benchmarks related to `submit_finality_proof` are looking at the effect of specific
// parameters and are there mostly for seeing how specific codepaths behave.
submit_finality_proof {
let v in 1..MAX_VOTE_ANCESTRIES;
let p in 1..MAX_VALIDATOR_SET_SIZE;
let caller: T::AccountId = whitelisted_caller();
let authority_list = accounts(p as u16)
.iter()
.map(|id| (AuthorityId::from(*id), 1))
.collect::<Vec<_>>();
let init_data = InitializationData {
header: bp_test_utils::test_header(Zero::zero()),
authority_list,
set_id: TEST_GRANDPA_SET_ID,
is_halted: false,
};
bootstrap_bridge::<T, I>(init_data);
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let params = JustificationGeneratorParams {
header: header.clone(),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::<Vec<_>>(),
votes: v,
forks: 1,
};
let justification = make_justification_for_header(params);
}: _(RawOrigin::Signed(caller), header, justification)
verify {
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let expected_hash = header.hash();
assert_eq!(<BestFinalized<T, I>>::get(), expected_hash);
assert!(<ImportedHeaders<T, I>>::contains_key(expected_hash));
}
// What we want to check here is the effect of vote ancestries on justification verification
// do this by varying the number of headers between `finality_target` and `header_of_chain`.
submit_finality_proof_on_single_fork {
let v in 1..MAX_VOTE_ANCESTRIES;
let caller: T::AccountId = whitelisted_caller();
let init_data = InitializationData {
header: bp_test_utils::test_header(Zero::zero()),
authority_list: authority_list(),
set_id: TEST_GRANDPA_SET_ID,
is_halted: false,
};
bootstrap_bridge::<T, I>(init_data);
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let params = JustificationGeneratorParams {
header: header.clone(),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: test_keyring(),
votes: v,
forks: 1,
};
let justification = make_justification_for_header(params);
let (header, justification) = prepare_benchmark_data::<T, I>(p, v);
}: submit_finality_proof(RawOrigin::Signed(caller), header, justification)
verify {
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
@@ -158,124 +117,4 @@ benchmarks_instance_pallet! {
assert_eq!(<BestFinalized<T, I>>::get(), expected_hash);
assert!(<ImportedHeaders<T, I>>::contains_key(expected_hash));
}
// What we want to check here is the effect of many pre-commits on justification verification.
// We do this by creating many forks, whose head will be used as a signed pre-commit in the
// final justification.
submit_finality_proof_on_many_forks {
let p in 1..MAX_VALIDATOR_SET_SIZE;
let caller: T::AccountId = whitelisted_caller();
let authority_list = accounts(p as u16)
.iter()
.map(|id| (AuthorityId::from(*id), 1))
.collect::<Vec<_>>();
let init_data = InitializationData {
header: bp_test_utils::test_header(Zero::zero()),
authority_list,
set_id: TEST_GRANDPA_SET_ID,
is_halted: false,
};
bootstrap_bridge::<T, I>(init_data);
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let params = JustificationGeneratorParams {
header: header.clone(),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::<Vec<_>>(),
votes: p,
forks: p,
};
let justification = make_justification_for_header(params);
}: submit_finality_proof(RawOrigin::Signed(caller), header, justification)
verify {
let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
let expected_hash = header.hash();
assert_eq!(<BestFinalized<T, I>>::get(), expected_hash);
assert!(<ImportedHeaders<T, I>>::contains_key(expected_hash));
}
// Here we want to find out the overheaded of looking through consensus digests found in a
// header. As the number of logs in a header grows, how much more work do we require to look
// through them?
//
// Note that this should be the same for looking through scheduled changes and forces changes,
// which is why we only have one benchmark for this.
find_scheduled_change {
// Not really sure what a good bound for this is.
let n in 1..1000;
let mut logs = vec![];
for i in 0..n {
// We chose a non-consensus log on purpose since that way we have to look through all
// the logs in the header
logs.push(sp_runtime::DigestItem::Other(vec![]));
}
let mut header: BridgedHeader<T, I> = bp_test_utils::test_header(Zero::zero());
let digest = header.digest_mut();
*digest = sp_runtime::Digest {
logs,
};
}: {
crate::find_scheduled_change(&header)
}
// What we want to check here is how long it takes to read and write the authority set tracked
// by the pallet as the number of authorities grows.
read_write_authority_sets {
// The current max target number of validators on Polkadot/Kusama
let n in 1..1000;
let mut authorities = vec![];
for i in 0..n {
authorities.push((ALICE, 1));
}
let authority_set = bp_header_chain::AuthoritySet {
authorities: authorities.iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect(),
set_id: 0
};
<CurrentAuthoritySet<T, I>>::put(&authority_set);
}: {
let authority_set = <CurrentAuthoritySet<T, I>>::get();
<CurrentAuthoritySet<T, I>>::put(&authority_set);
}
}
#[cfg(test)]
mod tests {
use super::*;
use frame_support::assert_ok;
#[test]
fn finality_proof_is_valid() {
mock::run_test(|| {
assert_ok!(test_benchmark_submit_finality_proof::<mock::TestRuntime>());
});
}
#[test]
fn single_fork_finality_proof_is_valid() {
mock::run_test(|| {
assert_ok!(test_benchmark_submit_finality_proof_on_single_fork::<mock::TestRuntime>());
});
}
#[test]
fn multi_fork_finality_proof_is_valid() {
mock::run_test(|| {
assert_ok!(test_benchmark_submit_finality_proof_on_many_forks::<mock::TestRuntime>());
});
}
}
+11 -5
View File
@@ -46,6 +46,7 @@ use frame_support::{ensure, fail};
use frame_system::{ensure_signed, RawOrigin};
use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID};
use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero};
use sp_std::convert::TryInto;
#[cfg(test)]
mod mock;
@@ -124,8 +125,8 @@ pub mod pallet {
/// If successful in verification, it will write the target header to the underlying storage
/// pallet.
#[pallet::weight(T::WeightInfo::submit_finality_proof(
justification.votes_ancestries.len() as u32,
justification.commit.precommits.len() as u32,
justification.commit.precommits.len().try_into().unwrap_or(u32::MAX),
justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX),
))]
pub fn submit_finality_proof(
origin: OriginFor<T>,
@@ -414,9 +415,14 @@ pub mod pallet {
let set_id = authority_set.set_id;
Ok(
verify_justification::<BridgedHeader<T, I>>((hash, number), set_id, &voter_set, &justification).map_err(
verify_justification::<BridgedHeader<T, I>>((hash, number), set_id, &voter_set, justification).map_err(
|e| {
log::error!(target: "runtime::bridge-grandpa", "Received invalid justification for {:?}: {:?}", hash, e);
log::error!(
target: "runtime::bridge-grandpa",
"Received invalid justification for {:?}: {:?}",
hash,
e,
);
<Error<T, I>>::InvalidJustification
},
)?,
@@ -693,7 +699,7 @@ mod tests {
CurrentAuthoritySet::<TestRuntime>::get().authorities,
init_data.authority_list
);
assert_eq!(IsHalted::<TestRuntime>::get(), false);
assert!(!IsHalted::<TestRuntime>::get());
})
}
@@ -17,7 +17,7 @@
//! Autogenerated weights for pallet_bridge_grandpa
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20
//! DATE: 2021-06-03, STEPS: [50, ], REPEAT: 20
//! LOW RANGE: [], HIGH RANGE: []
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled
//! CHAIN: Some("dev"), DB CACHE: 128
@@ -48,74 +48,28 @@ use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_bridge_grandpa.
pub trait WeightInfo {
fn submit_finality_proof(v: u32, p: u32) -> Weight;
fn submit_finality_proof_on_single_fork(v: u32) -> Weight;
fn submit_finality_proof_on_many_forks(p: u32) -> Weight;
fn find_scheduled_change(n: u32) -> Weight;
fn read_write_authority_sets(n: u32) -> Weight;
fn submit_finality_proof(p: u32, v: u32) -> Weight;
}
/// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware.
pub struct RialtoWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
fn submit_finality_proof(v: u32, p: u32) -> Weight {
fn submit_finality_proof(p: u32, v: u32) -> Weight {
(0 as Weight)
.saturating_add((756_462_000 as Weight).saturating_mul(v as Weight))
.saturating_add((791_236_000 as Weight).saturating_mul(p as Weight))
.saturating_add((59_692_000 as Weight).saturating_mul(p as Weight))
.saturating_add((6_876_000 as Weight).saturating_mul(v as Weight))
.saturating_add(T::DbWeight::get().reads(7 as Weight))
.saturating_add(T::DbWeight::get().writes(6 as Weight))
}
fn submit_finality_proof_on_single_fork(v: u32) -> Weight {
(280_121_000 as Weight)
.saturating_add((14_098_000 as Weight).saturating_mul(v as Weight))
.saturating_add(T::DbWeight::get().reads(7 as Weight))
.saturating_add(T::DbWeight::get().writes(6 as Weight))
}
fn submit_finality_proof_on_many_forks(p: u32) -> Weight {
(10_370_940_000 as Weight)
.saturating_add((96_902_000 as Weight).saturating_mul(p as Weight))
.saturating_add(T::DbWeight::get().reads(7 as Weight))
.saturating_add(T::DbWeight::get().writes(6 as Weight))
}
fn find_scheduled_change(n: u32) -> Weight {
(479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight))
}
fn read_write_authority_sets(n: u32) -> Weight {
(8_030_000 as Weight)
.saturating_add((232_000 as Weight).saturating_mul(n as Weight))
.saturating_add(T::DbWeight::get().reads(1 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
}
// For backwards compatibility and tests
impl WeightInfo for () {
fn submit_finality_proof(v: u32, p: u32) -> Weight {
fn submit_finality_proof(p: u32, v: u32) -> Weight {
(0 as Weight)
.saturating_add((756_462_000 as Weight).saturating_mul(v as Weight))
.saturating_add((791_236_000 as Weight).saturating_mul(p as Weight))
.saturating_add((59_692_000 as Weight).saturating_mul(p as Weight))
.saturating_add((6_876_000 as Weight).saturating_mul(v as Weight))
.saturating_add(RocksDbWeight::get().reads(7 as Weight))
.saturating_add(RocksDbWeight::get().writes(6 as Weight))
}
fn submit_finality_proof_on_single_fork(v: u32) -> Weight {
(280_121_000 as Weight)
.saturating_add((14_098_000 as Weight).saturating_mul(v as Weight))
.saturating_add(RocksDbWeight::get().reads(7 as Weight))
.saturating_add(RocksDbWeight::get().writes(6 as Weight))
}
fn submit_finality_proof_on_many_forks(p: u32) -> Weight {
(10_370_940_000 as Weight)
.saturating_add((96_902_000 as Weight).saturating_mul(p as Weight))
.saturating_add(RocksDbWeight::get().reads(7 as Weight))
.saturating_add(RocksDbWeight::get().writes(6 as Weight))
}
fn find_scheduled_change(n: u32) -> Weight {
(479_000 as Weight).saturating_add((11_000 as Weight).saturating_mul(n as Weight))
}
fn read_write_authority_sets(n: u32) -> Weight {
(8_030_000 as Weight)
.saturating_add((232_000 as Weight).saturating_mul(n as Weight))
.saturating_add(RocksDbWeight::get().reads(1 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
}
@@ -7,6 +7,7 @@ edition = "2018"
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[dependencies]
bitvec = { version = "0.20", default-features = false, features = ["alloc"] }
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
log = { version = "0.4.14", default-features = false }
num-traits = { version = "0.2", default-features = false }
@@ -14,6 +15,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] }
# Bridge dependencies
bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false }
bp-messages = { path = "../../primitives/messages", default-features = false }
bp-rialto = { path = "../../primitives/chain-rialto", default-features = false }
bp-runtime = { path = "../../primitives/runtime", default-features = false }
@@ -36,6 +38,7 @@ pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "m
[features]
default = ["std"]
std = [
"bp-message-dispatch/std",
"bp-messages/std",
"bp-runtime/std",
"bp-rialto/std",
+27 -3
View File
@@ -101,7 +101,14 @@ the `MessageAccepted` event is emitted in the `send_message()` transaction. The
message lane identifier and nonce that has been assigned to the message. When a message is delivered
to the target chain, the `MessagesDelivered` event is emitted from the
`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane
identifier and inclusive range of delivered message nonces.
identifier, inclusive range of delivered message nonces and their single-bit dispatch results.
Please note that the meaning of the 'dispatch result' is determined by the message dispatcher at
the target chain. For example, in case of immediate call dispatcher it will be the `true` if call
has been successfully dispatched and `false` if it has only been delivered. This simple mechanism
built into the messages module allows building basic bridge applications, which only care whether
their messages have been successfully dispatched or not. More sophisticated applications may use
their own dispatch result delivery mechanism to deliver something larger than single bit.
### How to plug-in Messages Module to Send Messages to the Bridged Chain?
@@ -152,7 +159,7 @@ all required traits and will simply reject all transactions, related to outbound
The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with
inbound messages. The `pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the
bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction,
bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction,
this implementation must be able to parse and verify the proof of messages wrapped in this
transaction. Normally, you would reuse the same (configurable) type on all chains that are sending
messages to the same bridged chain.
@@ -194,7 +201,7 @@ message needs to be read. So there's another
When choosing values for these parameters, you must also keep in mind that if proof in your scheme
is based on finality of headers (and it is the most obvious option for Substrate-based chains with
finality notion), then choosing too small values for these parameters may cause significant delays
in message delivery. That's because there too many actors involved in this scheme: 1) authorities
in message delivery. That's because there are too many actors involved in this scheme: 1) authorities
that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the
headers relayer then needs to submit this header and its finality proof to the source chain; 3) the
messages relayer must then send confirmation transaction (storage proof of this map) to the source
@@ -347,6 +354,23 @@ Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_corr
`pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every
runtime's tests.
### Post-dispatch weight refunds of the `receive_messages_proof` call
Weight formula of the `receive_messages_proof` call assumes that the dispatch fee of every message is
paid at the target chain (where call is executed), that every message will be dispatched and that
dispatch weight of the message will be exactly the weight that is returned from the
`MessageDispatch::dispatch_weight` method call. This isn't true for all messages, so the call returns
actual weight used to dispatch messages.
This actual weight is the weight, returned by the weight formula, minus:
- the weight of undispatched messages, if we have failed to dispatch because of different issues;
- the unspent dispatch weight if the declared weight of some messages is less than their actual post-dispatch weight;
- the pay-dispatch-fee weight for every message that had dispatch fee paid at the source chain.
The last component is computed as a difference between two benchmarks results - the `receive_single_message_proof`
benchmark (that assumes that the fee is paid during dispatch) and the `receive_single_prepaid_message_proof`
(that assumes that the dispatch fee is already paid).
### Weight of `receive_messages_delivery_proof` call
#### Related benchmarks
@@ -17,16 +17,25 @@
//! Messages pallet benchmarking.
use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH;
use crate::{inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, Call, Instance};
use crate::{
inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, outbound_lane::ReceivalConfirmationResult,
Call, Instance,
};
use bp_messages::{
source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, InboundLaneData, LaneId, MessageData,
MessageNonce, OutboundLaneData, UnrewardedRelayersState,
source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, InboundLaneData, LaneId,
MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState,
};
use bp_runtime::messages::DispatchFeePayment;
use frame_benchmarking::{account, benchmarks_instance};
use frame_support::{traits::Get, weights::Weight};
use frame_system::RawOrigin;
use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, ops::RangeInclusive, prelude::*};
use sp_std::{
collections::{btree_map::BTreeMap, vec_deque::VecDeque},
convert::TryInto,
ops::RangeInclusive,
prelude::*,
};
/// Fee paid by submitter for single message delivery.
pub const MESSAGE_FEE: u64 = 10_000_000_000;
@@ -67,6 +76,8 @@ pub struct MessageProofParams {
pub outbound_lane_data: Option<OutboundLaneData>,
/// Proof size requirements.
pub size: ProofSize,
/// Where the fee for dispatching message is paid?
pub dispatch_fee_payment: DispatchFeePayment,
}
/// Benchmark-specific message delivery proof parameters.
@@ -108,6 +119,8 @@ pub trait Config<I: Instance>: crate::Config<I> {
fn prepare_message_delivery_proof(
params: MessageDeliveryProofParams<Self::AccountId>,
) -> <Self::TargetHeaderChain as TargetHeaderChain<Self::OutboundPayload, Self::AccountId>>::MessagesDeliveryProof;
/// Returns true if message has been dispatched (either successfully or not).
fn is_message_dispatched(nonce: MessageNonce) -> bool;
}
benchmarks_instance! {
@@ -242,7 +255,8 @@ benchmarks_instance! {
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is successfully dispatched;
// * message requires all heavy checks done by dispatcher.
// * message requires all heavy checks done by dispatcher;
// * message dispatch fee is paid at target (this) chain.
//
// This is base benchmark for all other message delivery benchmarks.
receive_single_message_proof {
@@ -257,6 +271,7 @@ benchmarks_instance! {
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
@@ -264,13 +279,15 @@ benchmarks_instance! {
crate::Pallet::<T, I>::inbound_latest_received_nonce(T::bench_lane_id()),
21,
);
assert!(T::is_message_dispatched(21));
}
// Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions:
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is successfully dispatched;
// * message requires all heavy checks done by dispatcher.
// * message requires all heavy checks done by dispatcher;
// * message dispatch fee is paid at target (this) chain.
//
// The weight of single message delivery could be approximated as
// `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`.
@@ -288,6 +305,7 @@ benchmarks_instance! {
message_nonces: 21..=22,
outbound_lane_data: None,
size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight)
verify {
@@ -295,13 +313,15 @@ benchmarks_instance! {
crate::Pallet::<T, I>::inbound_latest_received_nonce(T::bench_lane_id()),
22,
);
assert!(T::is_message_dispatched(22));
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * proof includes outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is successfully dispatched;
// * message requires all heavy checks done by dispatcher.
// * message requires all heavy checks done by dispatcher;
// * message dispatch fee is paid at target (this) chain.
//
// The weight of outbound lane state delivery would be
// `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`.
@@ -323,6 +343,7 @@ benchmarks_instance! {
latest_generated_nonce: 21,
}),
size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
@@ -334,6 +355,7 @@ benchmarks_instance! {
crate::Pallet::<T, I>::inbound_latest_confirmed_nonce(T::bench_lane_id()),
20,
);
assert!(T::is_message_dispatched(21));
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
@@ -357,6 +379,7 @@ benchmarks_instance! {
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::HasExtraNodes(1024),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
@@ -364,6 +387,7 @@ benchmarks_instance! {
crate::Pallet::<T, I>::inbound_latest_received_nonce(T::bench_lane_id()),
21,
);
assert!(T::is_message_dispatched(21));
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
@@ -389,6 +413,7 @@ benchmarks_instance! {
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::HasExtraNodes(16 * 1024),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
@@ -396,6 +421,40 @@ benchmarks_instance! {
crate::Pallet::<T, I>::inbound_latest_received_nonce(T::bench_lane_id()),
21,
);
assert!(T::is_message_dispatched(21));
}
// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
// * proof does not include outbound lane state proof;
// * inbound lane already has state, so it needs to be read and decoded;
// * message is successfully dispatched;
// * message requires all heavy checks done by dispatcher;
// * message dispatch fee is paid at source (bridged) chain.
//
// This benchmark is used to compute extra weight spent at target chain when fee is paid there. Then we use
// this information in two places: (1) to reduce weight of delivery tx if sender pays fee at the source chain
// and (2) to refund relayer with this weight if fee has been paid at the source chain.
receive_single_prepaid_message_proof {
let relayer_id_on_source = T::bridged_relayer_id();
let relayer_id_on_target = account("relayer", 0, SEED);
// mark messages 1..=20 as delivered
receive_messages::<T, I>(20);
let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams {
lane: T::bench_lane_id(),
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
});
}: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight)
verify {
assert_eq!(
crate::Pallet::<T, I>::inbound_latest_received_nonce(T::bench_lane_id()),
21,
);
assert!(T::is_message_dispatched(21));
}
// Benchmark `receive_messages_delivery_proof` extrinsic with following conditions:
@@ -420,7 +479,10 @@ benchmarks_instance! {
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![(1, 1, relayer_id.clone())].into_iter().collect(),
relayers: vec![UnrewardedRelayer {
relayer: relayer_id.clone(),
messages: DeliveredMessages::new(1, true),
}].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: ProofSize::Minimal(0),
@@ -455,10 +517,15 @@ benchmarks_instance! {
messages_in_oldest_entry: 2,
total_messages: 2,
};
let mut delivered_messages = DeliveredMessages::new(1, true);
delivered_messages.note_dispatched_message(true);
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![(1, 2, relayer_id.clone())].into_iter().collect(),
relayers: vec![UnrewardedRelayer {
relayer: relayer_id.clone(),
messages: delivered_messages,
}].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: ProofSize::Minimal(0),
@@ -496,8 +563,14 @@ benchmarks_instance! {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![
(1, 1, relayer1_id.clone()),
(2, 2, relayer2_id.clone()),
UnrewardedRelayer {
relayer: relayer1_id.clone(),
messages: DeliveredMessages::new(1, true),
},
UnrewardedRelayer {
relayer: relayer2_id.clone(),
messages: DeliveredMessages::new(2, true),
},
].into_iter().collect(),
last_confirmed_nonce: 0,
},
@@ -569,6 +642,7 @@ benchmarks_instance! {
message_nonces: 21..=(20 + i as MessageNonce),
outbound_lane_data: None,
size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(
RawOrigin::Signed(relayer_id_on_target),
@@ -606,6 +680,7 @@ benchmarks_instance! {
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::HasExtraNodes(i as _),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(
RawOrigin::Signed(relayer_id_on_target),
@@ -643,6 +718,7 @@ benchmarks_instance! {
message_nonces: 21..=21,
outbound_lane_data: None,
size: ProofSize::HasLargeLeaf(i as _),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(
RawOrigin::Signed(relayer_id_on_target),
@@ -686,6 +762,7 @@ benchmarks_instance! {
latest_generated_nonce: 21,
}),
size: ProofSize::Minimal(0),
dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
});
}: receive_messages_proof(
RawOrigin::Signed(relayer_id_on_target),
@@ -728,10 +805,17 @@ benchmarks_instance! {
messages_in_oldest_entry: 1,
total_messages: i as MessageNonce,
};
let mut delivered_messages = DeliveredMessages::new(1, true);
for nonce in 2..=i {
delivered_messages.note_dispatched_message(true);
}
let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams {
lane: T::bench_lane_id(),
inbound_lane_data: InboundLaneData {
relayers: vec![(1, i as MessageNonce, relayer_id.clone())].into_iter().collect(),
relayers: vec![UnrewardedRelayer {
relayer: relayer_id.clone(),
messages: delivered_messages,
}].into_iter().collect(),
last_confirmed_nonce: 0,
},
size: ProofSize::Minimal(0),
@@ -776,7 +860,10 @@ benchmarks_instance! {
relayers: relayers
.keys()
.enumerate()
.map(|(j, relayer_id)| (j as MessageNonce + 1, j as MessageNonce + 1, relayer_id.clone()))
.map(|(j, relayer)| UnrewardedRelayer {
relayer: relayer.clone(),
messages: DeliveredMessages::new(j as MessageNonce + 1, true),
})
.collect(),
last_confirmed_nonce: 0,
},
@@ -808,13 +895,29 @@ fn send_regular_message_with_payload<T: Config<I>, I: Instance>(payload: Vec<u8>
fn confirm_message_delivery<T: Config<I>, I: Instance>(nonce: MessageNonce) {
let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
assert!(outbound_lane.confirm_delivery(nonce).is_some());
let latest_received_nonce = outbound_lane.data().latest_received_nonce;
let mut relayers = VecDeque::with_capacity((nonce - latest_received_nonce) as usize);
for nonce in latest_received_nonce + 1..=nonce {
relayers.push_back(UnrewardedRelayer {
relayer: (),
messages: DeliveredMessages::new(nonce, true),
});
}
assert!(matches!(
outbound_lane.confirm_delivery(nonce, &relayers),
ReceivalConfirmationResult::ConfirmedMessages(_),
));
}
fn receive_messages<T: Config<I>, I: Instance>(nonce: MessageNonce) {
let mut inbound_lane_storage = inbound_lane_storage::<T, I>(T::bench_lane_id());
inbound_lane_storage.set_data(InboundLaneData {
relayers: vec![(1, nonce, T::bridged_relayer_id())].into_iter().collect(),
relayers: vec![UnrewardedRelayer {
relayer: T::bridged_relayer_id(),
messages: DeliveredMessages::new(nonce, true),
}]
.into_iter()
.collect(),
last_confirmed_nonce: 0,
});
}
@@ -18,8 +18,10 @@
use bp_messages::{
target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch},
InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData,
DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer,
};
use bp_runtime::messages::MessageDispatchResult;
use frame_support::RuntimeDebug;
use sp_std::prelude::PartialEq;
/// Inbound lane storage.
@@ -27,7 +29,7 @@ pub trait InboundLaneStorage {
/// Delivery and dispatch fee type on source chain.
type MessageFee;
/// Id of relayer on source chain.
type Relayer: PartialEq;
type Relayer: Clone + PartialEq;
/// Lane id.
fn id(&self) -> LaneId;
@@ -41,6 +43,22 @@ pub trait InboundLaneStorage {
fn set_data(&mut self, data: InboundLaneData<Self::Relayer>);
}
/// Result of single message receival.
#[derive(RuntimeDebug, PartialEq, Eq)]
pub enum ReceivalResult {
/// Message has been received and dispatched. Note that we don't care whether dispatch has
/// been successful or not - in both case message falls into this category.
///
/// The message dispatch result is also returned.
Dispatched(MessageDispatchResult),
/// Message has invalid nonce and lane has rejected to accept this message.
InvalidNonce,
/// There are too many unrewarded relayer entires at the lane.
TooManyUnrewardedRelayers,
/// There are too many unconfirmed messages at the lane.
TooManyUnconfirmedMessages,
}
/// Inbound messages lane.
pub struct InboundLane<S> {
storage: S,
@@ -71,7 +89,7 @@ impl<S: InboundLaneStorage> InboundLane<S> {
while data
.relayers
.front()
.map(|(_, nonce_high, _)| *nonce_high <= new_confirmed_nonce)
.map(|entry| entry.messages.end <= new_confirmed_nonce)
.unwrap_or(false)
{
data.relayers.pop_front();
@@ -79,8 +97,12 @@ impl<S: InboundLaneStorage> InboundLane<S> {
// Secondly, update the next record with lower nonce equal to new confirmed nonce if needed.
// Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap.
match data.relayers.front_mut() {
Some((nonce_low, _, _)) if *nonce_low < new_confirmed_nonce => {
*nonce_low = new_confirmed_nonce + 1;
Some(entry) if entry.messages.begin < new_confirmed_nonce => {
entry.messages.dispatch_results = entry
.messages
.dispatch_results
.split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _);
entry.messages.begin = new_confirmed_nonce + 1;
}
_ => {}
}
@@ -90,51 +112,61 @@ impl<S: InboundLaneStorage> InboundLane<S> {
}
/// Receive new message.
pub fn receive_message<P: MessageDispatch<S::MessageFee>>(
pub fn receive_message<P: MessageDispatch<AccountId, S::MessageFee>, AccountId>(
&mut self,
relayer: S::Relayer,
relayer_at_bridged_chain: &S::Relayer,
relayer_at_this_chain: &AccountId,
nonce: MessageNonce,
message_data: DispatchMessageData<P::DispatchPayload, S::MessageFee>,
) -> bool {
) -> ReceivalResult {
let mut data = self.storage.data();
let is_correct_message = nonce == data.last_delivered_nonce() + 1;
if !is_correct_message {
return false;
return ReceivalResult::InvalidNonce;
}
// if there are more unrewarded relayer entries than we may accept, reject this message
if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() {
return false;
return ReceivalResult::TooManyUnrewardedRelayers;
}
// if there are more unconfirmed messages than we may accept, reject this message
let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce);
if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() {
return false;
return ReceivalResult::TooManyUnconfirmedMessages;
}
// dispatch message before updating anything in the storage. If dispatch would panic,
// (which should not happen in the runtime) then we simply won't consider message as
// delivered (no changes to the inbound lane storage have been made).
let dispatch_result = P::dispatch(
relayer_at_this_chain,
DispatchMessage {
key: MessageKey {
lane_id: self.storage.id(),
nonce,
},
data: message_data,
},
);
// now let's update inbound lane storage
let push_new = match data.relayers.back_mut() {
Some((_, nonce_high, last_relayer)) if last_relayer == &relayer => {
*nonce_high = nonce;
Some(entry) if entry.relayer == *relayer_at_bridged_chain => {
entry.messages.note_dispatched_message(dispatch_result.dispatch_result);
false
}
_ => true,
};
if push_new {
data.relayers.push_back((nonce, nonce, relayer));
data.relayers.push_back(UnrewardedRelayer {
relayer: (*relayer_at_bridged_chain).clone(),
messages: DeliveredMessages::new(nonce, dispatch_result.dispatch_result),
});
}
self.storage.set_data(data);
P::dispatch(DispatchMessage {
key: MessageKey {
lane_id: self.storage.id(),
nonce,
},
data: message_data,
});
true
ReceivalResult::Dispatched(dispatch_result)
}
}
@@ -144,8 +176,8 @@ mod tests {
use crate::{
inbound_lane,
mock::{
message_data, run_test, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A,
TEST_RELAYER_B, TEST_RELAYER_C,
dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, TestRuntime,
REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C,
},
DefaultInstance, RuntimeInboundLaneStorage,
};
@@ -154,11 +186,15 @@ mod tests {
lane: &mut InboundLane<RuntimeInboundLaneStorage<TestRuntime, DefaultInstance>>,
nonce: MessageNonce,
) {
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
nonce,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
nonce,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
}
#[test]
@@ -213,7 +249,10 @@ mod tests {
receive_regular_message(&mut lane, 2);
receive_regular_message(&mut lane, 3);
assert_eq!(lane.storage.data().last_confirmed_nonce, 0);
assert_eq!(lane.storage.data().relayers, vec![(1, 3, TEST_RELAYER_A)]);
assert_eq!(
lane.storage.data().relayers,
vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)]
);
assert_eq!(
lane.receive_state_update(OutboundLaneData {
@@ -223,7 +262,10 @@ mod tests {
Some(2),
);
assert_eq!(lane.storage.data().last_confirmed_nonce, 2);
assert_eq!(lane.storage.data().relayers, vec![(3, 3, TEST_RELAYER_A)]);
assert_eq!(
lane.storage.data().relayers,
vec![unrewarded_relayer(3, 3, TEST_RELAYER_A)]
);
assert_eq!(
lane.receive_state_update(OutboundLaneData {
@@ -244,10 +286,16 @@ mod tests {
let mut seed_storage_data = lane.storage.data();
// Prepare data
seed_storage_data.last_confirmed_nonce = 0;
seed_storage_data.relayers.push_back((1, 1, TEST_RELAYER_A));
seed_storage_data
.relayers
.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
// Simulate messages batch (2, 3, 4) from relayer #2
seed_storage_data.relayers.push_back((2, 4, TEST_RELAYER_B));
seed_storage_data.relayers.push_back((5, 5, TEST_RELAYER_C));
seed_storage_data
.relayers
.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
seed_storage_data
.relayers
.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
lane.storage.set_data(seed_storage_data);
// Check
assert_eq!(
@@ -260,7 +308,10 @@ mod tests {
assert_eq!(lane.storage.data().last_confirmed_nonce, 3);
assert_eq!(
lane.storage.data().relayers,
vec![(4, 4, TEST_RELAYER_B), (5, 5, TEST_RELAYER_C)]
vec![
unrewarded_relayer(4, 4, TEST_RELAYER_B),
unrewarded_relayer(5, 5, TEST_RELAYER_C)
]
);
});
}
@@ -269,11 +320,15 @@ mod tests {
fn fails_to_receive_message_with_incorrect_nonce() {
run_test(|| {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
assert!(!lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
10,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
10,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::InvalidNonce
);
assert_eq!(lane.storage.data().last_delivered_nonce(), 0);
});
}
@@ -284,29 +339,35 @@ mod tests {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
let max_nonce = <TestRuntime as crate::Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
for current_nonce in 1..max_nonce + 1 {
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A + current_nonce,
current_nonce,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&(TEST_RELAYER_A + current_nonce),
&(TEST_RELAYER_A + current_nonce),
current_nonce,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
}
// Fails to dispatch new message from different than latest relayer.
assert_eq!(
false,
lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A + max_nonce + 1,
lane.receive_message::<TestMessageDispatch, _>(
&(TEST_RELAYER_A + max_nonce + 1),
&(TEST_RELAYER_A + max_nonce + 1),
max_nonce + 1,
message_data(REGULAR_PAYLOAD).into()
)
),
ReceivalResult::TooManyUnrewardedRelayers,
);
// Fails to dispatch new messages from latest relayer. Prevents griefing attacks.
assert_eq!(
false,
lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A + max_nonce,
lane.receive_message::<TestMessageDispatch, _>(
&(TEST_RELAYER_A + max_nonce),
&(TEST_RELAYER_A + max_nonce),
max_nonce + 1,
message_data(REGULAR_PAYLOAD).into()
)
),
ReceivalResult::TooManyUnrewardedRelayers,
);
});
}
@@ -317,29 +378,35 @@ mod tests {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
let max_nonce = <TestRuntime as crate::Config>::MaxUnconfirmedMessagesAtInboundLane::get();
for current_nonce in 1..=max_nonce {
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
current_nonce,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
current_nonce,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
}
// Fails to dispatch new message from different than latest relayer.
assert_eq!(
false,
lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_B,
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_B,
&TEST_RELAYER_B,
max_nonce + 1,
message_data(REGULAR_PAYLOAD).into()
)
),
ReceivalResult::TooManyUnconfirmedMessages,
);
// Fails to dispatch new messages from latest relayer.
assert_eq!(
false,
lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
max_nonce + 1,
message_data(REGULAR_PAYLOAD).into()
)
),
ReceivalResult::TooManyUnconfirmedMessages,
);
});
}
@@ -348,24 +415,40 @@ mod tests {
fn correctly_receives_following_messages_from_two_relayers_alternately() {
run_test(|| {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
1,
message_data(REGULAR_PAYLOAD).into()
));
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_B,
2,
message_data(REGULAR_PAYLOAD).into()
));
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
3,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
1,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_B,
&TEST_RELAYER_B,
2,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
3,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
assert_eq!(
lane.storage.data().relayers,
vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B), (3, 3, TEST_RELAYER_A)]
vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B),
unrewarded_relayer(3, 3, TEST_RELAYER_A)
]
);
});
}
@@ -374,14 +457,23 @@ mod tests {
fn rejects_same_message_from_two_different_relayers() {
run_test(|| {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
assert!(lane.receive_message::<TestMessageDispatch>(
TEST_RELAYER_A,
1,
message_data(REGULAR_PAYLOAD).into()
));
assert_eq!(
false,
lane.receive_message::<TestMessageDispatch>(TEST_RELAYER_B, 1, message_data(REGULAR_PAYLOAD).into())
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
1,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::Dispatched(dispatch_result(0))
);
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_B,
&TEST_RELAYER_B,
1,
message_data(REGULAR_PAYLOAD).into()
),
ReceivalResult::InvalidNonce,
);
});
}
@@ -394,4 +486,22 @@ mod tests {
assert_eq!(lane.storage.data().last_delivered_nonce(), 1);
});
}
#[test]
fn unspent_weight_is_returned_by_receive_message() {
run_test(|| {
let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
let mut payload = REGULAR_PAYLOAD;
payload.dispatch_result.unspent_weight = 1;
assert_eq!(
lane.receive_message::<TestMessageDispatch, _>(
&TEST_RELAYER_A,
&TEST_RELAYER_A,
1,
message_data(payload).into()
),
ReceivalResult::Dispatched(dispatch_result(1))
);
});
}
}
+451 -108
View File
@@ -34,28 +34,34 @@
//! or some benchmarks assumptions are broken for your runtime.
#![cfg_attr(not(feature = "std"), no_std)]
// Generated by `decl_event!`
#![allow(clippy::unused_unit)]
pub use crate::weights_ext::{
ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt,
EXPECTED_DEFAULT_MESSAGE_LENGTH,
};
use crate::inbound_lane::{InboundLane, InboundLaneStorage};
use crate::outbound_lane::{OutboundLane, OutboundLaneStorage};
use crate::inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult};
use crate::outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult};
use crate::weights::WeightInfo;
use bp_messages::{
source_chain::{LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, TargetHeaderChain},
source_chain::{
LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, TargetHeaderChain,
},
target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
total_unrewarded_messages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, MessagePayload,
OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState,
total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce,
OperatingMode, OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState,
};
use bp_runtime::Size;
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage, ensure,
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResultWithPostInfo,
ensure, fail,
traits::Get,
weights::{DispatchClass, Weight},
weights::{DispatchClass, Pays, PostDispatchInfo, Weight},
Parameter, StorageMap,
};
use frame_system::{ensure_signed, RawOrigin};
@@ -142,13 +148,19 @@ pub trait Config<I = DefaultInstance>: frame_system::Config {
type LaneMessageVerifier: LaneMessageVerifier<Self::AccountId, Self::OutboundPayload, Self::OutboundMessageFee>;
/// Message delivery payment.
type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment<Self::AccountId, Self::OutboundMessageFee>;
/// Handler for delivered messages.
type OnDeliveryConfirmed: OnDeliveryConfirmed;
// Types that are used by inbound_lane (on target chain).
/// Source header chain, as it is represented on target chain.
type SourceHeaderChain: SourceHeaderChain<Self::InboundMessageFee>;
/// Message dispatch.
type MessageDispatch: MessageDispatch<Self::InboundMessageFee, DispatchPayload = Self::InboundPayload>;
type MessageDispatch: MessageDispatch<
Self::AccountId,
Self::InboundMessageFee,
DispatchPayload = Self::InboundPayload,
>;
}
/// Shortcut to messages proof type for Config.
@@ -178,6 +190,8 @@ decl_error! {
InvalidMessagesDispatchWeight,
/// Invalid messages delivery proof has been submitted.
InvalidMessagesDeliveryProof,
/// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane).
InvalidUnrewardedRelayers,
/// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call.
InvalidUnrewardedRelayersState,
/// The message someone is trying to work with (i.e. increase fee) is already-delivered.
@@ -196,8 +210,10 @@ decl_storage! {
/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
/// flag directly or call the `halt_operations`).
pub PalletOwner get(fn module_owner): Option<T::AccountId>;
/// If true, all pallet transactions are failed immediately.
pub IsHalted get(fn is_halted) config(): bool;
/// The current operating mode of the pallet.
///
/// Depending on the mode either all, some, or no transactions will be allowed.
pub PalletOperatingMode get(fn operating_mode) config(): OperatingMode;
/// Map of lane id => inbound lane data.
pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData<T::InboundRelayer>;
/// Map of lane id => outbound lane data.
@@ -226,8 +242,8 @@ decl_event!(
ParameterUpdated(Parameter),
/// Message has been accepted and is waiting to be delivered.
MessageAccepted(LaneId, MessageNonce),
/// Messages in the inclusive range have been delivered and processed by the bridged chain.
MessagesDelivered(LaneId, MessageNonce, MessageNonce),
/// Messages in the inclusive range have been delivered to the bridged chain.
MessagesDelivered(LaneId, DeliveredMessages),
/// Phantom member, never used.
Dummy(PhantomData<(AccountId, I)>),
}
@@ -264,19 +280,18 @@ decl_module! {
}
}
/// Halt or resume all pallet operations.
/// Halt or resume all/some pallet operations.
///
/// May only be called either by root, or by `PalletOwner`.
#[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
pub fn set_operational(origin, operational: bool) {
pub fn set_operating_mode(origin, operating_mode: OperatingMode) {
ensure_owner_or_root::<T, I>(origin)?;
<IsHalted<I>>::put(operational);
if operational {
log::info!(target: "runtime::bridge-messages", "Resuming pallet operations.");
} else {
log::warn!(target: "runtime::bridge-messages", "Stopping pallet operations.");
}
<PalletOperatingMode<I>>::put(operating_mode);
log::info!(
target: "runtime::bridge-messages",
"Setting messages pallet operating mode to {:?}.",
operating_mode,
);
}
/// Update pallet parameter.
@@ -299,7 +314,7 @@ decl_module! {
payload: T::OutboundPayload,
delivery_and_dispatch_fee: T::OutboundMessageFee,
) -> DispatchResult {
ensure_operational::<T, I>()?;
ensure_normal_operating_mode::<T, I>()?;
let submitter = origin.into().map_err(|_| BadOrigin)?;
// let's first check if message can be delivered to target chain
@@ -382,6 +397,7 @@ decl_module! {
nonce: MessageNonce,
additional_fee: T::OutboundMessageFee,
) -> DispatchResult {
ensure_not_halted::<T, I>()?;
// if someone tries to pay for already-delivered message, we're rejecting this intention
// (otherwise this additional fee will be locked forever in relayers fund)
//
@@ -434,13 +450,13 @@ decl_module! {
#[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)]
pub fn receive_messages_proof(
origin,
relayer_id: T::InboundRelayer,
relayer_id_at_bridged_chain: T::InboundRelayer,
proof: MessagesProofOf<T, I>,
messages_count: u32,
dispatch_weight: Weight,
) -> DispatchResult {
ensure_operational::<T, I>()?;
let _ = ensure_signed(origin)?;
) -> DispatchResultWithPostInfo {
ensure_not_halted::<T, I>()?;
let relayer_id_at_this_chain = ensure_signed(origin)?;
// reject transactions that are declaring too many messages
ensure!(
@@ -448,6 +464,23 @@ decl_module! {
Error::<T, I>::TooManyMessagesInTheProof
);
// why do we need to know the weight of this (`receive_messages_proof`) call? Because
// we may want to return some funds for not-dispatching (or partially dispatching) some
// messages to the call origin (relayer). And this is done by returning actual weight
// from the call. But we only know dispatch weight of every messages. So to refund relayer
// because we have not dispatched Message, we need to:
//
// ActualWeight = DeclaredWeight - Message.DispatchWeight
//
// The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible
// to get pre-computed value (and it has been already computed by the executive).
let declared_weight = T::WeightInfo::receive_messages_proof_weight(
&proof,
messages_count,
dispatch_weight,
);
let mut actual_weight = declared_weight;
// verify messages proof && convert proof into messages
let messages = verify_and_decode_messages_proof::<
T::SourceHeaderChain,
@@ -507,20 +540,57 @@ decl_module! {
debug_assert_eq!(message.key.lane_id, lane_id);
total_messages += 1;
if lane.receive_message::<T::MessageDispatch>(relayer_id.clone(), message.key.nonce, message.data) {
valid_messages += 1;
}
let dispatch_weight = T::MessageDispatch::dispatch_weight(&message);
let receival_result = lane.receive_message::<T::MessageDispatch, T::AccountId>(
&relayer_id_at_bridged_chain,
&relayer_id_at_this_chain,
message.key.nonce,
message.data,
);
// note that we're returning unspent weight to relayer even if message has been
// rejected by the lane. This allows relayers to submit spam transactions with
// e.g. the same set of already delivered messages over and over again, without
// losing funds for messages dispatch. But keep in mind that relayer pays base
// delivery transaction cost anyway. And base cost covers everything except
// dispatch, so we have a balance here.
let (unspent_weight, refund_pay_dispatch_fee) = match receival_result {
ReceivalResult::Dispatched(dispatch_result) => {
valid_messages += 1;
(dispatch_result.unspent_weight, !dispatch_result.dispatch_fee_paid_during_dispatch)
},
ReceivalResult::InvalidNonce
| ReceivalResult::TooManyUnrewardedRelayers
| ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true),
};
actual_weight = actual_weight
.saturating_sub(sp_std::cmp::min(unspent_weight, dispatch_weight))
.saturating_sub(
// delivery call weight formula assumes that the fee is paid at
// this (target) chain. If the message is prepaid at the source
// chain, let's refund relayer with this extra cost.
if refund_pay_dispatch_fee {
T::WeightInfo::pay_inbound_dispatch_fee_overhead()
} else {
0
}
);
}
}
log::trace!(
target: "runtime::bridge-messages",
"Received messages: total={}, valid={}",
"Received messages: total={}, valid={}. Weight used: {}/{}",
total_messages,
valid_messages,
actual_weight,
declared_weight,
);
Ok(())
Ok(PostDispatchInfo {
actual_weight: Some(actual_weight),
pays_fee: Pays::Yes,
})
}
/// Receive messages delivery proof from bridged chain.
@@ -530,7 +600,7 @@ decl_module! {
proof: MessagesDeliveryProofOf<T, I>,
relayers_state: UnrewardedRelayersState,
) -> DispatchResult {
ensure_operational::<T, I>()?;
ensure_not_halted::<T, I>()?;
let confirmation_relayer = ensure_signed(origin)?;
let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| {
@@ -556,19 +626,36 @@ decl_module! {
let mut lane = outbound_lane::<T, I>(lane_id);
let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new();
let last_delivered_nonce = lane_data.last_delivered_nonce();
let received_range = lane.confirm_delivery(last_delivered_nonce);
if let Some(received_range) = received_range {
Self::deposit_event(RawEvent::MessagesDelivered(lane_id, received_range.0, received_range.1));
let confirmed_messages = match lane.confirm_delivery(last_delivered_nonce, &lane_data.relayers) {
ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => Some(confirmed_messages),
ReceivalConfirmationResult::NoNewConfirmations => None,
error => {
log::trace!(
target: "runtime::bridge-messages",
"Messages delivery proof contains invalid unrewarded relayers vec: {:?}",
error,
);
fail!(Error::<T, I>::InvalidUnrewardedRelayers);
},
};
if let Some(confirmed_messages) = confirmed_messages {
// handle messages delivery confirmation
T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages);
// emit 'delivered' event
let received_range = confirmed_messages.begin..=confirmed_messages.end;
Self::deposit_event(RawEvent::MessagesDelivered(lane_id, confirmed_messages));
// remember to reward relayers that have delivered messages
// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain
for (nonce_low, nonce_high, relayer) in lane_data.relayers {
let nonce_begin = sp_std::cmp::max(nonce_low, received_range.0);
let nonce_end = sp_std::cmp::min(nonce_high, received_range.1);
for entry in lane_data.relayers {
let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start());
let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end());
// loop won't proceed if current entry is ahead of received range (begin > end).
// this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain
let mut relayer_reward = relayers_rewards.entry(relayer).or_default();
let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default();
for nonce in nonce_begin..nonce_end + 1 {
let message_data = OutboundMessages::<T, I>::get(MessageKey {
lane_id,
@@ -603,9 +690,9 @@ decl_module! {
}
impl<T: Config<I>, I: Instance> Pallet<T, I> {
/// Get payload of given outbound message.
pub fn outbound_message_payload(lane: LaneId, nonce: MessageNonce) -> Option<MessagePayload> {
OutboundMessages::<T, I>::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload)
/// Get stored data of the outbound message with given nonce.
pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option<MessageData<T::OutboundMessageFee>> {
OutboundMessages::<T, I>::get(MessageKey { lane_id: lane, nonce })
}
/// Get nonce of latest generated message at given outbound lane.
@@ -633,7 +720,10 @@ impl<T: Config<I>, I: Instance> Pallet<T, I> {
let relayers = InboundLanes::<T, I>::get(&lane).relayers;
bp_messages::UnrewardedRelayersState {
unrewarded_relayer_entries: relayers.len() as _,
messages_in_oldest_entry: relayers.front().map(|(begin, end, _)| 1 + end - begin).unwrap_or(0),
messages_in_oldest_entry: relayers
.front()
.map(|entry| 1 + entry.messages.end - entry.messages.begin)
.unwrap_or(0),
total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX),
}
}
@@ -665,24 +755,38 @@ impl<T: Config<I>, I: Instance> Pallet<T, I> {
/// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc.
pub mod storage_keys {
use super::*;
use frame_support::storage::generator::StorageMap;
use frame_support::{traits::Instance, StorageHasher};
use sp_core::storage::StorageKey;
/// Storage key of the outbound message in the runtime storage.
pub fn message_key<T: Config<I>, I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey {
let message_key = MessageKey { lane_id: *lane, nonce };
let raw_storage_key = OutboundMessages::<T, I>::storage_map_final_key(message_key);
StorageKey(raw_storage_key)
pub fn message_key<I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey {
storage_map_final_key::<I>("OutboundMessages", &MessageKey { lane_id: *lane, nonce }.encode())
}
/// Storage key of the outbound message lane state in the runtime storage.
pub fn outbound_lane_data_key<I: Instance>(lane: &LaneId) -> StorageKey {
StorageKey(OutboundLanes::<I>::storage_map_final_key(*lane))
storage_map_final_key::<I>("OutboundLanes", lane)
}
/// Storage key of the inbound message lane state in the runtime storage.
pub fn inbound_lane_data_key<T: Config<I>, I: Instance>(lane: &LaneId) -> StorageKey {
StorageKey(InboundLanes::<T, I>::storage_map_final_key(*lane))
pub fn inbound_lane_data_key<I: Instance>(lane: &LaneId) -> StorageKey {
storage_map_final_key::<I>("InboundLanes", lane)
}
/// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`.
///
/// NOTE(review): duplicated here, presumably to avoid the runtime `Config` bound that the
/// generator trait would require — confirm against the module-level comment above.
fn storage_map_final_key<I: Instance>(map_name: &str, key: &[u8]) -> StorageKey {
	// Final key layout: twox128(pallet prefix) ++ twox128(map name) ++ blake2_128_concat(key).
	let pallet_prefix_hashed = frame_support::Twox128::hash(I::PREFIX.as_bytes());
	let map_name_hashed = frame_support::Twox128::hash(map_name.as_bytes());
	let key_hashed = frame_support::Blake2_128Concat::hash(key);

	StorageKey([&pallet_prefix_hashed[..], &map_name_hashed[..], key_hashed.as_ref()].concat())
}
}
@@ -695,9 +799,18 @@ fn ensure_owner_or_root<T: Config<I>, I: Instance>(origin: T::Origin) -> Result<
}
}
/// Ensure that the pallet is in operational mode (not halted).
fn ensure_operational<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
if IsHalted::<I>::get() {
/// Ensure that the pallet is in normal operational mode.
fn ensure_normal_operating_mode<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
if PalletOperatingMode::<I>::get() != OperatingMode::Normal {
Err(Error::<T, I>::Halted)
} else {
Ok(())
}
}
/// Ensure that the pallet is not halted.
fn ensure_not_halted<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
if PalletOperatingMode::<I>::get() == OperatingMode::Halted {
Err(Error::<T, I>::Halted)
} else {
Ok(())
@@ -847,12 +960,12 @@ fn verify_and_decode_messages_proof<Chain: SourceHeaderChain<Fee>, Fee, Dispatch
mod tests {
use super::*;
use crate::mock::{
message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment,
TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, TestPayload, TestRuntime,
TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A,
TEST_RELAYER_B,
message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin,
TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof,
TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID,
TEST_RELAYER_A, TEST_RELAYER_B,
};
use bp_messages::UnrewardedRelayersState;
use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState};
use frame_support::{assert_noop, assert_ok};
use frame_system::{EventRecord, Pallet as System, Phase};
use hex_literal::hex;
@@ -866,11 +979,15 @@ mod tests {
fn send_regular_message() {
get_ready_for_events();
let message_nonce = outbound_lane::<TestRuntime, DefaultInstance>(TEST_LANE_ID)
.data()
.latest_generated_nonce
+ 1;
assert_ok!(Pallet::<TestRuntime>::send_message(
Origin::signed(1),
TEST_LANE_ID,
REGULAR_PAYLOAD,
REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight,
));
// check event with assigned nonce
@@ -878,13 +995,16 @@ mod tests {
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, 1)),
event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, message_nonce)),
topics: vec![],
}],
);
// check that fee has been withdrawn from submitter
assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, REGULAR_PAYLOAD.1));
assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(
1,
REGULAR_PAYLOAD.declared_weight
));
}
fn receive_messages_delivery_proof() {
@@ -897,17 +1017,29 @@ mod tests {
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 1,
..Default::default()
relayers: vec![UnrewardedRelayer {
relayer: 0,
messages: DeliveredMessages::new(1, true),
}]
.into_iter()
.collect(),
},
))),
Default::default(),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 1,
..Default::default()
},
));
assert_eq!(
System::<TestRuntime>::events(),
vec![EventRecord {
phase: Phase::Initialization,
event: TestEvent::Messages(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)),
event: TestEvent::Messages(RawEvent::MessagesDelivered(
TEST_LANE_ID,
DeliveredMessages::new(1, true),
)),
topics: vec![],
}],
);
@@ -920,29 +1052,41 @@ mod tests {
assert_ok!(Pallet::<TestRuntime>::set_owner(Origin::root(), Some(1)));
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(2), false),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(2), OperatingMode::Halted),
DispatchError::BadOrigin,
);
assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::root(), false));
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
Origin::root(),
OperatingMode::Halted
));
assert_ok!(Pallet::<TestRuntime>::set_owner(Origin::signed(1), None));
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(1), true),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(1), OperatingMode::Normal),
DispatchError::BadOrigin,
);
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(2), true),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(2), OperatingMode::Normal),
DispatchError::BadOrigin,
);
assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::root(), true));
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
Origin::root(),
OperatingMode::Normal
));
});
}
#[test]
fn pallet_may_be_halted_by_root() {
	run_test(|| {
		// Root may flip the operating mode in both directions, even without `PalletOwner` set.
		// (The stale `set_operational` calls from before the operating-mode refactoring have
		// been removed — that extrinsic no longer exists.)
		assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
			Origin::root(),
			OperatingMode::Halted
		));
		assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
			Origin::root(),
			OperatingMode::Normal
		));
	});
}
@@ -951,21 +1095,30 @@ mod tests {
run_test(|| {
PalletOwner::<TestRuntime>::put(2);
assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::signed(2), false));
assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::signed(2), true));
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
Origin::signed(2),
OperatingMode::Halted
));
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
Origin::signed(2),
OperatingMode::Normal
));
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(1), false),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(1), OperatingMode::Halted),
DispatchError::BadOrigin,
);
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(1), true),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(1), OperatingMode::Normal),
DispatchError::BadOrigin,
);
assert_ok!(Pallet::<TestRuntime>::set_operational(Origin::signed(2), false));
assert_ok!(Pallet::<TestRuntime>::set_operating_mode(
Origin::signed(2),
OperatingMode::Halted
));
assert_noop!(
Pallet::<TestRuntime>::set_operational(Origin::signed(1), true),
Pallet::<TestRuntime>::set_operating_mode(Origin::signed(1), OperatingMode::Normal),
DispatchError::BadOrigin,
);
});
@@ -1072,25 +1225,30 @@ mod tests {
// send message first to be able to check that delivery_proof fails later
send_regular_message();
IsHalted::<DefaultInstance>::put(true);
PalletOperatingMode::<DefaultInstance>::put(OperatingMode::Halted);
assert_noop!(
Pallet::<TestRuntime>::send_message(
Origin::signed(1),
TEST_LANE_ID,
REGULAR_PAYLOAD,
REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight,
),
Error::<TestRuntime, DefaultInstance>::Halted,
);
assert_noop!(
Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1,),
Error::<TestRuntime, DefaultInstance>::Halted,
);
assert_noop!(
Pallet::<TestRuntime>::receive_messages_proof(
Origin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(2, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight,
),
Error::<TestRuntime, DefaultInstance>::Halted,
);
@@ -1112,6 +1270,53 @@ mod tests {
});
}
#[test]
// In `RejectingOutboundMessages` mode only `send_message` must fail; every inbound-facing
// call (fee increase, message delivery, delivery confirmation) must keep working.
fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() {
run_test(|| {
// send message first to be able to check that delivery_proof fails later
send_regular_message();
PalletOperatingMode::<DefaultInstance>::put(OperatingMode::RejectingOutboundMessages);
// outbound: sending a new message is rejected with the generic `Halted` error
assert_noop!(
Pallet::<TestRuntime>::send_message(
Origin::signed(1),
TEST_LANE_ID,
REGULAR_PAYLOAD,
REGULAR_PAYLOAD.declared_weight,
),
Error::<TestRuntime, DefaultInstance>::Halted,
);
// increasing fee of the already-sent message is still allowed
assert_ok!(Pallet::<TestRuntime>::increase_message_fee(
Origin::signed(1),
TEST_LANE_ID,
1,
1,
));
// inbound message delivery is still allowed
assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
Origin::signed(1),
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.declared_weight,
),);
// confirming delivery of the outbound message sent above is still allowed
assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
Origin::signed(1),
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 1,
..Default::default()
},
))),
Default::default(),
));
});
}
#[test]
fn send_message_works() {
run_test(|| {
@@ -1128,7 +1333,7 @@ mod tests {
Origin::signed(1),
TEST_LANE_ID,
PAYLOAD_REJECTED_BY_TARGET_CHAIN,
PAYLOAD_REJECTED_BY_TARGET_CHAIN.1
PAYLOAD_REJECTED_BY_TARGET_CHAIN.declared_weight
),
Error::<TestRuntime, DefaultInstance>::MessageRejectedByChainVerifier,
);
@@ -1155,7 +1360,7 @@ mod tests {
Origin::signed(1),
TEST_LANE_ID,
REGULAR_PAYLOAD,
REGULAR_PAYLOAD.1
REGULAR_PAYLOAD.declared_weight
),
Error::<TestRuntime, DefaultInstance>::FailedToWithdrawMessageFee,
);
@@ -1170,7 +1375,7 @@ mod tests {
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight,
));
assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 1);
@@ -1185,9 +1390,12 @@ mod tests {
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 8,
relayers: vec![(9, 9, TEST_RELAYER_A), (10, 10, TEST_RELAYER_B)]
.into_iter()
.collect(),
relayers: vec![
unrewarded_relayer(9, 9, TEST_RELAYER_A),
unrewarded_relayer(10, 10, TEST_RELAYER_B),
]
.into_iter()
.collect(),
},
);
assert_eq!(
@@ -1211,16 +1419,19 @@ mod tests {
TEST_RELAYER_A,
message_proof,
1,
REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight,
));
assert_eq!(
InboundLanes::<TestRuntime>::get(TEST_LANE_ID),
InboundLaneData {
last_confirmed_nonce: 9,
relayers: vec![(10, 10, TEST_RELAYER_B), (11, 11, TEST_RELAYER_A)]
.into_iter()
.collect(),
relayers: vec![
unrewarded_relayer(10, 10, TEST_RELAYER_B),
unrewarded_relayer(11, 11, TEST_RELAYER_A)
]
.into_iter()
.collect(),
},
);
assert_eq!(
@@ -1243,7 +1454,7 @@ mod tests {
TEST_RELAYER_A,
Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
1,
REGULAR_PAYLOAD.1 - 1,
REGULAR_PAYLOAD.declared_weight - 1,
),
Error::<TestRuntime, DefaultInstance>::InvalidMessagesDispatchWeight,
);
@@ -1317,7 +1528,7 @@ mod tests {
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![(1, 1, TEST_RELAYER_A)].into_iter().collect(),
relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
..Default::default()
}
))),
@@ -1342,9 +1553,12 @@ mod tests {
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)]
.into_iter()
.collect(),
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
@@ -1389,9 +1603,12 @@ mod tests {
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)]
.into_iter()
.collect(),
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
@@ -1411,9 +1628,12 @@ mod tests {
TestMessagesDeliveryProof(Ok((
TEST_LANE_ID,
InboundLaneData {
relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)]
.into_iter()
.collect(),
relayers: vec![
unrewarded_relayer(1, 1, TEST_RELAYER_A),
unrewarded_relayer(2, 2, TEST_RELAYER_B)
]
.into_iter()
.collect(),
..Default::default()
}
))),
@@ -1465,7 +1685,7 @@ mod tests {
])
.into(),
3,
REGULAR_PAYLOAD.1 + REGULAR_PAYLOAD.1,
REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight,
),);
assert_eq!(
@@ -1479,7 +1699,7 @@ mod tests {
fn storage_message_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking all
// previously crafted messages proofs.
let storage_key = storage_keys::message_key::<TestRuntime, DefaultInstance>(&*b"test", 42).0;
let storage_key = storage_keys::message_key::<DefaultInstance>(&*b"test", 42).0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(),
@@ -1505,7 +1725,7 @@ mod tests {
fn inbound_lane_data_key_computed_properly() {
// If this test fails, then something has been changed in module storage that is breaking all
// previously crafted inbound lane state proofs.
let storage_key = storage_keys::inbound_lane_data_key::<TestRuntime, DefaultInstance>(&*b"test").0;
let storage_key = storage_keys::inbound_lane_data_key::<DefaultInstance>(&*b"test").0;
assert_eq!(
storage_key,
hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(),
@@ -1517,9 +1737,9 @@ mod tests {
#[test]
fn actual_dispatch_weight_does_not_overlow() {
run_test(|| {
let message1 = message(1, TestPayload(0, Weight::MAX / 2));
let message2 = message(2, TestPayload(0, Weight::MAX / 2));
let message3 = message(2, TestPayload(0, Weight::MAX / 2));
let message1 = message(1, message_payload(0, Weight::MAX / 2));
let message2 = message(2, message_payload(0, Weight::MAX / 2));
let message3 = message(2, message_payload(0, Weight::MAX / 2));
assert_noop!(
Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
@@ -1586,4 +1806,127 @@ mod tests {
assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, 100));
});
}
#[test]
// Checks that `receive_messages_proof` refunds the relayer (via `PostDispatchInfo::actual_weight`)
// for unspent dispatch weight and, when the message was prepaid at the source chain, for the
// pay-dispatch-fee overhead as well.
fn weight_refund_from_receive_messages_proof_works() {
run_test(|| {
// Delivers a single message whose mock dispatch reports `unspent_weight` and returns
// `(pre_dispatch_weight, post_dispatch_weight)` — i.e. (declared weight, actual weight).
// `is_prepaid == true` marks the dispatch fee as paid at the source chain.
fn submit_with_unspent_weight(
nonce: MessageNonce,
unspent_weight: Weight,
is_prepaid: bool,
) -> (Weight, Weight) {
let mut payload = REGULAR_PAYLOAD;
payload.dispatch_result.unspent_weight = unspent_weight;
payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid;
let proof = Ok(vec![message(nonce, payload)]).into();
let messages_count = 1;
// declared (pre-dispatch) weight, as the weight formula computes it
let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
&proof,
messages_count,
REGULAR_PAYLOAD.declared_weight,
);
let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_proof(
Origin::signed(1),
TEST_RELAYER_A,
proof,
messages_count,
REGULAR_PAYLOAD.declared_weight,
)
.expect("delivery has failed")
.actual_weight
.expect("receive_messages_proof always returns Some");
(pre_dispatch_weight, post_dispatch_weight)
}
// when dispatch is returning `unspent_weight < declared_weight`
let (pre, post) = submit_with_unspent_weight(1, 1, false);
assert_eq!(post, pre - 1);
// when dispatch is returning `unspent_weight = declared_weight`
let (pre, post) = submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight, false);
assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
// when dispatch is returning `unspent_weight > declared_weight`
// (refund is capped at the declared weight — see `sp_std::cmp::min` in the pallet)
let (pre, post) = submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false);
assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
// when there's no unspent weight
let (pre, post) = submit_with_unspent_weight(4, 0, false);
assert_eq!(post, pre);
// when dispatch is returning `unspent_weight < declared_weight` AND message is prepaid
// (the pay-inbound-dispatch-fee overhead is refunded on top of the unspent weight)
let (pre, post) = submit_with_unspent_weight(5, 1, true);
assert_eq!(
post,
pre - 1 - <TestRuntime as Config>::WeightInfo::pay_inbound_dispatch_fee_overhead()
);
});
}
#[test]
// Checks that every `OnDeliveryConfirmed` callback in the configured tuple is invoked once
// per accepted delivery-confirmation transaction, with the exact `DeliveredMessages` set.
fn messages_delivered_callbacks_are_called() {
run_test(|| {
// generate three outbound messages (nonces 1..=3)
send_regular_message();
send_regular_message();
send_regular_message();
// messages 1+2 are confirmed in 1 tx, message 3 in a separate tx
// dispatch of message 2 has failed
let mut delivered_messages_1_and_2 = DeliveredMessages::new(1, true);
delivered_messages_1_and_2.note_dispatched_message(false);
let messages_1_and_2_proof = Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 0,
relayers: vec![UnrewardedRelayer {
relayer: 0,
messages: delivered_messages_1_and_2.clone(),
}]
.into_iter()
.collect(),
},
));
let delivered_message_3 = DeliveredMessages::new(3, true);
let messages_3_proof = Ok((
TEST_LANE_ID,
InboundLaneData {
last_confirmed_nonce: 0,
relayers: vec![UnrewardedRelayer {
relayer: 0,
messages: delivered_message_3.clone(),
}]
.into_iter()
.collect(),
},
));
// first tx with messages 1+2
assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
Origin::signed(1),
TestMessagesDeliveryProof(messages_1_and_2_proof),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 2,
..Default::default()
},
));
// second tx with message 3
assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
Origin::signed(1),
TestMessagesDeliveryProof(messages_3_proof),
UnrewardedRelayersState {
unrewarded_relayer_entries: 1,
total_messages: 1,
..Default::default()
},
));
// ensure that both callbacks have been called twice: for 1+2, then for 3
crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3);
crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3);
});
}
}
+109 -10
View File
@@ -19,15 +19,17 @@
use crate::Config;
use bitvec::prelude::*;
use bp_messages::{
source_chain::{
LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, Sender, TargetHeaderChain,
LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, Sender,
TargetHeaderChain,
},
target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData,
Parameter as MessagesParameter,
DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData,
Parameter as MessagesParameter, UnrewardedRelayer,
};
use bp_runtime::Size;
use bp_runtime::{messages::MessageDispatchResult, Size};
use codec::{Decode, Encode};
use frame_support::{parameter_types, weights::Weight};
use sp_core::H256;
@@ -41,7 +43,17 @@ use std::collections::BTreeMap;
pub type AccountId = u64;
pub type Balance = u64;
#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq)]
pub struct TestPayload(pub u64, pub Weight);
/// Message payload used by the mock runtime: carries both the weight the sender declares
/// and the dispatch result the mock dispatcher will report back.
pub struct TestPayload {
/// Field that may be used to identify messages.
pub id: u64,
/// Dispatch weight that is declared by the message sender.
pub declared_weight: Weight,
/// Message dispatch result.
///
/// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, but for test
/// purposes we'll be making it larger than `declared_weight` sometimes.
pub dispatch_result: MessageDispatchResult,
}
pub type TestMessageFee = u64;
pub type TestRelayer = u64;
@@ -115,6 +127,8 @@ impl pallet_balances::Config for TestRuntime {
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = frame_system::Pallet<TestRuntime>;
type WeightInfo = ();
type MaxReserves = ();
type ReserveIdentifier = ();
}
parameter_types! {
@@ -157,6 +171,7 @@ impl Config for TestRuntime {
type TargetHeaderChain = TestTargetHeaderChain;
type LaneMessageVerifier = TestLaneMessageVerifier;
type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment;
type OnDeliveryConfirmed = (TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2);
type SourceHeaderChain = TestSourceHeaderChain;
type MessageDispatch = TestMessageDispatch;
@@ -187,10 +202,10 @@ pub const TEST_ERROR: &str = "Test error";
pub const TEST_LANE_ID: LaneId = [0, 0, 0, 1];
/// Regular message payload.
pub const REGULAR_PAYLOAD: TestPayload = TestPayload(0, 50);
pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50);
/// Payload that is rejected by `TestTargetHeaderChain`.
pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = TestPayload(1, 50);
pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50);
/// Vec of proved messages, grouped by lane.
pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages<Message<TestMessageFee>>)>;
@@ -333,6 +348,44 @@ impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessag
}
}
/// First on-messages-delivered callback.
///
/// Records each invocation in unhashed storage under a key derived from the callback name,
/// lane and delivered-messages set, so tests can later assert exactly what was reported.
#[derive(Debug)]
pub struct TestOnDeliveryConfirmed1;
impl TestOnDeliveryConfirmed1 {
/// Verify that the callback has been called with given delivered messages.
pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) {
// key must be built exactly as in `on_messages_delivered` below
let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode();
assert_eq!(frame_support::storage::unhashed::get(&key), Some(true));
}
}
impl OnDeliveryConfirmed for TestOnDeliveryConfirmed1 {
fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) {
let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode();
frame_support::storage::unhashed::put(&key, &true);
}
}
/// Second on-messages-delivered callback.
///
/// Same storage-marker scheme as `TestOnDeliveryConfirmed1`, under a distinct key tag, so
/// tests can verify that every member of the callback tuple is invoked.
#[derive(Debug)]
pub struct TestOnDeliveryConfirmed2;
impl TestOnDeliveryConfirmed2 {
/// Verify that the callback has been called with given delivered messages.
pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) {
// key must be built exactly as in `on_messages_delivered` below
let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode();
assert_eq!(frame_support::storage::unhashed::get(&key), Some(true));
}
}
impl OnDeliveryConfirmed for TestOnDeliveryConfirmed2 {
fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) {
let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode();
frame_support::storage::unhashed::put(&key, &true);
}
}
/// Source header chain that is used in tests.
#[derive(Debug)]
pub struct TestSourceHeaderChain;
@@ -357,17 +410,25 @@ impl SourceHeaderChain<TestMessageFee> for TestSourceHeaderChain {
#[derive(Debug)]
pub struct TestMessageDispatch;
impl MessageDispatch<TestMessageFee> for TestMessageDispatch {
impl MessageDispatch<AccountId, TestMessageFee> for TestMessageDispatch {
type DispatchPayload = TestPayload;
fn dispatch_weight(message: &DispatchMessage<TestPayload, TestMessageFee>) -> Weight {
match message.data.payload.as_ref() {
Ok(payload) => payload.1,
Ok(payload) => payload.declared_weight,
Err(_) => 0,
}
}
fn dispatch(_message: DispatchMessage<TestPayload, TestMessageFee>) {}
fn dispatch(
_relayer_account: &AccountId,
message: DispatchMessage<TestPayload, TestMessageFee>,
) -> MessageDispatchResult {
match message.data.payload.as_ref() {
Ok(payload) => payload.dispatch_result.clone(),
Err(_) => dispatch_result(0),
}
}
}
/// Return test lane message with given nonce and payload.
@@ -381,6 +442,15 @@ pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message<TestMessage
}
}
/// Constructs message payload using given arguments and zero unspent weight.
pub const fn message_payload(id: u64, declared_weight: Weight) -> TestPayload {
TestPayload {
id,
declared_weight,
dispatch_result: dispatch_result(0),
}
}
/// Return message data with valid fee for given payload.
pub fn message_data(payload: TestPayload) -> MessageData<TestMessageFee> {
MessageData {
@@ -389,6 +459,35 @@ pub fn message_data(payload: TestPayload) -> MessageData<TestMessageFee> {
}
}
/// Returns message dispatch result with given unspent weight.
pub const fn dispatch_result(unspent_weight: Weight) -> MessageDispatchResult {
MessageDispatchResult {
dispatch_result: true,
unspent_weight,
dispatch_fee_paid_during_dispatch: true,
}
}
/// Constructs unrewarded relayer entry from nonces range and relayer id.
pub fn unrewarded_relayer(
begin: MessageNonce,
end: MessageNonce,
relayer: TestRelayer,
) -> UnrewardedRelayer<TestRelayer> {
UnrewardedRelayer {
relayer,
messages: DeliveredMessages {
begin,
end,
dispatch_results: if end >= begin {
bitvec![Msb0, u8; 1; (end - begin + 1) as _]
} else {
Default::default()
},
},
}
}
/// Run pallet test.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
let mut t = frame_system::GenesisConfig::default()
@@ -16,7 +16,12 @@
//! Everything about outgoing messages sending.
use bp_messages::{LaneId, MessageData, MessageNonce, OutboundLaneData};
use bitvec::prelude::*;
use bp_messages::{
DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer,
};
use frame_support::RuntimeDebug;
use sp_std::collections::vec_deque::VecDeque;
/// Outbound lane storage.
pub trait OutboundLaneStorage {
@@ -38,6 +43,28 @@ pub trait OutboundLaneStorage {
fn remove_message(&mut self, nonce: &MessageNonce);
}
/// Result of messages receival confirmation.
#[derive(RuntimeDebug, PartialEq, Eq)]
pub enum ReceivalConfirmationResult {
/// New messages have been confirmed by the confirmation transaction.
ConfirmedMessages(DeliveredMessages),
/// Confirmation transaction brings no new confirmation. This may be a result of relayer
/// error or several relayers runnng.
NoNewConfirmations,
/// Bridged chain is trying to confirm more messages than we have generated. May be a result
/// of invalid bridged chain storage.
FailedToConfirmFutureMessages,
/// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged
/// chain storage.
EmptyUnrewardedRelayerEntry,
/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid bridged
/// chain storage.
NonConsecutiveUnrewardedRelayerEntries,
/// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May be
/// a result of invalid bridged chain storage.
InvalidNumberOfDispatchResults,
}
/// Outbound messages lane.
pub struct OutboundLane<S> {
storage: S,
@@ -69,20 +96,34 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
}
/// Confirm messages delivery.
///
/// Returns `None` if confirmation is wrong/duplicate.
/// Returns `Some` with inclusive ranges of message nonces that have been received.
pub fn confirm_delivery(&mut self, latest_received_nonce: MessageNonce) -> Option<(MessageNonce, MessageNonce)> {
pub fn confirm_delivery<RelayerId>(
&mut self,
latest_received_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
) -> ReceivalConfirmationResult {
let mut data = self.storage.data();
if latest_received_nonce <= data.latest_received_nonce || latest_received_nonce > data.latest_generated_nonce {
return None;
if latest_received_nonce <= data.latest_received_nonce {
return ReceivalConfirmationResult::NoNewConfirmations;
}
if latest_received_nonce > data.latest_generated_nonce {
return ReceivalConfirmationResult::FailedToConfirmFutureMessages;
}
let dispatch_results =
match extract_dispatch_results(data.latest_received_nonce, latest_received_nonce, relayers) {
Ok(dispatch_results) => dispatch_results,
Err(extract_error) => return extract_error,
};
let prev_latest_received_nonce = data.latest_received_nonce;
data.latest_received_nonce = latest_received_nonce;
self.storage.set_data(data);
Some((prev_latest_received_nonce + 1, latest_received_nonce))
ReceivalConfirmationResult::ConfirmedMessages(DeliveredMessages {
begin: prev_latest_received_nonce + 1,
end: latest_received_nonce,
dispatch_results,
})
}
/// Prune at most `max_messages_to_prune` already received messages.
@@ -108,13 +149,108 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
}
}
/// Extract new dispatch results from the unrewarded relayers vec.
///
/// Returns `Err(_)` if unrewarded relayers vec contains invalid data, meaning that the bridged
/// chain has invalid runtime storage.
fn extract_dispatch_results<RelayerId>(
prev_latest_received_nonce: MessageNonce,
latest_received_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
) -> Result<DispatchResultsBitVec, ReceivalConfirmationResult> {
// the only caller of this functions checks that the prev_latest_received_nonce..=latest_received_nonce
// is valid, so we're ready to accept messages in this range
// => with_capacity call must succeed here or we'll be unable to receive confirmations at all
let mut received_dispatch_result =
BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _);
let mut last_entry_end: Option<MessageNonce> = None;
for entry in relayers {
// unrewarded relayer entry must have at least 1 unconfirmed message
// (guaranteed by the `InboundLane::receive_message()`)
if entry.messages.end < entry.messages.begin {
return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry);
}
// every entry must confirm range of messages that follows previous entry range
// (guaranteed by the `InboundLane::receive_message()`)
if let Some(last_entry_end) = last_entry_end {
let expected_entry_begin = last_entry_end.checked_add(1);
if expected_entry_begin != Some(entry.messages.begin) {
return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries);
}
}
last_entry_end = Some(entry.messages.end);
// entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()`
// (guaranteed by the `InboundLane::receive_message()`)
if entry.messages.end > latest_received_nonce {
// technically this will be detected in the next loop iteration as `InvalidNumberOfDispatchResults`
// but to guarantee safety of loop operations below this is detected now
return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages);
}
// entry must have single dispatch result for every message
// (guaranteed by the `InboundLane::receive_message()`)
if entry.messages.dispatch_results.len() as MessageNonce != entry.messages.end - entry.messages.begin + 1 {
return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults);
}
// now we know that the entry is valid
// => let's check if it brings new confirmations
let new_messages_begin = sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1);
let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce);
let new_messages_range = new_messages_begin..=new_messages_end;
if new_messages_range.is_empty() {
continue;
}
// now we know that entry brings new confirmations
// => let's extract dispatch results
received_dispatch_result.extend_from_bitslice(
&entry.messages.dispatch_results[(new_messages_begin - entry.messages.begin) as usize..],
);
}
Ok(received_dispatch_result)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
mock::{message_data, run_test, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID},
mock::{message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID},
outbound_lane,
};
use sp_std::ops::RangeInclusive;
fn unrewarded_relayers(nonces: RangeInclusive<MessageNonce>) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)]
.into_iter()
.collect()
}
fn delivered_messages(nonces: RangeInclusive<MessageNonce>) -> DeliveredMessages {
DeliveredMessages {
begin: *nonces.start(),
end: *nonces.end(),
dispatch_results: bitvec![Msb0, u8; 1; (nonces.end() - nonces.start() + 1) as _],
}
}
fn assert_3_messages_confirmation_fails(
latest_received_nonce: MessageNonce,
relayers: &VecDeque<UnrewardedRelayer<TestRelayer>>,
) -> ReceivalConfirmationResult {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
lane.send_message(message_data(REGULAR_PAYLOAD));
lane.send_message(message_data(REGULAR_PAYLOAD));
lane.send_message(message_data(REGULAR_PAYLOAD));
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
let result = lane.confirm_delivery(latest_received_nonce, relayers);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
result
})
}
#[test]
fn send_message_works() {
@@ -136,7 +272,10 @@ mod tests {
assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 3);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
assert_eq!(lane.confirm_delivery(3), Some((1, 3)));
assert_eq!(
lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)),
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
});
@@ -151,12 +290,21 @@ mod tests {
lane.send_message(message_data(REGULAR_PAYLOAD));
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
assert_eq!(lane.confirm_delivery(3), Some((1, 3)));
assert_eq!(lane.confirm_delivery(3), None);
assert_eq!(
lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)),
);
assert_eq!(
lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
ReceivalConfirmationResult::NoNewConfirmations,
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
assert_eq!(lane.confirm_delivery(2), None);
assert_eq!(
lane.confirm_delivery(2, &unrewarded_relayers(1..=1)),
ReceivalConfirmationResult::NoNewConfirmations,
);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 3);
});
@@ -164,17 +312,70 @@ mod tests {
#[test]
fn confirm_delivery_rejects_nonce_larger_than_last_generated() {
run_test(|| {
let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
lane.send_message(message_data(REGULAR_PAYLOAD));
lane.send_message(message_data(REGULAR_PAYLOAD));
lane.send_message(message_data(REGULAR_PAYLOAD));
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
assert_eq!(lane.confirm_delivery(10), None);
assert_eq!(lane.storage.data().latest_generated_nonce, 3);
assert_eq!(lane.storage.data().latest_received_nonce, 0);
});
assert_eq!(
assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),),
ReceivalConfirmationResult::FailedToConfirmFutureMessages,
);
}
#[test]
fn confirm_delivery_fails_if_entry_confirms_future_messages() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(2..=30).into_iter())
.chain(unrewarded_relayers(3..=3).into_iter())
.collect(),
),
ReceivalConfirmationResult::FailedToConfirmFutureMessages,
);
}
#[test]
#[allow(clippy::reversed_empty_ranges)]
fn confirm_delivery_fails_if_entry_is_empty() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(2..=1).into_iter())
.chain(unrewarded_relayers(2..=3).into_iter())
.collect(),
),
ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry,
);
}
#[test]
fn confirm_delivery_fails_if_entries_are_non_consecutive() {
assert_eq!(
assert_3_messages_confirmation_fails(
3,
&unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(3..=3).into_iter())
.chain(unrewarded_relayers(2..=2).into_iter())
.collect(),
),
ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries,
);
}
#[test]
fn confirm_delivery_fails_if_number_of_dispatch_results_in_entry_is_invalid() {
let mut relayers: VecDeque<_> = unrewarded_relayers(1..=1)
.into_iter()
.chain(unrewarded_relayers(2..=2).into_iter())
.chain(unrewarded_relayers(3..=3).into_iter())
.collect();
relayers[0].messages.dispatch_results.clear();
assert_eq!(
assert_3_messages_confirmation_fails(3, &relayers),
ReceivalConfirmationResult::InvalidNumberOfDispatchResults,
);
}
#[test]
@@ -191,11 +392,17 @@ mod tests {
assert_eq!(lane.prune_messages(100), 0);
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
// after confirmation, some messages are received
assert_eq!(lane.confirm_delivery(2), Some((1, 2)));
assert_eq!(
lane.confirm_delivery(2, &unrewarded_relayers(1..=2)),
ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=2)),
);
assert_eq!(lane.prune_messages(100), 2);
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3);
// after last message is confirmed, everything is pruned
assert_eq!(lane.confirm_delivery(3), Some((3, 3)));
assert_eq!(
lane.confirm_delivery(3, &unrewarded_relayers(3..=3)),
ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(3..=3)),
);
assert_eq!(lane.prune_messages(100), 1);
assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
});
@@ -17,7 +17,7 @@
//! Autogenerated weights for pallet_bridge_messages
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
//! DATE: 2021-04-21, STEPS: [50, ], REPEAT: 20
//! DATE: 2021-06-18, STEPS: [50, ], REPEAT: 20
//! LOW RANGE: [], HIGH RANGE: []
//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled
//! CHAIN: Some("dev"), DB CACHE: 128
@@ -57,6 +57,7 @@ pub trait WeightInfo {
fn receive_single_message_proof_with_outbound_lane_state() -> Weight;
fn receive_single_message_proof_1_kb() -> Weight;
fn receive_single_message_proof_16_kb() -> Weight;
fn receive_single_prepaid_message_proof() -> Weight;
fn receive_delivery_proof_for_single_message() -> Weight;
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight;
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight;
@@ -73,105 +74,110 @@ pub trait WeightInfo {
pub struct RialtoWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
fn send_minimal_message_worst_case() -> Weight {
(149_643_000 as Weight)
(159_305_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(12 as Weight))
}
fn send_1_kb_message_worst_case() -> Weight {
(153_329_000 as Weight)
(164_394_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(12 as Weight))
}
fn send_16_kb_message_worst_case() -> Weight {
(200_113_000 as Weight)
(223_521_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(12 as Weight))
}
fn increase_message_fee() -> Weight {
(6_407_252_000 as Weight)
.saturating_add(T::DbWeight::get().reads(4 as Weight))
(6_709_925_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof() -> Weight {
(141_256_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(206_769_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_two_messages_proof() -> Weight {
(247_723_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(343_982_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_with_outbound_lane_state() -> Weight {
(159_731_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(223_738_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_1_kb() -> Weight {
(168_546_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
(235_369_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_16_kb() -> Weight {
(450_087_000 as Weight)
(510_338_000 as Weight)
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_single_prepaid_message_proof() -> Weight {
(141_536_000 as Weight)
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
}
fn receive_delivery_proof_for_single_message() -> Weight {
(164_519_000 as Weight)
(128_805_000 as Weight)
.saturating_add(T::DbWeight::get().reads(6 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
(173_300_000 as Weight)
(137_143_000 as Weight)
.saturating_add(T::DbWeight::get().reads(7 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
(246_205_000 as Weight)
(193_108_000 as Weight)
.saturating_add(T::DbWeight::get().reads(8 as Weight))
.saturating_add(T::DbWeight::get().writes(4 as Weight))
}
fn send_messages_of_various_lengths(i: u32) -> Weight {
(149_551_000 as Weight)
.saturating_add((3_000 as Weight).saturating_mul(i as Weight))
(133_632_000 as Weight)
.saturating_add((4_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(12 as Weight))
}
fn receive_multiple_messages_proof(i: u32) -> Weight {
(0 as Weight)
.saturating_add((114_817_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
.saturating_add((145_006_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight {
(437_797_000 as Weight)
(486_301_000 as Weight)
.saturating_add((10_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_message_proofs_with_large_leaf(i: u32) -> Weight {
(137_633_000 as Weight)
(178_139_000 as Weight)
.saturating_add((7_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight {
(0 as Weight)
.saturating_add((118_482_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(3 as Weight))
.saturating_add(T::DbWeight::get().writes(1 as Weight))
.saturating_add((150_844_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight {
(116_036_000 as Weight)
.saturating_add((7_118_000 as Weight).saturating_mul(i as Weight))
(113_140_000 as Weight)
.saturating_add((7_656_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight)))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight {
(172_780_000 as Weight)
.saturating_add((63_718_000 as Weight).saturating_mul(i as Weight))
(97_424_000 as Weight)
.saturating_add((63_128_000 as Weight).saturating_mul(i as Weight))
.saturating_add(T::DbWeight::get().reads(5 as Weight))
.saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight)))
.saturating_add(T::DbWeight::get().writes(3 as Weight))
@@ -182,105 +188,110 @@ impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
// For backwards compatibility and tests
impl WeightInfo for () {
fn send_minimal_message_worst_case() -> Weight {
(149_643_000 as Weight)
(159_305_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(12 as Weight))
}
fn send_1_kb_message_worst_case() -> Weight {
(153_329_000 as Weight)
(164_394_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(12 as Weight))
}
fn send_16_kb_message_worst_case() -> Weight {
(200_113_000 as Weight)
(223_521_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(12 as Weight))
}
fn increase_message_fee() -> Weight {
(6_407_252_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(4 as Weight))
(6_709_925_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof() -> Weight {
(141_256_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(206_769_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_two_messages_proof() -> Weight {
(247_723_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(343_982_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_with_outbound_lane_state() -> Weight {
(159_731_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(223_738_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_1_kb() -> Weight {
(168_546_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
(235_369_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_single_message_proof_16_kb() -> Weight {
(450_087_000 as Weight)
(510_338_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_single_prepaid_message_proof() -> Weight {
(141_536_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
}
fn receive_delivery_proof_for_single_message() -> Weight {
(164_519_000 as Weight)
(128_805_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(6 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight {
(173_300_000 as Weight)
(137_143_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(7 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight {
(246_205_000 as Weight)
(193_108_000 as Weight)
.saturating_add(RocksDbWeight::get().reads(8 as Weight))
.saturating_add(RocksDbWeight::get().writes(4 as Weight))
}
fn send_messages_of_various_lengths(i: u32) -> Weight {
(149_551_000 as Weight)
.saturating_add((3_000 as Weight).saturating_mul(i as Weight))
(133_632_000 as Weight)
.saturating_add((4_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(12 as Weight))
}
fn receive_multiple_messages_proof(i: u32) -> Weight {
(0 as Weight)
.saturating_add((114_817_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
.saturating_add((145_006_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight {
(437_797_000 as Weight)
(486_301_000 as Weight)
.saturating_add((10_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_message_proofs_with_large_leaf(i: u32) -> Weight {
(137_633_000 as Weight)
(178_139_000 as Weight)
.saturating_add((7_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight {
(0 as Weight)
.saturating_add((118_482_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(3 as Weight))
.saturating_add(RocksDbWeight::get().writes(1 as Weight))
.saturating_add((150_844_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight {
(116_036_000 as Weight)
.saturating_add((7_118_000 as Weight).saturating_mul(i as Weight))
(113_140_000 as Weight)
.saturating_add((7_656_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight)))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
}
fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight {
(172_780_000 as Weight)
.saturating_add((63_718_000 as Weight).saturating_mul(i as Weight))
(97_424_000 as Weight)
.saturating_add((63_128_000 as Weight).saturating_mul(i as Weight))
.saturating_add(RocksDbWeight::get().reads(5 as Weight))
.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight)))
.saturating_add(RocksDbWeight::get().writes(3 as Weight))
@@ -34,6 +34,7 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
expected_default_message_delivery_tx_weight: Weight,
expected_additional_byte_delivery_weight: Weight,
expected_messages_delivery_confirmation_tx_weight: Weight,
expected_pay_inbound_dispatch_fee_weight: Weight,
) {
// verify `send_message` weight components
assert_ne!(W::send_message_overhead(), 0);
@@ -88,6 +89,15 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
actual_messages_delivery_confirmation_tx_weight,
expected_messages_delivery_confirmation_tx_weight,
);
// verify pay-dispatch-fee overhead for inbound messages
let actual_pay_inbound_dispatch_fee_weight = W::pay_inbound_dispatch_fee_overhead();
assert!(
actual_pay_inbound_dispatch_fee_weight <= expected_pay_inbound_dispatch_fee_weight,
"Weight {} of pay-dispatch-fee overhead for inbound messages is larger than expected weight {}",
actual_pay_inbound_dispatch_fee_weight,
expected_pay_inbound_dispatch_fee_weight,
);
}
/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain.
@@ -304,6 +314,13 @@ pub trait WeightInfoExt: WeightInfo {
(Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024);
proof_size_in_bytes * byte_weight
}
/// Returns weight of the pay-dispatch-fee operation for inbound messages.
///
/// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain option.
fn pay_inbound_dispatch_fee_overhead() -> Weight {
Self::receive_single_message_proof().saturating_sub(Self::receive_single_prepaid_message_proof())
}
}
impl WeightInfoExt for () {
@@ -20,7 +20,7 @@
// Runtime-generated DecodeLimit::decode_all_with_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use sp_std::prelude::*;
pub use bp_polkadot_core::*;
@@ -31,7 +31,7 @@ pub type Kusama = PolkadotLike;
// We use this to get the account on Kusama (target) which is derived from Polkadot's (source)
// account.
pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -43,8 +43,8 @@ pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_head
/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToKusamaOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_KUSAMA_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToKusamaOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method.
pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details";
/// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method.
@@ -87,15 +87,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -23,6 +23,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] }
frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
max-encoded-len = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, features = ["derive"] }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
@@ -41,6 +42,7 @@ std = [
"hash256-std-hasher/std",
"impl-codec/std",
"impl-serde",
"max-encoded-len/std",
"parity-util-mem/std",
"serde",
"sp-api/std",
@@ -22,7 +22,7 @@
mod millau_hash;
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use bp_runtime::Chain;
use frame_support::{
weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight},
@@ -80,7 +80,7 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024;
/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
/// The message must have dispatch weight set to zero. The result then must be rounded up to account
/// possible future runtime upgrades.
pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000;
pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
/// Increase of delivery transaction weight on Millau chain with every additional message byte.
///
@@ -95,6 +95,13 @@ pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
/// runtime upgrades.
pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
/// Weight of pay-dispatch-fee operation for inbound messages at Millau chain.
///
/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
/// The target length of a session (how often authorities change) on Millau measured in of number of
/// blocks.
///
@@ -201,7 +208,7 @@ impl sp_runtime::traits::Convert<sp_core::H256, AccountId> for AccountIdConverte
///
/// Note that this should only be used for testing.
pub fn derive_account_from_rialto_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -244,8 +251,8 @@ pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_fi
/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToMillauOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToMillauOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToMillauOutboundLaneApi::message_details` runtime method.
pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details";
/// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method.
pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce";
/// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method.
@@ -288,15 +295,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
use frame_support::traits::MaxEncodedLen;
use parity_util_mem::MallocSizeOf;
use sp_runtime::traits::CheckEqual;
@@ -22,7 +23,7 @@ use sp_runtime::traits::CheckEqual;
fixed_hash::construct_fixed_hash! {
/// Hash type used in Millau chain.
#[derive(MallocSizeOf)]
#[derive(MallocSizeOf, MaxEncodedLen)]
pub struct MillauHash(64);
}
@@ -20,7 +20,7 @@
// Runtime-generated DecodeLimit::decode_all_with_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use sp_std::prelude::*;
pub use bp_polkadot_core::*;
@@ -31,7 +31,7 @@ pub type Polkadot = PolkadotLike;
// We use this to get the account on Polkadot (target) which is derived from Kusama's (source)
// account.
pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -43,8 +43,8 @@ pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_
/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToPolkadotOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_POLKADOT_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToPolkadotOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method.
pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details";
/// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method.
@@ -87,15 +87,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -20,7 +20,7 @@
// Runtime-generated DecodeLimit::decode_all_With_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use bp_runtime::Chain;
use frame_support::{
weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight},
@@ -71,7 +71,7 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128;
/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
/// The message must have dispatch weight set to zero. The result then must be rounded up to account
/// possible future runtime upgrades.
pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000;
pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
/// Increase of delivery transaction weight on Rialto chain with every additional message byte.
///
@@ -86,6 +86,13 @@ pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
/// runtime upgrades.
pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
/// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain.
///
/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
/// The target length of a session (how often authorities change) on Rialto measured in of number of
/// blocks.
///
@@ -162,7 +169,7 @@ impl Convert<sp_core::H256, AccountId> for AccountIdConverter {
//
// Note that this should only be used for testing.
pub fn derive_account_from_millau_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -205,8 +212,8 @@ pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_fi
/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToRialtoOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRialtoOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method.
pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details";
/// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method.
@@ -249,15 +256,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -8,14 +8,15 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[dependencies]
parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
smallvec = "1.6"
# Bridge Dependencies
bp-header-chain = { path = "../header-chain", default-features = false }
bp-messages = { path = "../messages", default-features = false }
bp-polkadot-core = { path = "../polkadot-core", default-features = false }
bp-runtime = { path = "../runtime", default-features = false }
# Substrate Based Dependencies
frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
@@ -24,10 +25,10 @@ sp-version = { git = "https://github.com/paritytech/substrate", branch = "master
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-messages/std",
"bp-polkadot-core/std",
"bp-runtime/std",
"frame-support/std",
"parity-scale-codec/std",
"sp-api/std",
"sp-runtime/std",
@@ -20,8 +20,8 @@
// Runtime-generated DecodeLimit::decode_all_with_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
use bp_runtime::Chain;
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
use sp_std::prelude::*;
use sp_version::RuntimeVersion;
@@ -30,59 +30,48 @@ pub use bp_polkadot_core::*;
/// Rococo Chain
pub type Rococo = PolkadotLike;
pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
/// The target length of a session (how often authorities change) on Westend measured in of number of
/// blocks.
///
/// Note that since this is a target sessions may change before/after this time depending on network
/// conditions.
pub const SESSION_LENGTH: BlockNumber = 10 * time_units::MINUTES;
// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo.
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: sp_version::create_runtime_str!("rococo"),
impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"),
impl_name: sp_version::create_runtime_str!("parity-rococo-v1.6"),
authoring_version: 0,
spec_version: 232,
spec_version: 9004,
impl_version: 0,
apis: sp_version::create_apis_vec![[]],
transaction_version: 0,
};
/// Rococo Runtime `Call` enum.
///
/// The enum represents a subset of possible `Call`s we can send to Rococo chain.
/// Ideally this code would be auto-generated from Metadata, because we want to
/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
///
/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo
/// `construct_runtime`, so that we maintain SCALE-compatibility.
///
/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs
#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
pub enum Call {
/// Wococo bridge pallet.
#[codec(index = 41)]
BridgeGrandpaWococo(BridgeGrandpaWococoCall),
}
#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
#[allow(non_camel_case_types)]
pub enum BridgeGrandpaWococoCall {
#[codec(index = 0)]
submit_finality_proof(
<PolkadotLike as Chain>::Header,
bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
),
#[codec(index = 1)]
initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
}
impl sp_runtime::traits::Dispatchable for Call {
type Origin = ();
type Config = ();
type Info = ();
type PostInfo = ();
fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
unimplemented!("The Call is not expected to be dispatched.")
// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo.
pub struct WeightToFee;
impl WeightToFeePolynomial for WeightToFee {
type Balance = Balance;
fn polynomial() -> WeightToFeeCoefficients<Balance> {
const CENTS: Balance = 1_000_000_000_000 / 100;
let p = CENTS;
let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
smallvec::smallvec![WeightToFeeCoefficient {
degree: 1,
negative: false,
coeff_frac: Perbill::from_rational(p % q, q),
coeff_integer: p / q,
}]
}
}
// We use this to get the account on Rococo (target) which is derived from Wococo's (source)
// account.
pub fn derive_account_from_wococo_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::WOCOCO_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
/// Name of the `RococoFinalityApi::best_finalized` runtime method.
pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized";
/// Name of the `RococoFinalityApi::is_known_header` runtime method.
@@ -91,8 +80,8 @@ pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_head
/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToRococoOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_ROCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRococoOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToRococoOutboundLaneApi::message_details` runtime method.
pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details";
/// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method.
@@ -135,15 +124,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -20,7 +20,7 @@
// Runtime-generated DecodeLimit::decode_all_with_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use bp_runtime::Chain;
use sp_std::prelude::*;
use sp_version::RuntimeVersion;
@@ -86,7 +86,7 @@ impl sp_runtime::traits::Dispatchable for Call {
// We use this to get the account on Westend (target) which is derived from Rococo's (source)
// account.
pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -98,8 +98,8 @@ pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_he
/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToWestendOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_WESTEND_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWestendOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToWestendOutboundLaneApi::message_details` runtime method.
pub const TO_WESTEND_MESSAGE_DETAILS_METHOD: &str = "ToWestendOutboundLaneApi_message_details";
/// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method.
@@ -149,15 +149,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -10,27 +10,25 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
# Bridge Dependencies
bp-header-chain = { path = "../header-chain", default-features = false }
bp-messages = { path = "../messages", default-features = false }
bp-polkadot-core = { path = "../polkadot-core", default-features = false }
bp-rococo = { path = "../chain-rococo", default-features = false }
bp-runtime = { path = "../runtime", default-features = false }
# Substrate Based Dependencies
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
[features]
default = ["std"]
std = [
"bp-header-chain/std",
"bp-messages/std",
"bp-polkadot-core/std",
"bp-runtime/std",
"bp-rococo/std",
"parity-scale-codec/std",
"sp-api/std",
"sp-runtime/std",
"sp-std/std",
"sp-version/std",
]
@@ -20,73 +20,20 @@
// Runtime-generated DecodeLimit::decode_all_with_depth_limit
#![allow(clippy::unnecessary_mut_passed)]
use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
use bp_runtime::Chain;
use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
use sp_std::prelude::*;
use sp_version::RuntimeVersion;
pub use bp_polkadot_core::*;
// Rococo runtime = Wococo runtime
pub use bp_rococo::{WeightToFee, SESSION_LENGTH, VERSION};
/// Wococo Chain
pub type Wococo = PolkadotLike;
pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo.
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: sp_version::create_runtime_str!("rococo"),
impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"),
authoring_version: 0,
spec_version: 232,
impl_version: 0,
apis: sp_version::create_apis_vec![[]],
transaction_version: 0,
};
/// Wococo Runtime `Call` enum.
///
/// The enum represents a subset of possible `Call`s we can send to Rococo chain.
/// Ideally this code would be auto-generated from Metadata, because we want to
/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
///
/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo
/// `construct_runtime`, so that we maintain SCALE-compatibility.
///
/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs
#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
pub enum Call {
/// Rococo bridge pallet.
#[codec(index = 40)]
BridgeGrandpaRococo(BridgeGrandpaRococoCall),
}
#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
#[allow(non_camel_case_types)]
pub enum BridgeGrandpaRococoCall {
#[codec(index = 0)]
submit_finality_proof(
<PolkadotLike as Chain>::Header,
bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
),
#[codec(index = 1)]
initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
}
impl sp_runtime::traits::Dispatchable for Call {
type Origin = ();
type Config = ();
type Info = ();
type PostInfo = ();
fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
unimplemented!("The Call is not expected to be dispatched.")
}
}
// We use this to get the account on Wococo (target) which is derived from Rococo's (source)
// account.
pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id);
let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id);
AccountIdConverter::convert(encoded_id)
}
@@ -98,8 +45,8 @@ pub const IS_KNOWN_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_is_known_head
/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
"ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
/// Name of the `ToWococoOutboundLaneApi::messages_dispatch_weight` runtime method.
pub const TO_WOCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWococoOutboundLaneApi_messages_dispatch_weight";
/// Name of the `ToWococoOutboundLaneApi::message_details` runtime method.
pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details";
/// Name of the `ToWococoOutboundLaneApi::latest_generated_nonce` runtime method.
pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_generated_nonce";
/// Name of the `ToWococoOutboundLaneApi::latest_received_nonce` runtime method.
@@ -142,15 +89,16 @@ sp_api::decl_runtime_apis! {
lane_id: LaneId,
payload: OutboundPayload,
) -> Option<OutboundMessageFee>;
/// Returns total dispatch weight and encoded payload size of all messages in given inclusive range.
/// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all
/// messages in given inclusive range.
///
/// If some (or all) messages are missing from the storage, they'll also will
/// be missing from the resulting vector. The vector is ordered by the nonce.
fn messages_dispatch_weight(
fn message_details(
lane: LaneId,
begin: MessageNonce,
end: MessageNonce,
) -> Vec<(MessageNonce, Weight, u32)>;
) -> Vec<MessageDetails<OutboundMessageFee>>;
/// Returns nonce of the latest message, received by bridged chain.
fn latest_received_nonce(lane: LaneId) -> MessageNonce;
/// Returns nonce of the latest message, generated by given lane.
@@ -245,7 +245,7 @@ impl AuraHeader {
/// Get step this header is generated for.
pub fn step(&self) -> Option<u64> {
self.seal.get(0).map(|x| Rlp::new(&x)).and_then(|x| x.as_val().ok())
self.seal.get(0).map(|x| Rlp::new(x)).and_then(|x| x.as_val().ok())
}
/// Get header author' signature.
@@ -496,7 +496,7 @@ pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result<Transaction, DecoderError
let message = unsigned.message(chain_id);
// recover tx sender
let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, &message.as_fixed_bytes())
let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
let sender_address = public_to_address(&sender_public);
@@ -20,6 +20,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master
sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
[dev-dependencies]
assert_matches = "1.5"
bp-test-utils = { path = "../test-utils" }
[features]
@@ -20,107 +20,13 @@
//! will ever be moved to the sp_finality_grandpa, we should reuse that implementation.
use codec::{Decode, Encode};
use finality_grandpa::{voter_set::VoterSet, Chain, Error as GrandpaError};
use finality_grandpa::voter_set::VoterSet;
use frame_support::RuntimeDebug;
use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId};
use sp_runtime::traits::Header as HeaderT;
use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
use sp_std::prelude::*;
/// Justification verification error.
#[derive(RuntimeDebug, PartialEq)]
pub enum Error {
/// Failed to decode justification.
JustificationDecode,
/// Justification is finalizing unexpected header.
InvalidJustificationTarget,
/// Invalid commit in justification.
InvalidJustificationCommit,
/// Justification has invalid authority singature.
InvalidAuthoritySignature,
/// The justification has precommit for the header that has no route from the target header.
InvalidPrecommitAncestryProof,
/// The justification has 'unused' headers in its precommit ancestries.
InvalidPrecommitAncestries,
}
/// Decode justification target.
pub fn decode_justification_target<Header: HeaderT>(
raw_justification: &[u8],
) -> Result<(Header::Hash, Header::Number), Error> {
GrandpaJustification::<Header>::decode(&mut &*raw_justification)
.map(|justification| (justification.commit.target_hash, justification.commit.target_number))
.map_err(|_| Error::JustificationDecode)
}
/// Verify that justification, that is generated by given authority set, finalizes given header.
///
/// Verification is performed in three steps:
/// 1) check that the justification targets the expected header;
/// 2) run `finality_grandpa::validate_commit` over the commit (it assumes all signatures are valid);
/// 3) verify every precommit signature and check that all `votes_ancestries` headers were used.
pub fn verify_justification<Header: HeaderT>(
	finalized_target: (Header::Hash, Header::Number),
	authorities_set_id: SetId,
	authorities_set: &VoterSet<AuthorityId>,
	justification: &GrandpaJustification<Header>,
) -> Result<(), Error>
where
	Header::Number: finality_grandpa::BlockNumberOps,
{
	// Ensure that it is justification for the expected header
	if (justification.commit.target_hash, justification.commit.target_number) != finalized_target {
		return Err(Error::InvalidJustificationTarget);
	}

	// Validate commit of the justification. Note that `validate_commit()` assumes that all
	// signatures are valid. We'll check the validity of the signatures later since they're more
	// resource intensive to verify.
	let ancestry_chain = AncestryChain::new(&justification.votes_ancestries);
	match finality_grandpa::validate_commit(&justification.commit, authorities_set, &ancestry_chain) {
		Ok(ref result) if result.ghost().is_some() => {}
		_ => return Err(Error::InvalidJustificationCommit),
	}

	// Now that we know that the commit is correct, check authorities signatures
	let mut buf = Vec::new();
	let mut visited_hashes = BTreeSet::new();
	for signed in &justification.commit.precommits {
		if !sp_finality_grandpa::check_message_signature_with_buffer(
			&finality_grandpa::Message::Precommit(signed.precommit.clone()),
			&signed.id,
			&signed.signature,
			justification.round,
			authorities_set_id,
			&mut buf,
		) {
			return Err(Error::InvalidAuthoritySignature);
		}

		// a precommit that votes for the commit target itself contributes no ancestry headers
		if justification.commit.target_hash == signed.precommit.target_hash {
			continue;
		}

		match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) {
			Ok(route) => {
				// ancestry starts from parent hash but the precommit target hash has been visited
				visited_hashes.insert(signed.precommit.target_hash);
				visited_hashes.extend(route);
			}
			_ => {
				// could this happen in practice? I don't think so, but original code has this check
				return Err(Error::InvalidPrecommitAncestryProof);
			}
		}
	}

	// every header in `votes_ancestries` must have been used by at least one precommit route,
	// otherwise the justification carries redundant data
	let ancestry_hashes = justification
		.votes_ancestries
		.iter()
		.map(|h: &Header| h.hash())
		.collect();
	if visited_hashes != ancestry_hashes {
		return Err(Error::InvalidPrecommitAncestries);
	}

	Ok(())
}
/// A GRANDPA Justification is a proof that a given header was finalized
/// at a certain height and with a certain set of authorities.
///
@@ -142,44 +48,172 @@ impl<H: HeaderT> crate::FinalityProof<H::Number> for GrandpaJustification<H> {
}
}
/// A utility trait implementing `finality_grandpa::Chain` using a given set of headers.
#[derive(RuntimeDebug)]
struct AncestryChain<Header: HeaderT> {
ancestry: BTreeMap<Header::Hash, Header::Hash>,
/// Justification verification error.
#[derive(RuntimeDebug, PartialEq)]
pub enum Error {
	/// Failed to decode justification.
	JustificationDecode,
	/// Justification is finalizing unexpected header.
	InvalidJustificationTarget,
	/// The authority has provided an invalid signature.
	InvalidAuthoritySignature,
	/// The justification contains a precommit for a header that is not a descendant of the commit
	/// header.
	PrecommitIsNotCommitDescendant,
	/// The cumulative weight of all votes in the justification is not enough to justify commit
	/// header finalization.
	TooLowCumulativeWeight,
	/// The justification contains extra (unused) headers in its `votes_ancestries` field.
	ExtraHeadersInVotesAncestries,
}
impl<Header: HeaderT> AncestryChain<Header> {
fn new(ancestry: &[Header]) -> AncestryChain<Header> {
AncestryChain {
ancestry: ancestry
.iter()
.map(|header| (header.hash(), *header.parent_hash()))
.collect(),
}
}
/// Decode justification target.
///
/// Returns the `(hash, number)` of the header that the justification finalizes, or
/// `Error::JustificationDecode` if `raw_justification` is not a valid encoded
/// `GrandpaJustification`.
pub fn decode_justification_target<Header: HeaderT>(
	raw_justification: &[u8],
) -> Result<(Header::Hash, Header::Number), Error> {
	GrandpaJustification::<Header>::decode(&mut &*raw_justification)
		.map(|justification| (justification.commit.target_hash, justification.commit.target_number))
		.map_err(|_| Error::JustificationDecode)
}
impl<Header: HeaderT> finality_grandpa::Chain<Header::Hash, Header::Number> for AncestryChain<Header>
/// Verify that justification, that is generated by given authority set, finalizes given header.
pub fn verify_justification<Header: HeaderT>(
finalized_target: (Header::Hash, Header::Number),
authorities_set_id: SetId,
authorities_set: &VoterSet<AuthorityId>,
justification: &GrandpaJustification<Header>,
) -> Result<(), Error>
where
Header::Number: finality_grandpa::BlockNumberOps,
{
fn ancestry(&self, base: Header::Hash, block: Header::Hash) -> Result<Vec<Header::Hash>, GrandpaError> {
let mut route = Vec::new();
let mut current_hash = block;
loop {
if current_hash == base {
break;
}
match self.ancestry.get(&current_hash).cloned() {
Some(parent_hash) => {
current_hash = parent_hash;
route.push(current_hash);
}
_ => return Err(GrandpaError::NotDescendent),
}
}
route.pop(); // remove the base
// ensure that it is justification for the expected header
if (justification.commit.target_hash, justification.commit.target_number) != finalized_target {
return Err(Error::InvalidJustificationTarget);
}
Ok(route)
let mut chain = AncestryChain::new(&justification.votes_ancestries);
let mut signature_buffer = Vec::new();
let mut votes = BTreeSet::new();
let mut cumulative_weight = 0u64;
for signed in &justification.commit.precommits {
// authority must be in the set
let authority_info = match authorities_set.get(&signed.id) {
Some(authority_info) => authority_info,
None => {
// just ignore precommit from unknown authority as `finality_grandpa::import_precommit` does
continue;
}
};
// check if authority has already voted in the same round.
//
// there's a lot of code in `validate_commit` and `import_precommit` functions inside
// `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing that we
// care about is that only first vote from the authority is accepted
if !votes.insert(signed.id.clone()) {
continue;
}
// everything below this line can't just `continue`, because state is already altered
// all precommits must be for block higher than the target
if signed.precommit.target_number < justification.commit.target_number {
return Err(Error::PrecommitIsNotCommitDescendant);
}
// all precommits must be for target block descendents
chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
// since we know now that the precommit target is the descendant of the justification target,
// we may increase 'weight' of the justification target
//
// there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate,
// but in the end it is only used to find GHOST, which we don't care about. The only thing
// that we care about is that the justification target has enough weight
cumulative_weight = cumulative_weight.checked_add(authority_info.weight().0.into()).expect(
"sum of weights of ALL authorities is expected not to overflow - this is guaranteed by\
existence of VoterSet;\
the order of loop conditions guarantees that we can account vote from same authority\
multiple times;\
thus we'll never overflow the u64::MAX;\
qed",
);
// verify authority signature
if !sp_finality_grandpa::check_message_signature_with_buffer(
&finality_grandpa::Message::Precommit(signed.precommit.clone()),
&signed.id,
&signed.signature,
justification.round,
authorities_set_id,
&mut signature_buffer,
) {
return Err(Error::InvalidAuthoritySignature);
}
}
// check that there are no extra headers in the justification
if !chain.unvisited.is_empty() {
return Err(Error::ExtraHeadersInVotesAncestries);
}
// check that the cumulative weight of validators voted for the justification target (or one
// of its descendents) is larger than required threshold.
let threshold = authorities_set.threshold().0.into();
if cumulative_weight >= threshold {
Ok(())
} else {
Err(Error::TooLowCumulativeWeight)
}
}
/// Votes ancestries with useful methods.
#[derive(RuntimeDebug)]
pub struct AncestryChain<Header: HeaderT> {
	/// Header hash => parent header hash mapping.
	pub parents: BTreeMap<Header::Hash, Header::Hash>,
	/// Hashes of headers that weren't visited by the `ensure_descendant` method yet.
	pub unvisited: BTreeSet<Header::Hash>,
}
impl<Header: HeaderT> AncestryChain<Header> {
	/// Create new ancestry chain.
	pub fn new(ancestry: &[Header]) -> AncestryChain<Header> {
		let mut parents = BTreeMap::new();
		let mut unvisited = BTreeSet::new();
		for ancestor in ancestry {
			let hash = ancestor.hash();
			let parent_hash = *ancestor.parent_hash();
			parents.insert(hash, parent_hash);
			unvisited.insert(hash);
		}
		AncestryChain { parents, unvisited }
	}

	/// Returns `Ok(_)` if `precommit_target` is a descendant of the `commit_target` block and
	/// `Err(_)` otherwise.
	///
	/// Every header visited while walking from `precommit_target` towards `commit_target` is
	/// removed from the `unvisited` set, so that the caller may later verify that all headers
	/// of `votes_ancestries` have actually been used.
	pub fn ensure_descendant(
		mut self,
		commit_target: &Header::Hash,
		precommit_target: &Header::Hash,
	) -> Result<Self, Error> {
		let mut current_hash = *precommit_target;
		loop {
			if current_hash == *commit_target {
				break;
			}

			let is_visited_before = !self.unvisited.remove(&current_hash);
			current_hash = match self.parents.get(&current_hash) {
				Some(parent_hash) => {
					if is_visited_before {
						// `Some(parent_hash)` means that the `current_hash` is in the `parents` container
						// `is_visited_before` means that it has been visited before in some of previous calls
						// => since we assume that previous call has finished with `true`, this also will
						// be finished with `true`
						return Ok(self);
					}

					*parent_hash
				}
				None => return Err(Error::PrecommitIsNotCommitDescendant),
			};
		}
		Ok(self)
	}
}
@@ -0,0 +1,317 @@
// Copyright 2020-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Tests inside this module are made to ensure that our custom justification verification
//! implementation behaves exactly like the `finality_grandpa::validate_commit` function.
//!
//! Some of tests in this module may partially duplicate tests from `justification.rs`,
//! but their purpose is different.
use assert_matches::assert_matches;
use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification};
use bp_test_utils::{
header_id, make_justification_for_header, signed_precommit, test_header, Account, JustificationGeneratorParams,
ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID,
};
use finality_grandpa::voter_set::VoterSet;
use sp_finality_grandpa::{AuthorityId, AuthorityWeight};
use sp_runtime::traits::Header as HeaderT;
type TestHeader = sp_runtime::testing::Header;
type TestHash = <TestHeader as HeaderT>::Hash;
type TestNumber = <TestHeader as HeaderT>::Number;
/// Implementation of `finality_grandpa::Chain` that is used in tests.
///
/// Thin newtype over the production `AncestryChain`, so that the original
/// `finality_grandpa::validate_commit` can be run against the same ancestry data
/// that our own verification uses.
struct AncestryChain(bp_header_chain::justification::AncestryChain<TestHeader>);

impl AncestryChain {
	/// Build the wrapper from the headers of the `votes_ancestries` field.
	fn new(ancestry: &[TestHeader]) -> Self {
		Self(bp_header_chain::justification::AncestryChain::new(ancestry))
	}
}
impl finality_grandpa::Chain<TestHash, TestNumber> for AncestryChain {
fn ancestry(&self, base: TestHash, block: TestHash) -> Result<Vec<TestHash>, finality_grandpa::Error> {
let mut route = Vec::new();
let mut current_hash = block;
loop {
if current_hash == base {
break;
}
match self.0.parents.get(&current_hash).cloned() {
Some(parent_hash) => {
current_hash = parent_hash;
route.push(current_hash);
}
_ => return Err(finality_grandpa::Error::NotDescendent),
}
}
route.pop(); // remove the base
Ok(route)
}
}
/// Get a full set of accounts.
fn full_accounts_set() -> Vec<(Account, AuthorityWeight)> {
	// every authority carries the same weight of 1
	vec![ALICE, BOB, CHARLIE, DAVE, EVE]
		.into_iter()
		.map(|account| (account, 1))
		.collect()
}
/// Get a full set of GRANDPA authorities.
fn full_voter_set() -> VoterSet<AuthorityId> {
	let weighted_authorities = full_accounts_set()
		.iter()
		.map(|(id, w)| (AuthorityId::from(*id), *w));
	VoterSet::new(weighted_authorities).unwrap()
}
/// Get a minimal set of accounts.
fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> {
// there are 5 accounts in the full set => we need 2/3 + 1 accounts, which results in 4 accounts
vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]
}
/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a header finality.
pub fn minimal_voter_set() -> VoterSet<AuthorityId> {
	let weighted_authorities = minimal_accounts_set()
		.into_iter()
		.map(|(id, w)| (AuthorityId::from(id), w));
	VoterSet::new(weighted_authorities).unwrap()
}
/// Make a valid GRANDPA justification with sensible defaults.
pub fn make_default_justification(header: &TestHeader) -> GrandpaJustification<TestHeader> {
	let params = JustificationGeneratorParams {
		header: header.clone(),
		authorities: minimal_accounts_set(),
		..Default::default()
	};
	make_justification_for_header(params)
}
// the `finality_grandpa::validate_commit` function has two ways to report an unsuccessful
// commit validation:
//
// 1) to return `Err()` (which only may happen if `finality_grandpa::Chain` implementation
// returns an error);
// 2) to return `Ok(validation_result) if validation_result.ghost().is_none()`.
//
// Our implementation would just return error in both cases.
#[test]
fn same_result_when_precommit_target_has_lower_number_than_commit_target() {
	let mut justification = make_default_justification(&test_header(1));
	// the number of header in precommit (0) is lower than number of header in commit (1)
	justification.commit.precommits[0].precommit.target_number = 0;

	// our implementation returns an error
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Err(Error::PrecommitIsNotCommitDescendant),
	);

	// original implementation returns empty GHOST
	// (i.e. it also treats the commit as invalid, just reports it differently)
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(None)
	);
}
#[test]
fn same_result_when_precommit_target_is_not_descendant_of_commit_target() {
	// header (10) has no route to the commit header (1) in the votes ancestries
	let unrelated_header = test_header::<TestHeader>(10);
	let (unrelated_hash, unrelated_number) = (unrelated_header.hash(), *unrelated_header.number());

	let mut justification = make_default_justification(&test_header(1));
	{
		let first_precommit = &mut justification.commit.precommits[0].precommit;
		first_precommit.target_number = unrelated_number;
		first_precommit.target_hash = unrelated_hash;
	}
	justification.votes_ancestries.push(unrelated_header);

	// our implementation returns an error
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Err(Error::PrecommitIsNotCommitDescendant),
	);

	// original implementation returns empty GHOST
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(None)
	);
}
#[test]
fn same_result_when_justification_contains_duplicate_vote() {
	let mut justification = make_default_justification(&test_header(1));

	// the justification may contain exactly the same vote (i.e. same precommit and same signature)
	// multiple times && it isn't treated as an error by original implementation
	let duplicate_vote = justification.commit.precommits[0].clone();
	for _ in 0..2 {
		justification.commit.precommits.push(duplicate_vote.clone());
	}

	// our implementation succeeds
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Ok(()),
	);

	// original implementation returns non-empty GHOST
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(Some(_))
	);
}
#[test]
fn same_result_when_authority_equivocates_once_in_a_round() {
	let mut justification = make_default_justification(&test_header(1));
	// the original implementation allows an authority to submit two different
	// votes in a single round, of which only the first is 'accepted'
	justification.commit.precommits.push(signed_precommit::<TestHeader>(
		&ALICE,
		header_id::<TestHeader>(1),
		justification.round,
		TEST_GRANDPA_SET_ID,
	));

	// our implementation succeeds
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Ok(()),
	);

	// original implementation returns non-empty GHOST
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(Some(_))
	);
}
#[test]
fn same_result_when_authority_equivocates_twice_in_a_round() {
	let mut justification = make_default_justification(&test_header(1));
	// there's some code in the original implementation that should return an error when
	// same authority submits more than two different votes in a single round:
	// https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/lib.rs#L473
	// but there's also a code that prevents this from happening:
	// https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287
	// => so now we are also just ignoring all votes from the same authority, except the first one
	for _ in 0..2 {
		justification.commit.precommits.push(signed_precommit::<TestHeader>(
			&ALICE,
			header_id::<TestHeader>(1),
			justification.round,
			TEST_GRANDPA_SET_ID,
		));
	}

	// our implementation succeeds
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Ok(()),
	);

	// original implementation returns non-empty GHOST
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(Some(_))
	);
}
#[test]
fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() {
	// just remove one authority from the minimal set and we shall not reach the threshold
	let mut authorities_set = minimal_accounts_set();
	authorities_set.pop();
	let justification = make_justification_for_header(JustificationGeneratorParams {
		header: test_header(1),
		authorities: authorities_set,
		..Default::default()
	});

	// our implementation returns an error
	assert_eq!(
		verify_justification::<TestHeader>(
			header_id::<TestHeader>(1),
			TEST_GRANDPA_SET_ID,
			&full_voter_set(),
			&justification,
		),
		Err(Error::TooLowCumulativeWeight),
	);

	// original implementation returns empty GHOST
	// (i.e. it also fails to finalize the commit target with the given votes)
	assert_matches!(
		finality_grandpa::validate_commit(
			&justification.commit,
			&full_voter_set(),
			&AncestryChain::new(&justification.votes_ancestries),
		)
		.map(|result| result.ghost().cloned()),
		Ok(None)
	);
}
@@ -23,13 +23,13 @@ type TestHeader = sp_runtime::testing::Header;
#[test]
fn valid_justification_accepted() {
let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)];
let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)];
let params = JustificationGeneratorParams {
header: test_header(1),
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone(),
votes: 7,
ancestors: 7,
forks: 3,
};
@@ -45,7 +45,7 @@ fn valid_justification_accepted() {
);
assert_eq!(justification.commit.precommits.len(), authorities.len());
assert_eq!(justification.votes_ancestries.len(), params.votes as usize);
assert_eq!(justification.votes_ancestries.len(), params.ancestors as usize);
}
#[test]
@@ -55,7 +55,7 @@ fn valid_justification_accepted_with_single_fork() {
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)],
votes: 5,
ancestors: 5,
forks: 1,
};
@@ -83,7 +83,7 @@ fn valid_justification_accepted_with_arbitrary_number_of_authorities() {
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone(),
votes: n.into(),
ancestors: n.into(),
forks: n.into(),
};
@@ -129,7 +129,7 @@ fn justification_with_invalid_commit_rejected() {
&voter_set(),
&justification,
),
Err(Error::InvalidJustificationCommit),
Err(Error::ExtraHeadersInVotesAncestries),
);
}
@@ -161,7 +161,7 @@ fn justification_with_invalid_precommit_ancestry() {
&voter_set(),
&justification,
),
Err(Error::InvalidPrecommitAncestries),
Err(Error::ExtraHeadersInVotesAncestries),
);
}
@@ -175,7 +175,7 @@ fn justification_is_invalid_if_we_dont_meet_threshold() {
round: TEST_GRANDPA_ROUND,
set_id: TEST_GRANDPA_SET_ID,
authorities: authorities.clone(),
votes: 2 * authorities.len() as u32,
ancestors: 2 * authorities.len() as u32,
forks: 2,
};
@@ -186,6 +186,6 @@ fn justification_is_invalid_if_we_dont_meet_threshold() {
&voter_set(),
&make_justification_for_header::<TestHeader>(params)
),
Err(Error::InvalidJustificationCommit),
Err(Error::TooLowCumulativeWeight),
);
}
@@ -19,7 +19,10 @@
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
use bp_runtime::{InstanceId, Size};
use bp_runtime::{
messages::{DispatchFeePayment, MessageDispatchResult},
ChainId, Size,
};
use codec::{Decode, Encode};
use frame_support::RuntimeDebug;
use sp_std::prelude::*;
@@ -31,7 +34,7 @@ pub type Weight = u64;
pub type SpecVersion = u32;
/// A generic trait to dispatch arbitrary messages delivered over the bridge.
pub trait MessageDispatch<MessageId> {
pub trait MessageDispatch<AccountId, MessageId> {
/// A type of the message to be dispatched.
type Message: codec::Decode;
@@ -43,7 +46,8 @@ pub trait MessageDispatch<MessageId> {
/// Dispatches the message internally.
///
/// `bridge` indicates instance of deployed bridge where the message came from.
/// `source_chain` indicates the chain where the message came from.
/// `target_chain` indicates the chain where message dispatch happens.
///
/// `id` is a short unique identifier of the message.
///
@@ -51,7 +55,15 @@ pub trait MessageDispatch<MessageId> {
/// a sign that some other component has rejected the message even before it has
/// reached `dispatch` method (right now this may only be caused if we fail to decode
/// the whole message).
fn dispatch(bridge: InstanceId, id: MessageId, message: Result<Self::Message, ()>);
///
/// Returns unspent dispatch weight.
fn dispatch<P: FnOnce(&AccountId, Weight) -> Result<(), ()>>(
source_chain: ChainId,
target_chain: ChainId,
id: MessageId,
message: Result<Self::Message, ()>,
pay_dispatch_fee: P,
) -> MessageDispatchResult;
}
/// Origin of a Call when it is dispatched on the target chain.
@@ -90,7 +102,7 @@ pub enum CallOrigin<SourceChainAccountId, TargetChainAccountPublic, TargetChainS
/// Call is sent by the `SourceChainAccountId` on the source chain. On the target chain it is
/// dispatched from a derived account ID.
///
/// The account ID on the target chain is derived from the source account ID This is useful if
/// The account ID on the target chain is derived from the source account ID. This is useful if
/// you need a way to represent foreign accounts on this chain for call dispatch purposes.
///
/// Note that the derived account does not need to have a private key on the target chain. This
@@ -109,6 +121,8 @@ pub struct MessagePayload<SourceChainAccountId, TargetChainAccountPublic, Target
pub weight: Weight,
/// Call origin to be used during dispatch.
pub origin: CallOrigin<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature>,
/// Where the fee for dispatching message is paid?
pub dispatch_fee_payment: DispatchFeePayment,
/// The call itself.
pub call: Call,
}
@@ -7,7 +7,10 @@ edition = "2018"
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[dependencies]
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
bitvec = { version = "0.20", default-features = false, features = ["alloc"] }
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive", "bit-vec"] }
impl-trait-for-tuples = "0.2"
serde = { version = "1.0.101", optional = true, features = ["derive"] }
# Bridge dependencies
@@ -26,5 +29,6 @@ std = [
"codec/std",
"frame-support/std",
"frame-system/std",
"serde",
"sp-std/std"
]
+187 -25
View File
@@ -22,6 +22,8 @@
// Generated by `DecodeLimit::decode_with_depth_limit`
#![allow(clippy::unnecessary_mut_passed)]
use bitvec::prelude::*;
use bp_runtime::messages::DispatchFeePayment;
use codec::{Decode, Encode};
use frame_support::RuntimeDebug;
use sp_std::{collections::vec_deque::VecDeque, prelude::*};
@@ -32,12 +34,40 @@ pub mod target_chain;
// Weight is reexported to avoid additional frame-support dependencies in related crates.
pub use frame_support::weights::Weight;
/// Messages pallet operating mode.
#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)]
#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))]
pub enum OperatingMode {
	/// Normal mode, when all operations are allowed.
	Normal,
	/// The pallet is not accepting outbound messages. Inbound messages and receiving proofs
	/// are still accepted.
	///
	/// This mode may be used e.g. when bridged chain expects upgrade. Then to avoid dispatch
	/// failures, the pallet owner may stop accepting new messages, while continuing to deliver
	/// queued messages to the bridged chain. Once upgrade is completed, the mode may be switched
	/// back to `Normal`.
	RejectingOutboundMessages,
	/// The pallet is halted. All operations (except operating mode change) are prohibited.
	Halted,
}
impl Default for OperatingMode {
fn default() -> Self {
OperatingMode::Normal
}
}
/// Messages pallet parameter.
pub trait Parameter: frame_support::Parameter {
	/// Save parameter value in the runtime storage.
	fn save(&self);
}

// No-op implementation for pallets that have no configurable parameters.
impl Parameter for () {
	fn save(&self) {}
}
/// Lane identifier.
pub type LaneId = [u8; 4];
@@ -96,7 +126,7 @@ pub struct InboundLaneData<RelayerId> {
/// When a relayer sends a single message, both of MessageNonces are the same.
/// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce.
/// Multiple dispatches from the same relayer are allowed.
pub relayers: VecDeque<(MessageNonce, MessageNonce, RelayerId)>,
pub relayers: VecDeque<UnrewardedRelayer<RelayerId>>,
/// Nonce of the last message that
/// a) has been delivered to the target (this) chain and
@@ -123,22 +153,106 @@ impl<RelayerId> InboundLaneData<RelayerId> {
/// size of each entry.
///
/// Returns `None` if size overflows `u32` limits.
pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32) -> Option<u32> {
pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32, messages_count: u32) -> Option<u32> {
let message_nonce_size = 8;
let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?;
let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?;
relayers_size.checked_add(message_nonce_size)
let dispatch_results_per_byte = 8;
let dispatch_result_size = sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte);
relayers_size
.checked_add(message_nonce_size)
.and_then(|result| result.checked_add(dispatch_result_size))
}
/// Nonce of the last message that has been delivered to this (target) chain.
pub fn last_delivered_nonce(&self) -> MessageNonce {
self.relayers
.back()
.map(|(_, last_nonce, _)| *last_nonce)
.map(|entry| entry.messages.end)
.unwrap_or(self.last_confirmed_nonce)
}
}
/// Message details, returned by runtime APIs.
#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)]
pub struct MessageDetails<OutboundMessageFee> {
	/// Nonce assigned to the message.
	pub nonce: MessageNonce,
	/// Message dispatch weight, declared by the submitter.
	pub dispatch_weight: Weight,
	/// Size of the encoded message.
	pub size: u32,
	/// Delivery+dispatch fee paid by the message submitter at the source chain.
	pub delivery_and_dispatch_fee: OutboundMessageFee,
	/// Where the fee for dispatching the message is paid.
	pub dispatch_fee_payment: DispatchFeePayment,
}
/// Bit vector of message dispatch results.
pub type DispatchResultsBitVec = BitVec<Msb0, u8>;
/// Unrewarded relayer entry stored in the inbound lane data.
///
/// This struct represents a continuous range of messages that have been delivered by the same
/// relayer and whose confirmations are still pending.
#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
pub struct UnrewardedRelayer<RelayerId> {
	/// Identifier of the relayer.
	pub relayer: RelayerId,
	/// Messages range, delivered by this relayer.
	pub messages: DeliveredMessages,
}
/// Delivered messages with their dispatch result.
#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)]
pub struct DeliveredMessages {
	/// Nonce of the first message that has been delivered (inclusive).
	pub begin: MessageNonce,
	/// Nonce of the last message that has been delivered (inclusive).
	pub end: MessageNonce,
	/// Dispatch result (`false`/`true`), returned by the message dispatcher for every
	/// message in the `[begin; end]` range. See `dispatch_result` field of the
	/// `bp_runtime::messages::MessageDispatchResult` structure for more information.
	pub dispatch_results: DispatchResultsBitVec,
}
impl DeliveredMessages {
/// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given dispatch result.
pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self {
DeliveredMessages {
begin: nonce,
end: nonce,
dispatch_results: bitvec![Msb0, u8; if dispatch_result { 1 } else { 0 }],
}
}
/// Note new dispatched message.
pub fn note_dispatched_message(&mut self, dispatch_result: bool) {
self.end += 1;
self.dispatch_results.push(dispatch_result);
}
/// Returns true if delivered messages contain message with given nonce.
pub fn contains_message(&self, nonce: MessageNonce) -> bool {
(self.begin..=self.end).contains(&nonce)
}
/// Get dispatch result flag by message nonce.
///
/// Dispatch result flag must be interpreted using the knowledge of dispatch mechanism
/// at the target chain. See `dispatch_result` field of the
/// `bp_runtime::messages::MessageDispatchResult` structure for more information.
///
/// Panics if message nonce is not in the `begin..=end` range. Typically you'll first
/// check if message is within the range by calling `contains_message`.
pub fn message_dispatch_result(&self, nonce: MessageNonce) -> bool {
const INVALID_NONCE: &str = "Invalid nonce used to index dispatch_results";
let index = nonce.checked_sub(self.begin).expect(INVALID_NONCE) as usize;
*self.dispatch_results.get(index).expect(INVALID_NONCE)
}
}
/// Gist of `InboundLaneData::relayers` field used by runtime APIs.
#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)]
pub struct UnrewardedRelayersState {
@@ -177,12 +291,10 @@ impl Default for OutboundLaneData {
/// Returns total number of messages in the `InboundLaneData::relayers` vector.
///
/// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`).
pub fn total_unrewarded_messages<RelayerId>(
relayers: &VecDeque<(MessageNonce, MessageNonce, RelayerId)>,
) -> Option<MessageNonce> {
pub fn total_unrewarded_messages<RelayerId>(relayers: &VecDeque<UnrewardedRelayer<RelayerId>>) -> Option<MessageNonce> {
match (relayers.front(), relayers.back()) {
(Some((begin, _, _)), Some((_, end, _))) => {
if let Some(difference) = end.checked_sub(*begin) {
(Some(front), Some(back)) => {
if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) {
difference.checked_add(1)
} else {
Some(0)
@@ -200,9 +312,18 @@ mod tests {
fn total_unrewarded_messages_does_not_overflow() {
assert_eq!(
total_unrewarded_messages(
&vec![(0, 0, 1), (MessageNonce::MAX, MessageNonce::MAX, 2)]
.into_iter()
.collect()
&vec![
UnrewardedRelayer {
relayer: 1,
messages: DeliveredMessages::new(0, true)
},
UnrewardedRelayer {
relayer: 2,
messages: DeliveredMessages::new(MessageNonce::MAX, true)
},
]
.into_iter()
.collect()
),
None,
);
@@ -210,19 +331,60 @@ mod tests {
#[test]
fn inbound_lane_data_returns_correct_hint() {
let expected_size = InboundLaneData::<u8>::encoded_size_hint(1, 13);
let actual_size = InboundLaneData {
relayers: (1u8..=13u8).map(|i| (i as _, i as _, i)).collect(),
last_confirmed_nonce: 13,
let test_cases = vec![
// single relayer, multiple messages
(1, 128u8),
// multiple relayers, single message per relayer
(128u8, 128u8),
// several messages per relayer
(13u8, 128u8),
];
for (relayer_entries, messages_count) in test_cases {
let expected_size = InboundLaneData::<u8>::encoded_size_hint(1, relayer_entries as _, messages_count as _);
let actual_size = InboundLaneData {
relayers: (1u8..=relayer_entries)
.map(|i| {
let mut entry = UnrewardedRelayer {
relayer: i,
messages: DeliveredMessages::new(i as _, true),
};
entry.messages.dispatch_results = bitvec![
Msb0, u8;
1;
(messages_count / relayer_entries) as _
];
entry
})
.collect(),
last_confirmed_nonce: messages_count as _,
}
.encode()
.len();
let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs();
assert!(
difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1,
"Too large difference between actual ({}) and expected ({:?}) inbound lane data size. Test case: {}+{}",
actual_size,
expected_size,
relayer_entries,
messages_count,
);
}
.encode()
.len();
let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs();
assert!(
difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1,
"Too large difference between actual ({}) and expected ({:?}) inbound lane data size",
actual_size,
expected_size,
);
}
#[test]
fn message_dispatch_result_works() {
    // messages with nonces 100..=150 have been delivered, all with `true` dispatch result
    let messages = DeliveredMessages {
        begin: 100,
        end: 150,
        dispatch_results: bitvec![Msb0, u8; 1; 151],
    };

    // nonces outside of the `begin..=end` range are not delivered
    assert!(!messages.contains_message(99));
    assert!(!messages.contains_message(151));
    // nonces at the range boundaries are delivered
    assert!(messages.contains_message(100));
    assert!(messages.contains_message(150));
    // dispatch result of delivered message is reported
    assert!(messages.message_dispatch_result(125));
}
}
@@ -16,7 +16,7 @@
//! Primitives of messages module, that are used on the source chain.
use crate::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData};
use crate::{DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData};
use bp_runtime::Size;
use frame_support::{Parameter, RuntimeDebug};
@@ -135,6 +135,15 @@ pub trait MessageDeliveryAndDispatchPayment<AccountId, Balance> {
}
}
/// Handler for messages delivery confirmation.
///
/// Implemented for tuples (via `impl_trait_for_tuples`), so several handlers may be
/// chained in the runtime configuration.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait OnDeliveryConfirmed {
/// Called when we receive confirmation that our messages have been delivered to the
/// target chain. The confirmation also has single bit dispatch result for every
/// confirmed message (see `DeliveredMessages` for details).
fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) {}
}
/// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and
/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden.
pub struct ForbidOutboundMessages;
@@ -18,7 +18,7 @@
use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData};
use bp_runtime::Size;
use bp_runtime::{messages::MessageDispatchResult, Size};
use codec::{Decode, Encode, Error as CodecError};
use frame_support::{weights::Weight, Parameter, RuntimeDebug};
use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, prelude::*};
@@ -84,7 +84,7 @@ pub trait SourceHeaderChain<Fee> {
}
/// Called when inbound message is received.
pub trait MessageDispatch<Fee> {
pub trait MessageDispatch<AccountId, Fee> {
/// Decoded message payload type. Valid message may contain invalid payload. In this case
/// message is delivered, but dispatch fails. Therefore, two separate types of payload
/// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch).
@@ -100,7 +100,13 @@ pub trait MessageDispatch<Fee> {
///
/// It is up to the implementers of this trait to determine whether the message
/// is invalid (i.e. improperly encoded, has too large weight, ...) or not.
fn dispatch(message: DispatchMessage<Self::DispatchPayload, Fee>);
///
/// If your configuration allows paying dispatch fee at the target chain, then
/// it must be paid inside this method to the `relayer_account`.
fn dispatch(
relayer_account: &AccountId,
message: DispatchMessage<Self::DispatchPayload, Fee>,
) -> MessageDispatchResult;
}
impl<Message> Default for ProvedLaneMessages<Message> {
@@ -149,12 +155,18 @@ impl<Fee> SourceHeaderChain<Fee> for ForbidInboundMessages {
}
}
impl<Fee> MessageDispatch<Fee> for ForbidInboundMessages {
impl<AccountId, Fee> MessageDispatch<AccountId, Fee> for ForbidInboundMessages {
type DispatchPayload = ();
fn dispatch_weight(_message: &DispatchMessage<Self::DispatchPayload, Fee>) -> Weight {
Weight::MAX
}
fn dispatch(_message: DispatchMessage<Self::DispatchPayload, Fee>) {}
fn dispatch(_: &AccountId, _: DispatchMessage<Self::DispatchPayload, Fee>) -> MessageDispatchResult {
MessageDispatchResult {
dispatch_result: false,
unspent_weight: 0,
dispatch_fee_paid_during_dispatch: false,
}
}
}
@@ -22,7 +22,7 @@ use frame_support::{
dispatch::Dispatchable,
parameter_types,
weights::{
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, WEIGHT_PER_SECOND},
constants::{BlockExecutionWeight, WEIGHT_PER_SECOND},
DispatchClass, Weight,
},
Blake2_128Concat, RuntimeDebug, StorageHasher, Twox128,
@@ -33,13 +33,13 @@ use sp_core::Hasher as HasherT;
use sp_runtime::{
generic,
traits::{BlakeTwo256, IdentifyAccount, Verify},
MultiAddress, MultiSignature, OpaqueExtrinsic, Perbill,
MultiAddress, MultiSignature, OpaqueExtrinsic,
};
use sp_std::prelude::Vec;
// Re-export's to avoid extra substrate dependencies in chain-specific crates.
pub use frame_support::Parameter;
pub use sp_runtime::traits::Convert;
pub use frame_support::{weights::constants::ExtrinsicBaseWeight, Parameter};
pub use sp_runtime::{traits::Convert, Perbill};
/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at
/// Polkadot-like chain. This mostly depends on number of entries in the storage trie.
+18 -14
View File
@@ -29,29 +29,31 @@ pub use storage_proof::{Error as StorageProofError, StorageProofChecker};
#[cfg(feature = "std")]
pub use storage_proof::craft_valid_storage_proof;
pub mod messages;
mod chain;
mod storage_proof;
/// Use this when something must be shared among all instances.
pub const NO_INSTANCE_ID: InstanceId = [0, 0, 0, 0];
pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0];
/// Bridge-with-Rialto instance id.
pub const RIALTO_BRIDGE_INSTANCE: InstanceId = *b"rlto";
pub const RIALTO_CHAIN_ID: ChainId = *b"rlto";
/// Bridge-with-Millau instance id.
pub const MILLAU_BRIDGE_INSTANCE: InstanceId = *b"mlau";
pub const MILLAU_CHAIN_ID: ChainId = *b"mlau";
/// Bridge-with-Polkadot instance id.
pub const POLKADOT_BRIDGE_INSTANCE: InstanceId = *b"pdot";
pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot";
/// Bridge-with-Kusama instance id.
pub const KUSAMA_BRIDGE_INSTANCE: InstanceId = *b"ksma";
pub const KUSAMA_CHAIN_ID: ChainId = *b"ksma";
/// Bridge-with-Rococo instance id.
pub const ROCOCO_BRIDGE_INSTANCE: InstanceId = *b"roco";
pub const ROCOCO_CHAIN_ID: ChainId = *b"roco";
/// Bridge-with-Wococo instance id.
pub const WOCOCO_BRIDGE_INSTANCE: InstanceId = *b"woco";
pub const WOCOCO_CHAIN_ID: ChainId = *b"woco";
/// Call-dispatch module prefix.
pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/dispatch";
@@ -62,11 +64,13 @@ pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/
/// A unique prefix for entropy when generating a cross-chain account ID for the Root account.
pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root";
/// Id of deployed module instance. We have a bunch of pallets that may be used in
/// different bridges. E.g. messages pallet may be deployed twice in the same
/// runtime to bridge ThisChain with Chain1 and Chain2. Sometimes we need to be able
/// to identify deployed instance dynamically. This type is used for that.
pub type InstanceId = [u8; 4];
/// Unique identifier of the chain.
///
/// In addition to its main function (identifying the chain), this type may also be used to
/// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g.
/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and Chain2.
/// Sometimes we need to be able to identify deployed instance dynamically. This type may be used for that.
pub type ChainId = [u8; 4];
/// Type of accounts on the source chain.
pub enum SourceAccount<T> {
@@ -90,7 +94,7 @@ pub enum SourceAccount<T> {
/// Note: If the same `bridge_id` is used across different chains (for example, if one source chain
/// is bridged to multiple target chains), then all the derived accounts would be the same across
/// the different chains. This could negatively impact users' privacy across chains.
pub fn derive_account_id<AccountId>(bridge_id: InstanceId, id: SourceAccount<AccountId>) -> H256
pub fn derive_account_id<AccountId>(bridge_id: ChainId, id: SourceAccount<AccountId>) -> H256
where
AccountId: Encode,
{
@@ -107,7 +111,7 @@ where
///
/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same
/// `bridge_id` is used.
pub fn derive_relayer_fund_account_id(bridge_id: InstanceId) -> H256 {
pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 {
("relayer-fund-account", bridge_id).using_encoded(blake2_256).into()
}
@@ -0,0 +1,56 @@
// Copyright 2019-2021 Parity Technologies (UK) Ltd.
// This file is part of Parity Bridges Common.
// Parity Bridges Common is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Bridges Common is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
//! Primitives that may be used by different message delivery and dispatch mechanisms.
use codec::{Decode, Encode};
use frame_support::{weights::Weight, RuntimeDebug};
/// Where the message dispatch fee is paid.
#[derive(Encode, Decode, RuntimeDebug, Clone, Copy, PartialEq, Eq)]
pub enum DispatchFeePayment {
/// The dispatch fee is paid at the source chain.
AtSourceChain,
/// The dispatch fee is paid at the target chain.
///
/// The fee will be paid right before the message is dispatched. So in case of any other
/// issues (like invalid call encoding, invalid signature, ...) the dispatch module won't
/// do any direct transfers. Instead, it'll return fee related to this message dispatch to the
/// relayer.
AtTargetChain,
}
/// Message dispatch result.
#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq)]
pub struct MessageDispatchResult {
/// Dispatch result flag. This flag is relayed back to the source chain and, generally
/// speaking, may bring any (that fits in single bit) information from the dispatcher at
/// the target chain to the message submitter at the source chain. If you're using immediate
/// call dispatcher, then it'll be result of the dispatch - `true` if dispatch has succeeded
/// and `false` otherwise.
pub dispatch_result: bool,
/// Unspent dispatch weight. This is the weight that will be deducted from the total delivery
/// transaction weight, thus reducing the transaction cost. This shall not be zero in (at least)
/// two cases:
///
/// 1) if message has been dispatched successfully, but post-dispatch weight is less than
/// the weight, declared by the message sender;
/// 2) if message has not been dispatched at all.
pub unspent_weight: Weight,
/// Whether the message dispatch fee has been paid during dispatch. This will be true if your
/// configuration supports pay-dispatch-fee-at-target-chain option and message sender has enabled
/// this option.
pub dispatch_fee_paid_during_dispatch: bool,
}
@@ -7,9 +7,9 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
[dependencies]
bp-header-chain = { path = "../header-chain", default-features = false }
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] }
finality-grandpa = { version = "0.14.1", default-features = false }
parity-scale-codec = { version = "2.0.0", default-features = false }
sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
@@ -19,9 +19,9 @@ sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", d
default = ["std"]
std = [
"bp-header-chain/std",
"codec/std",
"ed25519-dalek/std",
"finality-grandpa/std",
"parity-scale-codec/std",
"sp-application-crypto/std",
"sp-finality-grandpa/std",
"sp-runtime/std",
@@ -16,9 +16,9 @@
//! Utilities for working with test accounts.
use codec::Encode;
use ed25519_dalek::{Keypair, PublicKey, SecretKey, Signature};
use finality_grandpa::voter_set::VoterSet;
use parity_scale_codec::Encode;
use sp_application_crypto::Public;
use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight};
use sp_runtime::RuntimeDebug;

Some files were not shown because too many files have changed in this diff Show More